/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
};

struct shmem_options {
        unsigned long long blocks;
        unsigned long long inodes;
        struct mempolicy *mpol;
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
        int huge;
        int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        unsigned long nr_pages = totalram_pages();

        return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
                             struct page **pagep, enum sgp_type sgp,
                             gfp_t gfp, struct vm_area_struct *vma,
                             vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp,
                gfp_t gfp, struct vm_area_struct *vma,
                struct vm_fault *vmf, vm_fault_t *fault_type);
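
/*
 * Worked example (illustrative note, not part of the original file):
 * with 8 GiB of RAM and 4 KiB pages, totalram_pages() is about 2M, so
 * an unconfigured tmpfs mount defaults to ~1M blocks (4 GiB, half of
 * RAM) and, on a machine without highmem, ~1M inodes.
 */
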
int shmem_getpage(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
                loff_t oldsize, loff_t newsize)
{
        if (!(flags & VM_NORESERVE)) {
                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                        return security_vm_enough_memory_mm(current->mm,
                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
        }
        return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
        if (!(flags & VM_NORESERVE))
                return 0;

        return security_vm_enough_memory_mm(current->mm,
                        pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
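
/*
 * Worked example (illustrative note, not part of the original file):
 * with 4 KiB pages, VM_ACCT(5000) == PAGE_ALIGN(5000) >> PAGE_SHIFT
 * == 8192 >> 12 == 2, so a 5000-byte object is charged two pages
 * against the overcommit limit by shmem_acct_size().
 */
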
static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (shmem_acct_block(info->flags, pages))
                return false;

        if (sbinfo->max_blocks) {
                if (percpu_counter_compare(&sbinfo->used_blocks,
                                        sbinfo->max_blocks - pages) > 0)
                        goto unacct;
                percpu_counter_add(&sbinfo->used_blocks, pages);
        }

        return true;

unacct:
        shmem_unacct_blocks(info->flags, pages);
        return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
        shmem_unacct_blocks(info->flags, pages);
}
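
/*
 * Illustrative sketch (not part of the original file; shmem_alloc_page
 * here stands for the allocation helpers later in this file): every
 * successful shmem_inode_acct_block() must be balanced by
 * shmem_inode_unacct_blocks() if the subsequent allocation fails:
 *
 *	if (!shmem_inode_acct_block(inode, 1))
 *		return -ENOSPC;
 *	page = shmem_alloc_page(gfp, info, index);
 *	if (!page)
 *		shmem_inode_unacct_blocks(inode, 1);
 */
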
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
        return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_inode_unacct_blocks(inode, freed);
        }
}
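
/*
 * Worked example (illustrative note, not part of the original file):
 * if info->alloced == 8, info->swapped == 2 and nrpages == 5, then one
 * undirtied hole page was dropped behind our back: freed == 8 - 2 - 5
 * == 1, so alloced becomes 7 and one page's worth of blocks is
 * returned via shmem_inode_unacct_blocks().
 */
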
bool shmem_charge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;

        if (!shmem_inode_acct_block(inode, pages))
                return false;

        /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
        inode->i_mapping->nrpages += pages;

        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;

        /* nrpages adjustment done by __delete_from_page_cache() or caller */

        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        XA_STATE(xas, &mapping->i_pages, index);
        void *item;

        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
        item = xas_load(&xas);
        if (item != expected)
                return -ENOENT;
        xas_store(&xas, replacement);
        return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
                               pgoff_t index, swp_entry_t swap)
{
        return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}
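
/*
 * Illustrative sketch (not part of the original file): swap slots live
 * in the page cache xarray as value entries, so a lookup can tell them
 * apart from real pages:
 *
 *	void *entry = xa_load(&mapping->i_pages, index);
 *	swp_entry_t swap;
 *
 *	if (xa_is_value(entry))
 *		swap = radix_to_swp_entry(entry);
 *
 * radix_to_swp_entry() and swp_to_radix_entry() are exact inverses,
 * which is what makes the comparison in shmem_confirm_swap() valid.
 */
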
/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
        if (!strcmp(str, "never"))
                return SHMEM_HUGE_NEVER;
        if (!strcmp(str, "always"))
                return SHMEM_HUGE_ALWAYS;
        if (!strcmp(str, "within_size"))
                return SHMEM_HUGE_WITHIN_SIZE;
        if (!strcmp(str, "advise"))
                return SHMEM_HUGE_ADVISE;
        if (!strcmp(str, "deny"))
                return SHMEM_HUGE_DENY;
        if (!strcmp(str, "force"))
                return SHMEM_HUGE_FORCE;
        return -EINVAL;
}
#endif
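
/*
 * Usage example (illustrative note, not part of the original file):
 *
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 *	echo advise > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * The mount option selects one of the SHMEM_HUGE_* values above for a
 * single mount; the sysfs knob additionally accepts "deny" and "force",
 * which override all mounts.
 */
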
#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
        switch (huge) {
        case SHMEM_HUGE_NEVER:
                return "never";
        case SHMEM_HUGE_ALWAYS:
                return "always";
        case SHMEM_HUGE_WITHIN_SIZE:
                return "within_size";
        case SHMEM_HUGE_ADVISE:
                return "advise";
        case SHMEM_HUGE_DENY:
                return "deny";
        case SHMEM_HUGE_FORCE:
                return "force";
        default:
                VM_BUG_ON(1);
                return "bad_val";
        }
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        LIST_HEAD(list), *pos, *next;
        LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
        unsigned long batch = sc ? sc->nr_to_scan : 128;
        int removed = 0, split = 0;

        if (list_empty(&sbinfo->shrinklist))
                return SHRINK_STOP;

        spin_lock(&sbinfo->shrinklist_lock);
        list_for_each_safe(pos, next, &sbinfo->shrinklist) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);

                /* pin the inode */
                inode = igrab(&info->vfs_inode);

                /* inode is about to be evicted */
                if (!inode) {
                        list_del_init(&info->shrinklist);
                        removed++;
                        goto next;
                }

                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
                        list_move(&info->shrinklist, &to_remove);
                        removed++;
                        goto next;
                }

                list_move(&info->shrinklist, &list);
next:
                if (!--batch)
                        break;
        }
        spin_unlock(&sbinfo->shrinklist_lock);

        list_for_each_safe(pos, next, &to_remove) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;
                list_del_init(&info->shrinklist);
                iput(inode);
        }

        list_for_each_safe(pos, next, &list) {
                int ret;

                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;

                if (nr_to_split && split >= nr_to_split)
                        goto leave;

                page = find_get_page(inode->i_mapping,
                                (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                if (!page)
                        goto drop;

                /* No huge page at the end of the file: nothing to split */
                if (!PageTransHuge(page)) {
                        put_page(page);
                        goto drop;
                }

                /*
                 * Leave the inode on the list if we failed to lock
                 * the page at this time.
                 *
                 * Waiting for the lock may lead to deadlock in the
                 * reclaim path.
                 */
                if (!trylock_page(page)) {
                        put_page(page);
                        goto leave;
                }

                ret = split_huge_page(page);
                unlock_page(page);
                put_page(page);

                /* If split failed leave the inode on the list */
                if (ret)
                        goto leave;

                split++;
drop:
                list_del_init(&info->shrinklist);
                removed++;
leave:
                iput(inode);
        }

        spin_lock(&sbinfo->shrinklist_lock);
        list_splice_tail(&list, &sbinfo->shrinklist);
        sbinfo->shrinklist_len -= removed;
        spin_unlock(&sbinfo->shrinklist_lock);

        return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (!READ_ONCE(sbinfo->shrinklist_len))
                return SHRINK_STOP;

        return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
            (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
            shmem_huge != SHMEM_HUGE_DENY)
                return true;
        return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected, gfp_t gfp)
{
        XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
        unsigned long i = 0;
        unsigned long nr = 1UL << compound_order(page);

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
        VM_BUG_ON(expected && PageTransHuge(page));

        page_ref_add(page, nr);
        page->mapping = mapping;
        page->index = index;

        do {
                void *entry;
                xas_lock_irq(&xas);
                entry = xas_find_conflict(&xas);
                if (entry != expected)
                        xas_set_err(&xas, -EEXIST);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
next:
                xas_store(&xas, page + i);
                if (++i < nr) {
                        xas_next(&xas);
                        goto next;
                }
                if (PageTransHuge(page)) {
                        count_vm_event(THP_FILE_ALLOC);
                        __inc_node_page_state(page, NR_SHMEM_THPS);
                }
                mapping->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (xas_error(&xas)) {
                page->mapping = NULL;
                page_ref_sub(page, nr);
                return xas_error(&xas);
        }

        return 0;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
        struct address_space *mapping = page->mapping;
        int error;

        VM_BUG_ON_PAGE(PageCompound(page), page);

        xa_lock_irq(&mapping->i_pages);
        error = shmem_replace_entry(mapping, page->index, page, radswap);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_node_page_state(page, NR_FILE_PAGES);
        __dec_node_page_state(page, NR_SHMEM);
        xa_unlock_irq(&mapping->i_pages);
        put_page(page);
        BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        void *old;

        old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
        return 0;
}
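
/*
 * Illustrative sketch (not part of the original file): xa_cmpxchg_irq()
 * makes "remove the entry only if it is still this swap entry" a single
 * atomic step:
 *
 *	old = xa_cmpxchg_irq(&mapping->i_pages, index, entry, NULL, 0);
 *	if (old != entry)
 *		return -ENOENT;	(lost a race: page was swapped back in)
 *
 * so shmem_free_swap() never frees swap that a racing thread has
 * already replaced with a real page.
 */
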
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                                                pgoff_t start, pgoff_t end)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
        unsigned long swapped = 0;

        rcu_read_lock();
        xas_for_each(&xas, page, end - 1) {
                if (xas_retry(&xas, page))
                        continue;
                if (xa_is_value(page))
                        swapped++;

                if (need_resched()) {
                        xas_pause(&xas);
                        cond_resched_rcu();
                }
        }

        rcu_read_unlock();

        return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long swapped;

        /* Be careful as we don't hold info->lock */
        swapped = READ_ONCE(info->swapped);

        /*
         * The easier cases are when the shmem object has nothing in swap, or
         * the vma maps it whole. Then we can simply use the stats that we
         * already track.
         */
        if (!swapped)
                return 0;

        if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
                return swapped << PAGE_SHIFT;

        /* Here comes the more involved part */
        return shmem_partial_swap_usage(mapping,
                        linear_page_index(vma, vma->vm_start),
                        linear_page_index(vma, vma->vm_end));
}
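
/*
 * Worked example (illustrative note, not part of the original file): a
 * vma mapping the whole object (vm_pgoff == 0, length >= i_size)
 * reports READ_ONCE(info->swapped) << PAGE_SHIFT directly; a vma
 * covering only pages 4-7 of the object walks just that index range
 * and might report, say, two swapped pages as 8192 bytes with 4 KiB
 * pages.
 */
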
/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = 0;

        pagevec_init(&pvec);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping)) {
                /*
                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                 */
                pvec.nr = find_get_entries(mapping, index,
                                           PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr)
                        break;
                index = indices[pvec.nr - 1] + 1;
                pagevec_remove_exceptionals(&pvec);
                check_move_unevictable_pages(&pvec);
                pagevec_release(&pvec);
                cond_resched();
        }
}
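
/*
 * Worked example (illustrative note, not part of the original file) for
 * the offset arithmetic in shmem_undo_range() below: punching a hole
 * with lstart == 1000 and lend == 9191 on 4 KiB pages gives start == 1,
 * end == 2, partial_start == 1000 and partial_end == 1000, so page 1 is
 * truncated whole while pages 0 and 2 are only zeroed over the byte
 * ranges that overlap the hole.
 */
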
/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                                                 bool unfalloc)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
        unsigned int partial_start = lstart & (PAGE_SIZE - 1);
        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        if (lend == -1)
                end = -1;       /* unsigned, so actually very big */

        pagevec_init(&pvec);
        index = start;
        while (index < end) {
                pvec.nr = find_get_entries(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (xa_is_value(page)) {
                                if (unfalloc)
                                        continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

                        if (!trylock_page(page))
                                continue;

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
                        }
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = NULL;
                shmem_getpage(inode, end, &page, SGP_READ);
                if (page) {
                        zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (start >= end)
                return;

        index = start;
        while (index < end) {
                cond_resched();

                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                                break;
                        /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (xa_is_value(page)) {
                                if (unfalloc)
                                        continue;
                                if (shmem_free_swap(mapping, index, page)) {
                                        /* Swap was replaced by page: retry */
                                        index--;
                                        break;
                                }
                                nr_swaps_freed++;
                                continue;
                        }

                        lock_page(page);

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                /*
                                 * Partial thp truncate due to 'start' in
                                 * middle of THP: don't need to look on these
                                 * pages again on !pvec.nr restart.
                                 */
                                if (index != round_down(end, HPAGE_PMD_NR))
                                        start++;
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                } else {
                                        /* Page was replaced by swap: retry */
                                        unlock_page(page);
                                        index--;
                                        break;
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                index++;
        }

        spin_lock_irq(&info->lock);
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        shmem_undo_range(inode, lstart, lend, false);
        inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
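
/*
 * Usage sketch (illustrative note, not part of the original file): a
 * hole punch from userspace,
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * ends up here as shmem_truncate_range(inode, offset, offset + len - 1);
 * passing lend == -1 instead means "truncate to EOF".
 */
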
static int shmem_getattr(const struct path *path, struct kstat *stat,
                         u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = path->dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);

        if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                spin_unlock_irq(&info->lock);
        }
        generic_fillattr(inode, stat);

        if (is_huge_enabled(sb_info))
                stat->blksize = HPAGE_PMD_SIZE;

        return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        int error;

        error = setattr_prepare(dentry, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                /* protected by i_mutex */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;

                if (newsize != oldsize) {
                        error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                        oldsize, newsize);
                        if (error)
                                return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = current_time(inode);
                }
                if (newsize <= oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);
                        if (info->alloced)
                                shmem_truncate_range(inode,
                                                        newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);

                        /*
                         * Part of the huge page can be beyond i_size: subject
                         * to shrink under memory pressure.
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
                                /*
                                 * _careful to defend against unlocked access to
                                 * ->shrink_list in shmem_unused_huge_shrink()
                                 */
                                if (list_empty_careful(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
                                }
                                spin_unlock(&sbinfo->shrinklist_lock);
                        }
                }
        }

        setattr_copy(inode, attr);
        if (attr->ia_valid & ATTR_MODE)
                error = posix_acl_chmod(inode, inode->i_mode);
        return error;
}
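
/*
 * Usage example (illustrative note, not part of the original file): the
 * seal check in shmem_setattr() is what makes memfd sealing effective
 * against resizing:
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK);
 *	ftruncate(fd, 0);	now fails with EPERM
 */
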
static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->shrinklist)) {
                        spin_lock(&sbinfo->shrinklist_lock);
                        if (!list_empty(&info->shrinklist)) {
                                list_del_init(&info->shrinklist);
                                sbinfo->shrinklist_len--;
                        }
                        spin_unlock(&sbinfo->shrinklist_lock);
                }
                while (!list_empty(&info->swaplist)) {
                        /* Wait while shmem_unuse() is scanning this inode... */
                        wait_var_event(&info->stop_eviction,
                                       !atomic_read(&info->stop_eviction));
                        mutex_lock(&shmem_swaplist_mutex);
                        /* ...but beware of the race if we peeked too early */
                        if (!atomic_read(&info->stop_eviction))
                                list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }

        simple_xattrs_free(&info->xattrs);
        WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}

extern struct swap_info_struct *swap_info[];

static int shmem_find_swap_entries(struct address_space *mapping,
                                   pgoff_t start, unsigned int nr_entries,
                                   struct page **entries, pgoff_t *indices,
                                   unsigned int type, bool frontswap)
{
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
        swp_entry_t entry;
        unsigned int ret = 0;

        if (!nr_entries)
                return 0;

        rcu_read_lock();
        xas_for_each(&xas, page, ULONG_MAX) {
                if (xas_retry(&xas, page))
                        continue;
1138b56a2d8aSVineeth Remanan Pillai 1139b56a2d8aSVineeth Remanan Pillai if (!xa_is_value(page)) 1140478922e2SMatthew Wilcox continue; 1141b56a2d8aSVineeth Remanan Pillai 114287039546SHugh Dickins entry = radix_to_swp_entry(page); 114387039546SHugh Dickins if (swp_type(entry) != type) 1144b56a2d8aSVineeth Remanan Pillai continue; 114587039546SHugh Dickins if (frontswap && 114687039546SHugh Dickins !frontswap_test(swap_info[type], swp_offset(entry))) 114787039546SHugh Dickins continue; 1148b56a2d8aSVineeth Remanan Pillai 1149b56a2d8aSVineeth Remanan Pillai indices[ret] = xas.xa_index; 1150b56a2d8aSVineeth Remanan Pillai entries[ret] = page; 1151b56a2d8aSVineeth Remanan Pillai 1152b56a2d8aSVineeth Remanan Pillai if (need_resched()) { 1153e21a2955SMatthew Wilcox xas_pause(&xas); 1154478922e2SMatthew Wilcox cond_resched_rcu(); 1155478922e2SMatthew Wilcox } 1156b56a2d8aSVineeth Remanan Pillai if (++ret == nr_entries) 1157b56a2d8aSVineeth Remanan Pillai break; 1158b56a2d8aSVineeth Remanan Pillai } 1159478922e2SMatthew Wilcox rcu_read_unlock(); 1160e21a2955SMatthew Wilcox 1161b56a2d8aSVineeth Remanan Pillai return ret; 1162b56a2d8aSVineeth Remanan Pillai } 1163b56a2d8aSVineeth Remanan Pillai 1164b56a2d8aSVineeth Remanan Pillai /* 1165b56a2d8aSVineeth Remanan Pillai * Move the swapped pages for an inode to page cache. Returns the count 1166b56a2d8aSVineeth Remanan Pillai * of pages swapped in, or the error in case of failure. 1167b56a2d8aSVineeth Remanan Pillai */ 1168b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec, 1169b56a2d8aSVineeth Remanan Pillai pgoff_t *indices) 1170b56a2d8aSVineeth Remanan Pillai { 1171b56a2d8aSVineeth Remanan Pillai int i = 0; 1172b56a2d8aSVineeth Remanan Pillai int ret = 0; 1173b56a2d8aSVineeth Remanan Pillai int error = 0; 1174b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1175b56a2d8aSVineeth Remanan Pillai 1176b56a2d8aSVineeth Remanan Pillai for (i = 0; i < pvec.nr; i++) { 1177b56a2d8aSVineeth Remanan Pillai struct page *page = pvec.pages[i]; 1178b56a2d8aSVineeth Remanan Pillai 1179b56a2d8aSVineeth Remanan Pillai if (!xa_is_value(page)) 1180b56a2d8aSVineeth Remanan Pillai continue; 1181b56a2d8aSVineeth Remanan Pillai error = shmem_swapin_page(inode, indices[i], 1182b56a2d8aSVineeth Remanan Pillai &page, SGP_CACHE, 1183b56a2d8aSVineeth Remanan Pillai mapping_gfp_mask(mapping), 1184b56a2d8aSVineeth Remanan Pillai NULL, NULL); 1185b56a2d8aSVineeth Remanan Pillai if (error == 0) { 1186b56a2d8aSVineeth Remanan Pillai unlock_page(page); 1187b56a2d8aSVineeth Remanan Pillai put_page(page); 1188b56a2d8aSVineeth Remanan Pillai ret++; 1189b56a2d8aSVineeth Remanan Pillai } 1190b56a2d8aSVineeth Remanan Pillai if (error == -ENOMEM) 1191b56a2d8aSVineeth Remanan Pillai break; 1192b56a2d8aSVineeth Remanan Pillai error = 0; 1193b56a2d8aSVineeth Remanan Pillai } 1194b56a2d8aSVineeth Remanan Pillai return error ? error : ret; 1195478922e2SMatthew Wilcox } 1196478922e2SMatthew Wilcox 119746f65ec1SHugh Dickins /* 119846f65ec1SHugh Dickins * If swap found in inode, free it and move page from swapcache to filecache. 
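 *
 * This is the per-inode worker for swapoff(2); the call chain, in
 * sketch form, is:
 *
 *	swapoff(2) -> try_to_unuse() -> shmem_unuse()
 *		   -> shmem_unuse_inode() [this function]
 *
 * shmem_unuse() holds info->stop_eviction raised across this call, so
 * the inode cannot be evicted while we scan it.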
119946f65ec1SHugh Dickins */ 1200b56a2d8aSVineeth Remanan Pillai static int shmem_unuse_inode(struct inode *inode, unsigned int type, 1201b56a2d8aSVineeth Remanan Pillai bool frontswap, unsigned long *fs_pages_to_unuse) 12021da177e4SLinus Torvalds { 1203b56a2d8aSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1204b56a2d8aSVineeth Remanan Pillai pgoff_t start = 0; 1205b56a2d8aSVineeth Remanan Pillai struct pagevec pvec; 1206b56a2d8aSVineeth Remanan Pillai pgoff_t indices[PAGEVEC_SIZE]; 1207b56a2d8aSVineeth Remanan Pillai bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0); 1208b56a2d8aSVineeth Remanan Pillai int ret = 0; 12091da177e4SLinus Torvalds 1210b56a2d8aSVineeth Remanan Pillai pagevec_init(&pvec); 1211b56a2d8aSVineeth Remanan Pillai do { 1212b56a2d8aSVineeth Remanan Pillai unsigned int nr_entries = PAGEVEC_SIZE; 12132e0e26c7SHugh Dickins 1214b56a2d8aSVineeth Remanan Pillai if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE) 1215b56a2d8aSVineeth Remanan Pillai nr_entries = *fs_pages_to_unuse; 12162e0e26c7SHugh Dickins 1217b56a2d8aSVineeth Remanan Pillai pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, 1218b56a2d8aSVineeth Remanan Pillai pvec.pages, indices, 121987039546SHugh Dickins type, frontswap); 1220b56a2d8aSVineeth Remanan Pillai if (pvec.nr == 0) { 1221b56a2d8aSVineeth Remanan Pillai ret = 0; 1222778dd893SHugh Dickins break; 1223b56a2d8aSVineeth Remanan Pillai } 1224b56a2d8aSVineeth Remanan Pillai 1225b56a2d8aSVineeth Remanan Pillai ret = shmem_unuse_swap_entries(inode, pvec, indices); 1226b56a2d8aSVineeth Remanan Pillai if (ret < 0) 1227b56a2d8aSVineeth Remanan Pillai break; 1228b56a2d8aSVineeth Remanan Pillai 1229b56a2d8aSVineeth Remanan Pillai if (frontswap_partial) { 1230b56a2d8aSVineeth Remanan Pillai *fs_pages_to_unuse -= ret; 1231b56a2d8aSVineeth Remanan Pillai if (*fs_pages_to_unuse == 0) { 1232b56a2d8aSVineeth Remanan Pillai ret = FRONTSWAP_PAGES_UNUSED; 1233b56a2d8aSVineeth Remanan Pillai break; 1234b56a2d8aSVineeth Remanan Pillai } 1235b56a2d8aSVineeth Remanan Pillai } 1236b56a2d8aSVineeth Remanan Pillai 1237b56a2d8aSVineeth Remanan Pillai start = indices[pvec.nr - 1]; 1238b56a2d8aSVineeth Remanan Pillai } while (true); 1239b56a2d8aSVineeth Remanan Pillai 1240b56a2d8aSVineeth Remanan Pillai return ret; 1241b56a2d8aSVineeth Remanan Pillai } 1242b56a2d8aSVineeth Remanan Pillai 1243b56a2d8aSVineeth Remanan Pillai /* 1244b56a2d8aSVineeth Remanan Pillai * Read all the shared memory data that resides in the swap 1245b56a2d8aSVineeth Remanan Pillai * device 'type' back into memory, so the swap device can be 1246b56a2d8aSVineeth Remanan Pillai * unused. 
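 *
 * Triggered from userspace by swapoff(2); a minimal sketch (the
 * device path is illustrative):
 *
 *	#include <sys/swap.h>
 *
 *	if (swapoff("/dev/sdb2") != 0)
 *		perror("swapoff");
 *
 * (With frontswap, *fs_pages_to_unuse caps how many pages are brought
 * back in one call.)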
1247b56a2d8aSVineeth Remanan Pillai */ 1248b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap, 1249b56a2d8aSVineeth Remanan Pillai unsigned long *fs_pages_to_unuse) 1250b56a2d8aSVineeth Remanan Pillai { 1251b56a2d8aSVineeth Remanan Pillai struct shmem_inode_info *info, *next; 1252b56a2d8aSVineeth Remanan Pillai int error = 0; 1253b56a2d8aSVineeth Remanan Pillai 1254b56a2d8aSVineeth Remanan Pillai if (list_empty(&shmem_swaplist)) 1255b56a2d8aSVineeth Remanan Pillai return 0; 1256b56a2d8aSVineeth Remanan Pillai 1257b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1258b56a2d8aSVineeth Remanan Pillai list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1259b56a2d8aSVineeth Remanan Pillai if (!info->swapped) { 1260b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1261b56a2d8aSVineeth Remanan Pillai continue; 1262b56a2d8aSVineeth Remanan Pillai } 1263af53d3e9SHugh Dickins /* 1264af53d3e9SHugh Dickins * Drop the swaplist mutex while searching the inode for swap; 1265af53d3e9SHugh Dickins * but before doing so, make sure shmem_evict_inode() will not 1266af53d3e9SHugh Dickins * remove placeholder inode from swaplist, nor let it be freed 1267af53d3e9SHugh Dickins * (igrab() would protect from unlink, but not from unmount). 1268af53d3e9SHugh Dickins */ 1269af53d3e9SHugh Dickins atomic_inc(&info->stop_eviction); 1270b56a2d8aSVineeth Remanan Pillai mutex_unlock(&shmem_swaplist_mutex); 1271b56a2d8aSVineeth Remanan Pillai 1272af53d3e9SHugh Dickins error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, 1273b56a2d8aSVineeth Remanan Pillai fs_pages_to_unuse); 1274b56a2d8aSVineeth Remanan Pillai cond_resched(); 1275b56a2d8aSVineeth Remanan Pillai 1276b56a2d8aSVineeth Remanan Pillai mutex_lock(&shmem_swaplist_mutex); 1277b56a2d8aSVineeth Remanan Pillai next = list_next_entry(info, swaplist); 1278b56a2d8aSVineeth Remanan Pillai if (!info->swapped) 1279b56a2d8aSVineeth Remanan Pillai list_del_init(&info->swaplist); 1280af53d3e9SHugh Dickins if (atomic_dec_and_test(&info->stop_eviction)) 1281af53d3e9SHugh Dickins wake_up_var(&info->stop_eviction); 1282b56a2d8aSVineeth Remanan Pillai if (error) 1283b56a2d8aSVineeth Remanan Pillai break; 12841da177e4SLinus Torvalds } 1285cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1286778dd893SHugh Dickins 1287778dd893SHugh Dickins return error; 12881da177e4SLinus Torvalds } 12891da177e4SLinus Torvalds 12901da177e4SLinus Torvalds /* 12911da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 12921da177e4SLinus Torvalds */ 12931da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 12941da177e4SLinus Torvalds { 12951da177e4SLinus Torvalds struct shmem_inode_info *info; 12961da177e4SLinus Torvalds struct address_space *mapping; 12971da177e4SLinus Torvalds struct inode *inode; 12986922c0c7SHugh Dickins swp_entry_t swap; 12996922c0c7SHugh Dickins pgoff_t index; 13001da177e4SLinus Torvalds 1301800d8c63SKirill A. 
Shutemov VM_BUG_ON_PAGE(PageCompound(page), page); 13021da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 13031da177e4SLinus Torvalds mapping = page->mapping; 13041da177e4SLinus Torvalds index = page->index; 13051da177e4SLinus Torvalds inode = mapping->host; 13061da177e4SLinus Torvalds info = SHMEM_I(inode); 13071da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 13081da177e4SLinus Torvalds goto redirty; 1309d9fe526aSHugh Dickins if (!total_swap_pages) 13101da177e4SLinus Torvalds goto redirty; 13111da177e4SLinus Torvalds 1312d9fe526aSHugh Dickins /* 131397b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 131497b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 131597b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 131697b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 131797b713baSChristoph Hellwig * threads or sync. 1318d9fe526aSHugh Dickins */ 131948f170fbSHugh Dickins if (!wbc->for_reclaim) { 132048f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 132148f170fbSHugh Dickins goto redirty; 132248f170fbSHugh Dickins } 13231635f6a7SHugh Dickins 13241635f6a7SHugh Dickins /* 13251635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 13261635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 13271635f6a7SHugh Dickins * fallocated page arriving here is now to initialize it and write it. 13281aac1400SHugh Dickins * 13291aac1400SHugh Dickins * That's okay for a page already fallocated earlier, but if we have 13301aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 13311aac1400SHugh Dickins * of this page in case we have to undo it, and (b) it may not be a 13321aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 13331aac1400SHugh Dickins * reactivate the page, and let shmem_fallocate() quit when too many. 13341635f6a7SHugh Dickins */ 13351635f6a7SHugh Dickins if (!PageUptodate(page)) { 13361aac1400SHugh Dickins if (inode->i_private) { 13371aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 13381aac1400SHugh Dickins spin_lock(&inode->i_lock); 13391aac1400SHugh Dickins shmem_falloc = inode->i_private; 13401aac1400SHugh Dickins if (shmem_falloc && 13418e205f77SHugh Dickins !shmem_falloc->waitq && 13421aac1400SHugh Dickins index >= shmem_falloc->start && 13431aac1400SHugh Dickins index < shmem_falloc->next) 13441aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 13451aac1400SHugh Dickins else 13461aac1400SHugh Dickins shmem_falloc = NULL; 13471aac1400SHugh Dickins spin_unlock(&inode->i_lock); 13481aac1400SHugh Dickins if (shmem_falloc) 13491aac1400SHugh Dickins goto redirty; 13501aac1400SHugh Dickins } 13511635f6a7SHugh Dickins clear_highpage(page); 13521635f6a7SHugh Dickins flush_dcache_page(page); 13531635f6a7SHugh Dickins SetPageUptodate(page); 13541635f6a7SHugh Dickins } 13551635f6a7SHugh Dickins 135638d8b4e6SHuang Ying swap = get_swap_page(page); 135748f170fbSHugh Dickins if (!swap.val) 135848f170fbSHugh Dickins goto redirty; 1359d9fe526aSHugh Dickins 1360b1dea800SHugh Dickins /* 1361b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 13626922c0c7SHugh Dickins * if it's not already there. 
Do it now before the page is 13636922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1364b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 13656922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 13666922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1367b1dea800SHugh Dickins */ 1368b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 136905bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 1370b56a2d8aSVineeth Remanan Pillai list_add(&info->swaplist, &shmem_swaplist); 1371b1dea800SHugh Dickins 137248f170fbSHugh Dickins if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 13734595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1374267a4c76SHugh Dickins shmem_recalc_inode(inode); 1375267a4c76SHugh Dickins info->swapped++; 13764595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1377267a4c76SHugh Dickins 1378aaa46865SHugh Dickins swap_shmem_alloc(swap); 13796922c0c7SHugh Dickins shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 13806922c0c7SHugh Dickins 13816922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1382d9fe526aSHugh Dickins BUG_ON(page_mapped(page)); 13839fab5619SHugh Dickins swap_writepage(page, wbc); 13841da177e4SLinus Torvalds return 0; 13851da177e4SLinus Torvalds } 13861da177e4SLinus Torvalds 13876922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 138875f6d6d2SMinchan Kim put_swap_page(page, swap); 13891da177e4SLinus Torvalds redirty: 13901da177e4SLinus Torvalds set_page_dirty(page); 1391d9fe526aSHugh Dickins if (wbc->for_reclaim) 1392d9fe526aSHugh Dickins return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1393d9fe526aSHugh Dickins unlock_page(page); 1394d9fe526aSHugh Dickins return 0; 13951da177e4SLinus Torvalds } 13961da177e4SLinus Torvalds 139775edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 139871fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1399680d794bSakpm@linux-foundation.org { 1400680d794bSakpm@linux-foundation.org char buffer[64]; 1401680d794bSakpm@linux-foundation.org 140271fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1403095f1fc4SLee Schermerhorn return; /* show nothing */ 1404095f1fc4SLee Schermerhorn 1405a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1406095f1fc4SLee Schermerhorn 1407095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1408680d794bSakpm@linux-foundation.org } 140971fe804bSLee Schermerhorn 141071fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 141171fe804bSLee Schermerhorn { 141271fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 141371fe804bSLee Schermerhorn if (sbinfo->mpol) { 141471fe804bSLee Schermerhorn spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 141571fe804bSLee Schermerhorn mpol = sbinfo->mpol; 141671fe804bSLee Schermerhorn mpol_get(mpol); 141771fe804bSLee Schermerhorn spin_unlock(&sbinfo->stat_lock); 141871fe804bSLee Schermerhorn } 141971fe804bSLee Schermerhorn return mpol; 142071fe804bSLee Schermerhorn } 142175edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 142275edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 142375edd345SHugh Dickins { 142475edd345SHugh Dickins } 142575edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 142675edd345SHugh Dickins { 
142775edd345SHugh Dickins return NULL; 142875edd345SHugh Dickins } 142975edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 143075edd345SHugh Dickins #ifndef CONFIG_NUMA 143175edd345SHugh Dickins #define vm_policy vm_private_data 143275edd345SHugh Dickins #endif 1433680d794bSakpm@linux-foundation.org 1434800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1435800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1436800d8c63SKirill A. Shutemov { 1437800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 14382c4541e2SKirill A. Shutemov vma_init(vma, NULL); 1439800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1440800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1441800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1442800d8c63SKirill A. Shutemov } 1443800d8c63SKirill A. Shutemov 1444800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1445800d8c63SKirill A. Shutemov { 1446800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1447800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1448800d8c63SKirill A. Shutemov } 1449800d8c63SKirill A. Shutemov 145041ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 145141ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 14521da177e4SLinus Torvalds { 14531da177e4SLinus Torvalds struct vm_area_struct pvma; 145418a2f371SMel Gorman struct page *page; 1455e9e9b7ecSMinchan Kim struct vm_fault vmf; 14561da177e4SLinus Torvalds 1457800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1458e9e9b7ecSMinchan Kim vmf.vma = &pvma; 1459e9e9b7ecSMinchan Kim vmf.address = 0; 1460e9e9b7ecSMinchan Kim page = swap_cluster_readahead(swap, gfp, &vmf); 1461800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 146218a2f371SMel Gorman 1463800d8c63SKirill A. Shutemov return page; 1464800d8c63SKirill A. Shutemov } 146518a2f371SMel Gorman 1466800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp, 1467800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1468800d8c63SKirill A. Shutemov { 1469800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 14707b8d046fSMatthew Wilcox struct address_space *mapping = info->vfs_inode.i_mapping; 14717b8d046fSMatthew Wilcox pgoff_t hindex; 1472800d8c63SKirill A. Shutemov struct page *page; 1473800d8c63SKirill A. Shutemov 1474e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1475800d8c63SKirill A. Shutemov return NULL; 1476800d8c63SKirill A. Shutemov 14774620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 14787b8d046fSMatthew Wilcox if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 14797b8d046fSMatthew Wilcox XA_PRESENT)) 1480800d8c63SKirill A. Shutemov return NULL; 1481800d8c63SKirill A. Shutemov 1482800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1483800d8c63SKirill A. Shutemov page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 148492717d42SAndrea Arcangeli HPAGE_PMD_ORDER, &pvma, 0, numa_node_id()); 1485800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1486800d8c63SKirill A. Shutemov if (page) 1487800d8c63SKirill A. 
Shutemov prep_transhuge_page(page); 148818a2f371SMel Gorman return page; 148918a2f371SMel Gorman } 149018a2f371SMel Gorman 149118a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp, 149218a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 149318a2f371SMel Gorman { 149418a2f371SMel Gorman struct vm_area_struct pvma; 149518a2f371SMel Gorman struct page *page; 149618a2f371SMel Gorman 1497800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1498800d8c63SKirill A. Shutemov page = alloc_page_vma(gfp, &pvma, 0); 1499800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 150018a2f371SMel Gorman 1501800d8c63SKirill A. Shutemov return page; 1502800d8c63SKirill A. Shutemov } 1503800d8c63SKirill A. Shutemov 1504800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 15050f079694SMike Rapoport struct inode *inode, 1506800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1507800d8c63SKirill A. Shutemov { 15080f079694SMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 1509800d8c63SKirill A. Shutemov struct page *page; 1510800d8c63SKirill A. Shutemov int nr; 1511800d8c63SKirill A. Shutemov int err = -ENOSPC; 1512800d8c63SKirill A. Shutemov 1513e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1514800d8c63SKirill A. Shutemov huge = false; 1515800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1516800d8c63SKirill A. Shutemov 15170f079694SMike Rapoport if (!shmem_inode_acct_block(inode, nr)) 1518800d8c63SKirill A. Shutemov goto failed; 1519800d8c63SKirill A. Shutemov 1520800d8c63SKirill A. Shutemov if (huge) 1521800d8c63SKirill A. Shutemov page = shmem_alloc_hugepage(gfp, info, index); 1522800d8c63SKirill A. Shutemov else 1523800d8c63SKirill A. Shutemov page = shmem_alloc_page(gfp, info, index); 152475edd345SHugh Dickins if (page) { 152575edd345SHugh Dickins __SetPageLocked(page); 152675edd345SHugh Dickins __SetPageSwapBacked(page); 1527800d8c63SKirill A. Shutemov return page; 152875edd345SHugh Dickins } 152918a2f371SMel Gorman 1530800d8c63SKirill A. Shutemov err = -ENOMEM; 15310f079694SMike Rapoport shmem_inode_unacct_blocks(inode, nr); 1532800d8c63SKirill A. Shutemov failed: 1533800d8c63SKirill A. Shutemov return ERR_PTR(err); 15341da177e4SLinus Torvalds } 153571fe804bSLee Schermerhorn 15361da177e4SLinus Torvalds /* 1537bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1538bde05d1cSHugh Dickins * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1539bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1540bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1541bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1542bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1543bde05d1cSHugh Dickins * 1544bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1545bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1546bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 
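 *
 * Concretely, the test below is a bare zone comparison. E.g. for a
 * mapping constrained to GFP_DMA32 (the gma500 case above), a page
 * that swapin happened to place in ZONE_NORMAL must be copied, since
 *
 *	page_zonenum(page) > gfp_zone(GFP_DMA32)
 *
 * then holds.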
1547bde05d1cSHugh Dickins */ 1548bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1549bde05d1cSHugh Dickins { 1550bde05d1cSHugh Dickins return page_zonenum(page) > gfp_zone(gfp); 1551bde05d1cSHugh Dickins } 1552bde05d1cSHugh Dickins 1553bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1554bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1555bde05d1cSHugh Dickins { 1556bde05d1cSHugh Dickins struct page *oldpage, *newpage; 1557bde05d1cSHugh Dickins struct address_space *swap_mapping; 1558c1cb20d4SYu Zhao swp_entry_t entry; 1559bde05d1cSHugh Dickins pgoff_t swap_index; 1560bde05d1cSHugh Dickins int error; 1561bde05d1cSHugh Dickins 1562bde05d1cSHugh Dickins oldpage = *pagep; 1563c1cb20d4SYu Zhao entry.val = page_private(oldpage); 1564c1cb20d4SYu Zhao swap_index = swp_offset(entry); 1565bde05d1cSHugh Dickins swap_mapping = page_mapping(oldpage); 1566bde05d1cSHugh Dickins 1567bde05d1cSHugh Dickins /* 1568bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1569bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1570bde05d1cSHugh Dickins */ 1571bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1572bde05d1cSHugh Dickins newpage = shmem_alloc_page(gfp, info, index); 1573bde05d1cSHugh Dickins if (!newpage) 1574bde05d1cSHugh Dickins return -ENOMEM; 1575bde05d1cSHugh Dickins 157609cbfeafSKirill A. Shutemov get_page(newpage); 1577bde05d1cSHugh Dickins copy_highpage(newpage, oldpage); 15780142ef6cSHugh Dickins flush_dcache_page(newpage); 1579bde05d1cSHugh Dickins 15809956edf3SHugh Dickins __SetPageLocked(newpage); 15819956edf3SHugh Dickins __SetPageSwapBacked(newpage); 1582bde05d1cSHugh Dickins SetPageUptodate(newpage); 1583c1cb20d4SYu Zhao set_page_private(newpage, entry.val); 1584bde05d1cSHugh Dickins SetPageSwapCache(newpage); 1585bde05d1cSHugh Dickins 1586bde05d1cSHugh Dickins /* 1587bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1588bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 1589bde05d1cSHugh Dickins */ 1590b93b0163SMatthew Wilcox xa_lock_irq(&swap_mapping->i_pages); 159162f945b6SMatthew Wilcox error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage); 15920142ef6cSHugh Dickins if (!error) { 159311fb9989SMel Gorman __inc_node_page_state(newpage, NR_FILE_PAGES); 159411fb9989SMel Gorman __dec_node_page_state(oldpage, NR_FILE_PAGES); 15950142ef6cSHugh Dickins } 1596b93b0163SMatthew Wilcox xa_unlock_irq(&swap_mapping->i_pages); 1597bde05d1cSHugh Dickins 15980142ef6cSHugh Dickins if (unlikely(error)) { 15990142ef6cSHugh Dickins /* 16000142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 16010142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 16020142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 16030142ef6cSHugh Dickins */ 16040142ef6cSHugh Dickins oldpage = newpage; 16050142ef6cSHugh Dickins } else { 16066a93ca8fSJohannes Weiner mem_cgroup_migrate(oldpage, newpage); 1607bde05d1cSHugh Dickins lru_cache_add_anon(newpage); 16080142ef6cSHugh Dickins *pagep = newpage; 16090142ef6cSHugh Dickins } 1610bde05d1cSHugh Dickins 1611bde05d1cSHugh Dickins ClearPageSwapCache(oldpage); 1612bde05d1cSHugh Dickins set_page_private(oldpage, 0); 1613bde05d1cSHugh Dickins 1614bde05d1cSHugh Dickins unlock_page(oldpage); 161509cbfeafSKirill A. 
Shutemov put_page(oldpage); 161609cbfeafSKirill A. Shutemov put_page(oldpage); 16170142ef6cSHugh Dickins return error; 1618bde05d1cSHugh Dickins } 1619bde05d1cSHugh Dickins 1620bde05d1cSHugh Dickins /* 1621c5bf121eSVineeth Remanan Pillai * Swap in the page pointed to by *pagep. 1622c5bf121eSVineeth Remanan Pillai * Caller has to make sure that *pagep contains a valid swapped page. 1623c5bf121eSVineeth Remanan Pillai * Returns 0 and the page in pagep on success. On failure, returns 1624c5bf121eSVineeth Remanan Pillai * the error code and NULL in *pagep. 16251da177e4SLinus Torvalds */ 1626c5bf121eSVineeth Remanan Pillai static int shmem_swapin_page(struct inode *inode, pgoff_t index, 1627c5bf121eSVineeth Remanan Pillai struct page **pagep, enum sgp_type sgp, 1628c5bf121eSVineeth Remanan Pillai gfp_t gfp, struct vm_area_struct *vma, 16292b740303SSouptick Joarder vm_fault_t *fault_type) 16301da177e4SLinus Torvalds { 16311da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 163223f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 1633c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm; 163400501b53SJohannes Weiner struct mem_cgroup *memcg; 163527ab7006SHugh Dickins struct page *page; 16361da177e4SLinus Torvalds swp_entry_t swap; 16371da177e4SLinus Torvalds int error; 16381da177e4SLinus Torvalds 1639c5bf121eSVineeth Remanan Pillai VM_BUG_ON(!*pagep || !xa_is_value(*pagep)); 1640c5bf121eSVineeth Remanan Pillai swap = radix_to_swp_entry(*pagep); 1641c5bf121eSVineeth Remanan Pillai *pagep = NULL; 164254af6042SHugh Dickins 16431da177e4SLinus Torvalds /* Look it up and read it in.. */ 1644ec560175SHuang Ying page = lookup_swap_cache(swap, NULL, 0); 164527ab7006SHugh Dickins if (!page) { 16469e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds??
*/ 16479e18eb29SAndres Lagar-Cavilla if (fault_type) { 164868da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 16499e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 16502262185cSRoman Gushchin count_memcg_event_mm(charge_mm, PGMAJFAULT); 16519e18eb29SAndres Lagar-Cavilla } 16529e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 165341ffe5d5SHugh Dickins page = shmem_swapin(swap, gfp, info, index); 165427ab7006SHugh Dickins if (!page) { 16551da177e4SLinus Torvalds error = -ENOMEM; 165654af6042SHugh Dickins goto failed; 1657285b2c4fSHugh Dickins } 16581da177e4SLinus Torvalds } 16591da177e4SLinus Torvalds 16601da177e4SLinus Torvalds /* We have to do this with page locked to prevent races */ 166154af6042SHugh Dickins lock_page(page); 16620142ef6cSHugh Dickins if (!PageSwapCache(page) || page_private(page) != swap.val || 1663d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1664c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1665d1899228SHugh Dickins goto unlock; 1666bde05d1cSHugh Dickins } 166727ab7006SHugh Dickins if (!PageUptodate(page)) { 16681da177e4SLinus Torvalds error = -EIO; 166954af6042SHugh Dickins goto failed; 167054af6042SHugh Dickins } 167154af6042SHugh Dickins wait_on_page_writeback(page); 167254af6042SHugh Dickins 1673bde05d1cSHugh Dickins if (shmem_should_replace_page(page, gfp)) { 1674bde05d1cSHugh Dickins error = shmem_replace_page(&page, gfp, info, index); 1675bde05d1cSHugh Dickins if (error) 167654af6042SHugh Dickins goto failed; 16771da177e4SLinus Torvalds } 16781da177e4SLinus Torvalds 16792cf85583STejun Heo error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, 1680f627c2f5SKirill A. Shutemov false); 1681d1899228SHugh Dickins if (!error) { 168254af6042SHugh Dickins error = shmem_add_to_page_cache(page, mapping, index, 1683552446a4SMatthew Wilcox swp_to_radix_entry(swap), gfp); 1684215c02bcSHugh Dickins /* 1685215c02bcSHugh Dickins * We already confirmed swap under page lock, and make 1686215c02bcSHugh Dickins * no memory allocation here, so usually no possibility 1687215c02bcSHugh Dickins * of error; but free_swap_and_cache() only trylocks a 1688215c02bcSHugh Dickins * page, so it is just possible that the entry has been 1689215c02bcSHugh Dickins * truncated or holepunched since swap was confirmed. 1690215c02bcSHugh Dickins * shmem_undo_range() will have done some of the 1691215c02bcSHugh Dickins * unaccounting, now delete_from_swap_cache() will do 169293aa7d95SVladimir Davydov * the rest. 1693215c02bcSHugh Dickins */ 169400501b53SJohannes Weiner if (error) { 1695f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 1696215c02bcSHugh Dickins delete_from_swap_cache(page); 1697d1899228SHugh Dickins } 169800501b53SJohannes Weiner } 169954af6042SHugh Dickins if (error) 170054af6042SHugh Dickins goto failed; 170154af6042SHugh Dickins 1702f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 170300501b53SJohannes Weiner 17044595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 170554af6042SHugh Dickins info->swapped--; 170654af6042SHugh Dickins shmem_recalc_inode(inode); 17074595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 170827ab7006SHugh Dickins 170966d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 171066d2f4d2SHugh Dickins mark_page_accessed(page); 171166d2f4d2SHugh Dickins 171227ab7006SHugh Dickins delete_from_swap_cache(page); 171327ab7006SHugh Dickins set_page_dirty(page); 171427ab7006SHugh Dickins swap_free(swap); 171527ab7006SHugh Dickins 1716c5bf121eSVineeth Remanan Pillai *pagep = page; 1717c5bf121eSVineeth Remanan Pillai return 0; 1718c5bf121eSVineeth Remanan Pillai failed: 1719c5bf121eSVineeth Remanan Pillai if (!shmem_confirm_swap(mapping, index, swap)) 1720c5bf121eSVineeth Remanan Pillai error = -EEXIST; 1721c5bf121eSVineeth Remanan Pillai unlock: 1722c5bf121eSVineeth Remanan Pillai if (page) { 1723c5bf121eSVineeth Remanan Pillai unlock_page(page); 1724c5bf121eSVineeth Remanan Pillai put_page(page); 1725c5bf121eSVineeth Remanan Pillai } 1726c5bf121eSVineeth Remanan Pillai 1727c5bf121eSVineeth Remanan Pillai return error; 1728c5bf121eSVineeth Remanan Pillai } 1729c5bf121eSVineeth Remanan Pillai 1730c5bf121eSVineeth Remanan Pillai /* 1731c5bf121eSVineeth Remanan Pillai * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 1732c5bf121eSVineeth Remanan Pillai * 1733c5bf121eSVineeth Remanan Pillai * If we allocate a new one we do not mark it dirty. That's up to the 1734c5bf121eSVineeth Remanan Pillai * vm. If we swap it in we mark it dirty since we also free the swap 1735c5bf121eSVineeth Remanan Pillai * entry since a page cannot live in both the swap and page cache. 1736c5bf121eSVineeth Remanan Pillai * 1737c5bf121eSVineeth Remanan Pillai * fault_mm and fault_type are only supplied by shmem_fault: 1738c5bf121eSVineeth Remanan Pillai * otherwise they are NULL. 1739c5bf121eSVineeth Remanan Pillai */ 1740c5bf121eSVineeth Remanan Pillai static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 1741c5bf121eSVineeth Remanan Pillai struct page **pagep, enum sgp_type sgp, gfp_t gfp, 1742c5bf121eSVineeth Remanan Pillai struct vm_area_struct *vma, struct vm_fault *vmf, 1743c5bf121eSVineeth Remanan Pillai vm_fault_t *fault_type) 1744c5bf121eSVineeth Remanan Pillai { 1745c5bf121eSVineeth Remanan Pillai struct address_space *mapping = inode->i_mapping; 1746c5bf121eSVineeth Remanan Pillai struct shmem_inode_info *info = SHMEM_I(inode); 1747c5bf121eSVineeth Remanan Pillai struct shmem_sb_info *sbinfo; 1748c5bf121eSVineeth Remanan Pillai struct mm_struct *charge_mm; 1749c5bf121eSVineeth Remanan Pillai struct mem_cgroup *memcg; 1750c5bf121eSVineeth Remanan Pillai struct page *page; 1751c5bf121eSVineeth Remanan Pillai enum sgp_type sgp_huge = sgp; 1752c5bf121eSVineeth Remanan Pillai pgoff_t hindex = index; 1753c5bf121eSVineeth Remanan Pillai int error; 1754c5bf121eSVineeth Remanan Pillai int once = 0; 1755c5bf121eSVineeth Remanan Pillai int alloced = 0; 1756c5bf121eSVineeth Remanan Pillai 1757c5bf121eSVineeth Remanan Pillai if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 1758c5bf121eSVineeth Remanan Pillai return -EFBIG; 1759c5bf121eSVineeth Remanan Pillai if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1760c5bf121eSVineeth Remanan Pillai sgp = SGP_CACHE; 1761c5bf121eSVineeth Remanan Pillai repeat: 1762c5bf121eSVineeth Remanan Pillai if (sgp <= SGP_CACHE && 1763c5bf121eSVineeth Remanan Pillai ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1764c5bf121eSVineeth Remanan Pillai return -EINVAL; 1765c5bf121eSVineeth Remanan Pillai } 1766c5bf121eSVineeth Remanan Pillai 1767c5bf121eSVineeth Remanan Pillai sbinfo = SHMEM_SB(inode->i_sb); 1768c5bf121eSVineeth Remanan 
Pillai charge_mm = vma ? vma->vm_mm : current->mm; 1769c5bf121eSVineeth Remanan Pillai 1770c5bf121eSVineeth Remanan Pillai page = find_lock_entry(mapping, index); 1771c5bf121eSVineeth Remanan Pillai if (xa_is_value(page)) { 1772c5bf121eSVineeth Remanan Pillai error = shmem_swapin_page(inode, index, &page, 1773c5bf121eSVineeth Remanan Pillai sgp, gfp, vma, fault_type); 1774c5bf121eSVineeth Remanan Pillai if (error == -EEXIST) 1775c5bf121eSVineeth Remanan Pillai goto repeat; 1776c5bf121eSVineeth Remanan Pillai 1777c5bf121eSVineeth Remanan Pillai *pagep = page; 1778c5bf121eSVineeth Remanan Pillai return error; 1779c5bf121eSVineeth Remanan Pillai } 1780c5bf121eSVineeth Remanan Pillai 1781c5bf121eSVineeth Remanan Pillai if (page && sgp == SGP_WRITE) 1782c5bf121eSVineeth Remanan Pillai mark_page_accessed(page); 1783c5bf121eSVineeth Remanan Pillai 1784c5bf121eSVineeth Remanan Pillai /* fallocated page? */ 1785c5bf121eSVineeth Remanan Pillai if (page && !PageUptodate(page)) { 1786c5bf121eSVineeth Remanan Pillai if (sgp != SGP_READ) 1787c5bf121eSVineeth Remanan Pillai goto clear; 1788c5bf121eSVineeth Remanan Pillai unlock_page(page); 1789c5bf121eSVineeth Remanan Pillai put_page(page); 1790c5bf121eSVineeth Remanan Pillai page = NULL; 1791c5bf121eSVineeth Remanan Pillai } 1792c5bf121eSVineeth Remanan Pillai if (page || sgp == SGP_READ) { 1793c5bf121eSVineeth Remanan Pillai *pagep = page; 1794c5bf121eSVineeth Remanan Pillai return 0; 1795c5bf121eSVineeth Remanan Pillai } 1796c5bf121eSVineeth Remanan Pillai 1797c5bf121eSVineeth Remanan Pillai /* 1798c5bf121eSVineeth Remanan Pillai * Fast cache lookup did not find it: 1799c5bf121eSVineeth Remanan Pillai * bring it back from swap or allocate. 1800c5bf121eSVineeth Remanan Pillai */ 1801c5bf121eSVineeth Remanan Pillai 1802cfda0526SMike Rapoport if (vma && userfaultfd_missing(vma)) { 1803cfda0526SMike Rapoport *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 1804cfda0526SMike Rapoport return 0; 1805cfda0526SMike Rapoport } 1806cfda0526SMike Rapoport 1807800d8c63SKirill A. Shutemov /* shmem_symlink() */ 1808800d8c63SKirill A. Shutemov if (mapping->a_ops != &shmem_aops) 1809800d8c63SKirill A. Shutemov goto alloc_nohuge; 1810657e3038SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 1811800d8c63SKirill A. Shutemov goto alloc_nohuge; 1812800d8c63SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 1813800d8c63SKirill A. Shutemov goto alloc_huge; 1814800d8c63SKirill A. Shutemov switch (sbinfo->huge) { 1815800d8c63SKirill A. Shutemov loff_t i_size; 1816800d8c63SKirill A. Shutemov pgoff_t off; 1817800d8c63SKirill A. Shutemov case SHMEM_HUGE_NEVER: 1818800d8c63SKirill A. Shutemov goto alloc_nohuge; 1819800d8c63SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 1820800d8c63SKirill A. Shutemov off = round_up(index, HPAGE_PMD_NR); 1821800d8c63SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 1822800d8c63SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 1823800d8c63SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 1824800d8c63SKirill A. Shutemov goto alloc_huge; 1825800d8c63SKirill A. Shutemov /* fallthrough */ 1826800d8c63SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 1827657e3038SKirill A. Shutemov if (sgp_huge == SGP_HUGE) 1828657e3038SKirill A. Shutemov goto alloc_huge; 1829657e3038SKirill A. Shutemov /* TODO: implement fadvise() hints */ 1830800d8c63SKirill A. Shutemov goto alloc_nohuge; 183159a16eadSHugh Dickins } 18321da177e4SLinus Torvalds 1833800d8c63SKirill A. 
Shutemov alloc_huge: 18340f079694SMike Rapoport page = shmem_alloc_and_acct_page(gfp, inode, index, true); 1835800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1836c5bf121eSVineeth Remanan Pillai alloc_nohuge: 1837c5bf121eSVineeth Remanan Pillai page = shmem_alloc_and_acct_page(gfp, inode, 1838800d8c63SKirill A. Shutemov index, false); 183954af6042SHugh Dickins } 1840800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1841779750d2SKirill A. Shutemov int retry = 5; 1842c5bf121eSVineeth Remanan Pillai 1843800d8c63SKirill A. Shutemov error = PTR_ERR(page); 1844800d8c63SKirill A. Shutemov page = NULL; 1845779750d2SKirill A. Shutemov if (error != -ENOSPC) 1846c5bf121eSVineeth Remanan Pillai goto unlock; 1847779750d2SKirill A. Shutemov /* 1848c5bf121eSVineeth Remanan Pillai * Try to reclaim some space by splitting a huge page 1849779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1850779750d2SKirill A. Shutemov */ 1851779750d2SKirill A. Shutemov while (retry--) { 1852779750d2SKirill A. Shutemov int ret; 1853c5bf121eSVineeth Remanan Pillai 1854779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1855779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1856779750d2SKirill A. Shutemov break; 1857779750d2SKirill A. Shutemov if (ret) 1858779750d2SKirill A. Shutemov goto alloc_nohuge; 1859779750d2SKirill A. Shutemov } 1860c5bf121eSVineeth Remanan Pillai goto unlock; 1861800d8c63SKirill A. Shutemov } 1862800d8c63SKirill A. Shutemov 1863800d8c63SKirill A. Shutemov if (PageTransHuge(page)) 1864800d8c63SKirill A. Shutemov hindex = round_down(index, HPAGE_PMD_NR); 1865800d8c63SKirill A. Shutemov else 1866800d8c63SKirill A. Shutemov hindex = index; 1867800d8c63SKirill A. Shutemov 186866d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1869eb39d618SHugh Dickins __SetPageReferenced(page); 187066d2f4d2SHugh Dickins 18712cf85583STejun Heo error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, 1872800d8c63SKirill A. Shutemov PageTransHuge(page)); 187354af6042SHugh Dickins if (error) 1874800d8c63SKirill A. Shutemov goto unacct; 1875800d8c63SKirill A. Shutemov error = shmem_add_to_page_cache(page, mapping, hindex, 1876552446a4SMatthew Wilcox NULL, gfp & GFP_RECLAIM_MASK); 1877b065b432SHugh Dickins if (error) { 1878800d8c63SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, 1879800d8c63SKirill A. Shutemov PageTransHuge(page)); 1880800d8c63SKirill A. Shutemov goto unacct; 1881b065b432SHugh Dickins } 1882800d8c63SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, 1883800d8c63SKirill A. Shutemov PageTransHuge(page)); 188454af6042SHugh Dickins lru_cache_add_anon(page); 188554af6042SHugh Dickins 18864595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1887800d8c63SKirill A. Shutemov info->alloced += 1 << compound_order(page); 1888800d8c63SKirill A. Shutemov inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 188954af6042SHugh Dickins shmem_recalc_inode(inode); 18904595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 18911635f6a7SHugh Dickins alloced = true; 189254af6042SHugh Dickins 1893779750d2SKirill A. Shutemov if (PageTransHuge(page) && 1894779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1895779750d2SKirill A. Shutemov hindex + HPAGE_PMD_NR - 1) { 1896779750d2SKirill A. Shutemov /* 1897779750d2SKirill A. Shutemov * Part of the huge page is beyond i_size: subject 1898779750d2SKirill A. Shutemov * to shrink under memory pressure. 1899779750d2SKirill A. Shutemov */ 1900779750d2SKirill A. 
Shutemov spin_lock(&sbinfo->shrinklist_lock); 1901d041353dSCong Wang /* 1902d041353dSCong Wang * _careful to defend against unlocked access to 1903d041353dSCong Wang * ->shrink_list in shmem_unused_huge_shrink() 1904d041353dSCong Wang */ 1905d041353dSCong Wang if (list_empty_careful(&info->shrinklist)) { 1906779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1907779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1908779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1909779750d2SKirill A. Shutemov } 1910779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1911779750d2SKirill A. Shutemov } 1912779750d2SKirill A. Shutemov 1913ec9516fbSHugh Dickins /* 19141635f6a7SHugh Dickins * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 19151635f6a7SHugh Dickins */ 19161635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 19171635f6a7SHugh Dickins sgp = SGP_WRITE; 19181635f6a7SHugh Dickins clear: 19191635f6a7SHugh Dickins /* 19201635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 19211635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 19221635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1923ec9516fbSHugh Dickins */ 1924800d8c63SKirill A. Shutemov if (sgp != SGP_WRITE && !PageUptodate(page)) { 1925800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 1926800d8c63SKirill A. Shutemov int i; 1927800d8c63SKirill A. Shutemov 1928800d8c63SKirill A. Shutemov for (i = 0; i < (1 << compound_order(head)); i++) { 1929800d8c63SKirill A. Shutemov clear_highpage(head + i); 1930800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 1931800d8c63SKirill A. Shutemov } 1932800d8c63SKirill A. Shutemov SetPageUptodate(head); 1933ec9516fbSHugh Dickins } 1934bde05d1cSHugh Dickins 193554af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 193675edd345SHugh Dickins if (sgp <= SGP_CACHE && 193709cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1938267a4c76SHugh Dickins if (alloced) { 1939267a4c76SHugh Dickins ClearPageDirty(page); 1940267a4c76SHugh Dickins delete_from_page_cache(page); 19414595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1942267a4c76SHugh Dickins shmem_recalc_inode(inode); 19434595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1944267a4c76SHugh Dickins } 194554af6042SHugh Dickins error = -EINVAL; 1946267a4c76SHugh Dickins goto unlock; 1947ff36b801SShaohua Li } 1948800d8c63SKirill A. Shutemov *pagep = page + index - hindex; 194954af6042SHugh Dickins return 0; 1950d00806b1SNick Piggin 1951d0217ac0SNick Piggin /* 195254af6042SHugh Dickins * Error recovery. 19531da177e4SLinus Torvalds */ 195454af6042SHugh Dickins unacct: 19550f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1 << compound_order(page)); 1956800d8c63SKirill A. Shutemov 1957800d8c63SKirill A. Shutemov if (PageTransHuge(page)) { 1958800d8c63SKirill A. Shutemov unlock_page(page); 1959800d8c63SKirill A. Shutemov put_page(page); 1960800d8c63SKirill A. Shutemov goto alloc_nohuge; 1961800d8c63SKirill A. Shutemov } 1962d1899228SHugh Dickins unlock: 196327ab7006SHugh Dickins if (page) { 196454af6042SHugh Dickins unlock_page(page); 196509cbfeafSKirill A. Shutemov put_page(page); 196654af6042SHugh Dickins } 196754af6042SHugh Dickins if (error == -ENOSPC && !once++) { 19684595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 196954af6042SHugh Dickins shmem_recalc_inode(inode); 19704595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 19711da177e4SLinus Torvalds goto repeat; 1972d8dc74f2SAdrian Bunk } 19737f4446eeSMatthew Wilcox if (error == -EEXIST) 197454af6042SHugh Dickins goto repeat; 197554af6042SHugh Dickins return error; 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds 197810d20bd2SLinus Torvalds /* 197910d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 198010d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 198110d20bd2SLinus Torvalds * target. 198210d20bd2SLinus Torvalds */ 1983ac6424b9SIngo Molnar static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 198410d20bd2SLinus Torvalds { 198510d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 19862055da97SIngo Molnar list_del_init(&wait->entry); 198710d20bd2SLinus Torvalds return ret; 198810d20bd2SLinus Torvalds } 198910d20bd2SLinus Torvalds 199020acce67SSouptick Joarder static vm_fault_t shmem_fault(struct vm_fault *vmf) 19911da177e4SLinus Torvalds { 199211bac800SDave Jiang struct vm_area_struct *vma = vmf->vma; 1993496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 19949e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 1995657e3038SKirill A. Shutemov enum sgp_type sgp; 199620acce67SSouptick Joarder int err; 199720acce67SSouptick Joarder vm_fault_t ret = VM_FAULT_LOCKED; 19981da177e4SLinus Torvalds 1999f00cdc6dSHugh Dickins /* 2000f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 2001f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 2002f00cdc6dSHugh Dickins * locks writers out with its hold on i_mutex. So refrain from 20038e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 20048e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 20058e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 20068e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 20078e205f77SHugh Dickins * 20088e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 20098e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 20108e205f77SHugh Dickins * we just need to make racing faults a rare case. 20118e205f77SHugh Dickins * 20128e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 20138e205f77SHugh Dickins * standard mutex or completion: but we cannot take i_mutex in fault, 20148e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 
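 *
 * The userspace pattern that exercises this is one thread punching a
 * hole in a tmpfs file while another faults the same range of a
 * shared mapping of it; in sketch form:
 *
 *	// thread A
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *	// thread B
 *	c = ((volatile char *)map)[offset];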
2015f00cdc6dSHugh Dickins */ 2016f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 2017f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 2018f00cdc6dSHugh Dickins 2019f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2020f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 20218e205f77SHugh Dickins if (shmem_falloc && 20228e205f77SHugh Dickins shmem_falloc->waitq && 20238e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 20248e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 20258e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 202610d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 20278e205f77SHugh Dickins 20288e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 2029f00cdc6dSHugh Dickins if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 2030f00cdc6dSHugh Dickins !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 20318e205f77SHugh Dickins /* It's polite to up mmap_sem if we can */ 2032f00cdc6dSHugh Dickins up_read(&vma->vm_mm->mmap_sem); 20338e205f77SHugh Dickins ret = VM_FAULT_RETRY; 2034f00cdc6dSHugh Dickins } 20358e205f77SHugh Dickins 20368e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 20378e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 20388e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 20398e205f77SHugh Dickins spin_unlock(&inode->i_lock); 20408e205f77SHugh Dickins schedule(); 20418e205f77SHugh Dickins 20428e205f77SHugh Dickins /* 20438e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 20448e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 20458e205f77SHugh Dickins * is usually invalid by the time we reach here, but 20468e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 20478e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 20488e205f77SHugh Dickins */ 20498e205f77SHugh Dickins spin_lock(&inode->i_lock); 20508e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 20518e205f77SHugh Dickins spin_unlock(&inode->i_lock); 20528e205f77SHugh Dickins return ret; 2053f00cdc6dSHugh Dickins } 20548e205f77SHugh Dickins spin_unlock(&inode->i_lock); 2055f00cdc6dSHugh Dickins } 2056f00cdc6dSHugh Dickins 2057657e3038SKirill A. Shutemov sgp = SGP_CACHE; 205818600332SMichal Hocko 205918600332SMichal Hocko if ((vma->vm_flags & VM_NOHUGEPAGE) || 206018600332SMichal Hocko test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 2061657e3038SKirill A. Shutemov sgp = SGP_NOHUGE; 206218600332SMichal Hocko else if (vma->vm_flags & VM_HUGEPAGE) 206318600332SMichal Hocko sgp = SGP_HUGE; 2064657e3038SKirill A. 
Shutemov 206520acce67SSouptick Joarder err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 2066cfda0526SMike Rapoport gfp, vma, vmf, &ret); 206720acce67SSouptick Joarder if (err) 206820acce67SSouptick Joarder return vmf_error(err); 206968da9f05SHugh Dickins return ret; 20701da177e4SLinus Torvalds } 20711da177e4SLinus Torvalds 2072c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 2073c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 2074c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 2075c01d5b30SHugh Dickins { 2076c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 2077c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 2078c01d5b30SHugh Dickins unsigned long addr; 2079c01d5b30SHugh Dickins unsigned long offset; 2080c01d5b30SHugh Dickins unsigned long inflated_len; 2081c01d5b30SHugh Dickins unsigned long inflated_addr; 2082c01d5b30SHugh Dickins unsigned long inflated_offset; 2083c01d5b30SHugh Dickins 2084c01d5b30SHugh Dickins if (len > TASK_SIZE) 2085c01d5b30SHugh Dickins return -ENOMEM; 2086c01d5b30SHugh Dickins 2087c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 2088c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 2089c01d5b30SHugh Dickins 2090e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 2091c01d5b30SHugh Dickins return addr; 2092c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 2093c01d5b30SHugh Dickins return addr; 2094c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 2095c01d5b30SHugh Dickins return addr; 2096c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2097c01d5b30SHugh Dickins return addr; 2098c01d5b30SHugh Dickins 2099c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2100c01d5b30SHugh Dickins return addr; 2101c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2102c01d5b30SHugh Dickins return addr; 2103c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2104c01d5b30SHugh Dickins return addr; 2105c01d5b30SHugh Dickins /* 2106c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 2107c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2108c01d5b30SHugh Dickins * But if caller specified an address hint, respect that as before. 2109c01d5b30SHugh Dickins */ 2110c01d5b30SHugh Dickins if (uaddr) 2111c01d5b30SHugh Dickins return addr; 2112c01d5b30SHugh Dickins 2113c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2114c01d5b30SHugh Dickins struct super_block *sb; 2115c01d5b30SHugh Dickins 2116c01d5b30SHugh Dickins if (file) { 2117c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2118c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2119c01d5b30SHugh Dickins } else { 2120c01d5b30SHugh Dickins /* 2121c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2122c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
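 *
 * E.g. the classic shared anonymous mapping lands here with
 * file == NULL (a sketch):
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * hence the fall back to shm_mnt->mnt_sb to read the huge= setting.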
2123c01d5b30SHugh Dickins */ 2124c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2125c01d5b30SHugh Dickins return addr; 2126c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2127c01d5b30SHugh Dickins } 21283089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2129c01d5b30SHugh Dickins return addr; 2130c01d5b30SHugh Dickins } 2131c01d5b30SHugh Dickins 2132c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2133c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2134c01d5b30SHugh Dickins return addr; 2135c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2136c01d5b30SHugh Dickins return addr; 2137c01d5b30SHugh Dickins 2138c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2139c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2140c01d5b30SHugh Dickins return addr; 2141c01d5b30SHugh Dickins if (inflated_len < len) 2142c01d5b30SHugh Dickins return addr; 2143c01d5b30SHugh Dickins 2144c01d5b30SHugh Dickins inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); 2145c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2146c01d5b30SHugh Dickins return addr; 2147c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2148c01d5b30SHugh Dickins return addr; 2149c01d5b30SHugh Dickins 2150c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2151c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2152c01d5b30SHugh Dickins if (inflated_offset > offset) 2153c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2154c01d5b30SHugh Dickins 2155c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2156c01d5b30SHugh Dickins return addr; 2157c01d5b30SHugh Dickins return inflated_addr; 2158c01d5b30SHugh Dickins } 2159c01d5b30SHugh Dickins 21601da177e4SLinus Torvalds #ifdef CONFIG_NUMA 216141ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 21621da177e4SLinus Torvalds { 2163496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 216441ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 21651da177e4SLinus Torvalds } 21661da177e4SLinus Torvalds 2167d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2168d8dc74f2SAdrian Bunk unsigned long addr) 21691da177e4SLinus Torvalds { 2170496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 217141ffe5d5SHugh Dickins pgoff_t index; 21721da177e4SLinus Torvalds 217341ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 217441ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 21751da177e4SLinus Torvalds } 21761da177e4SLinus Torvalds #endif 21771da177e4SLinus Torvalds 21781da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user) 21791da177e4SLinus Torvalds { 2180496ad9aaSAl Viro struct inode *inode = file_inode(file); 21811da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 21821da177e4SLinus Torvalds int retval = -ENOMEM; 21831da177e4SLinus Torvalds 21844595ef88SKirill A. 
Shutemov spin_lock_irq(&info->lock); 21851da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 21861da177e4SLinus Torvalds if (!user_shm_lock(inode->i_size, user)) 21871da177e4SLinus Torvalds goto out_nomem; 21881da177e4SLinus Torvalds info->flags |= VM_LOCKED; 218989e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 21901da177e4SLinus Torvalds } 21911da177e4SLinus Torvalds if (!lock && (info->flags & VM_LOCKED) && user) { 21921da177e4SLinus Torvalds user_shm_unlock(inode->i_size, user); 21931da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 219489e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 21951da177e4SLinus Torvalds } 21961da177e4SLinus Torvalds retval = 0; 219789e004eaSLee Schermerhorn 21981da177e4SLinus Torvalds out_nomem: 21994595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 22001da177e4SLinus Torvalds return retval; 22011da177e4SLinus Torvalds } 22021da177e4SLinus Torvalds 22039b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 22041da177e4SLinus Torvalds { 2205ab3948f5SJoel Fernandes (Google) struct shmem_inode_info *info = SHMEM_I(file_inode(file)); 2206ab3948f5SJoel Fernandes (Google) 2207ab3948f5SJoel Fernandes (Google) if (info->seals & F_SEAL_FUTURE_WRITE) { 2208ab3948f5SJoel Fernandes (Google) /* 2209ab3948f5SJoel Fernandes (Google) * New PROT_WRITE and MAP_SHARED mmaps are not allowed when 2210ab3948f5SJoel Fernandes (Google) * "future write" seal active. 2211ab3948f5SJoel Fernandes (Google) */ 2212ab3948f5SJoel Fernandes (Google) if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) 2213ab3948f5SJoel Fernandes (Google) return -EPERM; 2214ab3948f5SJoel Fernandes (Google) 2215ab3948f5SJoel Fernandes (Google) /* 2216ab3948f5SJoel Fernandes (Google) * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED 2217ab3948f5SJoel Fernandes (Google) * read-only mapping, take care to not allow mprotect to revert 2218ab3948f5SJoel Fernandes (Google) * protections. 2219ab3948f5SJoel Fernandes (Google) */ 2220ab3948f5SJoel Fernandes (Google) vma->vm_flags &= ~(VM_MAYWRITE); 2221ab3948f5SJoel Fernandes (Google) } 2222ab3948f5SJoel Fernandes (Google) 22231da177e4SLinus Torvalds file_accessed(file); 22241da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2225e496cf3dSKirill A. Shutemov if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2226f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2227f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 2228f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 2229f3f0e1d2SKirill A. 
Shutemov } 22301da177e4SLinus Torvalds return 0; 22311da177e4SLinus Torvalds } 22321da177e4SLinus Torvalds 2233454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 223409208d15SAl Viro umode_t mode, dev_t dev, unsigned long flags) 22351da177e4SLinus Torvalds { 22361da177e4SLinus Torvalds struct inode *inode; 22371da177e4SLinus Torvalds struct shmem_inode_info *info; 22381da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 22391da177e4SLinus Torvalds 22405b04c689SPavel Emelyanov if (shmem_reserve_inode(sb)) 22411da177e4SLinus Torvalds return NULL; 22421da177e4SLinus Torvalds 22431da177e4SLinus Torvalds inode = new_inode(sb); 22441da177e4SLinus Torvalds if (inode) { 224585fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 2246454abafeSDmitry Monakhov inode_init_owner(inode, dir, mode); 22471da177e4SLinus Torvalds inode->i_blocks = 0; 2248078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 224946c9a946SArnd Bergmann inode->i_generation = prandom_u32(); 22501da177e4SLinus Torvalds info = SHMEM_I(inode); 22511da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 22521da177e4SLinus Torvalds spin_lock_init(&info->lock); 2253af53d3e9SHugh Dickins atomic_set(&info->stop_eviction, 0); 225440e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 22550b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2256779750d2SKirill A. Shutemov INIT_LIST_HEAD(&info->shrinklist); 22571da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 225838f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 225972c04902SAl Viro cache_no_acl(inode); 22601da177e4SLinus Torvalds 22611da177e4SLinus Torvalds switch (mode & S_IFMT) { 22621da177e4SLinus Torvalds default: 226339f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 22641da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 22651da177e4SLinus Torvalds break; 22661da177e4SLinus Torvalds case S_IFREG: 226714fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 22681da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 22691da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 227071fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 227171fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 22721da177e4SLinus Torvalds break; 22731da177e4SLinus Torvalds case S_IFDIR: 2274d8c76e6fSDave Hansen inc_nlink(inode); 22751da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 22761da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 22771da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 22781da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 22791da177e4SLinus Torvalds break; 22801da177e4SLinus Torvalds case S_IFLNK: 22811da177e4SLinus Torvalds /* 22821da177e4SLinus Torvalds * Must not load anything in the rbtree, 22831da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 
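 * (Gloss on the line above, assuming the usual shmem_destroy_inode()
 * behaviour of this vintage: the destroy path frees the shared policy
 * only for regular files, so a symlink's policy rbtree must stay
 * empty or its nodes would be leaked.)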
22841da177e4SLinus Torvalds */ 228571fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 22861da177e4SLinus Torvalds break; 22871da177e4SLinus Torvalds } 2288b45d71fbSJoel Fernandes (Google) 2289b45d71fbSJoel Fernandes (Google) lockdep_annotate_inode_mutex_key(inode); 22905b04c689SPavel Emelyanov } else 22915b04c689SPavel Emelyanov shmem_free_inode(sb); 22921da177e4SLinus Torvalds return inode; 22931da177e4SLinus Torvalds } 22941da177e4SLinus Torvalds 22950cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping) 22960cd6144aSJohannes Weiner { 2297f8005451SHugh Dickins return mapping->a_ops == &shmem_aops; 22980cd6144aSJohannes Weiner } 22990cd6144aSJohannes Weiner 23008d103963SMike Rapoport static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, 23014c27fe4cSMike Rapoport pmd_t *dst_pmd, 23024c27fe4cSMike Rapoport struct vm_area_struct *dst_vma, 23034c27fe4cSMike Rapoport unsigned long dst_addr, 23044c27fe4cSMike Rapoport unsigned long src_addr, 23058d103963SMike Rapoport bool zeropage, 23064c27fe4cSMike Rapoport struct page **pagep) 23074c27fe4cSMike Rapoport { 23084c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file); 23094c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 23104c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping; 23114c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping); 23124c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 23134c27fe4cSMike Rapoport struct mem_cgroup *memcg; 23144c27fe4cSMike Rapoport spinlock_t *ptl; 23154c27fe4cSMike Rapoport void *page_kaddr; 23164c27fe4cSMike Rapoport struct page *page; 23174c27fe4cSMike Rapoport pte_t _dst_pte, *dst_pte; 23184c27fe4cSMike Rapoport int ret; 2319e2a50c1fSAndrea Arcangeli pgoff_t offset, max_off; 23204c27fe4cSMike Rapoport 23214c27fe4cSMike Rapoport ret = -ENOMEM; 23220f079694SMike Rapoport if (!shmem_inode_acct_block(inode, 1)) 23234c27fe4cSMike Rapoport goto out; 23244c27fe4cSMike Rapoport 2325cb658a45SAndrea Arcangeli if (!*pagep) { 23264c27fe4cSMike Rapoport page = shmem_alloc_page(gfp, info, pgoff); 23274c27fe4cSMike Rapoport if (!page) 23280f079694SMike Rapoport goto out_unacct_blocks; 23294c27fe4cSMike Rapoport 23308d103963SMike Rapoport if (!zeropage) { /* mcopy_atomic */ 23314c27fe4cSMike Rapoport page_kaddr = kmap_atomic(page); 23328d103963SMike Rapoport ret = copy_from_user(page_kaddr, 23338d103963SMike Rapoport (const void __user *)src_addr, 23344c27fe4cSMike Rapoport PAGE_SIZE); 23354c27fe4cSMike Rapoport kunmap_atomic(page_kaddr); 23364c27fe4cSMike Rapoport 23374c27fe4cSMike Rapoport /* fallback to copy_from_user outside mmap_sem */ 23384c27fe4cSMike Rapoport if (unlikely(ret)) { 23394c27fe4cSMike Rapoport *pagep = page; 23400f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 23414c27fe4cSMike Rapoport /* don't free the page */ 23429e368259SAndrea Arcangeli return -ENOENT; 23434c27fe4cSMike Rapoport } 23448d103963SMike Rapoport } else { /* mfill_zeropage_atomic */ 23458d103963SMike Rapoport clear_highpage(page); 23468d103963SMike Rapoport } 23474c27fe4cSMike Rapoport } else { 23484c27fe4cSMike Rapoport page = *pagep; 23494c27fe4cSMike Rapoport *pagep = NULL; 23504c27fe4cSMike Rapoport } 23514c27fe4cSMike Rapoport 23529cc90c66SAndrea Arcangeli VM_BUG_ON(PageLocked(page) || PageSwapBacked(page)); 23539cc90c66SAndrea Arcangeli __SetPageLocked(page); 23549cc90c66SAndrea Arcangeli __SetPageSwapBacked(page); 2355a425d358SAndrea Arcangeli __SetPageUptodate(page); 
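	/*
	 * Don't instantiate a page beyond i_size: a concurrent truncate
	 * or hole punch may have shrunk the file since the caller's
	 * checks. The same test is repeated under the PTE lock below,
	 * where it becomes final; failing early here just avoids the
	 * memcg charge and page cache insertion for a doomed copy.
	 */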
23569cc90c66SAndrea Arcangeli 2357e2a50c1fSAndrea Arcangeli ret = -EFAULT; 2358e2a50c1fSAndrea Arcangeli offset = linear_page_index(dst_vma, dst_addr); 2359e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2360e2a50c1fSAndrea Arcangeli if (unlikely(offset >= max_off)) 2361e2a50c1fSAndrea Arcangeli goto out_release; 2362e2a50c1fSAndrea Arcangeli 23632cf85583STejun Heo ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false); 23644c27fe4cSMike Rapoport if (ret) 23654c27fe4cSMike Rapoport goto out_release; 23664c27fe4cSMike Rapoport 2367552446a4SMatthew Wilcox ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL, 2368552446a4SMatthew Wilcox gfp & GFP_RECLAIM_MASK); 23694c27fe4cSMike Rapoport if (ret) 23704c27fe4cSMike Rapoport goto out_release_uncharge; 23714c27fe4cSMike Rapoport 23724c27fe4cSMike Rapoport mem_cgroup_commit_charge(page, memcg, false, false); 23734c27fe4cSMike Rapoport 23744c27fe4cSMike Rapoport _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 23754c27fe4cSMike Rapoport if (dst_vma->vm_flags & VM_WRITE) 23764c27fe4cSMike Rapoport _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 2377dcf7fe9dSAndrea Arcangeli else { 2378dcf7fe9dSAndrea Arcangeli /* 2379dcf7fe9dSAndrea Arcangeli * We don't set the pte dirty if the vma has no 2380dcf7fe9dSAndrea Arcangeli * VM_WRITE permission, so mark the page dirty or it 2381dcf7fe9dSAndrea Arcangeli * could be freed from under us. We could do it 2382dcf7fe9dSAndrea Arcangeli * unconditionally before unlock_page(), but doing it 2383dcf7fe9dSAndrea Arcangeli * only if VM_WRITE is not set is faster. 2384dcf7fe9dSAndrea Arcangeli */ 2385dcf7fe9dSAndrea Arcangeli set_page_dirty(page); 2386dcf7fe9dSAndrea Arcangeli } 23874c27fe4cSMike Rapoport 23884c27fe4cSMike Rapoport dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 2389e2a50c1fSAndrea Arcangeli 2390e2a50c1fSAndrea Arcangeli ret = -EFAULT; 2391e2a50c1fSAndrea Arcangeli max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2392e2a50c1fSAndrea Arcangeli if (unlikely(offset >= max_off)) 2393e2a50c1fSAndrea Arcangeli goto out_release_uncharge_unlock; 2394e2a50c1fSAndrea Arcangeli 2395e2a50c1fSAndrea Arcangeli ret = -EEXIST; 23964c27fe4cSMike Rapoport if (!pte_none(*dst_pte)) 23974c27fe4cSMike Rapoport goto out_release_uncharge_unlock; 23984c27fe4cSMike Rapoport 23994c27fe4cSMike Rapoport lru_cache_add_anon(page); 24004c27fe4cSMike Rapoport 24014c27fe4cSMike Rapoport spin_lock(&info->lock); 24024c27fe4cSMike Rapoport info->alloced++; 24034c27fe4cSMike Rapoport inode->i_blocks += BLOCKS_PER_PAGE; 24044c27fe4cSMike Rapoport shmem_recalc_inode(inode); 24054c27fe4cSMike Rapoport spin_unlock(&info->lock); 24064c27fe4cSMike Rapoport 24074c27fe4cSMike Rapoport inc_mm_counter(dst_mm, mm_counter_file(page)); 24084c27fe4cSMike Rapoport page_add_file_rmap(page, false); 24094c27fe4cSMike Rapoport set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 24104c27fe4cSMike Rapoport 24114c27fe4cSMike Rapoport /* No need to invalidate - it was non-present before */ 24124c27fe4cSMike Rapoport update_mmu_cache(dst_vma, dst_addr, dst_pte); 24134c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 2414e2a50c1fSAndrea Arcangeli unlock_page(page); 24154c27fe4cSMike Rapoport ret = 0; 24164c27fe4cSMike Rapoport out: 24174c27fe4cSMike Rapoport return ret; 24184c27fe4cSMike Rapoport out_release_uncharge_unlock: 24194c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 2420dcf7fe9dSAndrea Arcangeli ClearPageDirty(page); 2421e2a50c1fSAndrea Arcangeli delete_from_page_cache(page); 
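	/*
	 * Error unwind: the labels below fall through in the reverse
	 * order of setup, uncharging the memcg, unlocking and dropping
	 * the page, then giving back the block accounted at entry.
	 */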
24224c27fe4cSMike Rapoport out_release_uncharge: 24234c27fe4cSMike Rapoport mem_cgroup_cancel_charge(page, memcg, false); 24244c27fe4cSMike Rapoport out_release: 24259cc90c66SAndrea Arcangeli unlock_page(page); 24264c27fe4cSMike Rapoport put_page(page); 24274c27fe4cSMike Rapoport out_unacct_blocks: 24280f079694SMike Rapoport shmem_inode_unacct_blocks(inode, 1); 24294c27fe4cSMike Rapoport goto out; 24304c27fe4cSMike Rapoport } 24314c27fe4cSMike Rapoport 24328d103963SMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, 24338d103963SMike Rapoport pmd_t *dst_pmd, 24348d103963SMike Rapoport struct vm_area_struct *dst_vma, 24358d103963SMike Rapoport unsigned long dst_addr, 24368d103963SMike Rapoport unsigned long src_addr, 24378d103963SMike Rapoport struct page **pagep) 24388d103963SMike Rapoport { 24398d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 24408d103963SMike Rapoport dst_addr, src_addr, false, pagep); 24418d103963SMike Rapoport } 24428d103963SMike Rapoport 24438d103963SMike Rapoport int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, 24448d103963SMike Rapoport pmd_t *dst_pmd, 24458d103963SMike Rapoport struct vm_area_struct *dst_vma, 24468d103963SMike Rapoport unsigned long dst_addr) 24478d103963SMike Rapoport { 24488d103963SMike Rapoport struct page *page = NULL; 24498d103963SMike Rapoport 24508d103963SMike Rapoport return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, 24518d103963SMike Rapoport dst_addr, 0, true, &page); 24528d103963SMike Rapoport } 24538d103963SMike Rapoport 24541da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 245592e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 245669f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 24571da177e4SLinus Torvalds 24586d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR 24596d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 24606d9d88d0SJarkko Sakkinen #else 24616d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL 24626d9d88d0SJarkko Sakkinen #endif 24636d9d88d0SJarkko Sakkinen 24641da177e4SLinus Torvalds static int 2465800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 2466800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 2467800d15a5SNick Piggin struct page **pagep, void **fsdata) 24681da177e4SLinus Torvalds { 2469800d15a5SNick Piggin struct inode *inode = mapping->host; 247040e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 247109cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 247240e041a2SDavid Herrmann 247340e041a2SDavid Herrmann /* i_mutex is held by caller */ 2474ab3948f5SJoel Fernandes (Google) if (unlikely(info->seals & (F_SEAL_GROW | 2475ab3948f5SJoel Fernandes (Google) F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 2476ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 247740e041a2SDavid Herrmann return -EPERM; 247840e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 247940e041a2SDavid Herrmann return -EPERM; 248040e041a2SDavid Herrmann } 248140e041a2SDavid Herrmann 24829e18eb29SAndres Lagar-Cavilla return shmem_getpage(inode, index, pagep, SGP_WRITE); 2483800d15a5SNick Piggin } 2484800d15a5SNick Piggin 2485800d15a5SNick Piggin static int 2486800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2487800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2488800d15a5SNick Piggin struct page *page, void *fsdata) 2489800d15a5SNick Piggin { 2490800d15a5SNick Piggin struct inode *inode = mapping->host; 2491800d15a5SNick Piggin 2492800d15a5SNick Piggin if (pos + copied > inode->i_size) 2493800d15a5SNick Piggin i_size_write(inode, pos + copied); 2494800d15a5SNick Piggin 2495ec9516fbSHugh Dickins if (!PageUptodate(page)) { 2496800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 2497800d8c63SKirill A. Shutemov if (PageTransCompound(page)) { 2498800d8c63SKirill A. Shutemov int i; 2499800d8c63SKirill A. Shutemov 2500800d8c63SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2501800d8c63SKirill A. Shutemov if (head + i == page) 2502800d8c63SKirill A. Shutemov continue; 2503800d8c63SKirill A. Shutemov clear_highpage(head + i); 2504800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 2505800d8c63SKirill A. Shutemov } 2506800d8c63SKirill A. Shutemov } 250709cbfeafSKirill A. Shutemov if (copied < PAGE_SIZE) { 250809cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2509ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 251009cbfeafSKirill A. Shutemov from + copied, PAGE_SIZE); 2511ec9516fbSHugh Dickins } 2512800d8c63SKirill A. Shutemov SetPageUptodate(head); 2513ec9516fbSHugh Dickins } 2514d3602444SHugh Dickins set_page_dirty(page); 25156746aff7SWu Fengguang unlock_page(page); 251609cbfeafSKirill A. Shutemov put_page(page); 2517d3602444SHugh Dickins 2518800d15a5SNick Piggin return copied; 25191da177e4SLinus Torvalds } 25201da177e4SLinus Torvalds 25212ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 25221da177e4SLinus Torvalds { 25236e58e79dSAl Viro struct file *file = iocb->ki_filp; 25246e58e79dSAl Viro struct inode *inode = file_inode(file); 25251da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 252641ffe5d5SHugh Dickins pgoff_t index; 252741ffe5d5SHugh Dickins unsigned long offset; 2528a0ee5ec5SHugh Dickins enum sgp_type sgp = SGP_READ; 2529f7c1d074SGeert Uytterhoeven int error = 0; 2530cb66a7a1SAl Viro ssize_t retval = 0; 25316e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2532a0ee5ec5SHugh Dickins 2533a0ee5ec5SHugh Dickins /* 2534a0ee5ec5SHugh Dickins * Might this read be for a stacking filesystem? Then when reading 2535a0ee5ec5SHugh Dickins * holes of a sparse file, we actually need to allocate those pages, 2536a0ee5ec5SHugh Dickins * and even mark them dirty, so it cannot exceed the max_blocks limit. 
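 * (A pipe iterator handed in by splice() or sendfile() is the usual
 * non-iovec case here, hence the iter_is_iovec() test just below.)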
2537a0ee5ec5SHugh Dickins */ 2538777eda2cSAl Viro if (!iter_is_iovec(to)) 253975edd345SHugh Dickins sgp = SGP_CACHE; 25401da177e4SLinus Torvalds 254109cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT; 254209cbfeafSKirill A. Shutemov offset = *ppos & ~PAGE_MASK; 25431da177e4SLinus Torvalds 25441da177e4SLinus Torvalds for (;;) { 25451da177e4SLinus Torvalds struct page *page = NULL; 254641ffe5d5SHugh Dickins pgoff_t end_index; 254741ffe5d5SHugh Dickins unsigned long nr, ret; 25481da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 25491da177e4SLinus Torvalds 255009cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 25511da177e4SLinus Torvalds if (index > end_index) 25521da177e4SLinus Torvalds break; 25531da177e4SLinus Torvalds if (index == end_index) { 255409cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 25551da177e4SLinus Torvalds if (nr <= offset) 25561da177e4SLinus Torvalds break; 25571da177e4SLinus Torvalds } 25581da177e4SLinus Torvalds 25599e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, sgp); 25606e58e79dSAl Viro if (error) { 25616e58e79dSAl Viro if (error == -EINVAL) 25626e58e79dSAl Viro error = 0; 25631da177e4SLinus Torvalds break; 25641da177e4SLinus Torvalds } 256575edd345SHugh Dickins if (page) { 256675edd345SHugh Dickins if (sgp == SGP_CACHE) 256775edd345SHugh Dickins set_page_dirty(page); 2568d3602444SHugh Dickins unlock_page(page); 256975edd345SHugh Dickins } 25701da177e4SLinus Torvalds 25711da177e4SLinus Torvalds /* 25721da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 25731b1dcc1bSJes Sorensen * are called without i_mutex protection against truncate 25741da177e4SLinus Torvalds */ 257509cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 25761da177e4SLinus Torvalds i_size = i_size_read(inode); 257709cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 25781da177e4SLinus Torvalds if (index == end_index) { 257909cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 25801da177e4SLinus Torvalds if (nr <= offset) { 25811da177e4SLinus Torvalds if (page) 258209cbfeafSKirill A. Shutemov put_page(page); 25831da177e4SLinus Torvalds break; 25841da177e4SLinus Torvalds } 25851da177e4SLinus Torvalds } 25861da177e4SLinus Torvalds nr -= offset; 25871da177e4SLinus Torvalds 25881da177e4SLinus Torvalds if (page) { 25891da177e4SLinus Torvalds /* 25901da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 25911da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 25921da177e4SLinus Torvalds * before reading the page on the kernel side. 25931da177e4SLinus Torvalds */ 25941da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 25951da177e4SLinus Torvalds flush_dcache_page(page); 25961da177e4SLinus Torvalds /* 25971da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 25981da177e4SLinus Torvalds */ 25991da177e4SLinus Torvalds if (!offset) 26001da177e4SLinus Torvalds mark_page_accessed(page); 2601b5810039SNick Piggin } else { 26021da177e4SLinus Torvalds page = ZERO_PAGE(0); 260309cbfeafSKirill A. Shutemov get_page(page); 2604b5810039SNick Piggin } 26051da177e4SLinus Torvalds 26061da177e4SLinus Torvalds /* 26071da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 26081da177e4SLinus Torvalds * now we can copy it to user space... 26091da177e4SLinus Torvalds */ 26102ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 26116e58e79dSAl Viro retval += ret; 26121da177e4SLinus Torvalds offset += ret; 261309cbfeafSKirill A. 
Shutemov index += offset >> PAGE_SHIFT; 261409cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 26151da177e4SLinus Torvalds 261609cbfeafSKirill A. Shutemov put_page(page); 26172ba5bbedSAl Viro if (!iov_iter_count(to)) 26181da177e4SLinus Torvalds break; 26196e58e79dSAl Viro if (ret < nr) { 26206e58e79dSAl Viro error = -EFAULT; 26216e58e79dSAl Viro break; 26226e58e79dSAl Viro } 26231da177e4SLinus Torvalds cond_resched(); 26241da177e4SLinus Torvalds } 26251da177e4SLinus Torvalds 262609cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 26276e58e79dSAl Viro file_accessed(file); 26286e58e79dSAl Viro return retval ? retval : error; 26291da177e4SLinus Torvalds } 26301da177e4SLinus Torvalds 2631220f2ac9SHugh Dickins /* 26327f4446eeSMatthew Wilcox * llseek SEEK_DATA or SEEK_HOLE through the page cache. 2633220f2ac9SHugh Dickins */ 2634220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 2635965c8e59SAndrew Morton pgoff_t index, pgoff_t end, int whence) 2636220f2ac9SHugh Dickins { 2637220f2ac9SHugh Dickins struct page *page; 2638220f2ac9SHugh Dickins struct pagevec pvec; 2639220f2ac9SHugh Dickins pgoff_t indices[PAGEVEC_SIZE]; 2640220f2ac9SHugh Dickins bool done = false; 2641220f2ac9SHugh Dickins int i; 2642220f2ac9SHugh Dickins 264386679820SMel Gorman pagevec_init(&pvec); 2644220f2ac9SHugh Dickins pvec.nr = 1; /* start small: we may be there already */ 2645220f2ac9SHugh Dickins while (!done) { 26460cd6144aSJohannes Weiner pvec.nr = find_get_entries(mapping, index, 2647220f2ac9SHugh Dickins pvec.nr, pvec.pages, indices); 2648220f2ac9SHugh Dickins if (!pvec.nr) { 2649965c8e59SAndrew Morton if (whence == SEEK_DATA) 2650220f2ac9SHugh Dickins index = end; 2651220f2ac9SHugh Dickins break; 2652220f2ac9SHugh Dickins } 2653220f2ac9SHugh Dickins for (i = 0; i < pvec.nr; i++, index++) { 2654220f2ac9SHugh Dickins if (index < indices[i]) { 2655965c8e59SAndrew Morton if (whence == SEEK_HOLE) { 2656220f2ac9SHugh Dickins done = true; 2657220f2ac9SHugh Dickins break; 2658220f2ac9SHugh Dickins } 2659220f2ac9SHugh Dickins index = indices[i]; 2660220f2ac9SHugh Dickins } 2661220f2ac9SHugh Dickins page = pvec.pages[i]; 26623159f943SMatthew Wilcox if (page && !xa_is_value(page)) { 2663220f2ac9SHugh Dickins if (!PageUptodate(page)) 2664220f2ac9SHugh Dickins page = NULL; 2665220f2ac9SHugh Dickins } 2666220f2ac9SHugh Dickins if (index >= end || 2667965c8e59SAndrew Morton (page && whence == SEEK_DATA) || 2668965c8e59SAndrew Morton (!page && whence == SEEK_HOLE)) { 2669220f2ac9SHugh Dickins done = true; 2670220f2ac9SHugh Dickins break; 2671220f2ac9SHugh Dickins } 2672220f2ac9SHugh Dickins } 26730cd6144aSJohannes Weiner pagevec_remove_exceptionals(&pvec); 2674220f2ac9SHugh Dickins pagevec_release(&pvec); 2675220f2ac9SHugh Dickins pvec.nr = PAGEVEC_SIZE; 2676220f2ac9SHugh Dickins cond_resched(); 2677220f2ac9SHugh Dickins } 2678220f2ac9SHugh Dickins return index; 2679220f2ac9SHugh Dickins } 2680220f2ac9SHugh Dickins 2681965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2682220f2ac9SHugh Dickins { 2683220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2684220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2685220f2ac9SHugh Dickins pgoff_t start, end; 2686220f2ac9SHugh Dickins loff_t new_offset; 2687220f2ac9SHugh Dickins 2688965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2689965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2690220f2ac9SHugh 
Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 26915955102cSAl Viro inode_lock(inode); 2692220f2ac9SHugh Dickins /* We're holding i_mutex so we can access i_size directly */ 2693220f2ac9SHugh Dickins 26941a413646SYufen Yu if (offset < 0 || offset >= inode->i_size) 2695220f2ac9SHugh Dickins offset = -ENXIO; 2696220f2ac9SHugh Dickins else { 269709cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 269809cbfeafSKirill A. Shutemov end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2699965c8e59SAndrew Morton new_offset = shmem_seek_hole_data(mapping, start, end, whence); 270009cbfeafSKirill A. Shutemov new_offset <<= PAGE_SHIFT; 2701220f2ac9SHugh Dickins if (new_offset > offset) { 2702220f2ac9SHugh Dickins if (new_offset < inode->i_size) 2703220f2ac9SHugh Dickins offset = new_offset; 2704965c8e59SAndrew Morton else if (whence == SEEK_DATA) 2705220f2ac9SHugh Dickins offset = -ENXIO; 2706220f2ac9SHugh Dickins else 2707220f2ac9SHugh Dickins offset = inode->i_size; 2708220f2ac9SHugh Dickins } 2709220f2ac9SHugh Dickins } 2710220f2ac9SHugh Dickins 2711387aae6fSHugh Dickins if (offset >= 0) 271246a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 27135955102cSAl Viro inode_unlock(inode); 2714220f2ac9SHugh Dickins return offset; 2715220f2ac9SHugh Dickins } 2716220f2ac9SHugh Dickins 271783e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 271883e4fa9cSHugh Dickins loff_t len) 271983e4fa9cSHugh Dickins { 2720496ad9aaSAl Viro struct inode *inode = file_inode(file); 2721e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 272240e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 27231aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 2724e2d12e22SHugh Dickins pgoff_t start, index, end; 2725e2d12e22SHugh Dickins int error; 272683e4fa9cSHugh Dickins 272713ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 272813ace4d0SHugh Dickins return -EOPNOTSUPP; 272913ace4d0SHugh Dickins 27305955102cSAl Viro inode_lock(inode); 273183e4fa9cSHugh Dickins 273283e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 273383e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 273483e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 273583e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 27368e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 273783e4fa9cSHugh Dickins 273840e041a2SDavid Herrmann /* protected by i_mutex */ 2739ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 274040e041a2SDavid Herrmann error = -EPERM; 274140e041a2SDavid Herrmann goto out; 274240e041a2SDavid Herrmann } 274340e041a2SDavid Herrmann 27448e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2745f00cdc6dSHugh Dickins shmem_falloc.start = unmap_start >> PAGE_SHIFT; 2746f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2747f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2748f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2749f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2750f00cdc6dSHugh Dickins 275183e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 275283e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 275383e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 275483e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 275583e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed 
pages */ 27568e205f77SHugh Dickins 27578e205f77SHugh Dickins spin_lock(&inode->i_lock); 27588e205f77SHugh Dickins inode->i_private = NULL; 27598e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 27602055da97SIngo Molnar WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 27618e205f77SHugh Dickins spin_unlock(&inode->i_lock); 276283e4fa9cSHugh Dickins error = 0; 27638e205f77SHugh Dickins goto out; 276483e4fa9cSHugh Dickins } 276583e4fa9cSHugh Dickins 2766e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2767e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 2768e2d12e22SHugh Dickins if (error) 2769e2d12e22SHugh Dickins goto out; 2770e2d12e22SHugh Dickins 277140e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 277240e041a2SDavid Herrmann error = -EPERM; 277340e041a2SDavid Herrmann goto out; 277440e041a2SDavid Herrmann } 277540e041a2SDavid Herrmann 277609cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 277709cbfeafSKirill A. Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2778e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2779e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2780e2d12e22SHugh Dickins error = -ENOSPC; 2781e2d12e22SHugh Dickins goto out; 2782e2d12e22SHugh Dickins } 2783e2d12e22SHugh Dickins 27848e205f77SHugh Dickins shmem_falloc.waitq = NULL; 27851aac1400SHugh Dickins shmem_falloc.start = start; 27861aac1400SHugh Dickins shmem_falloc.next = start; 27871aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 27881aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 27891aac1400SHugh Dickins spin_lock(&inode->i_lock); 27901aac1400SHugh Dickins inode->i_private = &shmem_falloc; 27911aac1400SHugh Dickins spin_unlock(&inode->i_lock); 27921aac1400SHugh Dickins 2793e2d12e22SHugh Dickins for (index = start; index < end; index++) { 2794e2d12e22SHugh Dickins struct page *page; 2795e2d12e22SHugh Dickins 2796e2d12e22SHugh Dickins /* 2797e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2798e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2799e2d12e22SHugh Dickins */ 2800e2d12e22SHugh Dickins if (signal_pending(current)) 2801e2d12e22SHugh Dickins error = -EINTR; 28021aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 28031aac1400SHugh Dickins error = -ENOMEM; 2804e2d12e22SHugh Dickins else 28059e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2806e2d12e22SHugh Dickins if (error) { 28071635f6a7SHugh Dickins /* Remove the !PageUptodate pages we added */ 28087f556567SHugh Dickins if (index > start) { 28091635f6a7SHugh Dickins shmem_undo_range(inode, 281009cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT, 2811b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 28127f556567SHugh Dickins } 28131aac1400SHugh Dickins goto undone; 2814e2d12e22SHugh Dickins } 2815e2d12e22SHugh Dickins 2816e2d12e22SHugh Dickins /* 28171aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 28181aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 
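 * (shmem_writepage() samples this cursor under inode->i_lock once it
 * sees inode->i_private set, and bumps nr_unswapped there; this side
 * only ever advances ->next and ->nr_falloced.)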
28191aac1400SHugh Dickins */ 28201aac1400SHugh Dickins shmem_falloc.next++; 28211aac1400SHugh Dickins if (!PageUptodate(page)) 28221aac1400SHugh Dickins shmem_falloc.nr_falloced++; 28231aac1400SHugh Dickins 28241aac1400SHugh Dickins /* 28251635f6a7SHugh Dickins * If !PageUptodate, leave it that way so that freeable pages 28261635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 28271635f6a7SHugh Dickins * But set_page_dirty so that memory pressure will swap rather 2828e2d12e22SHugh Dickins * than free the pages we are allocating (and SGP_CACHE pages 2829e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2830e2d12e22SHugh Dickins */ 2831e2d12e22SHugh Dickins set_page_dirty(page); 2832e2d12e22SHugh Dickins unlock_page(page); 283309cbfeafSKirill A. Shutemov put_page(page); 2834e2d12e22SHugh Dickins cond_resched(); 2835e2d12e22SHugh Dickins } 2836e2d12e22SHugh Dickins 2837e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2838e2d12e22SHugh Dickins i_size_write(inode, offset + len); 2839078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 28401aac1400SHugh Dickins undone: 28411aac1400SHugh Dickins spin_lock(&inode->i_lock); 28421aac1400SHugh Dickins inode->i_private = NULL; 28431aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2844e2d12e22SHugh Dickins out: 28455955102cSAl Viro inode_unlock(inode); 284683e4fa9cSHugh Dickins return error; 284783e4fa9cSHugh Dickins } 284883e4fa9cSHugh Dickins 2849726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 28501da177e4SLinus Torvalds { 2851726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 28521da177e4SLinus Torvalds 28531da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 285409cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE; 28551da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 28560edd73b3SHugh Dickins if (sbinfo->max_blocks) { 28571da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 285841ffe5d5SHugh Dickins buf->f_bavail = 285941ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 286041ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 28610edd73b3SHugh Dickins } 28620edd73b3SHugh Dickins if (sbinfo->max_inodes) { 28631da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 28641da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 28651da177e4SLinus Torvalds } 28661da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 28671da177e4SLinus Torvalds return 0; 28681da177e4SLinus Torvalds } 28691da177e4SLinus Torvalds 28701da177e4SLinus Torvalds /* 28711da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
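 * (There is no backing store to prepare: shmem_get_inode() below does
 * the whole job, and the caller just wires up and pins the dentry.)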
28721da177e4SLinus Torvalds */ 28731da177e4SLinus Torvalds static int 28741a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 28751da177e4SLinus Torvalds { 28760b0a0806SHugh Dickins struct inode *inode; 28771da177e4SLinus Torvalds int error = -ENOSPC; 28781da177e4SLinus Torvalds 2879454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 28801da177e4SLinus Torvalds if (inode) { 2881feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2882feda821eSChristoph Hellwig if (error) 2883feda821eSChristoph Hellwig goto out_iput; 28842a7dba39SEric Paris error = security_inode_init_security(inode, dir, 28859d8f13baSMimi Zohar &dentry->d_name, 28866d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2887feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2888feda821eSChristoph Hellwig goto out_iput; 288937ec43cdSMimi Zohar 2890718deb6bSAl Viro error = 0; 28911da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2892078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 28931da177e4SLinus Torvalds d_instantiate(dentry, inode); 28941da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 28951da177e4SLinus Torvalds } 28961da177e4SLinus Torvalds return error; 2897feda821eSChristoph Hellwig out_iput: 2898feda821eSChristoph Hellwig iput(inode); 2899feda821eSChristoph Hellwig return error; 29001da177e4SLinus Torvalds } 29011da177e4SLinus Torvalds 290260545d0dSAl Viro static int 290360545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 290460545d0dSAl Viro { 290560545d0dSAl Viro struct inode *inode; 290660545d0dSAl Viro int error = -ENOSPC; 290760545d0dSAl Viro 290860545d0dSAl Viro inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 290960545d0dSAl Viro if (inode) { 291060545d0dSAl Viro error = security_inode_init_security(inode, dir, 291160545d0dSAl Viro NULL, 291260545d0dSAl Viro shmem_initxattrs, NULL); 2913feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2914feda821eSChristoph Hellwig goto out_iput; 2915feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2916feda821eSChristoph Hellwig if (error) 2917feda821eSChristoph Hellwig goto out_iput; 291860545d0dSAl Viro d_tmpfile(dentry, inode); 291960545d0dSAl Viro } 292060545d0dSAl Viro return error; 2921feda821eSChristoph Hellwig out_iput: 2922feda821eSChristoph Hellwig iput(inode); 2923feda821eSChristoph Hellwig return error; 292460545d0dSAl Viro } 292560545d0dSAl Viro 292618bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 29271da177e4SLinus Torvalds { 29281da177e4SLinus Torvalds int error; 29291da177e4SLinus Torvalds 29301da177e4SLinus Torvalds if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 29311da177e4SLinus Torvalds return error; 2932d8c76e6fSDave Hansen inc_nlink(dir); 29331da177e4SLinus Torvalds return 0; 29341da177e4SLinus Torvalds } 29351da177e4SLinus Torvalds 29364acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2937ebfc3b49SAl Viro bool excl) 29381da177e4SLinus Torvalds { 29391da177e4SLinus Torvalds return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 29401da177e4SLinus Torvalds } 29411da177e4SLinus Torvalds 29421da177e4SLinus Torvalds /* 29431da177e4SLinus Torvalds * Link a file.. 
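 * (The i_nlink == 0 case below is an O_TMPFILE inode acquiring its
 * first name, roughly like this from userspace, illustrative only
 * and subject to the usual linkat(2) privilege rules:
 *
 *	fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
 *	linkat(fd, "", AT_FDCWD, "/dev/shm/name", AT_EMPTY_PATH);
 *
 * such an inode already took its reservation in shmem_get_inode(),
 * so its first link must not reserve a second one.)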
29441da177e4SLinus Torvalds */ 29451da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 29461da177e4SLinus Torvalds { 294775c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 294829b00e60SDarrick J. Wong int ret = 0; 29491da177e4SLinus Torvalds 29501da177e4SLinus Torvalds /* 29511da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 29521da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 29531da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 29541062af92SDarrick J. Wong * But if an O_TMPFILE file is linked into the tmpfs, the 29551062af92SDarrick J. Wong * first link must skip that, to get the accounting right. 29561da177e4SLinus Torvalds */ 29571062af92SDarrick J. Wong if (inode->i_nlink) { 29585b04c689SPavel Emelyanov ret = shmem_reserve_inode(inode->i_sb); 29595b04c689SPavel Emelyanov if (ret) 29605b04c689SPavel Emelyanov goto out; 29611062af92SDarrick J. Wong } 29621da177e4SLinus Torvalds 29631da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2964078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 2965d8c76e6fSDave Hansen inc_nlink(inode); 29667de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 29671da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 29681da177e4SLinus Torvalds d_instantiate(dentry, inode); 29695b04c689SPavel Emelyanov out: 29705b04c689SPavel Emelyanov return ret; 29711da177e4SLinus Torvalds } 29721da177e4SLinus Torvalds 29731da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 29741da177e4SLinus Torvalds { 297575c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 29761da177e4SLinus Torvalds 29775b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 29785b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 29791da177e4SLinus Torvalds 29801da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 2981078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 29829a53c3a7SDave Hansen drop_nlink(inode); 29831da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 29841da177e4SLinus Torvalds return 0; 29851da177e4SLinus Torvalds } 29861da177e4SLinus Torvalds 29871da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 29881da177e4SLinus Torvalds { 29891da177e4SLinus Torvalds if (!simple_empty(dentry)) 29901da177e4SLinus Torvalds return -ENOTEMPTY; 29911da177e4SLinus Torvalds 299275c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 29939a53c3a7SDave Hansen drop_nlink(dir); 29941da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 29951da177e4SLinus Torvalds } 29961da177e4SLinus Torvalds 299737456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 299837456771SMiklos Szeredi { 2999e36cb0b8SDavid Howells bool old_is_dir = d_is_dir(old_dentry); 3000e36cb0b8SDavid Howells bool new_is_dir = d_is_dir(new_dentry); 300137456771SMiklos Szeredi 300237456771SMiklos Szeredi if (old_dir != new_dir && old_is_dir != new_is_dir) { 300337456771SMiklos Szeredi if (old_is_dir) { 300437456771SMiklos Szeredi drop_nlink(old_dir); 300537456771SMiklos Szeredi inc_nlink(new_dir); 300637456771SMiklos Szeredi } else { 300737456771SMiklos Szeredi drop_nlink(new_dir); 300837456771SMiklos 
Szeredi inc_nlink(old_dir); 300937456771SMiklos Szeredi } 301037456771SMiklos Szeredi } 301137456771SMiklos Szeredi old_dir->i_ctime = old_dir->i_mtime = 301237456771SMiklos Szeredi new_dir->i_ctime = new_dir->i_mtime = 301375c3cfa8SDavid Howells d_inode(old_dentry)->i_ctime = 3014078cd827SDeepa Dinamani d_inode(new_dentry)->i_ctime = current_time(old_dir); 301537456771SMiklos Szeredi 301637456771SMiklos Szeredi return 0; 301737456771SMiklos Szeredi } 301837456771SMiklos Szeredi 301946fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) 302046fdb794SMiklos Szeredi { 302146fdb794SMiklos Szeredi struct dentry *whiteout; 302246fdb794SMiklos Szeredi int error; 302346fdb794SMiklos Szeredi 302446fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 302546fdb794SMiklos Szeredi if (!whiteout) 302646fdb794SMiklos Szeredi return -ENOMEM; 302746fdb794SMiklos Szeredi 302846fdb794SMiklos Szeredi error = shmem_mknod(old_dir, whiteout, 302946fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 303046fdb794SMiklos Szeredi dput(whiteout); 303146fdb794SMiklos Szeredi if (error) 303246fdb794SMiklos Szeredi return error; 303346fdb794SMiklos Szeredi 303446fdb794SMiklos Szeredi /* 303546fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 303646fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 303746fdb794SMiklos Szeredi * 303846fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 303946fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 304046fdb794SMiklos Szeredi */ 304146fdb794SMiklos Szeredi d_rehash(whiteout); 304246fdb794SMiklos Szeredi return 0; 304346fdb794SMiklos Szeredi } 304446fdb794SMiklos Szeredi 30451da177e4SLinus Torvalds /* 30461da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename; 30471da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 30481da177e4SLinus Torvalds * it exists so that the VFS layer correctly frees it when it 30491da177e4SLinus Torvalds * gets overwritten.
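 * (shmem_rename2() below also covers the renameat2(2) flags:
 * RENAME_NOREPLACE is enforced by the VFS before we are called,
 * RENAME_EXCHANGE swaps the two names in place, and RENAME_WHITEOUT
 * leaves a whiteout device node behind for overlayfs.)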
30501da177e4SLinus Torvalds */ 30513b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) 30521da177e4SLinus Torvalds { 305375c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 30541da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 30551da177e4SLinus Torvalds 305646fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 30573b69ff51SMiklos Szeredi return -EINVAL; 30583b69ff51SMiklos Szeredi 305937456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 306037456771SMiklos Szeredi return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 306137456771SMiklos Szeredi 30621da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 30631da177e4SLinus Torvalds return -ENOTEMPTY; 30641da177e4SLinus Torvalds 306546fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 306646fdb794SMiklos Szeredi int error; 306746fdb794SMiklos Szeredi 306846fdb794SMiklos Szeredi error = shmem_whiteout(old_dir, old_dentry); 306946fdb794SMiklos Szeredi if (error) 307046fdb794SMiklos Szeredi return error; 307146fdb794SMiklos Szeredi } 307246fdb794SMiklos Szeredi 307375c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 30741da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 3075b928095bSMiklos Szeredi if (they_are_dirs) { 307675c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 30779a53c3a7SDave Hansen drop_nlink(old_dir); 3078b928095bSMiklos Szeredi } 30791da177e4SLinus Torvalds } else if (they_are_dirs) { 30809a53c3a7SDave Hansen drop_nlink(old_dir); 3081d8c76e6fSDave Hansen inc_nlink(new_dir); 30821da177e4SLinus Torvalds } 30831da177e4SLinus Torvalds 30841da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 30851da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 30861da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 30871da177e4SLinus Torvalds new_dir->i_ctime = new_dir->i_mtime = 3088078cd827SDeepa Dinamani inode->i_ctime = current_time(old_dir); 30891da177e4SLinus Torvalds return 0; 30901da177e4SLinus Torvalds } 30911da177e4SLinus Torvalds 30921da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 30931da177e4SLinus Torvalds { 30941da177e4SLinus Torvalds int error; 30951da177e4SLinus Torvalds int len; 30961da177e4SLinus Torvalds struct inode *inode; 30979276aad6SHugh Dickins struct page *page; 30981da177e4SLinus Torvalds 30991da177e4SLinus Torvalds len = strlen(symname) + 1; 310009cbfeafSKirill A. 
Shutemov if (len > PAGE_SIZE) 31011da177e4SLinus Torvalds return -ENAMETOOLONG; 31021da177e4SLinus Torvalds 31030825a6f9SJoe Perches inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, 31040825a6f9SJoe Perches VM_NORESERVE); 31051da177e4SLinus Torvalds if (!inode) 31061da177e4SLinus Torvalds return -ENOSPC; 31071da177e4SLinus Torvalds 31089d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 31096d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3110570bc1c2SStephen Smalley if (error) { 3111570bc1c2SStephen Smalley if (error != -EOPNOTSUPP) { 3112570bc1c2SStephen Smalley iput(inode); 3113570bc1c2SStephen Smalley return error; 3114570bc1c2SStephen Smalley } 3115570bc1c2SStephen Smalley error = 0; 3116570bc1c2SStephen Smalley } 3117570bc1c2SStephen Smalley 31181da177e4SLinus Torvalds inode->i_size = len-1; 311969f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 31203ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 31213ed47db3SAl Viro if (!inode->i_link) { 312269f07ec9SHugh Dickins iput(inode); 312369f07ec9SHugh Dickins return -ENOMEM; 312469f07ec9SHugh Dickins } 312569f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 31261da177e4SLinus Torvalds } else { 3127e8ecde25SAl Viro inode_nohighmem(inode); 31289e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_WRITE); 31291da177e4SLinus Torvalds if (error) { 31301da177e4SLinus Torvalds iput(inode); 31311da177e4SLinus Torvalds return error; 31321da177e4SLinus Torvalds } 313314fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 31341da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 313521fc61c7SAl Viro memcpy(page_address(page), symname, len); 3136ec9516fbSHugh Dickins SetPageUptodate(page); 31371da177e4SLinus Torvalds set_page_dirty(page); 31386746aff7SWu Fengguang unlock_page(page); 313909cbfeafSKirill A. 
Shutemov put_page(page); 31401da177e4SLinus Torvalds } 31411da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3142078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 31431da177e4SLinus Torvalds d_instantiate(dentry, inode); 31441da177e4SLinus Torvalds dget(dentry); 31451da177e4SLinus Torvalds return 0; 31461da177e4SLinus Torvalds } 31471da177e4SLinus Torvalds 3148fceef393SAl Viro static void shmem_put_link(void *arg) 3149fceef393SAl Viro { 3150fceef393SAl Viro mark_page_accessed(arg); 3151fceef393SAl Viro put_page(arg); 3152fceef393SAl Viro } 3153fceef393SAl Viro 31546b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3155fceef393SAl Viro struct inode *inode, 3156fceef393SAl Viro struct delayed_call *done) 31571da177e4SLinus Torvalds { 31581da177e4SLinus Torvalds struct page *page = NULL; 31596b255391SAl Viro int error; 31606a6c9904SAl Viro if (!dentry) { 31616a6c9904SAl Viro page = find_get_page(inode->i_mapping, 0); 31626a6c9904SAl Viro if (!page) 31636b255391SAl Viro return ERR_PTR(-ECHILD); 31646a6c9904SAl Viro if (!PageUptodate(page)) { 31656a6c9904SAl Viro put_page(page); 31666a6c9904SAl Viro return ERR_PTR(-ECHILD); 31676a6c9904SAl Viro } 31686a6c9904SAl Viro } else { 31699e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_READ); 3170680baacbSAl Viro if (error) 3171680baacbSAl Viro return ERR_PTR(error); 3172d3602444SHugh Dickins unlock_page(page); 31731da177e4SLinus Torvalds } 3174fceef393SAl Viro set_delayed_call(done, shmem_put_link, page); 317521fc61c7SAl Viro return page_address(page); 31761da177e4SLinus Torvalds } 31771da177e4SLinus Torvalds 3178b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3179b09e0fa4SEric Paris /* 3180b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3181b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3182b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3183b09e0fa4SEric Paris * filesystem level, though. 3184b09e0fa4SEric Paris */ 3185b09e0fa4SEric Paris 31866d9d88d0SJarkko Sakkinen /* 31876d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
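 * (Runs at inode creation time: the LSM supplies suffix-only xattr
 * names, so each entry is copied into the inode's in-memory
 * simple_xattrs list with the "security." prefix prepended.)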
31886d9d88d0SJarkko Sakkinen */ 31896d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 31906d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 31916d9d88d0SJarkko Sakkinen void *fs_info) 31926d9d88d0SJarkko Sakkinen { 31936d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 31946d9d88d0SJarkko Sakkinen const struct xattr *xattr; 319538f38657SAristeu Rozanski struct simple_xattr *new_xattr; 31966d9d88d0SJarkko Sakkinen size_t len; 31976d9d88d0SJarkko Sakkinen 31986d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 319938f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 32006d9d88d0SJarkko Sakkinen if (!new_xattr) 32016d9d88d0SJarkko Sakkinen return -ENOMEM; 32026d9d88d0SJarkko Sakkinen 32036d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 32046d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 32056d9d88d0SJarkko Sakkinen GFP_KERNEL); 32066d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 32076d9d88d0SJarkko Sakkinen kfree(new_xattr); 32086d9d88d0SJarkko Sakkinen return -ENOMEM; 32096d9d88d0SJarkko Sakkinen } 32106d9d88d0SJarkko Sakkinen 32116d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 32126d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 32136d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 32146d9d88d0SJarkko Sakkinen xattr->name, len); 32156d9d88d0SJarkko Sakkinen 321638f38657SAristeu Rozanski simple_xattr_list_add(&info->xattrs, new_xattr); 32176d9d88d0SJarkko Sakkinen } 32186d9d88d0SJarkko Sakkinen 32196d9d88d0SJarkko Sakkinen return 0; 32206d9d88d0SJarkko Sakkinen } 32216d9d88d0SJarkko Sakkinen 3222aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3223b296821aSAl Viro struct dentry *unused, struct inode *inode, 3224b296821aSAl Viro const char *name, void *buffer, size_t size) 3225aa7c5241SAndreas Gruenbacher { 3226b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3227aa7c5241SAndreas Gruenbacher 3228aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3229aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3230aa7c5241SAndreas Gruenbacher } 3231aa7c5241SAndreas Gruenbacher 3232aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 323359301226SAl Viro struct dentry *unused, struct inode *inode, 323459301226SAl Viro const char *name, const void *value, 323559301226SAl Viro size_t size, int flags) 3236aa7c5241SAndreas Gruenbacher { 323759301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3238aa7c5241SAndreas Gruenbacher 3239aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3240aa7c5241SAndreas Gruenbacher return simple_xattr_set(&info->xattrs, name, value, size, flags); 3241aa7c5241SAndreas Gruenbacher } 3242aa7c5241SAndreas Gruenbacher 3243aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = { 3244aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3245aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3246aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3247aa7c5241SAndreas Gruenbacher }; 3248aa7c5241SAndreas Gruenbacher 3249aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = { 3250aa7c5241SAndreas Gruenbacher .prefix = XATTR_TRUSTED_PREFIX, 3251aa7c5241SAndreas Gruenbacher 
.get = shmem_xattr_handler_get, 3252aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3253aa7c5241SAndreas Gruenbacher }; 3254aa7c5241SAndreas Gruenbacher 3255b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = { 3256b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 3257feda821eSChristoph Hellwig &posix_acl_access_xattr_handler, 3258feda821eSChristoph Hellwig &posix_acl_default_xattr_handler, 3259b09e0fa4SEric Paris #endif 3260aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler, 3261aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler, 3262b09e0fa4SEric Paris NULL 3263b09e0fa4SEric Paris }; 3264b09e0fa4SEric Paris 3265b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3266b09e0fa4SEric Paris { 326775c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3268786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3269b09e0fa4SEric Paris } 3270b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 3271b09e0fa4SEric Paris 327269f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 32736b255391SAl Viro .get_link = simple_get_link, 3274b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3275b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3276b09e0fa4SEric Paris #endif 32771da177e4SLinus Torvalds }; 32781da177e4SLinus Torvalds 327992e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 32806b255391SAl Viro .get_link = shmem_get_link, 3281b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3282b09e0fa4SEric Paris .listxattr = shmem_listxattr, 328339f0247dSAndreas Gruenbacher #endif 3284b09e0fa4SEric Paris }; 328539f0247dSAndreas Gruenbacher 328691828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child) 328791828a40SDavid M. Grimes { 328891828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 328991828a40SDavid M. Grimes } 329091828a40SDavid M. Grimes 329191828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 329291828a40SDavid M. Grimes { 329391828a40SDavid M. Grimes __u32 *fh = vfh; 329491828a40SDavid M. Grimes __u64 inum = fh[2]; 329591828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 329691828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 329791828a40SDavid M. Grimes } 329891828a40SDavid M. Grimes 329912ba780dSAmir Goldstein /* Find any alias of inode, but prefer a hashed alias */ 330012ba780dSAmir Goldstein static struct dentry *shmem_find_alias(struct inode *inode) 330112ba780dSAmir Goldstein { 330212ba780dSAmir Goldstein struct dentry *alias = d_find_alias(inode); 330312ba780dSAmir Goldstein 330412ba780dSAmir Goldstein return alias ?: d_find_any_alias(inode); 330512ba780dSAmir Goldstein } 330612ba780dSAmir Goldstein 330712ba780dSAmir Goldstein 3308480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3309480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 331091828a40SDavid M. Grimes { 331191828a40SDavid M. Grimes struct inode *inode; 3312480b116cSChristoph Hellwig struct dentry *dentry = NULL; 331335c2a7f4SHugh Dickins u64 inum; 331491828a40SDavid M. 
Grimes 3315480b116cSChristoph Hellwig if (fh_len < 3) 3316480b116cSChristoph Hellwig return NULL; 3317480b116cSChristoph Hellwig 331835c2a7f4SHugh Dickins inum = fid->raw[2]; 331935c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1]; 332035c2a7f4SHugh Dickins 3321480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3322480b116cSChristoph Hellwig shmem_match, fid->raw); 332391828a40SDavid M. Grimes if (inode) { 332412ba780dSAmir Goldstein dentry = shmem_find_alias(inode); 332591828a40SDavid M. Grimes iput(inode); 332691828a40SDavid M. Grimes } 332791828a40SDavid M. Grimes 3328480b116cSChristoph Hellwig return dentry; 332991828a40SDavid M. Grimes } 333091828a40SDavid M. Grimes 3331b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3332b0b0382bSAl Viro struct inode *parent) 333391828a40SDavid M. Grimes { 33345fe0c237SAneesh Kumar K.V if (*len < 3) { 33355fe0c237SAneesh Kumar K.V *len = 3; 333694e07a75SNamjae Jeon return FILEID_INVALID; 33375fe0c237SAneesh Kumar K.V } 333891828a40SDavid M. Grimes 33391d3382cbSAl Viro if (inode_unhashed(inode)) { 334091828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 334191828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 334291828a40SDavid M. Grimes * time, we need a lock to ensure we only try 334391828a40SDavid M. Grimes * to do it once 334491828a40SDavid M. Grimes */ 334591828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 334691828a40SDavid M. Grimes spin_lock(&lock); 33471d3382cbSAl Viro if (inode_unhashed(inode)) 334891828a40SDavid M. Grimes __insert_inode_hash(inode, 334991828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 335091828a40SDavid M. Grimes spin_unlock(&lock); 335191828a40SDavid M. Grimes } 335291828a40SDavid M. Grimes 335391828a40SDavid M. Grimes fh[0] = inode->i_generation; 335491828a40SDavid M. Grimes fh[1] = inode->i_ino; 335591828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 335691828a40SDavid M. Grimes 335791828a40SDavid M. Grimes *len = 3; 335891828a40SDavid M. Grimes return 1; 335991828a40SDavid M. Grimes } 336091828a40SDavid M. Grimes 336139655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 336291828a40SDavid M. Grimes .get_parent = shmem_get_parent, 336391828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 3364480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 336591828a40SDavid M. Grimes }; 336691828a40SDavid M. 
Grimes 3367626c3920SAl Viro enum shmem_param { 3368626c3920SAl Viro Opt_gid, 3369626c3920SAl Viro Opt_huge, 3370626c3920SAl Viro Opt_mode, 3371626c3920SAl Viro Opt_mpol, 3372626c3920SAl Viro Opt_nr_blocks, 3373626c3920SAl Viro Opt_nr_inodes, 3374626c3920SAl Viro Opt_size, 3375626c3920SAl Viro Opt_uid, 3376626c3920SAl Viro }; 33771da177e4SLinus Torvalds 3378626c3920SAl Viro static const struct fs_parameter_spec shmem_param_specs[] = { 3379626c3920SAl Viro fsparam_u32 ("gid", Opt_gid), 3380626c3920SAl Viro fsparam_enum ("huge", Opt_huge), 3381626c3920SAl Viro fsparam_u32oct("mode", Opt_mode), 3382626c3920SAl Viro fsparam_string("mpol", Opt_mpol), 3383626c3920SAl Viro fsparam_string("nr_blocks", Opt_nr_blocks), 3384626c3920SAl Viro fsparam_string("nr_inodes", Opt_nr_inodes), 3385626c3920SAl Viro fsparam_string("size", Opt_size), 3386626c3920SAl Viro fsparam_u32 ("uid", Opt_uid), 3387626c3920SAl Viro {} 3388626c3920SAl Viro }; 3389626c3920SAl Viro 3390626c3920SAl Viro static const struct fs_parameter_enum shmem_param_enums[] = { 3391626c3920SAl Viro { Opt_huge, "never", SHMEM_HUGE_NEVER }, 3392626c3920SAl Viro { Opt_huge, "always", SHMEM_HUGE_ALWAYS }, 3393626c3920SAl Viro { Opt_huge, "within_size", SHMEM_HUGE_WITHIN_SIZE }, 3394626c3920SAl Viro { Opt_huge, "advise", SHMEM_HUGE_ADVISE }, 3395626c3920SAl Viro {} 3396626c3920SAl Viro }; 3397626c3920SAl Viro 3398626c3920SAl Viro const struct fs_parameter_description shmem_fs_parameters = { 3399626c3920SAl Viro .name = "tmpfs", 3400626c3920SAl Viro .specs = shmem_param_specs, 3401626c3920SAl Viro .enums = shmem_param_enums, 3402626c3920SAl Viro }; 3403626c3920SAl Viro 3404*f3235626SDavid Howells static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 3405626c3920SAl Viro { 3406*f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3407626c3920SAl Viro struct fs_parse_result result; 3408e04dc423SAl Viro unsigned long long size; 3409626c3920SAl Viro char *rest; 3410626c3920SAl Viro int opt; 3411626c3920SAl Viro 3412626c3920SAl Viro opt = fs_parse(fc, &shmem_fs_parameters, param, &result); 3413*f3235626SDavid Howells if (opt < 0) 3414626c3920SAl Viro return opt; 3415626c3920SAl Viro 3416626c3920SAl Viro switch (opt) { 3417626c3920SAl Viro case Opt_size: 3418626c3920SAl Viro size = memparse(param->string, &rest); 3419e04dc423SAl Viro if (*rest == '%') { 3420e04dc423SAl Viro size <<= PAGE_SHIFT; 3421e04dc423SAl Viro size *= totalram_pages(); 3422e04dc423SAl Viro do_div(size, 100); 3423e04dc423SAl Viro rest++; 3424e04dc423SAl Viro } 3425e04dc423SAl Viro if (*rest) 3426626c3920SAl Viro goto bad_value; 3427e04dc423SAl Viro ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 3428e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3429626c3920SAl Viro break; 3430626c3920SAl Viro case Opt_nr_blocks: 3431626c3920SAl Viro ctx->blocks = memparse(param->string, &rest); 3432e04dc423SAl Viro if (*rest) 3433626c3920SAl Viro goto bad_value; 3434e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_BLOCKS; 3435626c3920SAl Viro break; 3436626c3920SAl Viro case Opt_nr_inodes: 3437626c3920SAl Viro ctx->inodes = memparse(param->string, &rest); 3438e04dc423SAl Viro if (*rest) 3439626c3920SAl Viro goto bad_value; 3440e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_INODES; 3441626c3920SAl Viro break; 3442626c3920SAl Viro case Opt_mode: 3443626c3920SAl Viro ctx->mode = result.uint_32 & 07777; 3444626c3920SAl Viro break; 3445626c3920SAl Viro case Opt_uid: 3446626c3920SAl Viro ctx->uid = make_kuid(current_user_ns(), result.uint_32); 3447e04dc423SAl Viro if 
(!uid_valid(ctx->uid)) 3448626c3920SAl Viro goto bad_value; 3449626c3920SAl Viro break; 3450626c3920SAl Viro case Opt_gid: 3451626c3920SAl Viro ctx->gid = make_kgid(current_user_ns(), result.uint_32); 3452e04dc423SAl Viro if (!gid_valid(ctx->gid)) 3453626c3920SAl Viro goto bad_value; 3454626c3920SAl Viro break; 3455626c3920SAl Viro case Opt_huge: 3456626c3920SAl Viro ctx->huge = result.uint_32; 3457626c3920SAl Viro if (ctx->huge != SHMEM_HUGE_NEVER && 3458626c3920SAl Viro !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 3459626c3920SAl Viro has_transparent_hugepage())) 3460626c3920SAl Viro goto unsupported_parameter; 3461e04dc423SAl Viro ctx->seen |= SHMEM_SEEN_HUGE; 3462626c3920SAl Viro break; 3463626c3920SAl Viro case Opt_mpol: 3464626c3920SAl Viro if (IS_ENABLED(CONFIG_NUMA)) { 3465e04dc423SAl Viro mpol_put(ctx->mpol); 3466e04dc423SAl Viro ctx->mpol = NULL; 3467626c3920SAl Viro if (mpol_parse_str(param->string, &ctx->mpol)) 3468626c3920SAl Viro goto bad_value; 3469626c3920SAl Viro break; 3470626c3920SAl Viro } 3471626c3920SAl Viro goto unsupported_parameter; 3472e04dc423SAl Viro } 3473e04dc423SAl Viro return 0; 3474e04dc423SAl Viro 3475626c3920SAl Viro unsupported_parameter: 3476626c3920SAl Viro return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key); 3477626c3920SAl Viro bad_value: 3478626c3920SAl Viro return invalf(fc, "tmpfs: Bad value for '%s'", param->key); 3479e04dc423SAl Viro } 3480e04dc423SAl Viro 3481*f3235626SDavid Howells static int shmem_parse_options(struct fs_context *fc, void *data) 3482e04dc423SAl Viro { 3483*f3235626SDavid Howells char *options = data; 3484*f3235626SDavid Howells 3485b00dc3adSHugh Dickins while (options != NULL) { 3486626c3920SAl Viro char *this_char = options; 3487b00dc3adSHugh Dickins for (;;) { 3488b00dc3adSHugh Dickins /* 3489b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 3490b00dc3adSHugh Dickins * mount options form a comma-separated list, 3491b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 3492b00dc3adSHugh Dickins */ 3493b00dc3adSHugh Dickins options = strchr(options, ','); 3494b00dc3adSHugh Dickins if (options == NULL) 3495b00dc3adSHugh Dickins break; 3496b00dc3adSHugh Dickins options++; 3497b00dc3adSHugh Dickins if (!isdigit(*options)) { 3498b00dc3adSHugh Dickins options[-1] = '\0'; 3499b00dc3adSHugh Dickins break; 3500b00dc3adSHugh Dickins } 3501b00dc3adSHugh Dickins } 3502626c3920SAl Viro if (*this_char) { 3503626c3920SAl Viro char *value = strchr(this_char,'='); 3504*f3235626SDavid Howells size_t len = 0; 3505626c3920SAl Viro int err; 3506626c3920SAl Viro 3507626c3920SAl Viro if (value) { 3508626c3920SAl Viro *value++ = '\0'; 3509*f3235626SDavid Howells len = strlen(value); 35101da177e4SLinus Torvalds } 3511*f3235626SDavid Howells err = vfs_parse_fs_string(fc, this_char, value, len); 3512*f3235626SDavid Howells if (err < 0) 3513*f3235626SDavid Howells return err; 35141da177e4SLinus Torvalds } 3515626c3920SAl Viro } 35161da177e4SLinus Torvalds return 0; 35171da177e4SLinus Torvalds } 35181da177e4SLinus Torvalds 3519*f3235626SDavid Howells /* 3520*f3235626SDavid Howells * Reconfigure a shmem filesystem. 3521*f3235626SDavid Howells * 3522*f3235626SDavid Howells * Note that we disallow change from limited->unlimited blocks/inodes while any 3523*f3235626SDavid Howells * are in use; but we must separately disallow unlimited->limited, because in 3524*f3235626SDavid Howells * that case we have no record of how much is already in use. 
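 *
 * For example (illustrative): a tmpfs mounted with "size=1G" can be
 * grown later with "mount -o remount,size=2G <mountpoint>", but
 * remounting an instance that was created without a size limit to a
 * finite size is rejected ("Cannot retroactively limit size"), and a
 * new limit below the space already in use is rejected as well.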
3525*f3235626SDavid Howells */ 3526*f3235626SDavid Howells static int shmem_reconfigure(struct fs_context *fc) 35271da177e4SLinus Torvalds { 3528*f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3529*f3235626SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 35300edd73b3SHugh Dickins unsigned long inodes; 3531*f3235626SDavid Howells const char *err; 35320edd73b3SHugh Dickins 35330edd73b3SHugh Dickins spin_lock(&sbinfo->stat_lock); 35340edd73b3SHugh Dickins inodes = sbinfo->max_inodes - sbinfo->free_inodes; 3535*f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 3536*f3235626SDavid Howells if (!sbinfo->max_blocks) { 3537*f3235626SDavid Howells err = "Cannot retroactively limit size"; 35380edd73b3SHugh Dickins goto out; 35390b5071ddSAl Viro } 3540*f3235626SDavid Howells if (percpu_counter_compare(&sbinfo->used_blocks, 3541*f3235626SDavid Howells ctx->blocks) > 0) { 3542*f3235626SDavid Howells err = "Too small a size for current use"; 35430b5071ddSAl Viro goto out; 3544*f3235626SDavid Howells } 3545*f3235626SDavid Howells } 3546*f3235626SDavid Howells if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 3547*f3235626SDavid Howells if (!sbinfo->max_inodes) { 3548*f3235626SDavid Howells err = "Cannot retroactively limit inodes"; 35490b5071ddSAl Viro goto out; 35500b5071ddSAl Viro } 3551*f3235626SDavid Howells if (ctx->inodes < inodes) { 3552*f3235626SDavid Howells err = "Too few inodes for current use"; 3553*f3235626SDavid Howells goto out; 3554*f3235626SDavid Howells } 3555*f3235626SDavid Howells } 35560edd73b3SHugh Dickins 3557*f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_HUGE) 3558*f3235626SDavid Howells sbinfo->huge = ctx->huge; 3559*f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_BLOCKS) 3560*f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 3561*f3235626SDavid Howells if (ctx->seen & SHMEM_SEEN_INODES) { 3562*f3235626SDavid Howells sbinfo->max_inodes = ctx->inodes; 3563*f3235626SDavid Howells sbinfo->free_inodes = ctx->inodes - inodes; 35640b5071ddSAl Viro } 356571fe804bSLee Schermerhorn 35665f00110fSGreg Thelen /* 35675f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified. 35685f00110fSGreg Thelen */ 3569*f3235626SDavid Howells if (ctx->mpol) { 357071fe804bSLee Schermerhorn mpol_put(sbinfo->mpol); 3571*f3235626SDavid Howells sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 3572*f3235626SDavid Howells ctx->mpol = NULL; 35735f00110fSGreg Thelen } 3574*f3235626SDavid Howells spin_unlock(&sbinfo->stat_lock); 3575*f3235626SDavid Howells return 0; 35760edd73b3SHugh Dickins out: 35770edd73b3SHugh Dickins spin_unlock(&sbinfo->stat_lock); 3578*f3235626SDavid Howells return invalf(fc, "tmpfs: %s", err); 35791da177e4SLinus Torvalds } 3580680d794bSakpm@linux-foundation.org 358134c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3582680d794bSakpm@linux-foundation.org { 358334c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 3584680d794bSakpm@linux-foundation.org 3585680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks()) 3586680d794bSakpm@linux-foundation.org seq_printf(seq, ",size=%luk", 358709cbfeafSKirill A. 
Shutemov sbinfo->max_blocks << (PAGE_SHIFT - 10)); 3588680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes()) 3589680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 35900825a6f9SJoe Perches if (sbinfo->mode != (0777 | S_ISVTX)) 359109208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode); 35928751e039SEric W. Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 35938751e039SEric W. Biederman seq_printf(seq, ",uid=%u", 35948751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid)); 35958751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 35968751e039SEric W. Biederman seq_printf(seq, ",gid=%u", 35978751e039SEric W. Biederman from_kgid_munged(&init_user_ns, sbinfo->gid)); 3598e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 35995a6e75f8SKirill A. Shutemov /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 36005a6e75f8SKirill A. Shutemov if (sbinfo->huge) 36015a6e75f8SKirill A. Shutemov seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 36025a6e75f8SKirill A. Shutemov #endif 360371fe804bSLee Schermerhorn shmem_show_mpol(seq, sbinfo->mpol); 3604680d794bSakpm@linux-foundation.org return 0; 3605680d794bSakpm@linux-foundation.org } 36069183df25SDavid Herrmann 3607680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */ 36081da177e4SLinus Torvalds 36091da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb) 36101da177e4SLinus Torvalds { 3611602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3612602586a8SHugh Dickins 3613602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks); 361449cd0a5cSGreg Thelen mpol_put(sbinfo->mpol); 3615602586a8SHugh Dickins kfree(sbinfo); 36161da177e4SLinus Torvalds sb->s_fs_info = NULL; 36171da177e4SLinus Torvalds } 36181da177e4SLinus Torvalds 3619*f3235626SDavid Howells static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) 36201da177e4SLinus Torvalds { 3621*f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 36221da177e4SLinus Torvalds struct inode *inode; 36230edd73b3SHugh Dickins struct shmem_sb_info *sbinfo; 3624680d794bSakpm@linux-foundation.org int err = -ENOMEM; 3625680d794bSakpm@linux-foundation.org 3626680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */ 3627425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 3628680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL); 3629680d794bSakpm@linux-foundation.org if (!sbinfo) 3630680d794bSakpm@linux-foundation.org return -ENOMEM; 3631680d794bSakpm@linux-foundation.org 3632680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo; 36331da177e4SLinus Torvalds 36340edd73b3SHugh Dickins #ifdef CONFIG_TMPFS 36351da177e4SLinus Torvalds /* 36361da177e4SLinus Torvalds * Per default we only allow half of the physical ram per 36371da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem; 36381da177e4SLinus Torvalds * but the internal instance is left unlimited. 
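 *
 * For example (illustrative numbers): with 8 GiB of RAM and 4 KiB
 * pages, shmem_default_max_blocks() allows half of totalram, about
 * 1M blocks, so an unconfigured user mount behaves like "size=4G".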
36391da177e4SLinus Torvalds */ 36401751e8a6SLinus Torvalds if (!(sb->s_flags & SB_KERNMOUNT)) { 3641*f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 3642*f3235626SDavid Howells ctx->blocks = shmem_default_max_blocks(); 3643*f3235626SDavid Howells if (!(ctx->seen & SHMEM_SEEN_INODES)) 3644*f3235626SDavid Howells ctx->inodes = shmem_default_max_inodes(); 3645ca4e0519SAl Viro } else { 36461751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 36471da177e4SLinus Torvalds } 364891828a40SDavid M. Grimes sb->s_export_op = &shmem_export_ops; 36491751e8a6SLinus Torvalds sb->s_flags |= SB_NOSEC; 36500edd73b3SHugh Dickins #else 36511751e8a6SLinus Torvalds sb->s_flags |= SB_NOUSER; 36520edd73b3SHugh Dickins #endif 3653*f3235626SDavid Howells sbinfo->max_blocks = ctx->blocks; 3654*f3235626SDavid Howells sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; 3655*f3235626SDavid Howells sbinfo->uid = ctx->uid; 3656*f3235626SDavid Howells sbinfo->gid = ctx->gid; 3657*f3235626SDavid Howells sbinfo->mode = ctx->mode; 3658*f3235626SDavid Howells sbinfo->huge = ctx->huge; 3659*f3235626SDavid Howells sbinfo->mpol = ctx->mpol; 3660*f3235626SDavid Howells ctx->mpol = NULL; 36611da177e4SLinus Torvalds 36621da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 3663908c7f19STejun Heo if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3664602586a8SHugh Dickins goto failed; 3665779750d2SKirill A. Shutemov spin_lock_init(&sbinfo->shrinklist_lock); 3666779750d2SKirill A. Shutemov INIT_LIST_HEAD(&sbinfo->shrinklist); 36671da177e4SLinus Torvalds 3668285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE; 366909cbfeafSKirill A. Shutemov sb->s_blocksize = PAGE_SIZE; 367009cbfeafSKirill A. Shutemov sb->s_blocksize_bits = PAGE_SHIFT; 36711da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC; 36721da177e4SLinus Torvalds sb->s_op = &shmem_ops; 3673cfd95a9cSRobin H. 
Johnson sb->s_time_gran = 1; 3674b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 367539f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers; 3676b09e0fa4SEric Paris #endif 3677b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 36781751e8a6SLinus Torvalds sb->s_flags |= SB_POSIXACL; 367939f0247dSAndreas Gruenbacher #endif 36802b4db796SAmir Goldstein uuid_gen(&sb->s_uuid); 36810edd73b3SHugh Dickins 3682454abafeSDmitry Monakhov inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 36831da177e4SLinus Torvalds if (!inode) 36841da177e4SLinus Torvalds goto failed; 3685680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid; 3686680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid; 3687318ceed0SAl Viro sb->s_root = d_make_root(inode); 3688318ceed0SAl Viro if (!sb->s_root) 368948fde701SAl Viro goto failed; 36901da177e4SLinus Torvalds return 0; 36911da177e4SLinus Torvalds 36921da177e4SLinus Torvalds failed: 36931da177e4SLinus Torvalds shmem_put_super(sb); 36941da177e4SLinus Torvalds return err; 36951da177e4SLinus Torvalds } 36961da177e4SLinus Torvalds 3697*f3235626SDavid Howells static int shmem_get_tree(struct fs_context *fc) 3698*f3235626SDavid Howells { 3699*f3235626SDavid Howells return get_tree_nodev(fc, shmem_fill_super); 3700*f3235626SDavid Howells } 3701*f3235626SDavid Howells 3702*f3235626SDavid Howells static void shmem_free_fc(struct fs_context *fc) 3703*f3235626SDavid Howells { 3704*f3235626SDavid Howells struct shmem_options *ctx = fc->fs_private; 3705*f3235626SDavid Howells 3706*f3235626SDavid Howells if (ctx) { 3707*f3235626SDavid Howells mpol_put(ctx->mpol); 3708*f3235626SDavid Howells kfree(ctx); 3709*f3235626SDavid Howells } 3710*f3235626SDavid Howells } 3711*f3235626SDavid Howells 3712*f3235626SDavid Howells static const struct fs_context_operations shmem_fs_context_ops = { 3713*f3235626SDavid Howells .free = shmem_free_fc, 3714*f3235626SDavid Howells .get_tree = shmem_get_tree, 3715*f3235626SDavid Howells #ifdef CONFIG_TMPFS 3716*f3235626SDavid Howells .parse_monolithic = shmem_parse_options, 3717*f3235626SDavid Howells .parse_param = shmem_parse_one, 3718*f3235626SDavid Howells .reconfigure = shmem_reconfigure, 3719*f3235626SDavid Howells #endif 3720*f3235626SDavid Howells }; 3721*f3235626SDavid Howells 3722fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep; 37231da177e4SLinus Torvalds 37241da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb) 37251da177e4SLinus Torvalds { 372641ffe5d5SHugh Dickins struct shmem_inode_info *info; 372741ffe5d5SHugh Dickins info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 372841ffe5d5SHugh Dickins if (!info) 37291da177e4SLinus Torvalds return NULL; 373041ffe5d5SHugh Dickins return &info->vfs_inode; 37311da177e4SLinus Torvalds } 37321da177e4SLinus Torvalds 373374b1da56SAl Viro static void shmem_free_in_core_inode(struct inode *inode) 3734fa0d7e3dSNick Piggin { 373584e710daSAl Viro if (S_ISLNK(inode->i_mode)) 37363ed47db3SAl Viro kfree(inode->i_link); 3737fa0d7e3dSNick Piggin kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 3738fa0d7e3dSNick Piggin } 3739fa0d7e3dSNick Piggin 37401da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode) 37411da177e4SLinus Torvalds { 374209208d15SAl Viro if (S_ISREG(inode->i_mode)) 37431da177e4SLinus Torvalds mpol_free_shared_policy(&SHMEM_I(inode)->policy); 37441da177e4SLinus Torvalds } 37451da177e4SLinus Torvalds 374641ffe5d5SHugh Dickins static void shmem_init_inode(void *foo) 37471da177e4SLinus 
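/*
 * Slab constructor: runs once when each cache object is first created,
 * not on every allocation, so only the embedded struct inode needs
 * initializing here; see shmem_init_inodecache() below.
 */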
Torvalds { 374841ffe5d5SHugh Dickins struct shmem_inode_info *info = foo; 374941ffe5d5SHugh Dickins inode_init_once(&info->vfs_inode); 37501da177e4SLinus Torvalds } 37511da177e4SLinus Torvalds 37529a8ec03eSweiping zhang static void shmem_init_inodecache(void) 37531da177e4SLinus Torvalds { 37541da177e4SLinus Torvalds shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 37551da177e4SLinus Torvalds sizeof(struct shmem_inode_info), 37565d097056SVladimir Davydov 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 37571da177e4SLinus Torvalds } 37581da177e4SLinus Torvalds 375941ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void) 37601da177e4SLinus Torvalds { 37611a1d92c1SAlexey Dobriyan kmem_cache_destroy(shmem_inode_cachep); 37621da177e4SLinus Torvalds } 37631da177e4SLinus Torvalds 3764f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = { 37651da177e4SLinus Torvalds .writepage = shmem_writepage, 376676719325SKen Chen .set_page_dirty = __set_page_dirty_no_writeback, 37671da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3768800d15a5SNick Piggin .write_begin = shmem_write_begin, 3769800d15a5SNick Piggin .write_end = shmem_write_end, 37701da177e4SLinus Torvalds #endif 37711c93923cSAndrew Morton #ifdef CONFIG_MIGRATION 3772304dbdb7SLee Schermerhorn .migratepage = migrate_page, 37731c93923cSAndrew Morton #endif 3774aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 37751da177e4SLinus Torvalds }; 37761da177e4SLinus Torvalds 377715ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = { 37781da177e4SLinus Torvalds .mmap = shmem_mmap, 3779c01d5b30SHugh Dickins .get_unmapped_area = shmem_get_unmapped_area, 37801da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3781220f2ac9SHugh Dickins .llseek = shmem_file_llseek, 37822ba5bbedSAl Viro .read_iter = shmem_file_read_iter, 37838174202bSAl Viro .write_iter = generic_file_write_iter, 37841b061d92SChristoph Hellwig .fsync = noop_fsync, 378582c156f8SAl Viro .splice_read = generic_file_splice_read, 3786f6cb85d0SAl Viro .splice_write = iter_file_splice_write, 378783e4fa9cSHugh Dickins .fallocate = shmem_fallocate, 37881da177e4SLinus Torvalds #endif 37891da177e4SLinus Torvalds }; 37901da177e4SLinus Torvalds 379192e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = { 379244a30220SYu Zhao .getattr = shmem_getattr, 379394c1e62dSHugh Dickins .setattr = shmem_setattr, 3794b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3795b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3796feda821eSChristoph Hellwig .set_acl = simple_set_acl, 3797b09e0fa4SEric Paris #endif 37981da177e4SLinus Torvalds }; 37991da177e4SLinus Torvalds 380092e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = { 38011da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 38021da177e4SLinus Torvalds .create = shmem_create, 38031da177e4SLinus Torvalds .lookup = simple_lookup, 38041da177e4SLinus Torvalds .link = shmem_link, 38051da177e4SLinus Torvalds .unlink = shmem_unlink, 38061da177e4SLinus Torvalds .symlink = shmem_symlink, 38071da177e4SLinus Torvalds .mkdir = shmem_mkdir, 38081da177e4SLinus Torvalds .rmdir = shmem_rmdir, 38091da177e4SLinus Torvalds .mknod = shmem_mknod, 38102773bf00SMiklos Szeredi .rename = shmem_rename2, 381160545d0dSAl Viro .tmpfile = shmem_tmpfile, 38121da177e4SLinus Torvalds #endif 3813b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3814b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3815b09e0fa4SEric Paris #endif 381639f0247dSAndreas Gruenbacher #ifdef 
CONFIG_TMPFS_POSIX_ACL 381794c1e62dSHugh Dickins .setattr = shmem_setattr, 3818feda821eSChristoph Hellwig .set_acl = simple_set_acl, 381939f0247dSAndreas Gruenbacher #endif 382039f0247dSAndreas Gruenbacher }; 382139f0247dSAndreas Gruenbacher 382292e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = { 3823b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3824b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3825b09e0fa4SEric Paris #endif 382639f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 382794c1e62dSHugh Dickins .setattr = shmem_setattr, 3828feda821eSChristoph Hellwig .set_acl = simple_set_acl, 382939f0247dSAndreas Gruenbacher #endif 38301da177e4SLinus Torvalds }; 38311da177e4SLinus Torvalds 3832759b9775SHugh Dickins static const struct super_operations shmem_ops = { 38331da177e4SLinus Torvalds .alloc_inode = shmem_alloc_inode, 383474b1da56SAl Viro .free_inode = shmem_free_in_core_inode, 38351da177e4SLinus Torvalds .destroy_inode = shmem_destroy_inode, 38361da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 38371da177e4SLinus Torvalds .statfs = shmem_statfs, 3838680d794bSakpm@linux-foundation.org .show_options = shmem_show_options, 38391da177e4SLinus Torvalds #endif 38401f895f75SAl Viro .evict_inode = shmem_evict_inode, 38411da177e4SLinus Torvalds .drop_inode = generic_delete_inode, 38421da177e4SLinus Torvalds .put_super = shmem_put_super, 3843779750d2SKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3844779750d2SKirill A. Shutemov .nr_cached_objects = shmem_unused_huge_count, 3845779750d2SKirill A. Shutemov .free_cached_objects = shmem_unused_huge_scan, 3846779750d2SKirill A. Shutemov #endif 38471da177e4SLinus Torvalds }; 38481da177e4SLinus Torvalds 3849f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = { 385054cb8821SNick Piggin .fault = shmem_fault, 3851d7c17551SNing Qu .map_pages = filemap_map_pages, 38521da177e4SLinus Torvalds #ifdef CONFIG_NUMA 38531da177e4SLinus Torvalds .set_policy = shmem_set_policy, 38541da177e4SLinus Torvalds .get_policy = shmem_get_policy, 38551da177e4SLinus Torvalds #endif 38561da177e4SLinus Torvalds }; 38571da177e4SLinus Torvalds 3858*f3235626SDavid Howells int shmem_init_fs_context(struct fs_context *fc) 38591da177e4SLinus Torvalds { 3860*f3235626SDavid Howells struct shmem_options *ctx; 3861*f3235626SDavid Howells 3862*f3235626SDavid Howells ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); 3863*f3235626SDavid Howells if (!ctx) 3864*f3235626SDavid Howells return -ENOMEM; 3865*f3235626SDavid Howells 3866*f3235626SDavid Howells ctx->mode = 0777 | S_ISVTX; 3867*f3235626SDavid Howells ctx->uid = current_fsuid(); 3868*f3235626SDavid Howells ctx->gid = current_fsgid(); 3869*f3235626SDavid Howells 3870*f3235626SDavid Howells fc->fs_private = ctx; 3871*f3235626SDavid Howells fc->ops = &shmem_fs_context_ops; 3872*f3235626SDavid Howells return 0; 38731da177e4SLinus Torvalds } 38741da177e4SLinus Torvalds 387541ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 38761da177e4SLinus Torvalds .owner = THIS_MODULE, 38771da177e4SLinus Torvalds .name = "tmpfs", 3878*f3235626SDavid Howells .init_fs_context = shmem_init_fs_context, 3879*f3235626SDavid Howells #ifdef CONFIG_TMPFS 3880*f3235626SDavid Howells .parameters = &shmem_fs_parameters, 3881*f3235626SDavid Howells #endif 38821da177e4SLinus Torvalds .kill_sb = kill_litter_super, 38832b8576cbSEric W. 
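/* FS_USERNS_MOUNT lets unprivileged users mount tmpfs inside user namespaces */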
Biederman .fs_flags = FS_USERNS_MOUNT, 38841da177e4SLinus Torvalds }; 38851da177e4SLinus Torvalds 388641ffe5d5SHugh Dickins int __init shmem_init(void) 38871da177e4SLinus Torvalds { 38881da177e4SLinus Torvalds int error; 38891da177e4SLinus Torvalds 38909a8ec03eSweiping zhang shmem_init_inodecache(); 38911da177e4SLinus Torvalds 389241ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type); 38931da177e4SLinus Torvalds if (error) { 38941170532bSJoe Perches pr_err("Could not register tmpfs\n"); 38951da177e4SLinus Torvalds goto out2; 38961da177e4SLinus Torvalds } 389795dc112aSGreg Kroah-Hartman 3898ca4e0519SAl Viro shm_mnt = kern_mount(&shmem_fs_type); 38991da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) { 39001da177e4SLinus Torvalds error = PTR_ERR(shm_mnt); 39011170532bSJoe Perches pr_err("Could not kern_mount tmpfs\n"); 39021da177e4SLinus Torvalds goto out1; 39031da177e4SLinus Torvalds } 39045a6e75f8SKirill A. Shutemov 3905e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3906435c0b87SKirill A. Shutemov if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 39075a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 39085a6e75f8SKirill A. Shutemov else 39095a6e75f8SKirill A. Shutemov shmem_huge = 0; /* just in case it was patched */ 39105a6e75f8SKirill A. Shutemov #endif 39111da177e4SLinus Torvalds return 0; 39121da177e4SLinus Torvalds 39131da177e4SLinus Torvalds out1: 391441ffe5d5SHugh Dickins unregister_filesystem(&shmem_fs_type); 39151da177e4SLinus Torvalds out2: 391641ffe5d5SHugh Dickins shmem_destroy_inodecache(); 39171da177e4SLinus Torvalds shm_mnt = ERR_PTR(error); 39181da177e4SLinus Torvalds return error; 39191da177e4SLinus Torvalds } 3920853ac43aSMatt Mackall 3921e496cf3dSKirill A. Shutemov #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) 39225a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj, 39235a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 39245a6e75f8SKirill A. Shutemov { 39255a6e75f8SKirill A. Shutemov int values[] = { 39265a6e75f8SKirill A. Shutemov SHMEM_HUGE_ALWAYS, 39275a6e75f8SKirill A. Shutemov SHMEM_HUGE_WITHIN_SIZE, 39285a6e75f8SKirill A. Shutemov SHMEM_HUGE_ADVISE, 39295a6e75f8SKirill A. Shutemov SHMEM_HUGE_NEVER, 39305a6e75f8SKirill A. Shutemov SHMEM_HUGE_DENY, 39315a6e75f8SKirill A. Shutemov SHMEM_HUGE_FORCE, 39325a6e75f8SKirill A. Shutemov }; 39335a6e75f8SKirill A. Shutemov int i, count; 39345a6e75f8SKirill A. Shutemov 39355a6e75f8SKirill A. Shutemov for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) { 39365a6e75f8SKirill A. Shutemov const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s "; 39375a6e75f8SKirill A. Shutemov 39385a6e75f8SKirill A. Shutemov count += sprintf(buf + count, fmt, 39395a6e75f8SKirill A. Shutemov shmem_format_huge(values[i])); 39405a6e75f8SKirill A. Shutemov } 39415a6e75f8SKirill A. Shutemov buf[count - 1] = '\n'; 39425a6e75f8SKirill A. Shutemov return count; 39435a6e75f8SKirill A. Shutemov } 39445a6e75f8SKirill A. Shutemov 39455a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj, 39465a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 39475a6e75f8SKirill A. Shutemov { 39485a6e75f8SKirill A. Shutemov char tmp[16]; 39495a6e75f8SKirill A. Shutemov int huge; 39505a6e75f8SKirill A. Shutemov 39515a6e75f8SKirill A. Shutemov if (count + 1 > sizeof(tmp)) 39525a6e75f8SKirill A. Shutemov return -EINVAL; 39535a6e75f8SKirill A. 
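/*
 * Copy the user string into a NUL-terminated stack buffer and strip
 * any trailing newline before handing it to shmem_parse_huge().
 */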
Shutemov memcpy(tmp, buf, count); 39545a6e75f8SKirill A. Shutemov tmp[count] = '\0'; 39555a6e75f8SKirill A. Shutemov if (count && tmp[count - 1] == '\n') 39565a6e75f8SKirill A. Shutemov tmp[count - 1] = '\0'; 39575a6e75f8SKirill A. Shutemov 39585a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(tmp); 39595a6e75f8SKirill A. Shutemov if (huge == -EINVAL) 39605a6e75f8SKirill A. Shutemov return -EINVAL; 39615a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 39625a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 39635a6e75f8SKirill A. Shutemov return -EINVAL; 39645a6e75f8SKirill A. Shutemov 39655a6e75f8SKirill A. Shutemov shmem_huge = huge; 3966435c0b87SKirill A. Shutemov if (shmem_huge > SHMEM_HUGE_DENY) 39675a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 39685a6e75f8SKirill A. Shutemov return count; 39695a6e75f8SKirill A. Shutemov } 39705a6e75f8SKirill A. Shutemov 39715a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr = 39725a6e75f8SKirill A. Shutemov __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 39733b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 3974f3f0e1d2SKirill A. Shutemov 39753b33719cSArnd Bergmann #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3976f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma) 3977f3f0e1d2SKirill A. Shutemov { 3978f3f0e1d2SKirill A. Shutemov struct inode *inode = file_inode(vma->vm_file); 3979f3f0e1d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3980f3f0e1d2SKirill A. Shutemov loff_t i_size; 3981f3f0e1d2SKirill A. Shutemov pgoff_t off; 3982f3f0e1d2SKirill A. Shutemov 3983c0630669SYang Shi if ((vma->vm_flags & VM_NOHUGEPAGE) || 3984c0630669SYang Shi test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 3985c0630669SYang Shi return false; 3986f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 3987f3f0e1d2SKirill A. Shutemov return true; 3988f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY) 3989f3f0e1d2SKirill A. Shutemov return false; 3990f3f0e1d2SKirill A. Shutemov switch (sbinfo->huge) { 3991f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_NEVER: 3992f3f0e1d2SKirill A. Shutemov return false; 3993f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ALWAYS: 3994f3f0e1d2SKirill A. Shutemov return true; 3995f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 3996f3f0e1d2SKirill A. Shutemov off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); 3997f3f0e1d2SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 3998f3f0e1d2SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 3999f3f0e1d2SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 4000f3f0e1d2SKirill A. Shutemov return true; 4001c8402871SGustavo A. R. Silva /* fall through */ 4002f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 4003f3f0e1d2SKirill A. Shutemov /* TODO: implement fadvise() hints */ 4004f3f0e1d2SKirill A. Shutemov return (vma->vm_flags & VM_HUGEPAGE); 4005f3f0e1d2SKirill A. Shutemov default: 4006f3f0e1d2SKirill A. Shutemov VM_BUG_ON(1); 4007f3f0e1d2SKirill A. Shutemov return false; 4008f3f0e1d2SKirill A. Shutemov } 4009f3f0e1d2SKirill A. Shutemov } 40103b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 40115a6e75f8SKirill A. 
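/*
 * Usage sketch for the knob above (the sysfs path is an assumption,
 * based on where shmem_enabled_attr is registered outside this file):
 *
 *   # cat /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *   always within_size advise [never] deny force
 *   # echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *
 * "deny" and "force" are system-wide overrides: deny disables huge
 * pages even for mounts that request them, force enables them for all
 * shmem mounts, as shmem_huge_enabled() above shows.
 */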
Shutemov 
4012853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4013853ac43aSMatt Mackall 
4014853ac43aSMatt Mackall /*
4015853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4016853ac43aSMatt Mackall  *
4017853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4018853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4019853ac43aSMatt Mackall  * their complexity. On systems without swap this code should be
4020853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4021853ac43aSMatt Mackall  */
4022853ac43aSMatt Mackall 
402341ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4024853ac43aSMatt Mackall 	.name		= "tmpfs",
4025*f3235626SDavid Howells 	.init_fs_context = ramfs_init_fs_context,
4026*f3235626SDavid Howells 	.parameters	= &ramfs_fs_parameters,
4027853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
40282b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4029853ac43aSMatt Mackall };
4030853ac43aSMatt Mackall 
403141ffe5d5SHugh Dickins int __init shmem_init(void)
4032853ac43aSMatt Mackall {
403341ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4034853ac43aSMatt Mackall 
403541ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4036853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4037853ac43aSMatt Mackall 
4038853ac43aSMatt Mackall 	return 0;
4039853ac43aSMatt Mackall }
4040853ac43aSMatt Mackall 
4041b56a2d8aSVineeth Remanan Pillai int shmem_unuse(unsigned int type, bool frontswap,
4042b56a2d8aSVineeth Remanan Pillai 		unsigned long *fs_pages_to_unuse)
4043853ac43aSMatt Mackall {
4044853ac43aSMatt Mackall 	return 0;
4045853ac43aSMatt Mackall }
4046853ac43aSMatt Mackall 
40473f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
40483f96b79aSHugh Dickins {
40493f96b79aSHugh Dickins 	return 0;
40503f96b79aSHugh Dickins }
40513f96b79aSHugh Dickins 
405224513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
405324513264SHugh Dickins {
405424513264SHugh Dickins }
405524513264SHugh Dickins 
4056c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4057c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4058c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4059c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4060c01d5b30SHugh Dickins {
4061c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4062c01d5b30SHugh Dickins }
4063c01d5b30SHugh Dickins #endif
4064c01d5b30SHugh Dickins 
406541ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
406694c1e62dSHugh Dickins {
406741ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
406894c1e62dSHugh Dickins }
406994c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
407094c1e62dSHugh Dickins 
4071853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
40720b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4073454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
40740b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
40750b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4076853ac43aSMatt Mackall 
4077853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4078853ac43aSMatt Mackall 
4079853ac43aSMatt Mackall /* common code */
40801da177e4SLinus Torvalds 
4081703321b6SMatthew Auld static struct 
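/*
 * Common helper behind shmem_file_setup(), shmem_kernel_file_setup()
 * and shmem_file_setup_with_mnt(): allocate an unlinked tmpfs inode
 * and wrap it in a new struct file.
 */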
file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size, 4082c7277090SEric Paris unsigned long flags, unsigned int i_flags) 40831da177e4SLinus Torvalds { 40841da177e4SLinus Torvalds struct inode *inode; 408593dec2daSAl Viro struct file *res; 40861da177e4SLinus Torvalds 4087703321b6SMatthew Auld if (IS_ERR(mnt)) 4088703321b6SMatthew Auld return ERR_CAST(mnt); 40891da177e4SLinus Torvalds 4090285b2c4fSHugh Dickins if (size < 0 || size > MAX_LFS_FILESIZE) 40911da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 40921da177e4SLinus Torvalds 40931da177e4SLinus Torvalds if (shmem_acct_size(flags, size)) 40941da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 40951da177e4SLinus Torvalds 409693dec2daSAl Viro inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0, 409793dec2daSAl Viro flags); 4098dac2d1f6SAl Viro if (unlikely(!inode)) { 4099dac2d1f6SAl Viro shmem_unacct_size(flags, size); 4100dac2d1f6SAl Viro return ERR_PTR(-ENOSPC); 4101dac2d1f6SAl Viro } 4102c7277090SEric Paris inode->i_flags |= i_flags; 41031da177e4SLinus Torvalds inode->i_size = size; 41046d6b77f1SMiklos Szeredi clear_nlink(inode); /* It is unlinked */ 410526567cdbSAl Viro res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 410693dec2daSAl Viro if (!IS_ERR(res)) 410793dec2daSAl Viro res = alloc_file_pseudo(inode, mnt, name, O_RDWR, 41084b42af81SAl Viro &shmem_file_operations); 41096b4d0b27SAl Viro if (IS_ERR(res)) 411093dec2daSAl Viro iput(inode); 41116b4d0b27SAl Viro return res; 41121da177e4SLinus Torvalds } 4113c7277090SEric Paris 4114c7277090SEric Paris /** 4115c7277090SEric Paris * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 4116c7277090SEric Paris * kernel internal. There will be NO LSM permission checks against the 4117c7277090SEric Paris * underlying inode. So users of this interface must do LSM checks at a 4118e1832f29SStephen Smalley * higher layer. The users are the big_key and shm implementations. LSM 4119e1832f29SStephen Smalley * checks are provided at the key or shm level rather than the inode. 
4120c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4121c7277090SEric Paris  * @size: size to be set for the file
4122c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4123c7277090SEric Paris  */
4124c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4125c7277090SEric Paris {
4126703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4127c7277090SEric Paris }
4128c7277090SEric Paris 
4129c7277090SEric Paris /**
4130c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4131c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4132c7277090SEric Paris  * @size: size to be set for the file
4133c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4134c7277090SEric Paris  */
4135c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4136c7277090SEric Paris {
4137703321b6SMatthew Auld 	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4138c7277090SEric Paris }
4139395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
41401da177e4SLinus Torvalds 
414146711810SRandy Dunlap /**
4142703321b6SMatthew Auld  * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4143703321b6SMatthew Auld  * @mnt: the tmpfs mount where the file will be created
4144703321b6SMatthew Auld  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4145703321b6SMatthew Auld  * @size: size to be set for the file
4146703321b6SMatthew Auld  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4147703321b6SMatthew Auld  */
4148703321b6SMatthew Auld struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4149703321b6SMatthew Auld 				       loff_t size, unsigned long flags)
4150703321b6SMatthew Auld {
4151703321b6SMatthew Auld 	return __shmem_file_setup(mnt, name, size, flags, 0);
4152703321b6SMatthew Auld }
4153703321b6SMatthew Auld EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4154703321b6SMatthew Auld 
4155703321b6SMatthew Auld /**
41561da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
41571da177e4SLinus Torvalds  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
41581da177e4SLinus Torvalds  */
41591da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
41601da177e4SLinus Torvalds {
41611da177e4SLinus Torvalds 	struct file *file;
41621da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
41631da177e4SLinus Torvalds 
416466fc1303SHugh Dickins 	/*
416566fc1303SHugh Dickins 	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
416666fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
416766fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
416866fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
416966fc1303SHugh Dickins 	 */
4170703321b6SMatthew Auld 	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
41711da177e4SLinus Torvalds 	if (IS_ERR(file))
41721da177e4SLinus Torvalds 		return PTR_ERR(file);
41731da177e4SLinus Torvalds 
41741da177e4SLinus Torvalds 	if (vma->vm_file)
41751da177e4SLinus Torvalds 		fput(vma->vm_file);
41761da177e4SLinus Torvalds 	vma->vm_file = file;
41771da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4178f3f0e1d2SKirill A. Shutemov 
4179e496cf3dSKirill A. 
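/*
 * If the mapping covers at least one naturally aligned PMD-sized
 * range, register it with khugepaged so the area can later be
 * collapsed into huge pages.
 */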
Shutemov if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 4180f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 4181f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 4182f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 4183f3f0e1d2SKirill A. Shutemov } 4184f3f0e1d2SKirill A. Shutemov 41851da177e4SLinus Torvalds return 0; 41861da177e4SLinus Torvalds } 4187d9d90e5eSHugh Dickins 4188d9d90e5eSHugh Dickins /** 4189d9d90e5eSHugh Dickins * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 4190d9d90e5eSHugh Dickins * @mapping: the page's address_space 4191d9d90e5eSHugh Dickins * @index: the page index 4192d9d90e5eSHugh Dickins * @gfp: the page allocator flags to use if allocating 4193d9d90e5eSHugh Dickins * 4194d9d90e5eSHugh Dickins * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 4195d9d90e5eSHugh Dickins * with any new page allocations done using the specified allocation flags. 4196d9d90e5eSHugh Dickins * But read_cache_page_gfp() uses the ->readpage() method: which does not 4197d9d90e5eSHugh Dickins * suit tmpfs, since it may have pages in swapcache, and needs to find those 4198d9d90e5eSHugh Dickins * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 4199d9d90e5eSHugh Dickins * 420068da9f05SHugh Dickins * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 420168da9f05SHugh Dickins * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 4202d9d90e5eSHugh Dickins */ 4203d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 4204d9d90e5eSHugh Dickins pgoff_t index, gfp_t gfp) 4205d9d90e5eSHugh Dickins { 420668da9f05SHugh Dickins #ifdef CONFIG_SHMEM 420768da9f05SHugh Dickins struct inode *inode = mapping->host; 42089276aad6SHugh Dickins struct page *page; 420968da9f05SHugh Dickins int error; 421068da9f05SHugh Dickins 421168da9f05SHugh Dickins BUG_ON(mapping->a_ops != &shmem_aops); 42129e18eb29SAndres Lagar-Cavilla error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, 4213cfda0526SMike Rapoport gfp, NULL, NULL, NULL); 421468da9f05SHugh Dickins if (error) 421568da9f05SHugh Dickins page = ERR_PTR(error); 421668da9f05SHugh Dickins else 421768da9f05SHugh Dickins unlock_page(page); 421868da9f05SHugh Dickins return page; 421968da9f05SHugh Dickins #else 422068da9f05SHugh Dickins /* 422168da9f05SHugh Dickins * The tiny !SHMEM case uses ramfs without swap 422268da9f05SHugh Dickins */ 4223d9d90e5eSHugh Dickins return read_cache_page_gfp(mapping, index, gfp); 422468da9f05SHugh Dickins #endif 4225d9d90e5eSHugh Dickins } 4226d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 4227
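/*
 * Usage sketch (the caller below is hypothetical; the pattern follows
 * the drm/i915 note above): read one page of a shmem mapping while
 * mixing __GFP_NORETRY | __GFP_NOWARN into the mapping's gfp mask, so
 * an allocation failure is reported to the caller instead of OOMing
 * the machine.
 */
#if 0
static struct page *example_read_shmem_page(struct file *shmem_file,
					    pgoff_t index)
{
	struct address_space *mapping = file_inode(shmem_file)->i_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	/* On success the page is returned unlocked, with a reference held. */
	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif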