/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;		/* start of range currently being fallocated */
        pgoff_t next;		/* the next page offset to be fallocated */
        pgoff_t nr_falloced;	/* how many new pages have been fallocated */
        pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp,
                gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
                loff_t oldsize, loff_t newsize)
{
        if (!(flags & VM_NORESERVE)) {
                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                        return security_vm_enough_memory_mm(current->mm,
                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
        }
        return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
        if (!(flags & VM_NORESERVE))
                return 0;

        return security_vm_enough_memory_mm(current->mm,
                        pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
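/*
 * Note: these method tables, like shmem_fs_type just below, are all
 * defined toward the end of this file.
 */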
static struct file_system_type shmem_fs_type;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
                if (sbinfo->max_blocks)
                        percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_unacct_blocks(info->flags, freed);
        }
}

bool shmem_charge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;

        if (shmem_acct_block(info->flags, pages))
                return false;
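        /*
         * Charge optimistically: adjust the counters first, then back
         * the change out below if it would exceed max_blocks.
         */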
        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);
        inode->i_mapping->nrpages += pages;

        if (!sbinfo->max_blocks)
                return true;
        if (percpu_counter_compare(&sbinfo->used_blocks,
                                sbinfo->max_blocks - pages) > 0) {
                inode->i_mapping->nrpages -= pages;
                spin_lock_irqsave(&info->lock, flags);
                info->alloced -= pages;
                shmem_recalc_inode(inode);
                spin_unlock_irqrestore(&info->lock, flags);
                shmem_unacct_blocks(info->flags, pages);
                return false;
        }
        percpu_counter_add(&sbinfo->used_blocks, pages);
        return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long flags;

        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
        shmem_unacct_blocks(info->flags, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        struct radix_tree_node *node;
        void **pslot;
        void *item;

        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
        item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
        if (!item)
                return -ENOENT;
        if (item != expected)
                return -ENOENT;
        __radix_tree_replace(&mapping->page_tree, node, pslot,
                             replacement, NULL, NULL);
        return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
                               pgoff_t index, swp_entry_t swap)
{
        void *item;

        rcu_read_lock();
        item = radix_tree_lookup(&mapping->page_tree, index);
        rcu_read_unlock();
        return item == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
        if (!strcmp(str, "never"))
                return SHMEM_HUGE_NEVER;
        if (!strcmp(str, "always"))
                return SHMEM_HUGE_ALWAYS;
        if (!strcmp(str, "within_size"))
                return SHMEM_HUGE_WITHIN_SIZE;
        if (!strcmp(str, "advise"))
                return SHMEM_HUGE_ADVISE;
        if (!strcmp(str, "deny"))
                return SHMEM_HUGE_DENY;
        if (!strcmp(str, "force"))
                return SHMEM_HUGE_FORCE;
        return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
        switch (huge) {
        case SHMEM_HUGE_NEVER:
                return "never";
        case SHMEM_HUGE_ALWAYS:
                return "always";
        case SHMEM_HUGE_WITHIN_SIZE:
                return "within_size";
        case SHMEM_HUGE_ADVISE:
                return "advise";
        case SHMEM_HUGE_DENY:
                return "deny";
        case SHMEM_HUGE_FORCE:
                return "force";
        default:
                VM_BUG_ON(1);
                return "bad_val";
        }
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        LIST_HEAD(list), *pos, *next;
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
        unsigned long batch = sc ? sc->nr_to_scan : 128;
        int removed = 0, split = 0;

        if (list_empty(&sbinfo->shrinklist))
                return SHRINK_STOP;

        spin_lock(&sbinfo->shrinklist_lock);
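        /* First pass: under the lock, move splittable candidates to a local list */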
        list_for_each_safe(pos, next, &sbinfo->shrinklist) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);

                /* pin the inode */
                inode = igrab(&info->vfs_inode);

                /* inode is about to be evicted */
                if (!inode) {
                        list_del_init(&info->shrinklist);
                        removed++;
                        goto next;
                }

                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
                        list_del_init(&info->shrinklist);
                        removed++;
                        iput(inode);
                        goto next;
                }

                list_move(&info->shrinklist, &list);
next:
                if (!--batch)
                        break;
        }
        spin_unlock(&sbinfo->shrinklist_lock);

        list_for_each_safe(pos, next, &list) {
                int ret;

                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;

                if (nr_to_split && split >= nr_to_split) {
                        iput(inode);
                        continue;
                }

                page = find_lock_page(inode->i_mapping,
                                (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                if (!page)
                        goto drop;

                if (!PageTransHuge(page)) {
                        unlock_page(page);
                        put_page(page);
                        goto drop;
                }

                ret = split_huge_page(page);
                unlock_page(page);
                put_page(page);

                if (ret) {
                        /* split failed: leave it on the list */
                        iput(inode);
                        continue;
                }

                split++;
drop:
                list_del_init(&info->shrinklist);
                removed++;
                iput(inode);
        }

        spin_lock(&sbinfo->shrinklist_lock);
        list_splice_tail(&list, &sbinfo->shrinklist);
        sbinfo->shrinklist_len -= removed;
        spin_unlock(&sbinfo->shrinklist_lock);

        return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (!READ_ONCE(sbinfo->shrinklist_len))
                return SHRINK_STOP;

        return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected)
{
        int error, nr = hpage_nr_pages(page);

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
        VM_BUG_ON(expected && PageTransHuge(page));

        page_ref_add(page, nr);
        page->mapping = mapping;
        page->index = index;

        spin_lock_irq(&mapping->tree_lock);
        if (PageTransHuge(page)) {
                void __rcu **results;
                pgoff_t idx;
                int i;

                error = 0;
                if (radix_tree_gang_lookup_slot(&mapping->page_tree,
                                        &results, &idx, index, 1) &&
                                idx < index + HPAGE_PMD_NR) {
                        error = -EEXIST;
                }

                if (!error) {
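                        /* Insert each subpage of the huge page at consecutive indices */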
                        for (i = 0; i < HPAGE_PMD_NR; i++) {
                                error = radix_tree_insert(&mapping->page_tree,
                                                index + i, page + i);
                                VM_BUG_ON(error);
                        }
                        count_vm_event(THP_FILE_ALLOC);
                }
        } else if (!expected) {
                error = radix_tree_insert(&mapping->page_tree, index, page);
        } else {
                error = shmem_radix_tree_replace(mapping, index, expected,
                                                                 page);
        }

        if (!error) {
                mapping->nrpages += nr;
                if (PageTransHuge(page))
                        __inc_node_page_state(page, NR_SHMEM_THPS);
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
                spin_unlock_irq(&mapping->tree_lock);
        } else {
                page->mapping = NULL;
                spin_unlock_irq(&mapping->tree_lock);
                page_ref_sub(page, nr);
        }
        return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
        struct address_space *mapping = page->mapping;
        int error;

        VM_BUG_ON_PAGE(PageCompound(page), page);

        spin_lock_irq(&mapping->tree_lock);
        error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_node_page_state(page, NR_FILE_PAGES);
        __dec_node_page_state(page, NR_SHMEM);
        spin_unlock_irq(&mapping->tree_lock);
        put_page(page);
        BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        void *old;

        spin_lock_irq(&mapping->tree_lock);
        old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
        spin_unlock_irq(&mapping->tree_lock);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
        return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                                                pgoff_t start, pgoff_t end)
{
        struct radix_tree_iter iter;
        void **slot;
        struct page *page;
        unsigned long swapped = 0;

        rcu_read_lock();

        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                if (iter.index >= end)
                        break;

                page = radix_tree_deref_slot(slot);

                if (radix_tree_deref_retry(page)) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }

                if (radix_tree_exceptional_entry(page))
                        swapped++;

                if (need_resched()) {
                        cond_resched_rcu();
                        slot = radix_tree_iter_next(&iter);
                }
        }

        rcu_read_unlock();

        return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long swapped;

        /* Be careful as we don't hold info->lock */
        swapped = READ_ONCE(info->swapped);

        /*
         * The easier cases are when the shmem object has nothing in swap, or
         * the vma maps it whole. Then we can simply use the stats that we
         * already track.
         */
        if (!swapped)
                return 0;

        if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
                return swapped << PAGE_SHIFT;

        /* Here comes the more involved part */
        return shmem_partial_swap_usage(mapping,
                        linear_page_index(vma, vma->vm_start),
                        linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping)) {
                /*
                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                 */
                pvec.nr = find_get_entries(mapping, index,
                                           PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr)
                        break;
                index = indices[pvec.nr - 1] + 1;
                pagevec_remove_exceptionals(&pvec);
                check_move_unevictable_pages(pvec.pages, pvec.nr);
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                                                 bool unfalloc)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
        unsigned int partial_start = lstart & (PAGE_SIZE - 1);
        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        if (lend == -1)
                end = -1;	/* unsigned, so actually very big */

        pagevec_init(&pvec, 0);
        index = start;
        while (index < end) {
                pvec.nr = find_get_entries(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

                        if (!trylock_page(page))
                                continue;

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
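                                /* Truncate only if the page still belongs to this mapping */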
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
                        }
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = NULL;
                shmem_getpage(inode, end, &page, SGP_READ);
                if (page) {
                        zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (start >= end)
                return;

        index = start;
        while (index < end) {
                cond_resched();

                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                                break;
                        /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                if (shmem_free_swap(mapping, index, page)) {
                                        /* Swap was replaced by page: retry */
                                        index--;
                                        break;
                                }
                                nr_swaps_freed++;
                                continue;
                        }

                        lock_page(page);

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                /*
                                 * Partial thp truncate due to 'start' in
                                 * middle of THP: don't need to look at these
                                 * pages again on !pvec.nr restart.
                                 */
                                if (index != round_down(end, HPAGE_PMD_NR))
                                        start++;
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                } else {
                                        /* Page was replaced by swap: retry */
                                        unlock_page(page);
                                        index--;
                                        break;
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                index++;
        }

        spin_lock_irq(&info->lock);
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        shmem_undo_range(inode, lstart, lend, false);
        inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
                         struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                spin_unlock_irq(&info->lock);
        }
        generic_fillattr(inode, stat);
        return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        int error;

        error = setattr_prepare(dentry, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                /* protected by i_mutex */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;

                if (newsize != oldsize) {
                        error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                        oldsize, newsize);
                        if (error)
                                return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = current_time(inode);
                }
                if (newsize <= oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);
                        if (info->alloced)
                                shmem_truncate_range(inode,
                                                        newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);

                        /*
                         * Part of the huge page can be beyond i_size: subject
                         * to shrink under memory pressure.
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
                                if (list_empty(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
                                }
                                spin_unlock(&sbinfo->shrinklist_lock);
                        }
                }
        }

        setattr_copy(inode, attr);
        if (attr->ia_valid & ATTR_MODE)
                error = posix_acl_chmod(inode, inode->i_mode);
        return error;
}

static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->shrinklist)) {
                        spin_lock(&sbinfo->shrinklist_lock);
                        if (!list_empty(&info->shrinklist)) {
                                list_del_init(&info->shrinklist);
                                sbinfo->shrinklist_len--;
                        }
                        spin_unlock(&sbinfo->shrinklist_lock);
                }
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }

        simple_xattrs_free(&info->xattrs);
        WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
                             swp_entry_t swap, struct page **pagep)
{
        struct address_space *mapping = info->vfs_inode.i_mapping;
        void *radswap;
        pgoff_t index;
        gfp_t gfp;
        int error = 0;

        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
                return -EAGAIN;	/* tell shmem_unuse we found nothing */

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);

        gfp = mapping_gfp_mask(mapping);
        if (shmem_should_replace_page(*pagep, gfp)) {
                mutex_unlock(&shmem_swaplist_mutex);
                error = shmem_replace_page(pagep, gfp, info, index);
                mutex_lock(&shmem_swaplist_mutex);
                /*
                 * We needed to drop mutex to make that restrictive page
                 * allocation, but the inode might have been freed while we
                 * dropped it: although a racing shmem_evict_inode() cannot
                 * complete without emptying the radix_tree, our page lock
                 * on this swapcache page is not enough to prevent that -
                 * free_swap_and_cache() of our swap entry will only
                 * trylock_page(), removing swap from radix_tree whatever.
                 *
                 * We must not proceed to shmem_add_to_page_cache() if the
                 * inode has been freed, but of course we cannot rely on
                 * inode or mapping or info to check that. However, we can
                 * safely check if our swap entry is still in use (and here
                 * it can't have got reused for another page): if it's still
                 * in use, then the inode cannot have been freed yet, and we
                 * can safely proceed (if it's no longer in use, that tells
                 * nothing about the inode, but we don't need to unuse swap).
                 */
                if (!page_swapcount(*pagep))
                        error = -ENOENT;
        }

        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
        if (!error)
                error = shmem_add_to_page_cache(*pagep, mapping, index,
                                                radswap);
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
                 * only does trylock page: if we raced, best clean up here.
                 */
                delete_from_swap_cache(*pagep);
                set_page_dirty(*pagep);
                if (!error) {
                        spin_lock_irq(&info->lock);
                        info->swapped--;
                        spin_unlock_irq(&info->lock);
                        swap_free(swap);
                }
        }
        return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
11321da177e4SLinus Torvalds */ 113341ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page) 11341da177e4SLinus Torvalds { 113541ffe5d5SHugh Dickins struct list_head *this, *next; 11361da177e4SLinus Torvalds struct shmem_inode_info *info; 113700501b53SJohannes Weiner struct mem_cgroup *memcg; 1138bde05d1cSHugh Dickins int error = 0; 1139bde05d1cSHugh Dickins 1140bde05d1cSHugh Dickins /* 1141bde05d1cSHugh Dickins * There's a faint possibility that swap page was replaced before 11420142ef6cSHugh Dickins * caller locked it: caller will come back later with the right page. 1143bde05d1cSHugh Dickins */ 11440142ef6cSHugh Dickins if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) 1145bde05d1cSHugh Dickins goto out; 1146778dd893SHugh Dickins 1147778dd893SHugh Dickins /* 1148778dd893SHugh Dickins * Charge page using GFP_KERNEL while we can wait, before taking 1149778dd893SHugh Dickins * the shmem_swaplist_mutex which might hold up shmem_writepage(). 1150778dd893SHugh Dickins * Charged back to the user (not to caller) when swap account is used. 1151778dd893SHugh Dickins */ 1152f627c2f5SKirill A. Shutemov error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg, 1153f627c2f5SKirill A. Shutemov false); 1154778dd893SHugh Dickins if (error) 1155778dd893SHugh Dickins goto out; 115646f65ec1SHugh Dickins /* No radix_tree_preload: swap entry keeps a place for page in tree */ 115700501b53SJohannes Weiner error = -EAGAIN; 11581da177e4SLinus Torvalds 1159cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 116041ffe5d5SHugh Dickins list_for_each_safe(this, next, &shmem_swaplist) { 116141ffe5d5SHugh Dickins info = list_entry(this, struct shmem_inode_info, swaplist); 1162285b2c4fSHugh Dickins if (info->swapped) 116300501b53SJohannes Weiner error = shmem_unuse_inode(info, swap, &page); 11646922c0c7SHugh Dickins else 11656922c0c7SHugh Dickins list_del_init(&info->swaplist); 1166cb5f7b9aSHugh Dickins cond_resched(); 116700501b53SJohannes Weiner if (error != -EAGAIN) 1168778dd893SHugh Dickins break; 116900501b53SJohannes Weiner /* found nothing in this: move on to search the next */ 11701da177e4SLinus Torvalds } 1171cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1172778dd893SHugh Dickins 117300501b53SJohannes Weiner if (error) { 117400501b53SJohannes Weiner if (error != -ENOMEM) 117500501b53SJohannes Weiner error = 0; 1176f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 117700501b53SJohannes Weiner } else 1178f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 1179778dd893SHugh Dickins out: 1180aaa46865SHugh Dickins unlock_page(page); 118109cbfeafSKirill A. Shutemov put_page(page); 1182778dd893SHugh Dickins return error; 11831da177e4SLinus Torvalds } 11841da177e4SLinus Torvalds 11851da177e4SLinus Torvalds /* 11861da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 11871da177e4SLinus Torvalds */ 11881da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 11891da177e4SLinus Torvalds { 11901da177e4SLinus Torvalds struct shmem_inode_info *info; 11911da177e4SLinus Torvalds struct address_space *mapping; 11921da177e4SLinus Torvalds struct inode *inode; 11936922c0c7SHugh Dickins swp_entry_t swap; 11946922c0c7SHugh Dickins pgoff_t index; 11951da177e4SLinus Torvalds 1196800d8c63SKirill A. 
Shutemov VM_BUG_ON_PAGE(PageCompound(page), page); 11971da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 11981da177e4SLinus Torvalds mapping = page->mapping; 11991da177e4SLinus Torvalds index = page->index; 12001da177e4SLinus Torvalds inode = mapping->host; 12011da177e4SLinus Torvalds info = SHMEM_I(inode); 12021da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 12031da177e4SLinus Torvalds goto redirty; 1204d9fe526aSHugh Dickins if (!total_swap_pages) 12051da177e4SLinus Torvalds goto redirty; 12061da177e4SLinus Torvalds 1207d9fe526aSHugh Dickins /* 120897b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 120997b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 121097b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 121197b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 121297b713baSChristoph Hellwig * threads or sync. 1213d9fe526aSHugh Dickins */ 121448f170fbSHugh Dickins if (!wbc->for_reclaim) { 121548f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 121648f170fbSHugh Dickins goto redirty; 121748f170fbSHugh Dickins } 12181635f6a7SHugh Dickins 12191635f6a7SHugh Dickins /* 12201635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 12211635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 12221635f6a7SHugh Dickins * fallocated page arriving here is now to initialize it and write it. 12231aac1400SHugh Dickins * 12241aac1400SHugh Dickins * That's okay for a page already fallocated earlier, but if we have 12251aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 12261aac1400SHugh Dickins * of this page in case we have to undo it, and (b) it may not be a 12271aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 12281aac1400SHugh Dickins * reactivate the page, and let shmem_fallocate() quit when too many. 
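 *
 * [Editor's sketch, not in the original source: how such a page arises
 * from userspace. fallocate(2) on tmpfs instantiates pages without
 * writing them, so they remain !PageUptodate until first touched; under
 * reclaim they can arrive here still uninitialized. The path assumes the
 * conventional tmpfs mount at /dev/shm:]
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);
 *		if (fd < 0 || fallocate(fd, 0, 0, 64 << 20) != 0)
 *			return 1;
 *		pause();
 *		return 0;
 *	}
 *
 * The 64MB is allocated but never written; if memory pressure later
 * pushes those pages toward swap, the branch below initializes them
 * first, so uninitialized data never reaches the swap device.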
12291635f6a7SHugh Dickins */ 12301635f6a7SHugh Dickins if (!PageUptodate(page)) { 12311aac1400SHugh Dickins if (inode->i_private) { 12321aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 12331aac1400SHugh Dickins spin_lock(&inode->i_lock); 12341aac1400SHugh Dickins shmem_falloc = inode->i_private; 12351aac1400SHugh Dickins if (shmem_falloc && 12368e205f77SHugh Dickins !shmem_falloc->waitq && 12371aac1400SHugh Dickins index >= shmem_falloc->start && 12381aac1400SHugh Dickins index < shmem_falloc->next) 12391aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 12401aac1400SHugh Dickins else 12411aac1400SHugh Dickins shmem_falloc = NULL; 12421aac1400SHugh Dickins spin_unlock(&inode->i_lock); 12431aac1400SHugh Dickins if (shmem_falloc) 12441aac1400SHugh Dickins goto redirty; 12451aac1400SHugh Dickins } 12461635f6a7SHugh Dickins clear_highpage(page); 12471635f6a7SHugh Dickins flush_dcache_page(page); 12481635f6a7SHugh Dickins SetPageUptodate(page); 12491635f6a7SHugh Dickins } 12501635f6a7SHugh Dickins 1251d9fe526aSHugh Dickins swap = get_swap_page(); 125248f170fbSHugh Dickins if (!swap.val) 125348f170fbSHugh Dickins goto redirty; 1254d9fe526aSHugh Dickins 125537e84351SVladimir Davydov if (mem_cgroup_try_charge_swap(page, swap)) 125637e84351SVladimir Davydov goto free_swap; 125737e84351SVladimir Davydov 1258b1dea800SHugh Dickins /* 1259b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 12606922c0c7SHugh Dickins * if it's not already there. Do it now before the page is 12616922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1262b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 12636922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 12646922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1265b1dea800SHugh Dickins */ 1266b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 126705bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 126805bf86b4SHugh Dickins list_add_tail(&info->swaplist, &shmem_swaplist); 1269b1dea800SHugh Dickins 127048f170fbSHugh Dickins if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 12714595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1272267a4c76SHugh Dickins shmem_recalc_inode(inode); 1273267a4c76SHugh Dickins info->swapped++; 12744595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 1275267a4c76SHugh Dickins 1276aaa46865SHugh Dickins swap_shmem_alloc(swap); 12776922c0c7SHugh Dickins shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 12786922c0c7SHugh Dickins 12796922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1280d9fe526aSHugh Dickins BUG_ON(page_mapped(page)); 12819fab5619SHugh Dickins swap_writepage(page, wbc); 12821da177e4SLinus Torvalds return 0; 12831da177e4SLinus Torvalds } 12841da177e4SLinus Torvalds 12856922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 128637e84351SVladimir Davydov free_swap: 12870a31bc97SJohannes Weiner swapcache_free(swap); 12881da177e4SLinus Torvalds redirty: 12891da177e4SLinus Torvalds set_page_dirty(page); 1290d9fe526aSHugh Dickins if (wbc->for_reclaim) 1291d9fe526aSHugh Dickins return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1292d9fe526aSHugh Dickins unlock_page(page); 1293d9fe526aSHugh Dickins return 0; 12941da177e4SLinus Torvalds } 12951da177e4SLinus Torvalds 129675edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 129771fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1298680d794bSakpm@linux-foundation.org { 1299680d794bSakpm@linux-foundation.org char buffer[64]; 1300680d794bSakpm@linux-foundation.org 130171fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1302095f1fc4SLee Schermerhorn return; /* show nothing */ 1303095f1fc4SLee Schermerhorn 1304a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1305095f1fc4SLee Schermerhorn 1306095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1307680d794bSakpm@linux-foundation.org } 130871fe804bSLee Schermerhorn 130971fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 131071fe804bSLee Schermerhorn { 131171fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 131271fe804bSLee Schermerhorn if (sbinfo->mpol) { 131371fe804bSLee Schermerhorn spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 131471fe804bSLee Schermerhorn mpol = sbinfo->mpol; 131571fe804bSLee Schermerhorn mpol_get(mpol); 131671fe804bSLee Schermerhorn spin_unlock(&sbinfo->stat_lock); 131771fe804bSLee Schermerhorn } 131871fe804bSLee Schermerhorn return mpol; 131971fe804bSLee Schermerhorn } 132075edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 132175edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 132275edd345SHugh Dickins { 132375edd345SHugh Dickins } 132475edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 132575edd345SHugh Dickins { 132675edd345SHugh Dickins return NULL; 132775edd345SHugh Dickins } 132875edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 132975edd345SHugh Dickins #ifndef CONFIG_NUMA 133075edd345SHugh Dickins #define vm_policy vm_private_data 133175edd345SHugh Dickins #endif 1332680d794bSakpm@linux-foundation.org 1333800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1334800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1335800d8c63SKirill A. Shutemov { 1336800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 1337800d8c63SKirill A. Shutemov vma->vm_start = 0; 1338800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1339800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1340800d8c63SKirill A. 
Shutemov vma->vm_ops = NULL; 1341800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1342800d8c63SKirill A. Shutemov } 1343800d8c63SKirill A. Shutemov 1344800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1345800d8c63SKirill A. Shutemov { 1346800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1347800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1348800d8c63SKirill A. Shutemov } 1349800d8c63SKirill A. Shutemov 135041ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 135141ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 13521da177e4SLinus Torvalds { 13531da177e4SLinus Torvalds struct vm_area_struct pvma; 135418a2f371SMel Gorman struct page *page; 13551da177e4SLinus Torvalds 1356800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 135718a2f371SMel Gorman page = swapin_readahead(swap, gfp, &pvma, 0); 1358800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 135918a2f371SMel Gorman 1360800d8c63SKirill A. Shutemov return page; 1361800d8c63SKirill A. Shutemov } 136218a2f371SMel Gorman 1363800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp, 1364800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1365800d8c63SKirill A. Shutemov { 1366800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 1367800d8c63SKirill A. Shutemov struct inode *inode = &info->vfs_inode; 1368800d8c63SKirill A. Shutemov struct address_space *mapping = inode->i_mapping; 13694620a06eSGeert Uytterhoeven pgoff_t idx, hindex; 1370800d8c63SKirill A. Shutemov void __rcu **results; 1371800d8c63SKirill A. Shutemov struct page *page; 1372800d8c63SKirill A. Shutemov 1373e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1374800d8c63SKirill A. Shutemov return NULL; 1375800d8c63SKirill A. Shutemov 13764620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 1377800d8c63SKirill A. Shutemov rcu_read_lock(); 1378800d8c63SKirill A. Shutemov if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx, 1379800d8c63SKirill A. Shutemov hindex, 1) && idx < hindex + HPAGE_PMD_NR) { 1380800d8c63SKirill A. Shutemov rcu_read_unlock(); 1381800d8c63SKirill A. Shutemov return NULL; 1382800d8c63SKirill A. Shutemov } 1383800d8c63SKirill A. Shutemov rcu_read_unlock(); 1384800d8c63SKirill A. Shutemov 1385800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1386800d8c63SKirill A. Shutemov page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1387800d8c63SKirill A. Shutemov HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1388800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1389800d8c63SKirill A. Shutemov if (page) 1390800d8c63SKirill A. Shutemov prep_transhuge_page(page); 139118a2f371SMel Gorman return page; 139218a2f371SMel Gorman } 139318a2f371SMel Gorman 139418a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp, 139518a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 139618a2f371SMel Gorman { 139718a2f371SMel Gorman struct vm_area_struct pvma; 139818a2f371SMel Gorman struct page *page; 139918a2f371SMel Gorman 1400800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1401800d8c63SKirill A. Shutemov page = alloc_page_vma(gfp, &pvma, 0); 1402800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 140318a2f371SMel Gorman 1404800d8c63SKirill A. 
Shutemov return page; 1405800d8c63SKirill A. Shutemov } 1406800d8c63SKirill A. Shutemov 1407800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 1408800d8c63SKirill A. Shutemov struct shmem_inode_info *info, struct shmem_sb_info *sbinfo, 1409800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1410800d8c63SKirill A. Shutemov { 1411800d8c63SKirill A. Shutemov struct page *page; 1412800d8c63SKirill A. Shutemov int nr; 1413800d8c63SKirill A. Shutemov int err = -ENOSPC; 1414800d8c63SKirill A. Shutemov 1415e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1416800d8c63SKirill A. Shutemov huge = false; 1417800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1418800d8c63SKirill A. Shutemov 1419800d8c63SKirill A. Shutemov if (shmem_acct_block(info->flags, nr)) 1420800d8c63SKirill A. Shutemov goto failed; 1421800d8c63SKirill A. Shutemov if (sbinfo->max_blocks) { 1422800d8c63SKirill A. Shutemov if (percpu_counter_compare(&sbinfo->used_blocks, 1423800d8c63SKirill A. Shutemov sbinfo->max_blocks - nr) > 0) 1424800d8c63SKirill A. Shutemov goto unacct; 1425800d8c63SKirill A. Shutemov percpu_counter_add(&sbinfo->used_blocks, nr); 1426800d8c63SKirill A. Shutemov } 1427800d8c63SKirill A. Shutemov 1428800d8c63SKirill A. Shutemov if (huge) 1429800d8c63SKirill A. Shutemov page = shmem_alloc_hugepage(gfp, info, index); 1430800d8c63SKirill A. Shutemov else 1431800d8c63SKirill A. Shutemov page = shmem_alloc_page(gfp, info, index); 143275edd345SHugh Dickins if (page) { 143375edd345SHugh Dickins __SetPageLocked(page); 143475edd345SHugh Dickins __SetPageSwapBacked(page); 1435800d8c63SKirill A. Shutemov return page; 143675edd345SHugh Dickins } 143718a2f371SMel Gorman 1438800d8c63SKirill A. Shutemov err = -ENOMEM; 1439800d8c63SKirill A. Shutemov if (sbinfo->max_blocks) 1440800d8c63SKirill A. Shutemov percpu_counter_add(&sbinfo->used_blocks, -nr); 1441800d8c63SKirill A. Shutemov unacct: 1442800d8c63SKirill A. Shutemov shmem_unacct_blocks(info->flags, nr); 1443800d8c63SKirill A. Shutemov failed: 1444800d8c63SKirill A. Shutemov return ERR_PTR(err); 14451da177e4SLinus Torvalds } 144671fe804bSLee Schermerhorn 14471da177e4SLinus Torvalds /* 1448bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1449bde05d1cSHugh Dickins * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1450bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1451bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1452bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1453bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1454bde05d1cSHugh Dickins * 1455bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1456bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1457bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 
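 *
 * [Editor's sketch, not in the original source: how a driver imposes
 * such a constraint; the DMA32 mask is illustrative of gma500's needs.]
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL | __GFP_DMA32);
 *
 * With that mask, gfp_zone(gfp) evaluates to ZONE_DMA32, while a page
 * swapped in earlier may sit in ZONE_NORMAL; page_zonenum(page) is then
 * greater, the test below fires, and shmem_replace_page() copies the
 * data into a suitably placed page before it enters the file's cache.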
1458bde05d1cSHugh Dickins */ 1459bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1460bde05d1cSHugh Dickins { 1461bde05d1cSHugh Dickins return page_zonenum(page) > gfp_zone(gfp); 1462bde05d1cSHugh Dickins } 1463bde05d1cSHugh Dickins 1464bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1465bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1466bde05d1cSHugh Dickins { 1467bde05d1cSHugh Dickins struct page *oldpage, *newpage; 1468bde05d1cSHugh Dickins struct address_space *swap_mapping; 1469bde05d1cSHugh Dickins pgoff_t swap_index; 1470bde05d1cSHugh Dickins int error; 1471bde05d1cSHugh Dickins 1472bde05d1cSHugh Dickins oldpage = *pagep; 1473bde05d1cSHugh Dickins swap_index = page_private(oldpage); 1474bde05d1cSHugh Dickins swap_mapping = page_mapping(oldpage); 1475bde05d1cSHugh Dickins 1476bde05d1cSHugh Dickins /* 1477bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1478bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1479bde05d1cSHugh Dickins */ 1480bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1481bde05d1cSHugh Dickins newpage = shmem_alloc_page(gfp, info, index); 1482bde05d1cSHugh Dickins if (!newpage) 1483bde05d1cSHugh Dickins return -ENOMEM; 1484bde05d1cSHugh Dickins 148509cbfeafSKirill A. Shutemov get_page(newpage); 1486bde05d1cSHugh Dickins copy_highpage(newpage, oldpage); 14870142ef6cSHugh Dickins flush_dcache_page(newpage); 1488bde05d1cSHugh Dickins 14899956edf3SHugh Dickins __SetPageLocked(newpage); 14909956edf3SHugh Dickins __SetPageSwapBacked(newpage); 1491bde05d1cSHugh Dickins SetPageUptodate(newpage); 1492bde05d1cSHugh Dickins set_page_private(newpage, swap_index); 1493bde05d1cSHugh Dickins SetPageSwapCache(newpage); 1494bde05d1cSHugh Dickins 1495bde05d1cSHugh Dickins /* 1496bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1497bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 1498bde05d1cSHugh Dickins */ 1499bde05d1cSHugh Dickins spin_lock_irq(&swap_mapping->tree_lock); 1500bde05d1cSHugh Dickins error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, 1501bde05d1cSHugh Dickins newpage); 15020142ef6cSHugh Dickins if (!error) { 150311fb9989SMel Gorman __inc_node_page_state(newpage, NR_FILE_PAGES); 150411fb9989SMel Gorman __dec_node_page_state(oldpage, NR_FILE_PAGES); 15050142ef6cSHugh Dickins } 1506bde05d1cSHugh Dickins spin_unlock_irq(&swap_mapping->tree_lock); 1507bde05d1cSHugh Dickins 15080142ef6cSHugh Dickins if (unlikely(error)) { 15090142ef6cSHugh Dickins /* 15100142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 15110142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 15120142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 15130142ef6cSHugh Dickins */ 15140142ef6cSHugh Dickins oldpage = newpage; 15150142ef6cSHugh Dickins } else { 15166a93ca8fSJohannes Weiner mem_cgroup_migrate(oldpage, newpage); 1517bde05d1cSHugh Dickins lru_cache_add_anon(newpage); 15180142ef6cSHugh Dickins *pagep = newpage; 15190142ef6cSHugh Dickins } 1520bde05d1cSHugh Dickins 1521bde05d1cSHugh Dickins ClearPageSwapCache(oldpage); 1522bde05d1cSHugh Dickins set_page_private(oldpage, 0); 1523bde05d1cSHugh Dickins 1524bde05d1cSHugh Dickins unlock_page(oldpage); 152509cbfeafSKirill A. Shutemov put_page(oldpage); 152609cbfeafSKirill A. 
Shutemov put_page(oldpage); 15270142ef6cSHugh Dickins return error; 1528bde05d1cSHugh Dickins } 1529bde05d1cSHugh Dickins 1530bde05d1cSHugh Dickins /* 153168da9f05SHugh Dickins * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 15321da177e4SLinus Torvalds * 15331da177e4SLinus Torvalds * If we allocate a new one we do not mark it dirty. That's up to the 15341da177e4SLinus Torvalds * vm. If we swap it in we mark it dirty since we also free the swap 15359e18eb29SAndres Lagar-Cavilla * entry since a page cannot live in both the swap and page cache. 15369e18eb29SAndres Lagar-Cavilla * 15379e18eb29SAndres Lagar-Cavilla * fault_mm and fault_type are only supplied by shmem_fault: 15389e18eb29SAndres Lagar-Cavilla * otherwise they are NULL. 15391da177e4SLinus Torvalds */ 154041ffe5d5SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 15419e18eb29SAndres Lagar-Cavilla struct page **pagep, enum sgp_type sgp, gfp_t gfp, 15429e18eb29SAndres Lagar-Cavilla struct mm_struct *fault_mm, int *fault_type) 15431da177e4SLinus Torvalds { 15441da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 154523f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 15461da177e4SLinus Torvalds struct shmem_sb_info *sbinfo; 15479e18eb29SAndres Lagar-Cavilla struct mm_struct *charge_mm; 154800501b53SJohannes Weiner struct mem_cgroup *memcg; 154927ab7006SHugh Dickins struct page *page; 15501da177e4SLinus Torvalds swp_entry_t swap; 1551657e3038SKirill A. Shutemov enum sgp_type sgp_huge = sgp; 1552800d8c63SKirill A. Shutemov pgoff_t hindex = index; 15531da177e4SLinus Torvalds int error; 155454af6042SHugh Dickins int once = 0; 15551635f6a7SHugh Dickins int alloced = 0; 15561da177e4SLinus Torvalds 155709cbfeafSKirill A. Shutemov if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 15581da177e4SLinus Torvalds return -EFBIG; 1559657e3038SKirill A. Shutemov if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1560657e3038SKirill A. Shutemov sgp = SGP_CACHE; 15611da177e4SLinus Torvalds repeat: 156254af6042SHugh Dickins swap.val = 0; 15630cd6144aSJohannes Weiner page = find_lock_entry(mapping, index); 156454af6042SHugh Dickins if (radix_tree_exceptional_entry(page)) { 156554af6042SHugh Dickins swap = radix_to_swp_entry(page); 156654af6042SHugh Dickins page = NULL; 156754af6042SHugh Dickins } 156854af6042SHugh Dickins 156975edd345SHugh Dickins if (sgp <= SGP_CACHE && 157009cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 157154af6042SHugh Dickins error = -EINVAL; 1572267a4c76SHugh Dickins goto unlock; 157354af6042SHugh Dickins } 157454af6042SHugh Dickins 157566d2f4d2SHugh Dickins if (page && sgp == SGP_WRITE) 157666d2f4d2SHugh Dickins mark_page_accessed(page); 157766d2f4d2SHugh Dickins 15781635f6a7SHugh Dickins /* fallocated page? */ 15791635f6a7SHugh Dickins if (page && !PageUptodate(page)) { 15801635f6a7SHugh Dickins if (sgp != SGP_READ) 15811635f6a7SHugh Dickins goto clear; 15821635f6a7SHugh Dickins unlock_page(page); 158309cbfeafSKirill A. Shutemov put_page(page); 15841635f6a7SHugh Dickins page = NULL; 15851635f6a7SHugh Dickins } 158654af6042SHugh Dickins if (page || (sgp == SGP_READ && !swap.val)) { 158754af6042SHugh Dickins *pagep = page; 158854af6042SHugh Dickins return 0; 158927ab7006SHugh Dickins } 159027ab7006SHugh Dickins 1591b409f9fcSHugh Dickins /* 159254af6042SHugh Dickins * Fast cache lookup did not find it: 159354af6042SHugh Dickins * bring it back from swap or allocate. 
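 *
 * [Editor's note, hedged: the contract callers depend on. On success the
 * page is returned locked, uptodate and with a reference held; a typical
 * caller pattern, modelled on shmem_write_begin() and
 * shmem_file_read_iter() further below:]
 *
 *	struct page *page;
 *	int error = shmem_getpage(inode, index, &page, SGP_CACHE);
 *
 *	if (error)
 *		return error;
 *	set_page_dirty(page);	(only if the caller modified it)
 *	unlock_page(page);
 *	put_page(page);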
1594b409f9fcSHugh Dickins */ 159554af6042SHugh Dickins sbinfo = SHMEM_SB(inode->i_sb); 15969e18eb29SAndres Lagar-Cavilla charge_mm = fault_mm ? : current->mm; 159727ab7006SHugh Dickins 15981da177e4SLinus Torvalds if (swap.val) { 15991da177e4SLinus Torvalds /* Look it up and read it in.. */ 160027ab7006SHugh Dickins page = lookup_swap_cache(swap); 160127ab7006SHugh Dickins if (!page) { 16029e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 16039e18eb29SAndres Lagar-Cavilla if (fault_type) { 160468da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 16059e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 16069e18eb29SAndres Lagar-Cavilla mem_cgroup_count_vm_event(fault_mm, PGMAJFAULT); 16079e18eb29SAndres Lagar-Cavilla } 16089e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 160941ffe5d5SHugh Dickins page = shmem_swapin(swap, gfp, info, index); 161027ab7006SHugh Dickins if (!page) { 16111da177e4SLinus Torvalds error = -ENOMEM; 161254af6042SHugh Dickins goto failed; 1613285b2c4fSHugh Dickins } 16141da177e4SLinus Torvalds } 16151da177e4SLinus Torvalds 16161da177e4SLinus Torvalds /* We have to do this with page locked to prevent races */ 161754af6042SHugh Dickins lock_page(page); 16180142ef6cSHugh Dickins if (!PageSwapCache(page) || page_private(page) != swap.val || 1619d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1620bde05d1cSHugh Dickins error = -EEXIST; /* try again */ 1621d1899228SHugh Dickins goto unlock; 1622bde05d1cSHugh Dickins } 162327ab7006SHugh Dickins if (!PageUptodate(page)) { 16241da177e4SLinus Torvalds error = -EIO; 162554af6042SHugh Dickins goto failed; 162654af6042SHugh Dickins } 162754af6042SHugh Dickins wait_on_page_writeback(page); 162854af6042SHugh Dickins 1629bde05d1cSHugh Dickins if (shmem_should_replace_page(page, gfp)) { 1630bde05d1cSHugh Dickins error = shmem_replace_page(&page, gfp, info, index); 1631bde05d1cSHugh Dickins if (error) 163254af6042SHugh Dickins goto failed; 16331da177e4SLinus Torvalds } 16341da177e4SLinus Torvalds 16359e18eb29SAndres Lagar-Cavilla error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, 1636f627c2f5SKirill A. Shutemov false); 1637d1899228SHugh Dickins if (!error) { 163854af6042SHugh Dickins error = shmem_add_to_page_cache(page, mapping, index, 1639fed400a1SWang Sheng-Hui swp_to_radix_entry(swap)); 1640215c02bcSHugh Dickins /* 1641215c02bcSHugh Dickins * We already confirmed swap under page lock, and make 1642215c02bcSHugh Dickins * no memory allocation here, so usually no possibility 1643215c02bcSHugh Dickins * of error; but free_swap_and_cache() only trylocks a 1644215c02bcSHugh Dickins * page, so it is just possible that the entry has been 1645215c02bcSHugh Dickins * truncated or holepunched since swap was confirmed. 1646215c02bcSHugh Dickins * shmem_undo_range() will have done some of the 1647215c02bcSHugh Dickins * unaccounting, now delete_from_swap_cache() will do 164893aa7d95SVladimir Davydov * the rest. 1649215c02bcSHugh Dickins * Reset swap.val? No, leave it so "failed" goes back to 1650215c02bcSHugh Dickins * "repeat": reading a hole and writing should succeed. 1651215c02bcSHugh Dickins */ 165200501b53SJohannes Weiner if (error) { 1653f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 1654215c02bcSHugh Dickins delete_from_swap_cache(page); 1655d1899228SHugh Dickins } 165600501b53SJohannes Weiner } 165754af6042SHugh Dickins if (error) 165854af6042SHugh Dickins goto failed; 165954af6042SHugh Dickins 1660f627c2f5SKirill A. 
Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 166100501b53SJohannes Weiner 16624595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 166354af6042SHugh Dickins info->swapped--; 166454af6042SHugh Dickins shmem_recalc_inode(inode); 16654595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 166627ab7006SHugh Dickins 166766d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 166866d2f4d2SHugh Dickins mark_page_accessed(page); 166966d2f4d2SHugh Dickins 167027ab7006SHugh Dickins delete_from_swap_cache(page); 167127ab7006SHugh Dickins set_page_dirty(page); 167227ab7006SHugh Dickins swap_free(swap); 167327ab7006SHugh Dickins 167454af6042SHugh Dickins } else { 1675800d8c63SKirill A. Shutemov /* shmem_symlink() */ 1676800d8c63SKirill A. Shutemov if (mapping->a_ops != &shmem_aops) 1677800d8c63SKirill A. Shutemov goto alloc_nohuge; 1678657e3038SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 1679800d8c63SKirill A. Shutemov goto alloc_nohuge; 1680800d8c63SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 1681800d8c63SKirill A. Shutemov goto alloc_huge; 1682800d8c63SKirill A. Shutemov switch (sbinfo->huge) { 1683800d8c63SKirill A. Shutemov loff_t i_size; 1684800d8c63SKirill A. Shutemov pgoff_t off; 1685800d8c63SKirill A. Shutemov case SHMEM_HUGE_NEVER: 1686800d8c63SKirill A. Shutemov goto alloc_nohuge; 1687800d8c63SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 1688800d8c63SKirill A. Shutemov off = round_up(index, HPAGE_PMD_NR); 1689800d8c63SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 1690800d8c63SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 1691800d8c63SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 1692800d8c63SKirill A. Shutemov goto alloc_huge; 1693800d8c63SKirill A. Shutemov /* fallthrough */ 1694800d8c63SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 1695657e3038SKirill A. Shutemov if (sgp_huge == SGP_HUGE) 1696657e3038SKirill A. Shutemov goto alloc_huge; 1697657e3038SKirill A. Shutemov /* TODO: implement fadvise() hints */ 1698800d8c63SKirill A. Shutemov goto alloc_nohuge; 169959a16eadSHugh Dickins } 17001da177e4SLinus Torvalds 1701800d8c63SKirill A. Shutemov alloc_huge: 1702800d8c63SKirill A. Shutemov page = shmem_alloc_and_acct_page(gfp, info, sbinfo, 1703800d8c63SKirill A. Shutemov index, true); 1704800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1705800d8c63SKirill A. Shutemov alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, 1706800d8c63SKirill A. Shutemov index, false); 170754af6042SHugh Dickins } 1708800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1709779750d2SKirill A. Shutemov int retry = 5; 1710800d8c63SKirill A. Shutemov error = PTR_ERR(page); 1711800d8c63SKirill A. Shutemov page = NULL; 1712779750d2SKirill A. Shutemov if (error != -ENOSPC) 1713779750d2SKirill A. Shutemov goto failed; 1714779750d2SKirill A. Shutemov /* 1715779750d2SKirill A. Shutemov * Try to reclaim some space by splitting a huge page 1716779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1717779750d2SKirill A. Shutemov */ 1718779750d2SKirill A. Shutemov while (retry--) { 1719779750d2SKirill A. Shutemov int ret; 1720779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1721779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1722779750d2SKirill A. Shutemov break; 1723779750d2SKirill A. Shutemov if (ret) 1724779750d2SKirill A. Shutemov goto alloc_nohuge; 1725779750d2SKirill A. Shutemov } 1726800d8c63SKirill A. Shutemov goto failed; 1727800d8c63SKirill A. Shutemov } 1728800d8c63SKirill A. 
Shutemov 1729800d8c63SKirill A. Shutemov if (PageTransHuge(page)) 1730800d8c63SKirill A. Shutemov hindex = round_down(index, HPAGE_PMD_NR); 1731800d8c63SKirill A. Shutemov else 1732800d8c63SKirill A. Shutemov hindex = index; 1733800d8c63SKirill A. Shutemov 173466d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1735eb39d618SHugh Dickins __SetPageReferenced(page); 173666d2f4d2SHugh Dickins 17379e18eb29SAndres Lagar-Cavilla error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, 1738800d8c63SKirill A. Shutemov PageTransHuge(page)); 173954af6042SHugh Dickins if (error) 1740800d8c63SKirill A. Shutemov goto unacct; 1741800d8c63SKirill A. Shutemov error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK, 1742800d8c63SKirill A. Shutemov compound_order(page)); 1743b065b432SHugh Dickins if (!error) { 1744800d8c63SKirill A. Shutemov error = shmem_add_to_page_cache(page, mapping, hindex, 1745fed400a1SWang Sheng-Hui NULL); 1746b065b432SHugh Dickins radix_tree_preload_end(); 1747b065b432SHugh Dickins } 1748b065b432SHugh Dickins if (error) { 1749800d8c63SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, 1750800d8c63SKirill A. Shutemov PageTransHuge(page)); 1751800d8c63SKirill A. Shutemov goto unacct; 1752b065b432SHugh Dickins } 1753800d8c63SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, 1754800d8c63SKirill A. Shutemov PageTransHuge(page)); 175554af6042SHugh Dickins lru_cache_add_anon(page); 175654af6042SHugh Dickins 17574595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1758800d8c63SKirill A. Shutemov info->alloced += 1 << compound_order(page); 1759800d8c63SKirill A. Shutemov inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 176054af6042SHugh Dickins shmem_recalc_inode(inode); 17614595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 17621635f6a7SHugh Dickins alloced = true; 176354af6042SHugh Dickins 1764779750d2SKirill A. Shutemov if (PageTransHuge(page) && 1765779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1766779750d2SKirill A. Shutemov hindex + HPAGE_PMD_NR - 1) { 1767779750d2SKirill A. Shutemov /* 1768779750d2SKirill A. Shutemov * Part of the huge page is beyond i_size: subject 1769779750d2SKirill A. Shutemov * to shrink under memory pressure. 1770779750d2SKirill A. Shutemov */ 1771779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1772779750d2SKirill A. Shutemov if (list_empty(&info->shrinklist)) { 1773779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1774779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1775779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1776779750d2SKirill A. Shutemov } 1777779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1778779750d2SKirill A. Shutemov } 1779779750d2SKirill A. Shutemov 1780ec9516fbSHugh Dickins /* 17811635f6a7SHugh Dickins * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 17821635f6a7SHugh Dickins */ 17831635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 17841635f6a7SHugh Dickins sgp = SGP_WRITE; 17851635f6a7SHugh Dickins clear: 17861635f6a7SHugh Dickins /* 17871635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 17881635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 17891635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1790ec9516fbSHugh Dickins */ 1791800d8c63SKirill A. Shutemov if (sgp != SGP_WRITE && !PageUptodate(page)) { 1792800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 1793800d8c63SKirill A. 
Shutemov int i; 1794800d8c63SKirill A. Shutemov 1795800d8c63SKirill A. Shutemov for (i = 0; i < (1 << compound_order(head)); i++) { 1796800d8c63SKirill A. Shutemov clear_highpage(head + i); 1797800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 1798800d8c63SKirill A. Shutemov } 1799800d8c63SKirill A. Shutemov SetPageUptodate(head); 1800ec9516fbSHugh Dickins } 18011da177e4SLinus Torvalds } 1802bde05d1cSHugh Dickins 180354af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 180475edd345SHugh Dickins if (sgp <= SGP_CACHE && 180509cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1806267a4c76SHugh Dickins if (alloced) { 1807267a4c76SHugh Dickins ClearPageDirty(page); 1808267a4c76SHugh Dickins delete_from_page_cache(page); 18094595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1810267a4c76SHugh Dickins shmem_recalc_inode(inode); 18114595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1812267a4c76SHugh Dickins } 181354af6042SHugh Dickins error = -EINVAL; 1814267a4c76SHugh Dickins goto unlock; 1815ff36b801SShaohua Li } 1816800d8c63SKirill A. Shutemov *pagep = page + index - hindex; 181754af6042SHugh Dickins return 0; 1818d00806b1SNick Piggin 1819d0217ac0SNick Piggin /* 182054af6042SHugh Dickins * Error recovery. 18211da177e4SLinus Torvalds */ 182254af6042SHugh Dickins unacct: 1823800d8c63SKirill A. Shutemov if (sbinfo->max_blocks) 1824800d8c63SKirill A. Shutemov percpu_counter_sub(&sbinfo->used_blocks, 1825800d8c63SKirill A. Shutemov 1 << compound_order(page)); 1826800d8c63SKirill A. Shutemov shmem_unacct_blocks(info->flags, 1 << compound_order(page)); 1827800d8c63SKirill A. Shutemov 1828800d8c63SKirill A. Shutemov if (PageTransHuge(page)) { 1829800d8c63SKirill A. Shutemov unlock_page(page); 1830800d8c63SKirill A. Shutemov put_page(page); 1831800d8c63SKirill A. Shutemov goto alloc_nohuge; 1832800d8c63SKirill A. Shutemov } 183354af6042SHugh Dickins failed: 1834267a4c76SHugh Dickins if (swap.val && !shmem_confirm_swap(mapping, index, swap)) 183554af6042SHugh Dickins error = -EEXIST; 1836d1899228SHugh Dickins unlock: 183727ab7006SHugh Dickins if (page) { 183854af6042SHugh Dickins unlock_page(page); 183909cbfeafSKirill A. Shutemov put_page(page); 184054af6042SHugh Dickins } 184154af6042SHugh Dickins if (error == -ENOSPC && !once++) { 18424595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 184354af6042SHugh Dickins shmem_recalc_inode(inode); 18444595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 18451da177e4SLinus Torvalds goto repeat; 1846d8dc74f2SAdrian Bunk } 1847d1899228SHugh Dickins if (error == -EEXIST) /* from above or from radix_tree_insert */ 184854af6042SHugh Dickins goto repeat; 184954af6042SHugh Dickins return error; 18501da177e4SLinus Torvalds } 18511da177e4SLinus Torvalds 185210d20bd2SLinus Torvalds /* 185310d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 185410d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 185510d20bd2SLinus Torvalds * target. 
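 *
 * [Editor's note, hedged: for contrast, autoremove_wake_function() in
 * kernel/sched/wait.c dequeues the entry only when the wakeup succeeds:]
 *
 *	ret = default_wake_function(wait, mode, sync, key);
 *	if (ret)
 *		list_del_init(&wait->task_list);
 *
 * Unconditional removal matters here because the wait queue head lives
 * on the hole-punching task's stack (see shmem_fault() below): once
 * wake_up_all() returns, no entry may remain linked to it, even one
 * whose task had already been woken by some other means.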
185610d20bd2SLinus Torvalds */ 185710d20bd2SLinus Torvalds static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 185810d20bd2SLinus Torvalds { 185910d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 186010d20bd2SLinus Torvalds list_del_init(&wait->task_list); 186110d20bd2SLinus Torvalds return ret; 186210d20bd2SLinus Torvalds } 186310d20bd2SLinus Torvalds 18641da177e4SLinus Torvalds static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 18651da177e4SLinus Torvalds { 1866496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 18679e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 1868657e3038SKirill A. Shutemov enum sgp_type sgp; 18691da177e4SLinus Torvalds int error; 187068da9f05SHugh Dickins int ret = VM_FAULT_LOCKED; 18711da177e4SLinus Torvalds 1872f00cdc6dSHugh Dickins /* 1873f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 1874f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 1875f00cdc6dSHugh Dickins * locks writers out with its hold on i_mutex. So refrain from 18768e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 18778e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 18788e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 18798e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 18808e205f77SHugh Dickins * 18818e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 18828e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 18838e205f77SHugh Dickins * we just need to make racing faults a rare case. 18848e205f77SHugh Dickins * 18858e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 18868e205f77SHugh Dickins * standard mutex or completion: but we cannot take i_mutex in fault, 18878e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 
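 *
 * [Editor's sketch, not in the original source: the userspace operation
 * being guarded against, with a hypothetical file name and size:]
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);
 *		if (fd < 0 || ftruncate(fd, 1 << 20) != 0)
 *			return 1;
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *				 FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *	}
 *
 * If a second task keeps faulting pages of an mmap() of the same range
 * while that fallocate() runs, it could, without the check below,
 * re-instantiate pages as fast as shmem_undo_range() removes them and
 * stall the punch indefinitely.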
1888f00cdc6dSHugh Dickins */ 1889f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 1890f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 1891f00cdc6dSHugh Dickins 1892f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 1893f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 18948e205f77SHugh Dickins if (shmem_falloc && 18958e205f77SHugh Dickins shmem_falloc->waitq && 18968e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 18978e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 18988e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 189910d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 19008e205f77SHugh Dickins 19018e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 1902f00cdc6dSHugh Dickins if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1903f00cdc6dSHugh Dickins !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 19048e205f77SHugh Dickins /* It's polite to up mmap_sem if we can */ 1905f00cdc6dSHugh Dickins up_read(&vma->vm_mm->mmap_sem); 19068e205f77SHugh Dickins ret = VM_FAULT_RETRY; 1907f00cdc6dSHugh Dickins } 19088e205f77SHugh Dickins 19098e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 19108e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 19118e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 19128e205f77SHugh Dickins spin_unlock(&inode->i_lock); 19138e205f77SHugh Dickins schedule(); 19148e205f77SHugh Dickins 19158e205f77SHugh Dickins /* 19168e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 19178e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 19188e205f77SHugh Dickins * is usually invalid by the time we reach here, but 19198e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 19208e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 19218e205f77SHugh Dickins */ 19228e205f77SHugh Dickins spin_lock(&inode->i_lock); 19238e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 19248e205f77SHugh Dickins spin_unlock(&inode->i_lock); 19258e205f77SHugh Dickins return ret; 1926f00cdc6dSHugh Dickins } 19278e205f77SHugh Dickins spin_unlock(&inode->i_lock); 1928f00cdc6dSHugh Dickins } 1929f00cdc6dSHugh Dickins 1930657e3038SKirill A. Shutemov sgp = SGP_CACHE; 1931657e3038SKirill A. Shutemov if (vma->vm_flags & VM_HUGEPAGE) 1932657e3038SKirill A. Shutemov sgp = SGP_HUGE; 1933657e3038SKirill A. Shutemov else if (vma->vm_flags & VM_NOHUGEPAGE) 1934657e3038SKirill A. Shutemov sgp = SGP_NOHUGE; 1935657e3038SKirill A. Shutemov 1936657e3038SKirill A. Shutemov error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 19379e18eb29SAndres Lagar-Cavilla gfp, vma->vm_mm, &ret); 19381da177e4SLinus Torvalds if (error) 19391da177e4SLinus Torvalds return ((error == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS); 194068da9f05SHugh Dickins return ret; 19411da177e4SLinus Torvalds } 19421da177e4SLinus Torvalds 1943c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 1944c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 1945c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 1946c01d5b30SHugh Dickins { 1947c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 1948c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 1949c01d5b30SHugh Dickins unsigned long addr; 1950c01d5b30SHugh Dickins unsigned long offset; 1951c01d5b30SHugh Dickins unsigned long inflated_len; 1952c01d5b30SHugh Dickins unsigned long inflated_addr; 1953c01d5b30SHugh Dickins unsigned long inflated_offset; 1954c01d5b30SHugh Dickins 1955c01d5b30SHugh Dickins if (len > TASK_SIZE) 1956c01d5b30SHugh Dickins return -ENOMEM; 1957c01d5b30SHugh Dickins 1958c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 1959c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 1960c01d5b30SHugh Dickins 1961e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1962c01d5b30SHugh Dickins return addr; 1963c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 1964c01d5b30SHugh Dickins return addr; 1965c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 1966c01d5b30SHugh Dickins return addr; 1967c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 1968c01d5b30SHugh Dickins return addr; 1969c01d5b30SHugh Dickins 1970c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 1971c01d5b30SHugh Dickins return addr; 1972c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 1973c01d5b30SHugh Dickins return addr; 1974c01d5b30SHugh Dickins if (flags & MAP_FIXED) 1975c01d5b30SHugh Dickins return addr; 1976c01d5b30SHugh Dickins /* 1977c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 1978c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 1979c01d5b30SHugh Dickins * But if caller specified an address hint, respect that as before. 1980c01d5b30SHugh Dickins */ 1981c01d5b30SHugh Dickins if (uaddr) 1982c01d5b30SHugh Dickins return addr; 1983c01d5b30SHugh Dickins 1984c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 1985c01d5b30SHugh Dickins struct super_block *sb; 1986c01d5b30SHugh Dickins 1987c01d5b30SHugh Dickins if (file) { 1988c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 1989c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 1990c01d5b30SHugh Dickins } else { 1991c01d5b30SHugh Dickins /* 1992c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 1993c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
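 *
 * [Editor's worked example, not in the original source, for the
 * alignment arithmetic below -- assuming 4KB pages, a 2MB
 * HPAGE_PMD_SIZE, and a hypothetical address from get_area():]
 *
 *	pgoff = 3:	offset = (3 << 12) & 0x1fffff = 0x3000
 *	inflated_len	= len + 2MB - 4KB
 *	get_area(NULL, 0, inflated_len, 0, flags) = 0x7f0000001000
 *	inflated_offset	= 0x7f0000001000 & 0x1fffff = 0x1000
 *	inflated_addr	+= 0x3000 - 0x1000	->  0x7f0000003000
 *	(inflated_offset <= offset, so no extra HPAGE_PMD_SIZE bump)
 *
 * The result places (address - offset) on a 2MB boundary, so whole
 * PMD-sized extents of the file can be mapped with huge entries.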
1994c01d5b30SHugh Dickins */ 1995c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 1996c01d5b30SHugh Dickins return addr; 1997c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 1998c01d5b30SHugh Dickins } 19993089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2000c01d5b30SHugh Dickins return addr; 2001c01d5b30SHugh Dickins } 2002c01d5b30SHugh Dickins 2003c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2004c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2005c01d5b30SHugh Dickins return addr; 2006c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2007c01d5b30SHugh Dickins return addr; 2008c01d5b30SHugh Dickins 2009c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2010c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2011c01d5b30SHugh Dickins return addr; 2012c01d5b30SHugh Dickins if (inflated_len < len) 2013c01d5b30SHugh Dickins return addr; 2014c01d5b30SHugh Dickins 2015c01d5b30SHugh Dickins inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); 2016c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2017c01d5b30SHugh Dickins return addr; 2018c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2019c01d5b30SHugh Dickins return addr; 2020c01d5b30SHugh Dickins 2021c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2022c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2023c01d5b30SHugh Dickins if (inflated_offset > offset) 2024c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2025c01d5b30SHugh Dickins 2026c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2027c01d5b30SHugh Dickins return addr; 2028c01d5b30SHugh Dickins return inflated_addr; 2029c01d5b30SHugh Dickins } 2030c01d5b30SHugh Dickins 20311da177e4SLinus Torvalds #ifdef CONFIG_NUMA 203241ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 20331da177e4SLinus Torvalds { 2034496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 203541ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 20361da177e4SLinus Torvalds } 20371da177e4SLinus Torvalds 2038d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2039d8dc74f2SAdrian Bunk unsigned long addr) 20401da177e4SLinus Torvalds { 2041496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 204241ffe5d5SHugh Dickins pgoff_t index; 20431da177e4SLinus Torvalds 204441ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 204541ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 20461da177e4SLinus Torvalds } 20471da177e4SLinus Torvalds #endif 20481da177e4SLinus Torvalds 20491da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user) 20501da177e4SLinus Torvalds { 2051496ad9aaSAl Viro struct inode *inode = file_inode(file); 20521da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 20531da177e4SLinus Torvalds int retval = -ENOMEM; 20541da177e4SLinus Torvalds 20554595ef88SKirill A. 
Shutemov spin_lock_irq(&info->lock); 20561da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 20571da177e4SLinus Torvalds if (!user_shm_lock(inode->i_size, user)) 20581da177e4SLinus Torvalds goto out_nomem; 20591da177e4SLinus Torvalds info->flags |= VM_LOCKED; 206089e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 20611da177e4SLinus Torvalds } 20621da177e4SLinus Torvalds if (!lock && (info->flags & VM_LOCKED) && user) { 20631da177e4SLinus Torvalds user_shm_unlock(inode->i_size, user); 20641da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 206589e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 20661da177e4SLinus Torvalds } 20671da177e4SLinus Torvalds retval = 0; 206889e004eaSLee Schermerhorn 20691da177e4SLinus Torvalds out_nomem: 20704595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 20711da177e4SLinus Torvalds return retval; 20721da177e4SLinus Torvalds } 20731da177e4SLinus Torvalds 20749b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 20751da177e4SLinus Torvalds { 20761da177e4SLinus Torvalds file_accessed(file); 20771da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2078e496cf3dSKirill A. Shutemov if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2079f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2080f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 2081f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 2082f3f0e1d2SKirill A. Shutemov } 20831da177e4SLinus Torvalds return 0; 20841da177e4SLinus Torvalds } 20851da177e4SLinus Torvalds 2086454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 208709208d15SAl Viro umode_t mode, dev_t dev, unsigned long flags) 20881da177e4SLinus Torvalds { 20891da177e4SLinus Torvalds struct inode *inode; 20901da177e4SLinus Torvalds struct shmem_inode_info *info; 20911da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 20921da177e4SLinus Torvalds 20935b04c689SPavel Emelyanov if (shmem_reserve_inode(sb)) 20941da177e4SLinus Torvalds return NULL; 20951da177e4SLinus Torvalds 20961da177e4SLinus Torvalds inode = new_inode(sb); 20971da177e4SLinus Torvalds if (inode) { 209885fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 2099454abafeSDmitry Monakhov inode_init_owner(inode, dir, mode); 21001da177e4SLinus Torvalds inode->i_blocks = 0; 2101078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 210291828a40SDavid M. Grimes inode->i_generation = get_seconds(); 21031da177e4SLinus Torvalds info = SHMEM_I(inode); 21041da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 21051da177e4SLinus Torvalds spin_lock_init(&info->lock); 210640e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 21070b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2108779750d2SKirill A. 
Shutemov INIT_LIST_HEAD(&info->shrinklist); 21091da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 211038f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 211172c04902SAl Viro cache_no_acl(inode); 21121da177e4SLinus Torvalds 21131da177e4SLinus Torvalds switch (mode & S_IFMT) { 21141da177e4SLinus Torvalds default: 211539f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 21161da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 21171da177e4SLinus Torvalds break; 21181da177e4SLinus Torvalds case S_IFREG: 211914fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 21201da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 21211da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 212271fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 212371fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 21241da177e4SLinus Torvalds break; 21251da177e4SLinus Torvalds case S_IFDIR: 2126d8c76e6fSDave Hansen inc_nlink(inode); 21271da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 21281da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 21291da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 21301da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 21311da177e4SLinus Torvalds break; 21321da177e4SLinus Torvalds case S_IFLNK: 21331da177e4SLinus Torvalds /* 21341da177e4SLinus Torvalds * Must not load anything in the rbtree, 21351da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 21361da177e4SLinus Torvalds */ 213771fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 21381da177e4SLinus Torvalds break; 21391da177e4SLinus Torvalds } 21405b04c689SPavel Emelyanov } else 21415b04c689SPavel Emelyanov shmem_free_inode(sb); 21421da177e4SLinus Torvalds return inode; 21431da177e4SLinus Torvalds } 21441da177e4SLinus Torvalds 21450cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping) 21460cd6144aSJohannes Weiner { 2147f0774d88SSasha Levin if (!mapping->host) 2148f0774d88SSasha Levin return false; 2149f0774d88SSasha Levin 215097b713baSChristoph Hellwig return mapping->host->i_sb->s_op == &shmem_ops; 21510cd6144aSJohannes Weiner } 21520cd6144aSJohannes Weiner 21531da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 215492e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 215569f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 21561da177e4SLinus Torvalds 21576d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR 21586d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 21596d9d88d0SJarkko Sakkinen #else 21606d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL 21616d9d88d0SJarkko Sakkinen #endif 21626d9d88d0SJarkko Sakkinen 21631da177e4SLinus Torvalds static int 2164800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 2165800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 2166800d15a5SNick Piggin struct page **pagep, void **fsdata) 21671da177e4SLinus Torvalds { 2168800d15a5SNick Piggin struct inode *inode = mapping->host; 216940e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 217009cbfeafSKirill A. 
Shutemov pgoff_t index = pos >> PAGE_SHIFT; 217140e041a2SDavid Herrmann 217240e041a2SDavid Herrmann /* i_mutex is held by caller */ 217340e041a2SDavid Herrmann if (unlikely(info->seals)) { 217440e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) 217540e041a2SDavid Herrmann return -EPERM; 217640e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 217740e041a2SDavid Herrmann return -EPERM; 217840e041a2SDavid Herrmann } 217940e041a2SDavid Herrmann 21809e18eb29SAndres Lagar-Cavilla return shmem_getpage(inode, index, pagep, SGP_WRITE); 2181800d15a5SNick Piggin } 2182800d15a5SNick Piggin 2183800d15a5SNick Piggin static int 2184800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2185800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2186800d15a5SNick Piggin struct page *page, void *fsdata) 2187800d15a5SNick Piggin { 2188800d15a5SNick Piggin struct inode *inode = mapping->host; 2189800d15a5SNick Piggin 2190800d15a5SNick Piggin if (pos + copied > inode->i_size) 2191800d15a5SNick Piggin i_size_write(inode, pos + copied); 2192800d15a5SNick Piggin 2193ec9516fbSHugh Dickins if (!PageUptodate(page)) { 2194800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 2195800d8c63SKirill A. Shutemov if (PageTransCompound(page)) { 2196800d8c63SKirill A. Shutemov int i; 2197800d8c63SKirill A. Shutemov 2198800d8c63SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2199800d8c63SKirill A. Shutemov if (head + i == page) 2200800d8c63SKirill A. Shutemov continue; 2201800d8c63SKirill A. Shutemov clear_highpage(head + i); 2202800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 2203800d8c63SKirill A. Shutemov } 2204800d8c63SKirill A. Shutemov } 220509cbfeafSKirill A. Shutemov if (copied < PAGE_SIZE) { 220609cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2207ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 220809cbfeafSKirill A. Shutemov from + copied, PAGE_SIZE); 2209ec9516fbSHugh Dickins } 2210800d8c63SKirill A. Shutemov SetPageUptodate(head); 2211ec9516fbSHugh Dickins } 2212d3602444SHugh Dickins set_page_dirty(page); 22136746aff7SWu Fengguang unlock_page(page); 221409cbfeafSKirill A. Shutemov put_page(page); 2215d3602444SHugh Dickins 2216800d15a5SNick Piggin return copied; 22171da177e4SLinus Torvalds } 22181da177e4SLinus Torvalds 22192ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 22201da177e4SLinus Torvalds { 22216e58e79dSAl Viro struct file *file = iocb->ki_filp; 22226e58e79dSAl Viro struct inode *inode = file_inode(file); 22231da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 222441ffe5d5SHugh Dickins pgoff_t index; 222541ffe5d5SHugh Dickins unsigned long offset; 2226a0ee5ec5SHugh Dickins enum sgp_type sgp = SGP_READ; 2227f7c1d074SGeert Uytterhoeven int error = 0; 2228cb66a7a1SAl Viro ssize_t retval = 0; 22296e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2230a0ee5ec5SHugh Dickins 2231a0ee5ec5SHugh Dickins /* 2232a0ee5ec5SHugh Dickins * Might this read be for a stacking filesystem? Then when reading 2233a0ee5ec5SHugh Dickins * holes of a sparse file, we actually need to allocate those pages, 2234a0ee5ec5SHugh Dickins * and even mark them dirty, so it cannot exceed the max_blocks limit. 2235a0ee5ec5SHugh Dickins */ 2236777eda2cSAl Viro if (!iter_is_iovec(to)) 223775edd345SHugh Dickins sgp = SGP_CACHE; 22381da177e4SLinus Torvalds 223909cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT; 224009cbfeafSKirill A. 
Shutemov offset = *ppos & ~PAGE_MASK; 22411da177e4SLinus Torvalds 22421da177e4SLinus Torvalds for (;;) { 22431da177e4SLinus Torvalds struct page *page = NULL; 224441ffe5d5SHugh Dickins pgoff_t end_index; 224541ffe5d5SHugh Dickins unsigned long nr, ret; 22461da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 22471da177e4SLinus Torvalds 224809cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 22491da177e4SLinus Torvalds if (index > end_index) 22501da177e4SLinus Torvalds break; 22511da177e4SLinus Torvalds if (index == end_index) { 225209cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 22531da177e4SLinus Torvalds if (nr <= offset) 22541da177e4SLinus Torvalds break; 22551da177e4SLinus Torvalds } 22561da177e4SLinus Torvalds 22579e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, sgp); 22586e58e79dSAl Viro if (error) { 22596e58e79dSAl Viro if (error == -EINVAL) 22606e58e79dSAl Viro error = 0; 22611da177e4SLinus Torvalds break; 22621da177e4SLinus Torvalds } 226375edd345SHugh Dickins if (page) { 226475edd345SHugh Dickins if (sgp == SGP_CACHE) 226575edd345SHugh Dickins set_page_dirty(page); 2266d3602444SHugh Dickins unlock_page(page); 226775edd345SHugh Dickins } 22681da177e4SLinus Torvalds 22691da177e4SLinus Torvalds /* 22701da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 22711b1dcc1bSJes Sorensen * are called without i_mutex protection against truncate 22721da177e4SLinus Torvalds */ 227309cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 22741da177e4SLinus Torvalds i_size = i_size_read(inode); 227509cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 22761da177e4SLinus Torvalds if (index == end_index) { 227709cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 22781da177e4SLinus Torvalds if (nr <= offset) { 22791da177e4SLinus Torvalds if (page) 228009cbfeafSKirill A. Shutemov put_page(page); 22811da177e4SLinus Torvalds break; 22821da177e4SLinus Torvalds } 22831da177e4SLinus Torvalds } 22841da177e4SLinus Torvalds nr -= offset; 22851da177e4SLinus Torvalds 22861da177e4SLinus Torvalds if (page) { 22871da177e4SLinus Torvalds /* 22881da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 22891da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 22901da177e4SLinus Torvalds * before reading the page on the kernel side. 22911da177e4SLinus Torvalds */ 22921da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 22931da177e4SLinus Torvalds flush_dcache_page(page); 22941da177e4SLinus Torvalds /* 22951da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 22961da177e4SLinus Torvalds */ 22971da177e4SLinus Torvalds if (!offset) 22981da177e4SLinus Torvalds mark_page_accessed(page); 2299b5810039SNick Piggin } else { 23001da177e4SLinus Torvalds page = ZERO_PAGE(0); 230109cbfeafSKirill A. Shutemov get_page(page); 2302b5810039SNick Piggin } 23031da177e4SLinus Torvalds 23041da177e4SLinus Torvalds /* 23051da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 23061da177e4SLinus Torvalds * now we can copy it to user space... 23071da177e4SLinus Torvalds */ 23082ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 23096e58e79dSAl Viro retval += ret; 23101da177e4SLinus Torvalds offset += ret; 231109cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 231209cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 23131da177e4SLinus Torvalds 231409cbfeafSKirill A. 
Shutemov put_page(page); 23152ba5bbedSAl Viro if (!iov_iter_count(to)) 23161da177e4SLinus Torvalds break; 23176e58e79dSAl Viro if (ret < nr) { 23186e58e79dSAl Viro error = -EFAULT; 23196e58e79dSAl Viro break; 23206e58e79dSAl Viro } 23211da177e4SLinus Torvalds cond_resched(); 23221da177e4SLinus Torvalds } 23231da177e4SLinus Torvalds 232409cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 23256e58e79dSAl Viro file_accessed(file); 23266e58e79dSAl Viro return retval ? retval : error; 23271da177e4SLinus Torvalds } 23281da177e4SLinus Torvalds 2329220f2ac9SHugh Dickins /* 2330220f2ac9SHugh Dickins * llseek SEEK_DATA or SEEK_HOLE through the radix_tree. 2331220f2ac9SHugh Dickins */ 2332220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 2333965c8e59SAndrew Morton pgoff_t index, pgoff_t end, int whence) 2334220f2ac9SHugh Dickins { 2335220f2ac9SHugh Dickins struct page *page; 2336220f2ac9SHugh Dickins struct pagevec pvec; 2337220f2ac9SHugh Dickins pgoff_t indices[PAGEVEC_SIZE]; 2338220f2ac9SHugh Dickins bool done = false; 2339220f2ac9SHugh Dickins int i; 2340220f2ac9SHugh Dickins 2341220f2ac9SHugh Dickins pagevec_init(&pvec, 0); 2342220f2ac9SHugh Dickins pvec.nr = 1; /* start small: we may be there already */ 2343220f2ac9SHugh Dickins while (!done) { 23440cd6144aSJohannes Weiner pvec.nr = find_get_entries(mapping, index, 2345220f2ac9SHugh Dickins pvec.nr, pvec.pages, indices); 2346220f2ac9SHugh Dickins if (!pvec.nr) { 2347965c8e59SAndrew Morton if (whence == SEEK_DATA) 2348220f2ac9SHugh Dickins index = end; 2349220f2ac9SHugh Dickins break; 2350220f2ac9SHugh Dickins } 2351220f2ac9SHugh Dickins for (i = 0; i < pvec.nr; i++, index++) { 2352220f2ac9SHugh Dickins if (index < indices[i]) { 2353965c8e59SAndrew Morton if (whence == SEEK_HOLE) { 2354220f2ac9SHugh Dickins done = true; 2355220f2ac9SHugh Dickins break; 2356220f2ac9SHugh Dickins } 2357220f2ac9SHugh Dickins index = indices[i]; 2358220f2ac9SHugh Dickins } 2359220f2ac9SHugh Dickins page = pvec.pages[i]; 2360220f2ac9SHugh Dickins if (page && !radix_tree_exceptional_entry(page)) { 2361220f2ac9SHugh Dickins if (!PageUptodate(page)) 2362220f2ac9SHugh Dickins page = NULL; 2363220f2ac9SHugh Dickins } 2364220f2ac9SHugh Dickins if (index >= end || 2365965c8e59SAndrew Morton (page && whence == SEEK_DATA) || 2366965c8e59SAndrew Morton (!page && whence == SEEK_HOLE)) { 2367220f2ac9SHugh Dickins done = true; 2368220f2ac9SHugh Dickins break; 2369220f2ac9SHugh Dickins } 2370220f2ac9SHugh Dickins } 23710cd6144aSJohannes Weiner pagevec_remove_exceptionals(&pvec); 2372220f2ac9SHugh Dickins pagevec_release(&pvec); 2373220f2ac9SHugh Dickins pvec.nr = PAGEVEC_SIZE; 2374220f2ac9SHugh Dickins cond_resched(); 2375220f2ac9SHugh Dickins } 2376220f2ac9SHugh Dickins return index; 2377220f2ac9SHugh Dickins } 2378220f2ac9SHugh Dickins 2379965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2380220f2ac9SHugh Dickins { 2381220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2382220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2383220f2ac9SHugh Dickins pgoff_t start, end; 2384220f2ac9SHugh Dickins loff_t new_offset; 2385220f2ac9SHugh Dickins 2386965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2387965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2388220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 23895955102cSAl Viro inode_lock(inode); 2390220f2ac9SHugh Dickins /* We're holding 
i_mutex so we can access i_size directly */ 2391220f2ac9SHugh Dickins 2392220f2ac9SHugh Dickins if (offset < 0) 2393220f2ac9SHugh Dickins offset = -EINVAL; 2394220f2ac9SHugh Dickins else if (offset >= inode->i_size) 2395220f2ac9SHugh Dickins offset = -ENXIO; 2396220f2ac9SHugh Dickins else { 239709cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 239809cbfeafSKirill A. Shutemov end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2399965c8e59SAndrew Morton new_offset = shmem_seek_hole_data(mapping, start, end, whence); 240009cbfeafSKirill A. Shutemov new_offset <<= PAGE_SHIFT; 2401220f2ac9SHugh Dickins if (new_offset > offset) { 2402220f2ac9SHugh Dickins if (new_offset < inode->i_size) 2403220f2ac9SHugh Dickins offset = new_offset; 2404965c8e59SAndrew Morton else if (whence == SEEK_DATA) 2405220f2ac9SHugh Dickins offset = -ENXIO; 2406220f2ac9SHugh Dickins else 2407220f2ac9SHugh Dickins offset = inode->i_size; 2408220f2ac9SHugh Dickins } 2409220f2ac9SHugh Dickins } 2410220f2ac9SHugh Dickins 2411387aae6fSHugh Dickins if (offset >= 0) 241246a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 24135955102cSAl Viro inode_unlock(inode); 2414220f2ac9SHugh Dickins return offset; 2415220f2ac9SHugh Dickins } 2416220f2ac9SHugh Dickins 241705f65b5cSDavid Herrmann /* 241805f65b5cSDavid Herrmann * We need a tag: a new tag would expand every radix_tree_node by 8 bytes, 241905f65b5cSDavid Herrmann * so reuse a tag which we firmly believe is never set or cleared on shmem. 242005f65b5cSDavid Herrmann */ 242105f65b5cSDavid Herrmann #define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE 242205f65b5cSDavid Herrmann #define LAST_SCAN 4 /* about 150ms max */ 242305f65b5cSDavid Herrmann 242405f65b5cSDavid Herrmann static void shmem_tag_pins(struct address_space *mapping) 242505f65b5cSDavid Herrmann { 242605f65b5cSDavid Herrmann struct radix_tree_iter iter; 242705f65b5cSDavid Herrmann void **slot; 242805f65b5cSDavid Herrmann pgoff_t start; 242905f65b5cSDavid Herrmann struct page *page; 243005f65b5cSDavid Herrmann 243105f65b5cSDavid Herrmann lru_add_drain(); 243205f65b5cSDavid Herrmann start = 0; 243305f65b5cSDavid Herrmann rcu_read_lock(); 243405f65b5cSDavid Herrmann 243505f65b5cSDavid Herrmann radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 243605f65b5cSDavid Herrmann page = radix_tree_deref_slot(slot); 243705f65b5cSDavid Herrmann if (!page || radix_tree_exception(page)) { 24382cf938aaSMatthew Wilcox if (radix_tree_deref_retry(page)) { 24392cf938aaSMatthew Wilcox slot = radix_tree_iter_retry(&iter); 24402cf938aaSMatthew Wilcox continue; 24412cf938aaSMatthew Wilcox } 244205f65b5cSDavid Herrmann } else if (page_count(page) - page_mapcount(page) > 1) { 244305f65b5cSDavid Herrmann spin_lock_irq(&mapping->tree_lock); 244405f65b5cSDavid Herrmann radix_tree_tag_set(&mapping->page_tree, iter.index, 244505f65b5cSDavid Herrmann SHMEM_TAG_PINNED); 244605f65b5cSDavid Herrmann spin_unlock_irq(&mapping->tree_lock); 244705f65b5cSDavid Herrmann } 244805f65b5cSDavid Herrmann 244905f65b5cSDavid Herrmann if (need_resched()) { 245005f65b5cSDavid Herrmann cond_resched_rcu(); 24517165092fSMatthew Wilcox slot = radix_tree_iter_next(&iter); 245205f65b5cSDavid Herrmann } 245305f65b5cSDavid Herrmann } 245405f65b5cSDavid Herrmann rcu_read_unlock(); 245505f65b5cSDavid Herrmann } 245605f65b5cSDavid Herrmann 245705f65b5cSDavid Herrmann /* 245805f65b5cSDavid Herrmann * Setting SEAL_WRITE requires us to verify there's no pending writer. 
However, 245905f65b5cSDavid Herrmann * via get_user_pages(), drivers might have some pending I/O without any active 246005f65b5cSDavid Herrmann * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages 246105f65b5cSDavid Herrmann * and see whether they have an elevated ref-count. If so, we tag them and wait for 246205f65b5cSDavid Herrmann * them to be dropped. 246305f65b5cSDavid Herrmann * The caller must guarantee that no new user will acquire writable references 246405f65b5cSDavid Herrmann * to those pages to avoid races. 246505f65b5cSDavid Herrmann */ 246640e041a2SDavid Herrmann static int shmem_wait_for_pins(struct address_space *mapping) 246740e041a2SDavid Herrmann { 246805f65b5cSDavid Herrmann struct radix_tree_iter iter; 246905f65b5cSDavid Herrmann void **slot; 247005f65b5cSDavid Herrmann pgoff_t start; 247105f65b5cSDavid Herrmann struct page *page; 247205f65b5cSDavid Herrmann int error, scan; 247305f65b5cSDavid Herrmann 247405f65b5cSDavid Herrmann shmem_tag_pins(mapping); 247505f65b5cSDavid Herrmann 247605f65b5cSDavid Herrmann error = 0; 247705f65b5cSDavid Herrmann for (scan = 0; scan <= LAST_SCAN; scan++) { 247805f65b5cSDavid Herrmann if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED)) 247905f65b5cSDavid Herrmann break; 248005f65b5cSDavid Herrmann 248105f65b5cSDavid Herrmann if (!scan) 248205f65b5cSDavid Herrmann lru_add_drain_all(); 248305f65b5cSDavid Herrmann else if (schedule_timeout_killable((HZ << scan) / 200)) 248405f65b5cSDavid Herrmann scan = LAST_SCAN; 248505f65b5cSDavid Herrmann 248605f65b5cSDavid Herrmann start = 0; 248705f65b5cSDavid Herrmann rcu_read_lock(); 248805f65b5cSDavid Herrmann radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 248905f65b5cSDavid Herrmann start, SHMEM_TAG_PINNED) { 249005f65b5cSDavid Herrmann 249105f65b5cSDavid Herrmann page = radix_tree_deref_slot(slot); 249205f65b5cSDavid Herrmann if (radix_tree_exception(page)) { 24932cf938aaSMatthew Wilcox if (radix_tree_deref_retry(page)) { 24942cf938aaSMatthew Wilcox slot = radix_tree_iter_retry(&iter); 24952cf938aaSMatthew Wilcox continue; 24962cf938aaSMatthew Wilcox } 249705f65b5cSDavid Herrmann 249805f65b5cSDavid Herrmann page = NULL; 249905f65b5cSDavid Herrmann } 250005f65b5cSDavid Herrmann 250105f65b5cSDavid Herrmann if (page && 250205f65b5cSDavid Herrmann page_count(page) - page_mapcount(page) != 1) { 250305f65b5cSDavid Herrmann if (scan < LAST_SCAN) 250405f65b5cSDavid Herrmann goto continue_resched; 250505f65b5cSDavid Herrmann 250605f65b5cSDavid Herrmann /* 250705f65b5cSDavid Herrmann * On the last scan, we clean up all those tags 250805f65b5cSDavid Herrmann * we inserted; but make a note that we still 250905f65b5cSDavid Herrmann * found pages pinned.
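 *
 * (For reference, the arithmetic behind the "about 150ms max" note at
 * LAST_SCAN: scan 0 only drains the LRU lists, and scans 1-4 first
 * sleep (HZ << scan) / 200 jiffies, i.e. roughly
 * 10ms + 20ms + 40ms + 80ms = 150ms of waiting in total.)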
251005f65b5cSDavid Herrmann */ 251105f65b5cSDavid Herrmann error = -EBUSY; 251205f65b5cSDavid Herrmann } 251305f65b5cSDavid Herrmann 251405f65b5cSDavid Herrmann spin_lock_irq(&mapping->tree_lock); 251505f65b5cSDavid Herrmann radix_tree_tag_clear(&mapping->page_tree, 251605f65b5cSDavid Herrmann iter.index, SHMEM_TAG_PINNED); 251705f65b5cSDavid Herrmann spin_unlock_irq(&mapping->tree_lock); 251805f65b5cSDavid Herrmann continue_resched: 251905f65b5cSDavid Herrmann if (need_resched()) { 252005f65b5cSDavid Herrmann cond_resched_rcu(); 25217165092fSMatthew Wilcox slot = radix_tree_iter_next(&iter); 252205f65b5cSDavid Herrmann } 252305f65b5cSDavid Herrmann } 252405f65b5cSDavid Herrmann rcu_read_unlock(); 252505f65b5cSDavid Herrmann } 252605f65b5cSDavid Herrmann 252705f65b5cSDavid Herrmann return error; 252840e041a2SDavid Herrmann } 252940e041a2SDavid Herrmann 253040e041a2SDavid Herrmann #define F_ALL_SEALS (F_SEAL_SEAL | \ 253140e041a2SDavid Herrmann F_SEAL_SHRINK | \ 253240e041a2SDavid Herrmann F_SEAL_GROW | \ 253340e041a2SDavid Herrmann F_SEAL_WRITE) 253440e041a2SDavid Herrmann 253540e041a2SDavid Herrmann int shmem_add_seals(struct file *file, unsigned int seals) 253640e041a2SDavid Herrmann { 253740e041a2SDavid Herrmann struct inode *inode = file_inode(file); 253840e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 253940e041a2SDavid Herrmann int error; 254040e041a2SDavid Herrmann 254140e041a2SDavid Herrmann /* 254240e041a2SDavid Herrmann * SEALING 254340e041a2SDavid Herrmann * Sealing allows multiple parties to share a shmem-file but restrict 254440e041a2SDavid Herrmann * access to a specific subset of file operations. Seals can only be 254540e041a2SDavid Herrmann * added, but never removed. This way, mutually untrusted parties can 254640e041a2SDavid Herrmann * share common memory regions with a well-defined policy. A malicious 254740e041a2SDavid Herrmann * peer can thus never perform unwanted operations on a shared object. 254840e041a2SDavid Herrmann * 254940e041a2SDavid Herrmann * Seals are only supported on special shmem-files and always affect 255040e041a2SDavid Herrmann * the whole underlying inode. Once a seal is set, it may prevent some 255140e041a2SDavid Herrmann * kinds of access to the file. Currently, the following seals are 255240e041a2SDavid Herrmann * defined: 255340e041a2SDavid Herrmann * SEAL_SEAL: Prevent further seals from being set on this file 255440e041a2SDavid Herrmann * SEAL_SHRINK: Prevent the file from shrinking 255540e041a2SDavid Herrmann * SEAL_GROW: Prevent the file from growing 255640e041a2SDavid Herrmann * SEAL_WRITE: Prevent write access to the file 255740e041a2SDavid Herrmann * 255840e041a2SDavid Herrmann * As we don't require any trust relationship between two parties, we 255940e041a2SDavid Herrmann * must prevent seals from being removed. Therefore, sealing a file 256040e041a2SDavid Herrmann * only adds a given set of seals to the file, it never touches 256140e041a2SDavid Herrmann * existing seals. Furthermore, the "setting seals"-operation can be 256240e041a2SDavid Herrmann * sealed itself, which basically prevents any further seal from being 256340e041a2SDavid Herrmann * added. 256440e041a2SDavid Herrmann * 256540e041a2SDavid Herrmann * Semantics of sealing are only defined on volatile files. Only 256640e041a2SDavid Herrmann * anonymous shmem files support sealing. More importantly, seals are 256740e041a2SDavid Herrmann * never written to disk. Therefore, there's no plan to support it on 256840e041a2SDavid Herrmann * other file types. 
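 *
 * A minimal userspace sketch of the intended use (illustrative only,
 * following memfd_create(2) and fcntl(2), not an API defined in this
 * file):
 *
 *	int fd = memfd_create("shared", MFD_ALLOW_SEALING);
 *	ftruncate(fd, size);
 *	... fill the region, then pass fd to an untrusted peer ...
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE);
 *
 * after which the peer can mmap() and read the contents, but can no
 * longer change them, shrink the file, or grow it.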
256940e041a2SDavid Herrmann */ 257040e041a2SDavid Herrmann 257140e041a2SDavid Herrmann if (file->f_op != &shmem_file_operations) 257240e041a2SDavid Herrmann return -EINVAL; 257340e041a2SDavid Herrmann if (!(file->f_mode & FMODE_WRITE)) 257440e041a2SDavid Herrmann return -EPERM; 257540e041a2SDavid Herrmann if (seals & ~(unsigned int)F_ALL_SEALS) 257640e041a2SDavid Herrmann return -EINVAL; 257740e041a2SDavid Herrmann 25785955102cSAl Viro inode_lock(inode); 257940e041a2SDavid Herrmann 258040e041a2SDavid Herrmann if (info->seals & F_SEAL_SEAL) { 258140e041a2SDavid Herrmann error = -EPERM; 258240e041a2SDavid Herrmann goto unlock; 258340e041a2SDavid Herrmann } 258440e041a2SDavid Herrmann 258540e041a2SDavid Herrmann if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) { 258640e041a2SDavid Herrmann error = mapping_deny_writable(file->f_mapping); 258740e041a2SDavid Herrmann if (error) 258840e041a2SDavid Herrmann goto unlock; 258940e041a2SDavid Herrmann 259040e041a2SDavid Herrmann error = shmem_wait_for_pins(file->f_mapping); 259140e041a2SDavid Herrmann if (error) { 259240e041a2SDavid Herrmann mapping_allow_writable(file->f_mapping); 259340e041a2SDavid Herrmann goto unlock; 259440e041a2SDavid Herrmann } 259540e041a2SDavid Herrmann } 259640e041a2SDavid Herrmann 259740e041a2SDavid Herrmann info->seals |= seals; 259840e041a2SDavid Herrmann error = 0; 259940e041a2SDavid Herrmann 260040e041a2SDavid Herrmann unlock: 26015955102cSAl Viro inode_unlock(inode); 260240e041a2SDavid Herrmann return error; 260340e041a2SDavid Herrmann } 260440e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_add_seals); 260540e041a2SDavid Herrmann 260640e041a2SDavid Herrmann int shmem_get_seals(struct file *file) 260740e041a2SDavid Herrmann { 260840e041a2SDavid Herrmann if (file->f_op != &shmem_file_operations) 260940e041a2SDavid Herrmann return -EINVAL; 261040e041a2SDavid Herrmann 261140e041a2SDavid Herrmann return SHMEM_I(file_inode(file))->seals; 261240e041a2SDavid Herrmann } 261340e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_get_seals); 261440e041a2SDavid Herrmann 261540e041a2SDavid Herrmann long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg) 261640e041a2SDavid Herrmann { 261740e041a2SDavid Herrmann long error; 261840e041a2SDavid Herrmann 261940e041a2SDavid Herrmann switch (cmd) { 262040e041a2SDavid Herrmann case F_ADD_SEALS: 262140e041a2SDavid Herrmann /* disallow upper 32bit */ 262240e041a2SDavid Herrmann if (arg > UINT_MAX) 262340e041a2SDavid Herrmann return -EINVAL; 262440e041a2SDavid Herrmann 262540e041a2SDavid Herrmann error = shmem_add_seals(file, arg); 262640e041a2SDavid Herrmann break; 262740e041a2SDavid Herrmann case F_GET_SEALS: 262840e041a2SDavid Herrmann error = shmem_get_seals(file); 262940e041a2SDavid Herrmann break; 263040e041a2SDavid Herrmann default: 263140e041a2SDavid Herrmann error = -EINVAL; 263240e041a2SDavid Herrmann break; 263340e041a2SDavid Herrmann } 263440e041a2SDavid Herrmann 263540e041a2SDavid Herrmann return error; 263640e041a2SDavid Herrmann } 263740e041a2SDavid Herrmann 263883e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 263983e4fa9cSHugh Dickins loff_t len) 264083e4fa9cSHugh Dickins { 2641496ad9aaSAl Viro struct inode *inode = file_inode(file); 2642e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 264340e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 26441aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 2645e2d12e22SHugh Dickins pgoff_t start, index, end; 
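	/*
	 * Call patterns served here (illustrative, per fallocate(2)):
	 *	fallocate(fd, 0, offset, len);			preallocation
	 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
	 *		  FALLOC_FL_KEEP_SIZE, offset, len);	hole punch
	 * (the syscall requires PUNCH_HOLE to be paired with KEEP_SIZE).
	 */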
2646e2d12e22SHugh Dickins int error; 264783e4fa9cSHugh Dickins 264813ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 264913ace4d0SHugh Dickins return -EOPNOTSUPP; 265013ace4d0SHugh Dickins 26515955102cSAl Viro inode_lock(inode); 265283e4fa9cSHugh Dickins 265383e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 265483e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 265583e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 265683e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 26578e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 265883e4fa9cSHugh Dickins 265940e041a2SDavid Herrmann /* protected by i_mutex */ 266040e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) { 266140e041a2SDavid Herrmann error = -EPERM; 266240e041a2SDavid Herrmann goto out; 266340e041a2SDavid Herrmann } 266440e041a2SDavid Herrmann 26658e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2666f00cdc6dSHugh Dickins shmem_falloc.start = unmap_start >> PAGE_SHIFT; 2667f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2668f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2669f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2670f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2671f00cdc6dSHugh Dickins 267283e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 267383e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 267483e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 267583e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 267683e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 26778e205f77SHugh Dickins 26788e205f77SHugh Dickins spin_lock(&inode->i_lock); 26798e205f77SHugh Dickins inode->i_private = NULL; 26808e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 268110d20bd2SLinus Torvalds WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list)); 26828e205f77SHugh Dickins spin_unlock(&inode->i_lock); 268383e4fa9cSHugh Dickins error = 0; 26848e205f77SHugh Dickins goto out; 268583e4fa9cSHugh Dickins } 268683e4fa9cSHugh Dickins 2687e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2688e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 2689e2d12e22SHugh Dickins if (error) 2690e2d12e22SHugh Dickins goto out; 2691e2d12e22SHugh Dickins 269240e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 269340e041a2SDavid Herrmann error = -EPERM; 269440e041a2SDavid Herrmann goto out; 269540e041a2SDavid Herrmann } 269640e041a2SDavid Herrmann 269709cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 269809cbfeafSKirill A. 
Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2699e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2700e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2701e2d12e22SHugh Dickins error = -ENOSPC; 2702e2d12e22SHugh Dickins goto out; 2703e2d12e22SHugh Dickins } 2704e2d12e22SHugh Dickins 27058e205f77SHugh Dickins shmem_falloc.waitq = NULL; 27061aac1400SHugh Dickins shmem_falloc.start = start; 27071aac1400SHugh Dickins shmem_falloc.next = start; 27081aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 27091aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 27101aac1400SHugh Dickins spin_lock(&inode->i_lock); 27111aac1400SHugh Dickins inode->i_private = &shmem_falloc; 27121aac1400SHugh Dickins spin_unlock(&inode->i_lock); 27131aac1400SHugh Dickins 2714e2d12e22SHugh Dickins for (index = start; index < end; index++) { 2715e2d12e22SHugh Dickins struct page *page; 2716e2d12e22SHugh Dickins 2717e2d12e22SHugh Dickins /* 2718e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2719e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2720e2d12e22SHugh Dickins */ 2721e2d12e22SHugh Dickins if (signal_pending(current)) 2722e2d12e22SHugh Dickins error = -EINTR; 27231aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 27241aac1400SHugh Dickins error = -ENOMEM; 2725e2d12e22SHugh Dickins else 27269e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2727e2d12e22SHugh Dickins if (error) { 27281635f6a7SHugh Dickins /* Remove the !PageUptodate pages we added */ 27297f556567SHugh Dickins if (index > start) { 27301635f6a7SHugh Dickins shmem_undo_range(inode, 273109cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT, 2732b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 27337f556567SHugh Dickins } 27341aac1400SHugh Dickins goto undone; 2735e2d12e22SHugh Dickins } 2736e2d12e22SHugh Dickins 2737e2d12e22SHugh Dickins /* 27381aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 27391aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 27401aac1400SHugh Dickins */ 27411aac1400SHugh Dickins shmem_falloc.next++; 27421aac1400SHugh Dickins if (!PageUptodate(page)) 27431aac1400SHugh Dickins shmem_falloc.nr_falloced++; 27441aac1400SHugh Dickins 27451aac1400SHugh Dickins /* 27461635f6a7SHugh Dickins * If !PageUptodate, leave it that way so that freeable pages 27471635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 27481635f6a7SHugh Dickins * But set_page_dirty so that memory pressure will swap rather 2749e2d12e22SHugh Dickins * than free the pages we are allocating (and SGP_CACHE pages 2750e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2751e2d12e22SHugh Dickins */ 2752e2d12e22SHugh Dickins set_page_dirty(page); 2753e2d12e22SHugh Dickins unlock_page(page); 275409cbfeafSKirill A. 
Shutemov put_page(page); 2755e2d12e22SHugh Dickins cond_resched(); 2756e2d12e22SHugh Dickins } 2757e2d12e22SHugh Dickins 2758e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2759e2d12e22SHugh Dickins i_size_write(inode, offset + len); 2760078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 27611aac1400SHugh Dickins undone: 27621aac1400SHugh Dickins spin_lock(&inode->i_lock); 27631aac1400SHugh Dickins inode->i_private = NULL; 27641aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2765e2d12e22SHugh Dickins out: 27665955102cSAl Viro inode_unlock(inode); 276783e4fa9cSHugh Dickins return error; 276883e4fa9cSHugh Dickins } 276983e4fa9cSHugh Dickins 2770726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 27711da177e4SLinus Torvalds { 2772726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 27731da177e4SLinus Torvalds 27741da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 277509cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE; 27761da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 27770edd73b3SHugh Dickins if (sbinfo->max_blocks) { 27781da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 277941ffe5d5SHugh Dickins buf->f_bavail = 278041ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 278141ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 27820edd73b3SHugh Dickins } 27830edd73b3SHugh Dickins if (sbinfo->max_inodes) { 27841da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 27851da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 27861da177e4SLinus Torvalds } 27871da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 27881da177e4SLinus Torvalds return 0; 27891da177e4SLinus Torvalds } 27901da177e4SLinus Torvalds 27911da177e4SLinus Torvalds /* 27921da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
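 * (Reached via mknod(2), open(2) with O_CREAT, and mkdir(2) on tmpfs:
 * shmem_create() and shmem_mkdir() below are thin wrappers that OR in
 * S_IFREG and S_IFDIR respectively.)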
27931da177e4SLinus Torvalds */ 27941da177e4SLinus Torvalds static int 27951a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 27961da177e4SLinus Torvalds { 27970b0a0806SHugh Dickins struct inode *inode; 27981da177e4SLinus Torvalds int error = -ENOSPC; 27991da177e4SLinus Torvalds 2800454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 28011da177e4SLinus Torvalds if (inode) { 2802feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2803feda821eSChristoph Hellwig if (error) 2804feda821eSChristoph Hellwig goto out_iput; 28052a7dba39SEric Paris error = security_inode_init_security(inode, dir, 28069d8f13baSMimi Zohar &dentry->d_name, 28076d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2808feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2809feda821eSChristoph Hellwig goto out_iput; 281037ec43cdSMimi Zohar 2811718deb6bSAl Viro error = 0; 28121da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2813078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 28141da177e4SLinus Torvalds d_instantiate(dentry, inode); 28151da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 28161da177e4SLinus Torvalds } 28171da177e4SLinus Torvalds return error; 2818feda821eSChristoph Hellwig out_iput: 2819feda821eSChristoph Hellwig iput(inode); 2820feda821eSChristoph Hellwig return error; 28211da177e4SLinus Torvalds } 28221da177e4SLinus Torvalds 282360545d0dSAl Viro static int 282460545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 282560545d0dSAl Viro { 282660545d0dSAl Viro struct inode *inode; 282760545d0dSAl Viro int error = -ENOSPC; 282860545d0dSAl Viro 282960545d0dSAl Viro inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 283060545d0dSAl Viro if (inode) { 283160545d0dSAl Viro error = security_inode_init_security(inode, dir, 283260545d0dSAl Viro NULL, 283360545d0dSAl Viro shmem_initxattrs, NULL); 2834feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2835feda821eSChristoph Hellwig goto out_iput; 2836feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2837feda821eSChristoph Hellwig if (error) 2838feda821eSChristoph Hellwig goto out_iput; 283960545d0dSAl Viro d_tmpfile(dentry, inode); 284060545d0dSAl Viro } 284160545d0dSAl Viro return error; 2842feda821eSChristoph Hellwig out_iput: 2843feda821eSChristoph Hellwig iput(inode); 2844feda821eSChristoph Hellwig return error; 284560545d0dSAl Viro } 284660545d0dSAl Viro 284718bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 28481da177e4SLinus Torvalds { 28491da177e4SLinus Torvalds int error; 28501da177e4SLinus Torvalds 28511da177e4SLinus Torvalds if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 28521da177e4SLinus Torvalds return error; 2853d8c76e6fSDave Hansen inc_nlink(dir); 28541da177e4SLinus Torvalds return 0; 28551da177e4SLinus Torvalds } 28561da177e4SLinus Torvalds 28574acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2858ebfc3b49SAl Viro bool excl) 28591da177e4SLinus Torvalds { 28601da177e4SLinus Torvalds return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 28611da177e4SLinus Torvalds } 28621da177e4SLinus Torvalds 28631da177e4SLinus Torvalds /* 28641da177e4SLinus Torvalds * Link a file.. 
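 * (e.g. link(2) on a tmpfs file: the new dentry costs one reserved
 * inode slot, charged via shmem_reserve_inode().)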
28651da177e4SLinus Torvalds */ 28661da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 28671da177e4SLinus Torvalds { 286875c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 28695b04c689SPavel Emelyanov int ret; 28701da177e4SLinus Torvalds 28711da177e4SLinus Torvalds /* 28721da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 28731da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 28741da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 28751da177e4SLinus Torvalds */ 28765b04c689SPavel Emelyanov ret = shmem_reserve_inode(inode->i_sb); 28775b04c689SPavel Emelyanov if (ret) 28785b04c689SPavel Emelyanov goto out; 28791da177e4SLinus Torvalds 28801da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2881078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 2882d8c76e6fSDave Hansen inc_nlink(inode); 28837de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 28841da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 28851da177e4SLinus Torvalds d_instantiate(dentry, inode); 28865b04c689SPavel Emelyanov out: 28875b04c689SPavel Emelyanov return ret; 28881da177e4SLinus Torvalds } 28891da177e4SLinus Torvalds 28901da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 28911da177e4SLinus Torvalds { 289275c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 28931da177e4SLinus Torvalds 28945b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 28955b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 28961da177e4SLinus Torvalds 28971da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 2898078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 28999a53c3a7SDave Hansen drop_nlink(inode); 29001da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 29011da177e4SLinus Torvalds return 0; 29021da177e4SLinus Torvalds } 29031da177e4SLinus Torvalds 29041da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 29051da177e4SLinus Torvalds { 29061da177e4SLinus Torvalds if (!simple_empty(dentry)) 29071da177e4SLinus Torvalds return -ENOTEMPTY; 29081da177e4SLinus Torvalds 290975c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 29109a53c3a7SDave Hansen drop_nlink(dir); 29111da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 29121da177e4SLinus Torvalds } 29131da177e4SLinus Torvalds 291437456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 291537456771SMiklos Szeredi { 2916e36cb0b8SDavid Howells bool old_is_dir = d_is_dir(old_dentry); 2917e36cb0b8SDavid Howells bool new_is_dir = d_is_dir(new_dentry); 291837456771SMiklos Szeredi 291937456771SMiklos Szeredi if (old_dir != new_dir && old_is_dir != new_is_dir) { 292037456771SMiklos Szeredi if (old_is_dir) { 292137456771SMiklos Szeredi drop_nlink(old_dir); 292237456771SMiklos Szeredi inc_nlink(new_dir); 292337456771SMiklos Szeredi } else { 292437456771SMiklos Szeredi drop_nlink(new_dir); 292537456771SMiklos Szeredi inc_nlink(old_dir); 292637456771SMiklos Szeredi } 292737456771SMiklos Szeredi } 292837456771SMiklos Szeredi old_dir->i_ctime = old_dir->i_mtime = 292937456771SMiklos Szeredi new_dir->i_ctime = new_dir->i_mtime = 293075c3cfa8SDavid Howells 
d_inode(old_dentry)->i_ctime = 2931078cd827SDeepa Dinamani d_inode(new_dentry)->i_ctime = current_time(old_dir); 293237456771SMiklos Szeredi 293337456771SMiklos Szeredi return 0; 293437456771SMiklos Szeredi } 293537456771SMiklos Szeredi 293646fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) 293746fdb794SMiklos Szeredi { 293846fdb794SMiklos Szeredi struct dentry *whiteout; 293946fdb794SMiklos Szeredi int error; 294046fdb794SMiklos Szeredi 294146fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 294246fdb794SMiklos Szeredi if (!whiteout) 294346fdb794SMiklos Szeredi return -ENOMEM; 294446fdb794SMiklos Szeredi 294546fdb794SMiklos Szeredi error = shmem_mknod(old_dir, whiteout, 294646fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 294746fdb794SMiklos Szeredi dput(whiteout); 294846fdb794SMiklos Szeredi if (error) 294946fdb794SMiklos Szeredi return error; 295046fdb794SMiklos Szeredi 295146fdb794SMiklos Szeredi /* 295246fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 295346fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 295446fdb794SMiklos Szeredi * 295546fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 295646fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 295746fdb794SMiklos Szeredi */ 295846fdb794SMiklos Szeredi d_rehash(whiteout); 295946fdb794SMiklos Szeredi return 0; 296046fdb794SMiklos Szeredi } 296146fdb794SMiklos Szeredi 29621da177e4SLinus Torvalds /* 29631da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename; 29641da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 29651da177e4SLinus Torvalds * it exists so that the VFS layer correctly frees it when it 29661da177e4SLinus Torvalds * gets overwritten.
29671da177e4SLinus Torvalds */ 29683b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) 29691da177e4SLinus Torvalds { 297075c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 29711da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 29721da177e4SLinus Torvalds 297346fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 29743b69ff51SMiklos Szeredi return -EINVAL; 29753b69ff51SMiklos Szeredi 297637456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 297737456771SMiklos Szeredi return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 297837456771SMiklos Szeredi 29791da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 29801da177e4SLinus Torvalds return -ENOTEMPTY; 29811da177e4SLinus Torvalds 298246fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 298346fdb794SMiklos Szeredi int error; 298446fdb794SMiklos Szeredi 298546fdb794SMiklos Szeredi error = shmem_whiteout(old_dir, old_dentry); 298646fdb794SMiklos Szeredi if (error) 298746fdb794SMiklos Szeredi return error; 298846fdb794SMiklos Szeredi } 298946fdb794SMiklos Szeredi 299075c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 29911da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 2992b928095bSMiklos Szeredi if (they_are_dirs) { 299375c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 29949a53c3a7SDave Hansen drop_nlink(old_dir); 2995b928095bSMiklos Szeredi } 29961da177e4SLinus Torvalds } else if (they_are_dirs) { 29979a53c3a7SDave Hansen drop_nlink(old_dir); 2998d8c76e6fSDave Hansen inc_nlink(new_dir); 29991da177e4SLinus Torvalds } 30001da177e4SLinus Torvalds 30011da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 30021da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 30031da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 30041da177e4SLinus Torvalds new_dir->i_ctime = new_dir->i_mtime = 3005078cd827SDeepa Dinamani inode->i_ctime = current_time(old_dir); 30061da177e4SLinus Torvalds return 0; 30071da177e4SLinus Torvalds } 30081da177e4SLinus Torvalds 30091da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 30101da177e4SLinus Torvalds { 30111da177e4SLinus Torvalds int error; 30121da177e4SLinus Torvalds int len; 30131da177e4SLinus Torvalds struct inode *inode; 30149276aad6SHugh Dickins struct page *page; 30151da177e4SLinus Torvalds struct shmem_inode_info *info; 30161da177e4SLinus Torvalds 30171da177e4SLinus Torvalds len = strlen(symname) + 1; 301809cbfeafSKirill A. 
Shutemov if (len > PAGE_SIZE) 30191da177e4SLinus Torvalds return -ENAMETOOLONG; 30201da177e4SLinus Torvalds 3021454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 30221da177e4SLinus Torvalds if (!inode) 30231da177e4SLinus Torvalds return -ENOSPC; 30241da177e4SLinus Torvalds 30259d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 30266d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3027570bc1c2SStephen Smalley if (error) { 3028570bc1c2SStephen Smalley if (error != -EOPNOTSUPP) { 3029570bc1c2SStephen Smalley iput(inode); 3030570bc1c2SStephen Smalley return error; 3031570bc1c2SStephen Smalley } 3032570bc1c2SStephen Smalley error = 0; 3033570bc1c2SStephen Smalley } 3034570bc1c2SStephen Smalley 30351da177e4SLinus Torvalds info = SHMEM_I(inode); 30361da177e4SLinus Torvalds inode->i_size = len-1; 303769f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 30383ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 30393ed47db3SAl Viro if (!inode->i_link) { 304069f07ec9SHugh Dickins iput(inode); 304169f07ec9SHugh Dickins return -ENOMEM; 304269f07ec9SHugh Dickins } 304369f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 30441da177e4SLinus Torvalds } else { 3045e8ecde25SAl Viro inode_nohighmem(inode); 30469e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_WRITE); 30471da177e4SLinus Torvalds if (error) { 30481da177e4SLinus Torvalds iput(inode); 30491da177e4SLinus Torvalds return error; 30501da177e4SLinus Torvalds } 305114fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 30521da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 305321fc61c7SAl Viro memcpy(page_address(page), symname, len); 3054ec9516fbSHugh Dickins SetPageUptodate(page); 30551da177e4SLinus Torvalds set_page_dirty(page); 30566746aff7SWu Fengguang unlock_page(page); 305709cbfeafSKirill A. 
Shutemov put_page(page); 30581da177e4SLinus Torvalds } 30591da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3060078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 30611da177e4SLinus Torvalds d_instantiate(dentry, inode); 30621da177e4SLinus Torvalds dget(dentry); 30631da177e4SLinus Torvalds return 0; 30641da177e4SLinus Torvalds } 30651da177e4SLinus Torvalds 3066fceef393SAl Viro static void shmem_put_link(void *arg) 3067fceef393SAl Viro { 3068fceef393SAl Viro mark_page_accessed(arg); 3069fceef393SAl Viro put_page(arg); 3070fceef393SAl Viro } 3071fceef393SAl Viro 30726b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3073fceef393SAl Viro struct inode *inode, 3074fceef393SAl Viro struct delayed_call *done) 30751da177e4SLinus Torvalds { 30761da177e4SLinus Torvalds struct page *page = NULL; 30776b255391SAl Viro int error; 30786a6c9904SAl Viro if (!dentry) { 30796a6c9904SAl Viro page = find_get_page(inode->i_mapping, 0); 30806a6c9904SAl Viro if (!page) 30816b255391SAl Viro return ERR_PTR(-ECHILD); 30826a6c9904SAl Viro if (!PageUptodate(page)) { 30836a6c9904SAl Viro put_page(page); 30846a6c9904SAl Viro return ERR_PTR(-ECHILD); 30856a6c9904SAl Viro } 30866a6c9904SAl Viro } else { 30879e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_READ); 3088680baacbSAl Viro if (error) 3089680baacbSAl Viro return ERR_PTR(error); 3090d3602444SHugh Dickins unlock_page(page); 30911da177e4SLinus Torvalds } 3092fceef393SAl Viro set_delayed_call(done, shmem_put_link, page); 309321fc61c7SAl Viro return page_address(page); 30941da177e4SLinus Torvalds } 30951da177e4SLinus Torvalds 3096b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3097b09e0fa4SEric Paris /* 3098b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3099b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3100b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3101b09e0fa4SEric Paris * filesystem level, though. 3102b09e0fa4SEric Paris */ 3103b09e0fa4SEric Paris 31046d9d88d0SJarkko Sakkinen /* 31056d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
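 * Each xattr supplied by the LSM is copied and stored under the
 * "security." namespace, e.g. an LSM suffix of "selinux" is recorded
 * as "security.selinux" in info->xattrs.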
31066d9d88d0SJarkko Sakkinen */ 31076d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 31086d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 31096d9d88d0SJarkko Sakkinen void *fs_info) 31106d9d88d0SJarkko Sakkinen { 31116d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 31126d9d88d0SJarkko Sakkinen const struct xattr *xattr; 311338f38657SAristeu Rozanski struct simple_xattr *new_xattr; 31146d9d88d0SJarkko Sakkinen size_t len; 31156d9d88d0SJarkko Sakkinen 31166d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 311738f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 31186d9d88d0SJarkko Sakkinen if (!new_xattr) 31196d9d88d0SJarkko Sakkinen return -ENOMEM; 31206d9d88d0SJarkko Sakkinen 31216d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 31226d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 31236d9d88d0SJarkko Sakkinen GFP_KERNEL); 31246d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 31256d9d88d0SJarkko Sakkinen kfree(new_xattr); 31266d9d88d0SJarkko Sakkinen return -ENOMEM; 31276d9d88d0SJarkko Sakkinen } 31286d9d88d0SJarkko Sakkinen 31296d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 31306d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 31316d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 31326d9d88d0SJarkko Sakkinen xattr->name, len); 31336d9d88d0SJarkko Sakkinen 313438f38657SAristeu Rozanski simple_xattr_list_add(&info->xattrs, new_xattr); 31356d9d88d0SJarkko Sakkinen } 31366d9d88d0SJarkko Sakkinen 31376d9d88d0SJarkko Sakkinen return 0; 31386d9d88d0SJarkko Sakkinen } 31396d9d88d0SJarkko Sakkinen 3140aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3141b296821aSAl Viro struct dentry *unused, struct inode *inode, 3142b296821aSAl Viro const char *name, void *buffer, size_t size) 3143aa7c5241SAndreas Gruenbacher { 3144b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3145aa7c5241SAndreas Gruenbacher 3146aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3147aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3148aa7c5241SAndreas Gruenbacher } 3149aa7c5241SAndreas Gruenbacher 3150aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 315159301226SAl Viro struct dentry *unused, struct inode *inode, 315259301226SAl Viro const char *name, const void *value, 315359301226SAl Viro size_t size, int flags) 3154aa7c5241SAndreas Gruenbacher { 315559301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3156aa7c5241SAndreas Gruenbacher 3157aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3158aa7c5241SAndreas Gruenbacher return simple_xattr_set(&info->xattrs, name, value, size, flags); 3159aa7c5241SAndreas Gruenbacher } 3160aa7c5241SAndreas Gruenbacher 3161aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = { 3162aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3163aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3164aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3165aa7c5241SAndreas Gruenbacher }; 3166aa7c5241SAndreas Gruenbacher 3167aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = { 3168aa7c5241SAndreas Gruenbacher .prefix = XATTR_TRUSTED_PREFIX, 3169aa7c5241SAndreas Gruenbacher 
.get = shmem_xattr_handler_get, 3170aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3171aa7c5241SAndreas Gruenbacher }; 3172aa7c5241SAndreas Gruenbacher 3173b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = { 3174b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 3175feda821eSChristoph Hellwig &posix_acl_access_xattr_handler, 3176feda821eSChristoph Hellwig &posix_acl_default_xattr_handler, 3177b09e0fa4SEric Paris #endif 3178aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler, 3179aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler, 3180b09e0fa4SEric Paris NULL 3181b09e0fa4SEric Paris }; 3182b09e0fa4SEric Paris 3183b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3184b09e0fa4SEric Paris { 318575c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3186786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3187b09e0fa4SEric Paris } 3188b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 3189b09e0fa4SEric Paris 319069f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 31911da177e4SLinus Torvalds .readlink = generic_readlink, 31926b255391SAl Viro .get_link = simple_get_link, 3193b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3194b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3195b09e0fa4SEric Paris #endif 31961da177e4SLinus Torvalds }; 31971da177e4SLinus Torvalds 319892e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 31991da177e4SLinus Torvalds .readlink = generic_readlink, 32006b255391SAl Viro .get_link = shmem_get_link, 3201b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3202b09e0fa4SEric Paris .listxattr = shmem_listxattr, 320339f0247dSAndreas Gruenbacher #endif 3204b09e0fa4SEric Paris }; 320539f0247dSAndreas Gruenbacher 320691828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child) 320791828a40SDavid M. Grimes { 320891828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 320991828a40SDavid M. Grimes } 321091828a40SDavid M. Grimes 321191828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 321291828a40SDavid M. Grimes { 321391828a40SDavid M. Grimes __u32 *fh = vfh; 321491828a40SDavid M. Grimes __u64 inum = fh[2]; 321591828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 321691828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 321791828a40SDavid M. Grimes } 321891828a40SDavid M. Grimes 3219480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3220480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 322191828a40SDavid M. Grimes { 322291828a40SDavid M. Grimes struct inode *inode; 3223480b116cSChristoph Hellwig struct dentry *dentry = NULL; 322435c2a7f4SHugh Dickins u64 inum; 322591828a40SDavid M. Grimes 3226480b116cSChristoph Hellwig if (fh_len < 3) 3227480b116cSChristoph Hellwig return NULL; 3228480b116cSChristoph Hellwig 322935c2a7f4SHugh Dickins inum = fid->raw[2]; 323035c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1]; 323135c2a7f4SHugh Dickins 3232480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3233480b116cSChristoph Hellwig shmem_match, fid->raw); 323491828a40SDavid M. Grimes if (inode) { 3235480b116cSChristoph Hellwig dentry = d_find_alias(inode); 323691828a40SDavid M. Grimes iput(inode); 323791828a40SDavid M. Grimes } 323891828a40SDavid M. 
Grimes 3239480b116cSChristoph Hellwig return dentry; 324091828a40SDavid M. Grimes } 324191828a40SDavid M. Grimes 3242b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3243b0b0382bSAl Viro struct inode *parent) 324491828a40SDavid M. Grimes { 32455fe0c237SAneesh Kumar K.V if (*len < 3) { 32465fe0c237SAneesh Kumar K.V *len = 3; 324794e07a75SNamjae Jeon return FILEID_INVALID; 32485fe0c237SAneesh Kumar K.V } 324991828a40SDavid M. Grimes 32501d3382cbSAl Viro if (inode_unhashed(inode)) { 325191828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 325291828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 325391828a40SDavid M. Grimes * time, we need a lock to ensure we only try 325491828a40SDavid M. Grimes * to do it once 325591828a40SDavid M. Grimes */ 325691828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 325791828a40SDavid M. Grimes spin_lock(&lock); 32581d3382cbSAl Viro if (inode_unhashed(inode)) 325991828a40SDavid M. Grimes __insert_inode_hash(inode, 326091828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 326191828a40SDavid M. Grimes spin_unlock(&lock); 326291828a40SDavid M. Grimes } 326391828a40SDavid M. Grimes 326491828a40SDavid M. Grimes fh[0] = inode->i_generation; 326591828a40SDavid M. Grimes fh[1] = inode->i_ino; 326691828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 326791828a40SDavid M. Grimes 326891828a40SDavid M. Grimes *len = 3; 326991828a40SDavid M. Grimes return 1; 327091828a40SDavid M. Grimes } 327191828a40SDavid M. Grimes 327239655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 327391828a40SDavid M. Grimes .get_parent = shmem_get_parent, 327491828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 3275480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 327691828a40SDavid M. Grimes }; 327791828a40SDavid M. Grimes 3278680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 3279680d794bSakpm@linux-foundation.org bool remount) 32801da177e4SLinus Torvalds { 32811da177e4SLinus Torvalds char *this_char, *value, *rest; 328249cd0a5cSGreg Thelen struct mempolicy *mpol = NULL; 32838751e039SEric W. Biederman uid_t uid; 32848751e039SEric W. Biederman gid_t gid; 32851da177e4SLinus Torvalds 3286b00dc3adSHugh Dickins while (options != NULL) { 3287b00dc3adSHugh Dickins this_char = options; 3288b00dc3adSHugh Dickins for (;;) { 3289b00dc3adSHugh Dickins /* 3290b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 3291b00dc3adSHugh Dickins * mount options form a comma-separated list, 3292b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 
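 * e.g. in "size=1g,mpol=bind:0-3,5,uid=0" the comma before "5" is
 * followed by a digit, so it is kept as part of the nodelist, while
 * the comma before "uid" ends the mpol option.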
3293b00dc3adSHugh Dickins */ 3294b00dc3adSHugh Dickins options = strchr(options, ','); 3295b00dc3adSHugh Dickins if (options == NULL) 3296b00dc3adSHugh Dickins break; 3297b00dc3adSHugh Dickins options++; 3298b00dc3adSHugh Dickins if (!isdigit(*options)) { 3299b00dc3adSHugh Dickins options[-1] = '\0'; 3300b00dc3adSHugh Dickins break; 3301b00dc3adSHugh Dickins } 3302b00dc3adSHugh Dickins } 33031da177e4SLinus Torvalds if (!*this_char) 33041da177e4SLinus Torvalds continue; 33051da177e4SLinus Torvalds if ((value = strchr(this_char,'=')) != NULL) { 33061da177e4SLinus Torvalds *value++ = 0; 33071da177e4SLinus Torvalds } else { 33081170532bSJoe Perches pr_err("tmpfs: No value for mount option '%s'\n", 33091da177e4SLinus Torvalds this_char); 331049cd0a5cSGreg Thelen goto error; 33111da177e4SLinus Torvalds } 33121da177e4SLinus Torvalds 33131da177e4SLinus Torvalds if (!strcmp(this_char,"size")) { 33141da177e4SLinus Torvalds unsigned long long size; 33151da177e4SLinus Torvalds size = memparse(value,&rest); 33161da177e4SLinus Torvalds if (*rest == '%') { 33171da177e4SLinus Torvalds size <<= PAGE_SHIFT; 33181da177e4SLinus Torvalds size *= totalram_pages; 33191da177e4SLinus Torvalds do_div(size, 100); 33201da177e4SLinus Torvalds rest++; 33211da177e4SLinus Torvalds } 33221da177e4SLinus Torvalds if (*rest) 33231da177e4SLinus Torvalds goto bad_val; 3324680d794bSakpm@linux-foundation.org sbinfo->max_blocks = 332509cbfeafSKirill A. Shutemov DIV_ROUND_UP(size, PAGE_SIZE); 33261da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_blocks")) { 3327680d794bSakpm@linux-foundation.org sbinfo->max_blocks = memparse(value, &rest); 33281da177e4SLinus Torvalds if (*rest) 33291da177e4SLinus Torvalds goto bad_val; 33301da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_inodes")) { 3331680d794bSakpm@linux-foundation.org sbinfo->max_inodes = memparse(value, &rest); 33321da177e4SLinus Torvalds if (*rest) 33331da177e4SLinus Torvalds goto bad_val; 33341da177e4SLinus Torvalds } else if (!strcmp(this_char,"mode")) { 3335680d794bSakpm@linux-foundation.org if (remount) 33361da177e4SLinus Torvalds continue; 3337680d794bSakpm@linux-foundation.org sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 33381da177e4SLinus Torvalds if (*rest) 33391da177e4SLinus Torvalds goto bad_val; 33401da177e4SLinus Torvalds } else if (!strcmp(this_char,"uid")) { 3341680d794bSakpm@linux-foundation.org if (remount) 33421da177e4SLinus Torvalds continue; 33438751e039SEric W. Biederman uid = simple_strtoul(value, &rest, 0); 33441da177e4SLinus Torvalds if (*rest) 33451da177e4SLinus Torvalds goto bad_val; 33468751e039SEric W. Biederman sbinfo->uid = make_kuid(current_user_ns(), uid); 33478751e039SEric W. Biederman if (!uid_valid(sbinfo->uid)) 33488751e039SEric W. Biederman goto bad_val; 33491da177e4SLinus Torvalds } else if (!strcmp(this_char,"gid")) { 3350680d794bSakpm@linux-foundation.org if (remount) 33511da177e4SLinus Torvalds continue; 33528751e039SEric W. Biederman gid = simple_strtoul(value, &rest, 0); 33531da177e4SLinus Torvalds if (*rest) 33541da177e4SLinus Torvalds goto bad_val; 33558751e039SEric W. Biederman sbinfo->gid = make_kgid(current_user_ns(), gid); 33568751e039SEric W. Biederman if (!gid_valid(sbinfo->gid)) 33578751e039SEric W. Biederman goto bad_val; 3358e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 33595a6e75f8SKirill A. Shutemov } else if (!strcmp(this_char, "huge")) { 33605a6e75f8SKirill A. Shutemov int huge; 33615a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(value); 33625a6e75f8SKirill A. 
Shutemov if (huge < 0) 33635a6e75f8SKirill A. Shutemov goto bad_val; 33645a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 33655a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER) 33665a6e75f8SKirill A. Shutemov goto bad_val; 33675a6e75f8SKirill A. Shutemov sbinfo->huge = huge; 33685a6e75f8SKirill A. Shutemov #endif 33695a6e75f8SKirill A. Shutemov #ifdef CONFIG_NUMA 33707339ff83SRobin Holt } else if (!strcmp(this_char,"mpol")) { 337149cd0a5cSGreg Thelen mpol_put(mpol); 337249cd0a5cSGreg Thelen mpol = NULL; 337349cd0a5cSGreg Thelen if (mpol_parse_str(value, &mpol)) 33747339ff83SRobin Holt goto bad_val; 33755a6e75f8SKirill A. Shutemov #endif 33761da177e4SLinus Torvalds } else { 33771170532bSJoe Perches pr_err("tmpfs: Bad mount option %s\n", this_char); 337849cd0a5cSGreg Thelen goto error; 33791da177e4SLinus Torvalds } 33801da177e4SLinus Torvalds } 338149cd0a5cSGreg Thelen sbinfo->mpol = mpol; 33821da177e4SLinus Torvalds return 0; 33831da177e4SLinus Torvalds 33841da177e4SLinus Torvalds bad_val: 33851170532bSJoe Perches pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", 33861da177e4SLinus Torvalds value, this_char); 338749cd0a5cSGreg Thelen error: 338849cd0a5cSGreg Thelen mpol_put(mpol); 33891da177e4SLinus Torvalds return 1; 33901da177e4SLinus Torvalds 33911da177e4SLinus Torvalds } 33921da177e4SLinus Torvalds 33931da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 33941da177e4SLinus Torvalds { 33951da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3396680d794bSakpm@linux-foundation.org struct shmem_sb_info config = *sbinfo; 33970edd73b3SHugh Dickins unsigned long inodes; 33980edd73b3SHugh Dickins int error = -EINVAL; 33991da177e4SLinus Torvalds 34005f00110fSGreg Thelen config.mpol = NULL; 3401680d794bSakpm@linux-foundation.org if (shmem_parse_options(data, &config, true)) 34020edd73b3SHugh Dickins return error; 34030edd73b3SHugh Dickins 34040edd73b3SHugh Dickins spin_lock(&sbinfo->stat_lock); 34050edd73b3SHugh Dickins inodes = sbinfo->max_inodes - sbinfo->free_inodes; 34067e496299STim Chen if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 34070edd73b3SHugh Dickins goto out; 3408680d794bSakpm@linux-foundation.org if (config.max_inodes < inodes) 34090edd73b3SHugh Dickins goto out; 34100edd73b3SHugh Dickins /* 341154af6042SHugh Dickins * Those tests disallow limited->unlimited while any are in use; 34120edd73b3SHugh Dickins * but we must separately disallow unlimited->limited, because 34130edd73b3SHugh Dickins * in that case we have no record of how much is already in use. 34140edd73b3SHugh Dickins */ 3415680d794bSakpm@linux-foundation.org if (config.max_blocks && !sbinfo->max_blocks) 34160edd73b3SHugh Dickins goto out; 3417680d794bSakpm@linux-foundation.org if (config.max_inodes && !sbinfo->max_inodes) 34180edd73b3SHugh Dickins goto out; 34190edd73b3SHugh Dickins 34200edd73b3SHugh Dickins error = 0; 34215a6e75f8SKirill A. Shutemov sbinfo->huge = config.huge; 3422680d794bSakpm@linux-foundation.org sbinfo->max_blocks = config.max_blocks; 3423680d794bSakpm@linux-foundation.org sbinfo->max_inodes = config.max_inodes; 3424680d794bSakpm@linux-foundation.org sbinfo->free_inodes = config.max_inodes - inodes; 342571fe804bSLee Schermerhorn 34265f00110fSGreg Thelen /* 34275f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified. 
34285f00110fSGreg Thelen */ 34295f00110fSGreg Thelen if (config.mpol) { 343071fe804bSLee Schermerhorn mpol_put(sbinfo->mpol); 343171fe804bSLee Schermerhorn sbinfo->mpol = config.mpol; /* transfers initial ref */ 34325f00110fSGreg Thelen } 34330edd73b3SHugh Dickins out: 34340edd73b3SHugh Dickins spin_unlock(&sbinfo->stat_lock); 34350edd73b3SHugh Dickins return error; 34361da177e4SLinus Torvalds } 3437680d794bSakpm@linux-foundation.org 343834c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3439680d794bSakpm@linux-foundation.org { 344034c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 3441680d794bSakpm@linux-foundation.org 3442680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks()) 3443680d794bSakpm@linux-foundation.org seq_printf(seq, ",size=%luk", 344409cbfeafSKirill A. Shutemov sbinfo->max_blocks << (PAGE_SHIFT - 10)); 3445680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes()) 3446680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 3447680d794bSakpm@linux-foundation.org if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 344809208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode); 34498751e039SEric W. Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 34508751e039SEric W. Biederman seq_printf(seq, ",uid=%u", 34518751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid)); 34528751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 34538751e039SEric W. Biederman seq_printf(seq, ",gid=%u", 34548751e039SEric W. Biederman from_kgid_munged(&init_user_ns, sbinfo->gid)); 3455e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 34565a6e75f8SKirill A. Shutemov /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 34575a6e75f8SKirill A. Shutemov if (sbinfo->huge) 34585a6e75f8SKirill A. Shutemov seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 34595a6e75f8SKirill A. 
Shutemov #endif 346071fe804bSLee Schermerhorn shmem_show_mpol(seq, sbinfo->mpol); 3461680d794bSakpm@linux-foundation.org return 0; 3462680d794bSakpm@linux-foundation.org } 34639183df25SDavid Herrmann 34649183df25SDavid Herrmann #define MFD_NAME_PREFIX "memfd:" 34659183df25SDavid Herrmann #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1) 34669183df25SDavid Herrmann #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN) 34679183df25SDavid Herrmann 34689183df25SDavid Herrmann #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING) 34699183df25SDavid Herrmann 34709183df25SDavid Herrmann SYSCALL_DEFINE2(memfd_create, 34719183df25SDavid Herrmann const char __user *, uname, 34729183df25SDavid Herrmann unsigned int, flags) 34739183df25SDavid Herrmann { 34749183df25SDavid Herrmann struct shmem_inode_info *info; 34759183df25SDavid Herrmann struct file *file; 34769183df25SDavid Herrmann int fd, error; 34779183df25SDavid Herrmann char *name; 34789183df25SDavid Herrmann long len; 34799183df25SDavid Herrmann 34809183df25SDavid Herrmann if (flags & ~(unsigned int)MFD_ALL_FLAGS) 34819183df25SDavid Herrmann return -EINVAL; 34829183df25SDavid Herrmann 34839183df25SDavid Herrmann /* length includes terminating zero */ 34849183df25SDavid Herrmann len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1); 34859183df25SDavid Herrmann if (len <= 0) 34869183df25SDavid Herrmann return -EFAULT; 34879183df25SDavid Herrmann if (len > MFD_NAME_MAX_LEN + 1) 34889183df25SDavid Herrmann return -EINVAL; 34899183df25SDavid Herrmann 34909183df25SDavid Herrmann name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY); 34919183df25SDavid Herrmann if (!name) 34929183df25SDavid Herrmann return -ENOMEM; 34939183df25SDavid Herrmann 34949183df25SDavid Herrmann strcpy(name, MFD_NAME_PREFIX); 34959183df25SDavid Herrmann if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) { 34969183df25SDavid Herrmann error = -EFAULT; 34979183df25SDavid Herrmann goto err_name; 34989183df25SDavid Herrmann } 34999183df25SDavid Herrmann 35009183df25SDavid Herrmann /* terminating-zero may have changed after strnlen_user() returned */ 35019183df25SDavid Herrmann if (name[len + MFD_NAME_PREFIX_LEN - 1]) { 35029183df25SDavid Herrmann error = -EFAULT; 35039183df25SDavid Herrmann goto err_name; 35049183df25SDavid Herrmann } 35059183df25SDavid Herrmann 35069183df25SDavid Herrmann fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? 
O_CLOEXEC : 0); 35079183df25SDavid Herrmann if (fd < 0) { 35089183df25SDavid Herrmann error = fd; 35099183df25SDavid Herrmann goto err_name; 35109183df25SDavid Herrmann } 35119183df25SDavid Herrmann 35129183df25SDavid Herrmann file = shmem_file_setup(name, 0, VM_NORESERVE); 35139183df25SDavid Herrmann if (IS_ERR(file)) { 35149183df25SDavid Herrmann error = PTR_ERR(file); 35159183df25SDavid Herrmann goto err_fd; 35169183df25SDavid Herrmann } 35179183df25SDavid Herrmann info = SHMEM_I(file_inode(file)); 35189183df25SDavid Herrmann file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; 35199183df25SDavid Herrmann file->f_flags |= O_RDWR | O_LARGEFILE; 35209183df25SDavid Herrmann if (flags & MFD_ALLOW_SEALING) 35219183df25SDavid Herrmann info->seals &= ~F_SEAL_SEAL; 35229183df25SDavid Herrmann 35239183df25SDavid Herrmann fd_install(fd, file); 35249183df25SDavid Herrmann kfree(name); 35259183df25SDavid Herrmann return fd; 35269183df25SDavid Herrmann 35279183df25SDavid Herrmann err_fd: 35289183df25SDavid Herrmann put_unused_fd(fd); 35299183df25SDavid Herrmann err_name: 35309183df25SDavid Herrmann kfree(name); 35319183df25SDavid Herrmann return error; 35329183df25SDavid Herrmann } 35339183df25SDavid Herrmann 3534680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */ 35351da177e4SLinus Torvalds
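/*
 * Editor's note: a minimal, hypothetical userspace sketch of the
 * memfd_create() syscall defined above; it is not part of this file or
 * of the kernel build. It creates a sealable anonymous tmpfs file,
 * maps it, then forbids resizing via F_ADD_SEALS. The name and size
 * are illustrative only.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>		/* F_ADD_SEALS, F_SEAL_* */
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/memfd.h>	/* MFD_CLOEXEC, MFD_ALLOW_SEALING */

int main(void)
{
	/* shows up as "memfd:example" in /proc/<pid>/maps */
	int fd = syscall(__NR_memfd_create, "example",
			 MFD_CLOEXEC | MFD_ALLOW_SEALING);
	char *p;

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");
	/* GROW/SHRINK seals work with live mappings; F_SEAL_WRITE would not */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0)
		return 1;
	return 0;
}
#endif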
35361da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb) 35371da177e4SLinus Torvalds { 3538602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3539602586a8SHugh Dickins 3540602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks); 354149cd0a5cSGreg Thelen mpol_put(sbinfo->mpol); 3542602586a8SHugh Dickins kfree(sbinfo); 35431da177e4SLinus Torvalds sb->s_fs_info = NULL; 35441da177e4SLinus Torvalds } 35451da177e4SLinus Torvalds 35462b2af54aSKay Sievers int shmem_fill_super(struct super_block *sb, void *data, int silent) 35471da177e4SLinus Torvalds { 35481da177e4SLinus Torvalds struct inode *inode; 35490edd73b3SHugh Dickins struct shmem_sb_info *sbinfo; 3550680d794bSakpm@linux-foundation.org int err = -ENOMEM; 3551680d794bSakpm@linux-foundation.org 3552680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */ 3553425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 3554680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL); 3555680d794bSakpm@linux-foundation.org if (!sbinfo) 3556680d794bSakpm@linux-foundation.org return -ENOMEM; 3557680d794bSakpm@linux-foundation.org 3558680d794bSakpm@linux-foundation.org sbinfo->mode = S_IRWXUGO | S_ISVTX; 355976aac0e9SDavid Howells sbinfo->uid = current_fsuid(); 356076aac0e9SDavid Howells sbinfo->gid = current_fsgid(); 3561680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo; 35621da177e4SLinus Torvalds 35630edd73b3SHugh Dickins #ifdef CONFIG_TMPFS 35641da177e4SLinus Torvalds /* 35651da177e4SLinus Torvalds * By default we only allow half of the physical ram per 35661da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem; 35671da177e4SLinus Torvalds * but the internal instance is left unlimited. 35681da177e4SLinus Torvalds */ 3569ca4e0519SAl Viro if (!(sb->s_flags & MS_KERNMOUNT)) { 3570680d794bSakpm@linux-foundation.org sbinfo->max_blocks = shmem_default_max_blocks(); 3571680d794bSakpm@linux-foundation.org sbinfo->max_inodes = shmem_default_max_inodes(); 3572680d794bSakpm@linux-foundation.org if (shmem_parse_options(data, sbinfo, false)) { 3573680d794bSakpm@linux-foundation.org err = -EINVAL; 3574680d794bSakpm@linux-foundation.org goto failed; 3575680d794bSakpm@linux-foundation.org } 3576ca4e0519SAl Viro } else { 3577ca4e0519SAl Viro sb->s_flags |= MS_NOUSER; 35781da177e4SLinus Torvalds } 357991828a40SDavid M. Grimes sb->s_export_op = &shmem_export_ops; 35802f6e38f3SHugh Dickins sb->s_flags |= MS_NOSEC; 35810edd73b3SHugh Dickins #else 35820edd73b3SHugh Dickins sb->s_flags |= MS_NOUSER; 35830edd73b3SHugh Dickins #endif 35841da177e4SLinus Torvalds 35851da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 3586908c7f19STejun Heo if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3587602586a8SHugh Dickins goto failed; 3588680d794bSakpm@linux-foundation.org sbinfo->free_inodes = sbinfo->max_inodes; 3589779750d2SKirill A. Shutemov spin_lock_init(&sbinfo->shrinklist_lock); 3590779750d2SKirill A. Shutemov INIT_LIST_HEAD(&sbinfo->shrinklist); 35911da177e4SLinus Torvalds 3592285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE; 359309cbfeafSKirill A. Shutemov sb->s_blocksize = PAGE_SIZE; 359409cbfeafSKirill A. Shutemov sb->s_blocksize_bits = PAGE_SHIFT; 35951da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC; 35961da177e4SLinus Torvalds sb->s_op = &shmem_ops; 3597cfd95a9cSRobin H. Johnson sb->s_time_gran = 1; 3598b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 359939f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers; 3600b09e0fa4SEric Paris #endif 3601b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 360239f0247dSAndreas Gruenbacher sb->s_flags |= MS_POSIXACL; 360339f0247dSAndreas Gruenbacher #endif 36040edd73b3SHugh Dickins 3605454abafeSDmitry Monakhov inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 36061da177e4SLinus Torvalds if (!inode) 36071da177e4SLinus Torvalds goto failed; 3608680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid; 3609680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid; 3610318ceed0SAl Viro sb->s_root = d_make_root(inode); 3611318ceed0SAl Viro if (!sb->s_root) 361248fde701SAl Viro goto failed; 36131da177e4SLinus Torvalds return 0; 36141da177e4SLinus Torvalds 36151da177e4SLinus Torvalds failed: 36161da177e4SLinus Torvalds shmem_put_super(sb); 36171da177e4SLinus Torvalds return err; 36181da177e4SLinus Torvalds } 36191da177e4SLinus Torvalds 3620fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep; 36211da177e4SLinus Torvalds 36221da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb) 36231da177e4SLinus Torvalds { 362441ffe5d5SHugh Dickins struct shmem_inode_info *info; 362541ffe5d5SHugh Dickins info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 362641ffe5d5SHugh Dickins if (!info) 36271da177e4SLinus Torvalds return NULL; 362841ffe5d5SHugh Dickins return &info->vfs_inode; 36291da177e4SLinus Torvalds } 36301da177e4SLinus Torvalds 363141ffe5d5SHugh Dickins static void shmem_destroy_callback(struct rcu_head *head) 3632fa0d7e3dSNick Piggin { 3633fa0d7e3dSNick Piggin struct inode *inode = container_of(head, struct inode, i_rcu); 363484e710daSAl Viro if (S_ISLNK(inode->i_mode)) 36353ed47db3SAl Viro kfree(inode->i_link); 3636fa0d7e3dSNick Piggin
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 3637fa0d7e3dSNick Piggin } 3638fa0d7e3dSNick Piggin 36391da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode) 36401da177e4SLinus Torvalds { 364109208d15SAl Viro if (S_ISREG(inode->i_mode)) 36421da177e4SLinus Torvalds mpol_free_shared_policy(&SHMEM_I(inode)->policy); 364341ffe5d5SHugh Dickins call_rcu(&inode->i_rcu, shmem_destroy_callback); 36441da177e4SLinus Torvalds } 36451da177e4SLinus Torvalds 364641ffe5d5SHugh Dickins static void shmem_init_inode(void *foo) 36471da177e4SLinus Torvalds { 364841ffe5d5SHugh Dickins struct shmem_inode_info *info = foo; 364941ffe5d5SHugh Dickins inode_init_once(&info->vfs_inode); 36501da177e4SLinus Torvalds } 36511da177e4SLinus Torvalds 365241ffe5d5SHugh Dickins static int shmem_init_inodecache(void) 36531da177e4SLinus Torvalds { 36541da177e4SLinus Torvalds shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 36551da177e4SLinus Torvalds sizeof(struct shmem_inode_info), 36565d097056SVladimir Davydov 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 36571da177e4SLinus Torvalds return 0; 36581da177e4SLinus Torvalds } 36591da177e4SLinus Torvalds 366041ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void) 36611da177e4SLinus Torvalds { 36621a1d92c1SAlexey Dobriyan kmem_cache_destroy(shmem_inode_cachep); 36631da177e4SLinus Torvalds } 36641da177e4SLinus Torvalds 3665f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = { 36661da177e4SLinus Torvalds .writepage = shmem_writepage, 366776719325SKen Chen .set_page_dirty = __set_page_dirty_no_writeback, 36681da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3669800d15a5SNick Piggin .write_begin = shmem_write_begin, 3670800d15a5SNick Piggin .write_end = shmem_write_end, 36711da177e4SLinus Torvalds #endif 36721c93923cSAndrew Morton #ifdef CONFIG_MIGRATION 3673304dbdb7SLee Schermerhorn .migratepage = migrate_page, 36741c93923cSAndrew Morton #endif 3675aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 36761da177e4SLinus Torvalds }; 36771da177e4SLinus Torvalds 367815ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = { 36791da177e4SLinus Torvalds .mmap = shmem_mmap, 3680c01d5b30SHugh Dickins .get_unmapped_area = shmem_get_unmapped_area, 36811da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3682220f2ac9SHugh Dickins .llseek = shmem_file_llseek, 36832ba5bbedSAl Viro .read_iter = shmem_file_read_iter, 36848174202bSAl Viro .write_iter = generic_file_write_iter, 36851b061d92SChristoph Hellwig .fsync = noop_fsync, 368682c156f8SAl Viro .splice_read = generic_file_splice_read, 3687f6cb85d0SAl Viro .splice_write = iter_file_splice_write, 368883e4fa9cSHugh Dickins .fallocate = shmem_fallocate, 36891da177e4SLinus Torvalds #endif 36901da177e4SLinus Torvalds }; 36911da177e4SLinus Torvalds 369292e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = { 369344a30220SYu Zhao .getattr = shmem_getattr, 369494c1e62dSHugh Dickins .setattr = shmem_setattr, 3695b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3696b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3697feda821eSChristoph Hellwig .set_acl = simple_set_acl, 3698b09e0fa4SEric Paris #endif 36991da177e4SLinus Torvalds }; 37001da177e4SLinus Torvalds 370192e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = { 37021da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 37031da177e4SLinus Torvalds .create = shmem_create, 37041da177e4SLinus Torvalds .lookup = simple_lookup, 
37051da177e4SLinus Torvalds .link = shmem_link, 37061da177e4SLinus Torvalds .unlink = shmem_unlink, 37071da177e4SLinus Torvalds .symlink = shmem_symlink, 37081da177e4SLinus Torvalds .mkdir = shmem_mkdir, 37091da177e4SLinus Torvalds .rmdir = shmem_rmdir, 37101da177e4SLinus Torvalds .mknod = shmem_mknod, 37112773bf00SMiklos Szeredi .rename = shmem_rename2, 371260545d0dSAl Viro .tmpfile = shmem_tmpfile, 37131da177e4SLinus Torvalds #endif 3714b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3715b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3716b09e0fa4SEric Paris #endif 371739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 371894c1e62dSHugh Dickins .setattr = shmem_setattr, 3719feda821eSChristoph Hellwig .set_acl = simple_set_acl, 372039f0247dSAndreas Gruenbacher #endif 372139f0247dSAndreas Gruenbacher }; 372239f0247dSAndreas Gruenbacher 372392e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = { 3724b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3725b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3726b09e0fa4SEric Paris #endif 372739f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 372894c1e62dSHugh Dickins .setattr = shmem_setattr, 3729feda821eSChristoph Hellwig .set_acl = simple_set_acl, 373039f0247dSAndreas Gruenbacher #endif 37311da177e4SLinus Torvalds }; 37321da177e4SLinus Torvalds 3733759b9775SHugh Dickins static const struct super_operations shmem_ops = { 37341da177e4SLinus Torvalds .alloc_inode = shmem_alloc_inode, 37351da177e4SLinus Torvalds .destroy_inode = shmem_destroy_inode, 37361da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 37371da177e4SLinus Torvalds .statfs = shmem_statfs, 37381da177e4SLinus Torvalds .remount_fs = shmem_remount_fs, 3739680d794bSakpm@linux-foundation.org .show_options = shmem_show_options, 37401da177e4SLinus Torvalds #endif 37411f895f75SAl Viro .evict_inode = shmem_evict_inode, 37421da177e4SLinus Torvalds .drop_inode = generic_delete_inode, 37431da177e4SLinus Torvalds .put_super = shmem_put_super, 3744779750d2SKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3745779750d2SKirill A. Shutemov .nr_cached_objects = shmem_unused_huge_count, 3746779750d2SKirill A. Shutemov .free_cached_objects = shmem_unused_huge_scan, 3747779750d2SKirill A. Shutemov #endif 37481da177e4SLinus Torvalds }; 37491da177e4SLinus Torvalds 3750f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = { 375154cb8821SNick Piggin .fault = shmem_fault, 3752d7c17551SNing Qu .map_pages = filemap_map_pages, 37531da177e4SLinus Torvalds #ifdef CONFIG_NUMA 37541da177e4SLinus Torvalds .set_policy = shmem_set_policy, 37551da177e4SLinus Torvalds .get_policy = shmem_get_policy, 37561da177e4SLinus Torvalds #endif 37571da177e4SLinus Torvalds }; 37581da177e4SLinus Torvalds 37593c26ff6eSAl Viro static struct dentry *shmem_mount(struct file_system_type *fs_type, 37603c26ff6eSAl Viro int flags, const char *dev_name, void *data) 37611da177e4SLinus Torvalds { 37623c26ff6eSAl Viro return mount_nodev(fs_type, flags, data, shmem_fill_super); 37631da177e4SLinus Torvalds } 37641da177e4SLinus Torvalds 376541ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 37661da177e4SLinus Torvalds .owner = THIS_MODULE, 37671da177e4SLinus Torvalds .name = "tmpfs", 37683c26ff6eSAl Viro .mount = shmem_mount, 37691da177e4SLinus Torvalds .kill_sb = kill_litter_super, 37702b8576cbSEric W. 
Biederman .fs_flags = FS_USERNS_MOUNT, 37711da177e4SLinus Torvalds }; 37721da177e4SLinus Torvalds 377341ffe5d5SHugh Dickins int __init shmem_init(void) 37741da177e4SLinus Torvalds { 37751da177e4SLinus Torvalds int error; 37761da177e4SLinus Torvalds 377716203a7aSRob Landley /* If rootfs called this, don't re-init */ 377816203a7aSRob Landley if (shmem_inode_cachep) 377916203a7aSRob Landley return 0; 378016203a7aSRob Landley 378141ffe5d5SHugh Dickins error = shmem_init_inodecache(); 37821da177e4SLinus Torvalds if (error) 37831da177e4SLinus Torvalds goto out3; 37841da177e4SLinus Torvalds 378541ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type); 37861da177e4SLinus Torvalds if (error) { 37871170532bSJoe Perches pr_err("Could not register tmpfs\n"); 37881da177e4SLinus Torvalds goto out2; 37891da177e4SLinus Torvalds } 379095dc112aSGreg Kroah-Hartman 3791ca4e0519SAl Viro shm_mnt = kern_mount(&shmem_fs_type); 37921da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) { 37931da177e4SLinus Torvalds error = PTR_ERR(shm_mnt); 37941170532bSJoe Perches pr_err("Could not kern_mount tmpfs\n"); 37951da177e4SLinus Torvalds goto out1; 37961da177e4SLinus Torvalds } 37975a6e75f8SKirill A. Shutemov 3798e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 37995a6e75f8SKirill A. Shutemov if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) 38005a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 38015a6e75f8SKirill A. Shutemov else 38025a6e75f8SKirill A. Shutemov shmem_huge = 0; /* just in case it was patched */ 38035a6e75f8SKirill A. Shutemov #endif 38041da177e4SLinus Torvalds return 0; 38051da177e4SLinus Torvalds 38061da177e4SLinus Torvalds out1: 380741ffe5d5SHugh Dickins unregister_filesystem(&shmem_fs_type); 38081da177e4SLinus Torvalds out2: 380941ffe5d5SHugh Dickins shmem_destroy_inodecache(); 38101da177e4SLinus Torvalds out3: 38111da177e4SLinus Torvalds shm_mnt = ERR_PTR(error); 38121da177e4SLinus Torvalds return error; 38131da177e4SLinus Torvalds } 3814853ac43aSMatt Mackall 3815e496cf3dSKirill A. Shutemov #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) 38165a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj, 38175a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 38185a6e75f8SKirill A. Shutemov { 38195a6e75f8SKirill A. Shutemov int values[] = { 38205a6e75f8SKirill A. Shutemov SHMEM_HUGE_ALWAYS, 38215a6e75f8SKirill A. Shutemov SHMEM_HUGE_WITHIN_SIZE, 38225a6e75f8SKirill A. Shutemov SHMEM_HUGE_ADVISE, 38235a6e75f8SKirill A. Shutemov SHMEM_HUGE_NEVER, 38245a6e75f8SKirill A. Shutemov SHMEM_HUGE_DENY, 38255a6e75f8SKirill A. Shutemov SHMEM_HUGE_FORCE, 38265a6e75f8SKirill A. Shutemov }; 38275a6e75f8SKirill A. Shutemov int i, count; 38285a6e75f8SKirill A. Shutemov 38295a6e75f8SKirill A. Shutemov for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) { 38305a6e75f8SKirill A. Shutemov const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s "; 38315a6e75f8SKirill A. Shutemov 38325a6e75f8SKirill A. Shutemov count += sprintf(buf + count, fmt, 38335a6e75f8SKirill A. Shutemov shmem_format_huge(values[i])); 38345a6e75f8SKirill A. Shutemov } 38355a6e75f8SKirill A. Shutemov buf[count - 1] = '\n'; 38365a6e75f8SKirill A. Shutemov return count; 38375a6e75f8SKirill A. Shutemov } 38385a6e75f8SKirill A. Shutemov 38395a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj, 38405a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 38415a6e75f8SKirill A. 
Shutemov { 38425a6e75f8SKirill A. Shutemov char tmp[16]; 38435a6e75f8SKirill A. Shutemov int huge; 38445a6e75f8SKirill A. Shutemov 38455a6e75f8SKirill A. Shutemov if (count + 1 > sizeof(tmp)) 38465a6e75f8SKirill A. Shutemov return -EINVAL; 38475a6e75f8SKirill A. Shutemov memcpy(tmp, buf, count); 38485a6e75f8SKirill A. Shutemov tmp[count] = '\0'; 38495a6e75f8SKirill A. Shutemov if (count && tmp[count - 1] == '\n') 38505a6e75f8SKirill A. Shutemov tmp[count - 1] = '\0'; 38515a6e75f8SKirill A. Shutemov 38525a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(tmp); 38535a6e75f8SKirill A. Shutemov if (huge == -EINVAL) 38545a6e75f8SKirill A. Shutemov return -EINVAL; 38555a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 38565a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 38575a6e75f8SKirill A. Shutemov return -EINVAL; 38585a6e75f8SKirill A. Shutemov 38595a6e75f8SKirill A. Shutemov shmem_huge = huge; 38605a6e75f8SKirill A. Shutemov if (shmem_huge < SHMEM_HUGE_DENY) 38615a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 38625a6e75f8SKirill A. Shutemov return count; 38635a6e75f8SKirill A. Shutemov } 38645a6e75f8SKirill A. Shutemov 38655a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr = 38665a6e75f8SKirill A. Shutemov __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 38673b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 3868f3f0e1d2SKirill A. Shutemov 38693b33719cSArnd Bergmann #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3870f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma) 3871f3f0e1d2SKirill A. Shutemov { 3872f3f0e1d2SKirill A. Shutemov struct inode *inode = file_inode(vma->vm_file); 3873f3f0e1d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3874f3f0e1d2SKirill A. Shutemov loff_t i_size; 3875f3f0e1d2SKirill A. Shutemov pgoff_t off; 3876f3f0e1d2SKirill A. Shutemov 3877f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 3878f3f0e1d2SKirill A. Shutemov return true; 3879f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY) 3880f3f0e1d2SKirill A. Shutemov return false; 3881f3f0e1d2SKirill A. Shutemov switch (sbinfo->huge) { 3882f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_NEVER: 3883f3f0e1d2SKirill A. Shutemov return false; 3884f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ALWAYS: 3885f3f0e1d2SKirill A. Shutemov return true; 3886f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 3887f3f0e1d2SKirill A. Shutemov off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); 3888f3f0e1d2SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 3889f3f0e1d2SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 3890f3f0e1d2SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 3891f3f0e1d2SKirill A. Shutemov return true; 3892f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 3893f3f0e1d2SKirill A. Shutemov /* TODO: implement fadvise() hints */ 3894f3f0e1d2SKirill A. Shutemov return (vma->vm_flags & VM_HUGEPAGE); 3895f3f0e1d2SKirill A. Shutemov default: 3896f3f0e1d2SKirill A. Shutemov VM_BUG_ON(1); 3897f3f0e1d2SKirill A. Shutemov return false; 3898f3f0e1d2SKirill A. Shutemov } 3899f3f0e1d2SKirill A. Shutemov } 39003b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 39015a6e75f8SKirill A. Shutemov
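/*
 * Editor's note: a hypothetical userspace sketch, not part of this
 * file, showing how SHMEM_HUGE_ADVISE above is exercised. With the
 * mount option huge=advise, or "advise" written to
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled (the attribute
 * defined above), huge pages are opt-in: madvise(MADV_HUGEPAGE) sets
 * VM_HUGEPAGE on the vma, so shmem_huge_enabled() returns true for it.
 * The path and length are illustrative only.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4UL << 20;		/* span at least one PMD-sized unit */
	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
	void *p;

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);	/* opt this mapping in */
	return 0;
}
#endif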
3902853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */ 3903853ac43aSMatt Mackall 3904853ac43aSMatt Mackall /* 3905853ac43aSMatt Mackall * tiny-shmem: simple shmemfs and tmpfs using ramfs code 3906853ac43aSMatt Mackall * 3907853ac43aSMatt Mackall * This is intended for small systems where the benefits of the full 3908853ac43aSMatt Mackall * shmem code (swap-backed and resource-limited) are outweighed by 3909853ac43aSMatt Mackall * its complexity. On systems without swap this code should be 3910853ac43aSMatt Mackall * effectively equivalent, but much lighter weight. 3911853ac43aSMatt Mackall */ 3912853ac43aSMatt Mackall 391341ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 3914853ac43aSMatt Mackall .name = "tmpfs", 39153c26ff6eSAl Viro .mount = ramfs_mount, 3916853ac43aSMatt Mackall .kill_sb = kill_litter_super, 39172b8576cbSEric W. Biederman .fs_flags = FS_USERNS_MOUNT, 3918853ac43aSMatt Mackall }; 3919853ac43aSMatt Mackall 392041ffe5d5SHugh Dickins int __init shmem_init(void) 3921853ac43aSMatt Mackall { 392241ffe5d5SHugh Dickins BUG_ON(register_filesystem(&shmem_fs_type) != 0); 3923853ac43aSMatt Mackall 392441ffe5d5SHugh Dickins shm_mnt = kern_mount(&shmem_fs_type); 3925853ac43aSMatt Mackall BUG_ON(IS_ERR(shm_mnt)); 3926853ac43aSMatt Mackall 3927853ac43aSMatt Mackall return 0; 3928853ac43aSMatt Mackall } 3929853ac43aSMatt Mackall 393041ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page) 3931853ac43aSMatt Mackall { 3932853ac43aSMatt Mackall return 0; 3933853ac43aSMatt Mackall } 3934853ac43aSMatt Mackall 39353f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user) 39363f96b79aSHugh Dickins { 39373f96b79aSHugh Dickins return 0; 39383f96b79aSHugh Dickins } 39393f96b79aSHugh Dickins 394024513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping) 394124513264SHugh Dickins { 394224513264SHugh Dickins } 394324513264SHugh Dickins 3944c01d5b30SHugh Dickins #ifdef CONFIG_MMU 3945c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 3946c01d5b30SHugh Dickins unsigned long addr, unsigned long len, 3947c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 3948c01d5b30SHugh Dickins { 3949c01d5b30SHugh Dickins return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); 3950c01d5b30SHugh Dickins } 3951c01d5b30SHugh Dickins #endif 3952c01d5b30SHugh Dickins 395341ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 395494c1e62dSHugh Dickins { 395541ffe5d5SHugh Dickins truncate_inode_pages_range(inode->i_mapping, lstart, lend); 395694c1e62dSHugh Dickins } 395794c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range); 395894c1e62dSHugh Dickins 3959853ac43aSMatt Mackall #define shmem_vm_ops generic_file_vm_ops 39600b0a0806SHugh Dickins #define shmem_file_operations ramfs_file_operations 3961454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 39620b0a0806SHugh Dickins #define shmem_acct_size(flags, size) 0 39630b0a0806SHugh Dickins #define shmem_unacct_size(flags, size) do {} while (0) 3964853ac43aSMatt Mackall 3965853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */ 3966853ac43aSMatt Mackall 3967853ac43aSMatt Mackall /* common code */ 39681da177e4SLinus Torvalds 396919938e35SRasmus Villemoes static const struct dentry_operations anon_ops = { 3970118b2302SAl Viro .d_dname = simple_dname 39713451538aSAl Viro }; 39723451538aSAl Viro 3973c7277090SEric Paris static struct
file *__shmem_file_setup(const char *name, loff_t size, 3974c7277090SEric Paris unsigned long flags, unsigned int i_flags) 39751da177e4SLinus Torvalds { 39766b4d0b27SAl Viro struct file *res; 39771da177e4SLinus Torvalds struct inode *inode; 39782c48b9c4SAl Viro struct path path; 39793451538aSAl Viro struct super_block *sb; 39801da177e4SLinus Torvalds struct qstr this; 39811da177e4SLinus Torvalds 39821da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) 39836b4d0b27SAl Viro return ERR_CAST(shm_mnt); 39841da177e4SLinus Torvalds 3985285b2c4fSHugh Dickins if (size < 0 || size > MAX_LFS_FILESIZE) 39861da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 39871da177e4SLinus Torvalds 39881da177e4SLinus Torvalds if (shmem_acct_size(flags, size)) 39891da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 39901da177e4SLinus Torvalds 39916b4d0b27SAl Viro res = ERR_PTR(-ENOMEM); 39921da177e4SLinus Torvalds this.name = name; 39931da177e4SLinus Torvalds this.len = strlen(name); 39941da177e4SLinus Torvalds this.hash = 0; /* will go */ 39953451538aSAl Viro sb = shm_mnt->mnt_sb; 399666ee4b88SKonstantin Khlebnikov path.mnt = mntget(shm_mnt); 39973451538aSAl Viro path.dentry = d_alloc_pseudo(sb, &this); 39982c48b9c4SAl Viro if (!path.dentry) 39991da177e4SLinus Torvalds goto put_memory; 40003451538aSAl Viro d_set_d_op(path.dentry, &anon_ops); 40011da177e4SLinus Torvalds 40026b4d0b27SAl Viro res = ERR_PTR(-ENOSPC); 40033451538aSAl Viro inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 40041da177e4SLinus Torvalds if (!inode) 400566ee4b88SKonstantin Khlebnikov goto put_memory; 40061da177e4SLinus Torvalds 4007c7277090SEric Paris inode->i_flags |= i_flags; 40082c48b9c4SAl Viro d_instantiate(path.dentry, inode); 40091da177e4SLinus Torvalds inode->i_size = size; 40106d6b77f1SMiklos Szeredi clear_nlink(inode); /* It is unlinked */ 401126567cdbSAl Viro res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 401226567cdbSAl Viro if (IS_ERR(res)) 401366ee4b88SKonstantin Khlebnikov goto put_path; 40144b42af81SAl Viro 40156b4d0b27SAl Viro res = alloc_file(&path, FMODE_WRITE | FMODE_READ, 40164b42af81SAl Viro &shmem_file_operations); 40176b4d0b27SAl Viro if (IS_ERR(res)) 401866ee4b88SKonstantin Khlebnikov goto put_path; 40194b42af81SAl Viro 40206b4d0b27SAl Viro return res; 40211da177e4SLinus Torvalds 40221da177e4SLinus Torvalds put_memory: 40231da177e4SLinus Torvalds shmem_unacct_size(flags, size); 402466ee4b88SKonstantin Khlebnikov put_path: 402566ee4b88SKonstantin Khlebnikov path_put(&path); 40266b4d0b27SAl Viro return res; 40271da177e4SLinus Torvalds } 4028c7277090SEric Paris 4029c7277090SEric Paris /** 4030c7277090SEric Paris * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 4031c7277090SEric Paris * kernel internal. There will be NO LSM permission checks against the 4032c7277090SEric Paris * underlying inode. So users of this interface must do LSM checks at a 4033e1832f29SStephen Smalley * higher layer. The users are the big_key and shm implementations. LSM 4034e1832f29SStephen Smalley * checks are provided at the key or shm level rather than the inode. 
4035c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps) 4036c7277090SEric Paris * @size: size to be set for the file 4037c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 4038c7277090SEric Paris */ 4039c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) 4040c7277090SEric Paris { 4041c7277090SEric Paris return __shmem_file_setup(name, size, flags, S_PRIVATE); 4042c7277090SEric Paris } 4043c7277090SEric Paris 4044c7277090SEric Paris /** 4045c7277090SEric Paris * shmem_file_setup - get an unlinked file living in tmpfs 4046c7277090SEric Paris * @name: name for dentry (to be seen in /proc/<pid>/maps) 4047c7277090SEric Paris * @size: size to be set for the file 4048c7277090SEric Paris * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 4049c7277090SEric Paris */ 4050c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 4051c7277090SEric Paris { 4052c7277090SEric Paris return __shmem_file_setup(name, size, flags, 0); 4053c7277090SEric Paris } 4054395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup); 40551da177e4SLinus Torvalds
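/*
 * Editor's note: a hypothetical in-kernel caller, not part of this
 * file, sketching typical use of shmem_file_setup(). A subsystem asks
 * for an unlinked tmpfs file of a given size and releases it with
 * fput() when done; the name is cosmetic (visible in /proc/<pid>/maps).
 */
#if 0
static struct file *example_create_shmem_buffer(loff_t size)
{
	struct file *filp;

	filp = shmem_file_setup("example-buf", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return filp;	/* -EINVAL, -ENOMEM or -ENOSPC */
	/* ... populate pages through filp->f_mapping ... */
	return filp;		/* caller releases with fput(filp) */
}
#endif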
405646711810SRandy Dunlap /** 40571da177e4SLinus Torvalds * shmem_zero_setup - setup a shared anonymous mapping 40581da177e4SLinus Torvalds * @vma: the vma to be mmapped is prepared by do_mmap_pgoff 40591da177e4SLinus Torvalds */ 40601da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma) 40611da177e4SLinus Torvalds { 40621da177e4SLinus Torvalds struct file *file; 40631da177e4SLinus Torvalds loff_t size = vma->vm_end - vma->vm_start; 40641da177e4SLinus Torvalds 406566fc1303SHugh Dickins /* 406666fc1303SHugh Dickins * Cloning a new file under mmap_sem leads to a lock ordering conflict 406766fc1303SHugh Dickins * between XFS directory reading and selinux: since this file is only 406866fc1303SHugh Dickins * accessible to the user through its mapping, use S_PRIVATE flag to 406966fc1303SHugh Dickins * bypass file security, in the same way as shmem_kernel_file_setup(). 407066fc1303SHugh Dickins */ 407166fc1303SHugh Dickins file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE); 40721da177e4SLinus Torvalds if (IS_ERR(file)) 40731da177e4SLinus Torvalds return PTR_ERR(file); 40741da177e4SLinus Torvalds 40751da177e4SLinus Torvalds if (vma->vm_file) 40761da177e4SLinus Torvalds fput(vma->vm_file); 40771da177e4SLinus Torvalds vma->vm_file = file; 40781da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 4079f3f0e1d2SKirill A. Shutemov 4080e496cf3dSKirill A. Shutemov if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 4081f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 4082f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 4083f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 4084f3f0e1d2SKirill A. Shutemov } 4085f3f0e1d2SKirill A. Shutemov 40861da177e4SLinus Torvalds return 0; 40871da177e4SLinus Torvalds } 4088d9d90e5eSHugh Dickins 4089d9d90e5eSHugh Dickins /** 4090d9d90e5eSHugh Dickins * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 4091d9d90e5eSHugh Dickins * @mapping: the page's address_space 4092d9d90e5eSHugh Dickins * @index: the page index 4093d9d90e5eSHugh Dickins * @gfp: the page allocator flags to use if allocating 4094d9d90e5eSHugh Dickins * 4095d9d90e5eSHugh Dickins * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 4096d9d90e5eSHugh Dickins * with any new page allocations done using the specified allocation flags. 4097d9d90e5eSHugh Dickins * But read_cache_page_gfp() uses the ->readpage() method, which does not 4098d9d90e5eSHugh Dickins * suit tmpfs, since it may have pages in swapcache, and needs to find those 4099d9d90e5eSHugh Dickins * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 4100d9d90e5eSHugh Dickins * 410168da9f05SHugh Dickins * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 410268da9f05SHugh Dickins * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 4103d9d90e5eSHugh Dickins */ 4104d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 4105d9d90e5eSHugh Dickins pgoff_t index, gfp_t gfp) 4106d9d90e5eSHugh Dickins { 410768da9f05SHugh Dickins #ifdef CONFIG_SHMEM 410868da9f05SHugh Dickins struct inode *inode = mapping->host; 41099276aad6SHugh Dickins struct page *page; 411068da9f05SHugh Dickins int error; 411168da9f05SHugh Dickins 411268da9f05SHugh Dickins BUG_ON(mapping->a_ops != &shmem_aops); 41139e18eb29SAndres Lagar-Cavilla error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, 41149e18eb29SAndres Lagar-Cavilla gfp, NULL, NULL); 411568da9f05SHugh Dickins if (error) 411668da9f05SHugh Dickins page = ERR_PTR(error); 411768da9f05SHugh Dickins else 411868da9f05SHugh Dickins unlock_page(page); 411968da9f05SHugh Dickins return page; 412068da9f05SHugh Dickins #else 412168da9f05SHugh Dickins /* 412268da9f05SHugh Dickins * The tiny !SHMEM case uses ramfs without swap 412368da9f05SHugh Dickins */ 4124d9d90e5eSHugh Dickins return read_cache_page_gfp(mapping, index, gfp); 412568da9f05SHugh Dickins #endif 4126d9d90e5eSHugh Dickins } 4127d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
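/*
 * Editor's note: a hypothetical caller, not part of this file, reading
 * one page from a shmem object in the style of i915/ttm, mixing
 * __GFP_NORETRY | __GFP_NOWARN into the mapping mask as the comment
 * above describes. The page comes back unlocked with a reference held,
 * so it is dropped with put_page() when finished.
 */
#if 0
static int example_peek_shmem_page(struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* e.g. -ENOMEM under memory pressure */

	/* ... kmap() and inspect the page here ... */
	put_page(page);
	return 0;
}
#endif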