/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	if (shmem_acct_block(info->flags, pages))
		return false;
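	/*
	 * Optimistically charge first; the max_blocks check further down
	 * rolls this back if the filesystem turns out to be full.
	 */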
	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	if (!sbinfo->max_blocks)
		return true;
	if (percpu_counter_compare(&sbinfo->used_blocks,
				sbinfo->max_blocks - pages) > 0) {
		inode->i_mapping->nrpages -= pages;
		spin_lock_irqsave(&info->lock, flags);
		info->alloced -= pages;
		shmem_recalc_inode(inode);
		spin_unlock_irqrestore(&info->lock, flags);
		shmem_unacct_blocks(info->flags, pages);
		return false;
	}
	percpu_counter_add(&sbinfo->used_blocks, pages);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
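 * The caller passes the entry it expects to find at @index; if something
 * else is found there (e.g. a racing thread changed it), fail with -ENOENT
 * rather than replace blindly.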
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	struct radix_tree_node *node;
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
	if (!item)
		return -ENOENT;
	if (item != expected)
		return -ENOENT;
	__radix_tree_replace(&mapping->page_tree, node, pslot,
			     replacement, NULL, NULL);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
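	/* First pass, under the lock: sort inodes into keep/remove/split */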
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split) {
			iput(inode);
			continue;
		}

		page = find_lock_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		if (!PageTransHuge(page)) {
			unlock_page(page);
			put_page(page);
			goto drop;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		if (ret) {
			/* split failed: leave it on the list */
			iput(inode);
			continue;
		}

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
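		/* drop the reference taken by igrab() above */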
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->page_tree,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->page_tree,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->page_tree, index, page);
	} else {
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_ref_sub(page, nr);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exceptional_entry(page))
			swapped++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
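 * Works in two passes: the first uses trylock_page() and simply skips
 * pages it cannot lock; the second sleeps on the page lock, and when
 * truncating restarts until the whole range is really gone.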
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
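				/* only truncate if page still belongs to this mapping */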
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial thp truncate due to 'start' in
				 * middle of THP: don't need to look on these
				 * pages again on !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
			 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);
	return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
				spin_lock(&sbinfo->shrinklist_lock);
				if (list_empty(&info->shrinklist)) {
					list_add_tail(&info->shrinklist,
							&sbinfo->shrinklist);
					sbinfo->shrinklist_len++;
				}
				spin_unlock(&sbinfo->shrinklist_lock);
			}
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long found = -1;
	unsigned int checked = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		if (*slot == item) {
			found = iter.index;
			break;
		}
		checked++;
		if ((checked % 4096) != 0)
			continue;
		slot = radix_tree_iter_resume(slot, &iter);
		cond_resched_rcu();
	}

	rcu_read_unlock();
	return found;
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
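 * Returns 0 on success, -EAGAIN if this inode does not hold the swap entry
 * (telling shmem_unuse to move on to the next swapped inode), or another
 * negative error.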
108646f65ec1SHugh Dickins */ 108741ffe5d5SHugh Dickins static int shmem_unuse_inode(struct shmem_inode_info *info, 1088bde05d1cSHugh Dickins swp_entry_t swap, struct page **pagep) 10891da177e4SLinus Torvalds { 1090285b2c4fSHugh Dickins struct address_space *mapping = info->vfs_inode.i_mapping; 109146f65ec1SHugh Dickins void *radswap; 109241ffe5d5SHugh Dickins pgoff_t index; 1093bde05d1cSHugh Dickins gfp_t gfp; 1094bde05d1cSHugh Dickins int error = 0; 10951da177e4SLinus Torvalds 109646f65ec1SHugh Dickins radswap = swp_to_radix_entry(swap); 1097478922e2SMatthew Wilcox index = find_swap_entry(&mapping->page_tree, radswap); 109846f65ec1SHugh Dickins if (index == -1) 109900501b53SJohannes Weiner return -EAGAIN; /* tell shmem_unuse we found nothing */ 11002e0e26c7SHugh Dickins 11011b1b32f2SHugh Dickins /* 11021b1b32f2SHugh Dickins * Move _head_ to start search for next from here. 11031f895f75SAl Viro * But be careful: shmem_evict_inode checks list_empty without taking 11041b1b32f2SHugh Dickins * mutex, and there's an instant in list_move_tail when info->swaplist 1105285b2c4fSHugh Dickins * would appear empty, if it were the only one on shmem_swaplist. 11061b1b32f2SHugh Dickins */ 11071b1b32f2SHugh Dickins if (shmem_swaplist.next != &info->swaplist) 11082e0e26c7SHugh Dickins list_move_tail(&shmem_swaplist, &info->swaplist); 11092e0e26c7SHugh Dickins 1110bde05d1cSHugh Dickins gfp = mapping_gfp_mask(mapping); 1111bde05d1cSHugh Dickins if (shmem_should_replace_page(*pagep, gfp)) { 1112bde05d1cSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1113bde05d1cSHugh Dickins error = shmem_replace_page(pagep, gfp, info, index); 1114bde05d1cSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 1115bde05d1cSHugh Dickins /* 1116bde05d1cSHugh Dickins * We needed to drop mutex to make that restrictive page 11170142ef6cSHugh Dickins * allocation, but the inode might have been freed while we 11180142ef6cSHugh Dickins * dropped it: although a racing shmem_evict_inode() cannot 11190142ef6cSHugh Dickins * complete without emptying the radix_tree, our page lock 11200142ef6cSHugh Dickins * on this swapcache page is not enough to prevent that - 11210142ef6cSHugh Dickins * free_swap_and_cache() of our swap entry will only 11220142ef6cSHugh Dickins * trylock_page(), removing swap from radix_tree whatever. 11230142ef6cSHugh Dickins * 11240142ef6cSHugh Dickins * We must not proceed to shmem_add_to_page_cache() if the 11250142ef6cSHugh Dickins * inode has been freed, but of course we cannot rely on 11260142ef6cSHugh Dickins * inode or mapping or info to check that. However, we can 11270142ef6cSHugh Dickins * safely check if our swap entry is still in use (and here 11280142ef6cSHugh Dickins * it can't have got reused for another page): if it's still 11290142ef6cSHugh Dickins * in use, then the inode cannot have been freed yet, and we 11300142ef6cSHugh Dickins * can safely proceed (if it's no longer in use, that tells 11310142ef6cSHugh Dickins * nothing about the inode, but we don't need to unuse swap). 1132bde05d1cSHugh Dickins */ 1133bde05d1cSHugh Dickins if (!page_swapcount(*pagep)) 1134bde05d1cSHugh Dickins error = -ENOENT; 1135bde05d1cSHugh Dickins } 1136bde05d1cSHugh Dickins 1137d13d1443SKAMEZAWA Hiroyuki /* 1138778dd893SHugh Dickins * We rely on shmem_swaplist_mutex, not only to protect the swaplist, 1139778dd893SHugh Dickins * but also to hold up shmem_evict_inode(): so inode cannot be freed 1140778dd893SHugh Dickins * beneath us (pagelock doesn't help until the page is in pagecache). 
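 * (The mutex was dropped only briefly above, around the page-replacement
 * allocation, and is held right across shmem_add_to_page_cache() below.)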
1141d13d1443SKAMEZAWA Hiroyuki */ 1142bde05d1cSHugh Dickins if (!error) 1143bde05d1cSHugh Dickins error = shmem_add_to_page_cache(*pagep, mapping, index, 1144fed400a1SWang Sheng-Hui radswap); 114548f170fbSHugh Dickins if (error != -ENOMEM) { 114646f65ec1SHugh Dickins /* 114746f65ec1SHugh Dickins * Truncation and eviction use free_swap_and_cache(), which 114846f65ec1SHugh Dickins * only does trylock page: if we raced, best clean up here. 114946f65ec1SHugh Dickins */ 1150bde05d1cSHugh Dickins delete_from_swap_cache(*pagep); 1151bde05d1cSHugh Dickins set_page_dirty(*pagep); 115246f65ec1SHugh Dickins if (!error) { 11534595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1154285b2c4fSHugh Dickins info->swapped--; 11554595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 115641ffe5d5SHugh Dickins swap_free(swap); 115746f65ec1SHugh Dickins } 11581da177e4SLinus Torvalds } 11592e0e26c7SHugh Dickins return error; 11601da177e4SLinus Torvalds } 11611da177e4SLinus Torvalds 11621da177e4SLinus Torvalds /* 116346f65ec1SHugh Dickins * Search through swapped inodes to find and replace swap by page. 11641da177e4SLinus Torvalds */ 116541ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page) 11661da177e4SLinus Torvalds { 116741ffe5d5SHugh Dickins struct list_head *this, *next; 11681da177e4SLinus Torvalds struct shmem_inode_info *info; 116900501b53SJohannes Weiner struct mem_cgroup *memcg; 1170bde05d1cSHugh Dickins int error = 0; 1171bde05d1cSHugh Dickins 1172bde05d1cSHugh Dickins /* 1173bde05d1cSHugh Dickins * There's a faint possibility that swap page was replaced before 11740142ef6cSHugh Dickins * caller locked it: caller will come back later with the right page. 1175bde05d1cSHugh Dickins */ 11760142ef6cSHugh Dickins if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) 1177bde05d1cSHugh Dickins goto out; 1178778dd893SHugh Dickins 1179778dd893SHugh Dickins /* 1180778dd893SHugh Dickins * Charge page using GFP_KERNEL while we can wait, before taking 1181778dd893SHugh Dickins * the shmem_swaplist_mutex which might hold up shmem_writepage(). 1182778dd893SHugh Dickins * Charged back to the user (not to caller) when swap account is used. 1183778dd893SHugh Dickins */ 1184f627c2f5SKirill A. Shutemov error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg, 1185f627c2f5SKirill A. Shutemov false); 1186778dd893SHugh Dickins if (error) 1187778dd893SHugh Dickins goto out; 118846f65ec1SHugh Dickins /* No radix_tree_preload: swap entry keeps a place for page in tree */ 118900501b53SJohannes Weiner error = -EAGAIN; 11901da177e4SLinus Torvalds 1191cb5f7b9aSHugh Dickins mutex_lock(&shmem_swaplist_mutex); 119241ffe5d5SHugh Dickins list_for_each_safe(this, next, &shmem_swaplist) { 119341ffe5d5SHugh Dickins info = list_entry(this, struct shmem_inode_info, swaplist); 1194285b2c4fSHugh Dickins if (info->swapped) 119500501b53SJohannes Weiner error = shmem_unuse_inode(info, swap, &page); 11966922c0c7SHugh Dickins else 11976922c0c7SHugh Dickins list_del_init(&info->swaplist); 1198cb5f7b9aSHugh Dickins cond_resched(); 119900501b53SJohannes Weiner if (error != -EAGAIN) 1200778dd893SHugh Dickins break; 120100501b53SJohannes Weiner /* found nothing in this: move on to search the next */ 12021da177e4SLinus Torvalds } 1203cb5f7b9aSHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1204778dd893SHugh Dickins 120500501b53SJohannes Weiner if (error) { 120600501b53SJohannes Weiner if (error != -ENOMEM) 120700501b53SJohannes Weiner error = 0; 1208f627c2f5SKirill A. 
Shutemov mem_cgroup_cancel_charge(page, memcg, false); 120900501b53SJohannes Weiner } else 1210f627c2f5SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 1211778dd893SHugh Dickins out: 1212aaa46865SHugh Dickins unlock_page(page); 121309cbfeafSKirill A. Shutemov put_page(page); 1214778dd893SHugh Dickins return error; 12151da177e4SLinus Torvalds } 12161da177e4SLinus Torvalds 12171da177e4SLinus Torvalds /* 12181da177e4SLinus Torvalds * Move the page from the page cache to the swap cache. 12191da177e4SLinus Torvalds */ 12201da177e4SLinus Torvalds static int shmem_writepage(struct page *page, struct writeback_control *wbc) 12211da177e4SLinus Torvalds { 12221da177e4SLinus Torvalds struct shmem_inode_info *info; 12231da177e4SLinus Torvalds struct address_space *mapping; 12241da177e4SLinus Torvalds struct inode *inode; 12256922c0c7SHugh Dickins swp_entry_t swap; 12266922c0c7SHugh Dickins pgoff_t index; 12271da177e4SLinus Torvalds 1228800d8c63SKirill A. Shutemov VM_BUG_ON_PAGE(PageCompound(page), page); 12291da177e4SLinus Torvalds BUG_ON(!PageLocked(page)); 12301da177e4SLinus Torvalds mapping = page->mapping; 12311da177e4SLinus Torvalds index = page->index; 12321da177e4SLinus Torvalds inode = mapping->host; 12331da177e4SLinus Torvalds info = SHMEM_I(inode); 12341da177e4SLinus Torvalds if (info->flags & VM_LOCKED) 12351da177e4SLinus Torvalds goto redirty; 1236d9fe526aSHugh Dickins if (!total_swap_pages) 12371da177e4SLinus Torvalds goto redirty; 12381da177e4SLinus Torvalds 1239d9fe526aSHugh Dickins /* 124097b713baSChristoph Hellwig * Our capabilities prevent regular writeback or sync from ever calling 124197b713baSChristoph Hellwig * shmem_writepage; but a stacking filesystem might use ->writepage of 124297b713baSChristoph Hellwig * its underlying filesystem, in which case tmpfs should write out to 124397b713baSChristoph Hellwig * swap only in response to memory pressure, and not for the writeback 124497b713baSChristoph Hellwig * threads or sync. 1245d9fe526aSHugh Dickins */ 124648f170fbSHugh Dickins if (!wbc->for_reclaim) { 124748f170fbSHugh Dickins WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 124848f170fbSHugh Dickins goto redirty; 124948f170fbSHugh Dickins } 12501635f6a7SHugh Dickins 12511635f6a7SHugh Dickins /* 12521635f6a7SHugh Dickins * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 12531635f6a7SHugh Dickins * value into swapfile.c, the only way we can correctly account for a 12541635f6a7SHugh Dickins * fallocated page arriving here is now to initialize it and write it. 12551aac1400SHugh Dickins * 12561aac1400SHugh Dickins * That's okay for a page already fallocated earlier, but if we have 12571aac1400SHugh Dickins * not yet completed the fallocation, then (a) we want to keep track 12581aac1400SHugh Dickins * of this page in case we have to undo it, and (b) it may not be a 12591aac1400SHugh Dickins * good idea to continue anyway, once we're pushing into swap. So 12601aac1400SHugh Dickins * reactivate the page, and let shmem_fallocate() quit when too many. 
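 * (The nr_unswapped count bumped below is what shmem_fallocate() checks
 * to decide when "too many" has been reached.)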
12611635f6a7SHugh Dickins */ 12621635f6a7SHugh Dickins if (!PageUptodate(page)) { 12631aac1400SHugh Dickins if (inode->i_private) { 12641aac1400SHugh Dickins struct shmem_falloc *shmem_falloc; 12651aac1400SHugh Dickins spin_lock(&inode->i_lock); 12661aac1400SHugh Dickins shmem_falloc = inode->i_private; 12671aac1400SHugh Dickins if (shmem_falloc && 12688e205f77SHugh Dickins !shmem_falloc->waitq && 12691aac1400SHugh Dickins index >= shmem_falloc->start && 12701aac1400SHugh Dickins index < shmem_falloc->next) 12711aac1400SHugh Dickins shmem_falloc->nr_unswapped++; 12721aac1400SHugh Dickins else 12731aac1400SHugh Dickins shmem_falloc = NULL; 12741aac1400SHugh Dickins spin_unlock(&inode->i_lock); 12751aac1400SHugh Dickins if (shmem_falloc) 12761aac1400SHugh Dickins goto redirty; 12771aac1400SHugh Dickins } 12781635f6a7SHugh Dickins clear_highpage(page); 12791635f6a7SHugh Dickins flush_dcache_page(page); 12801635f6a7SHugh Dickins SetPageUptodate(page); 12811635f6a7SHugh Dickins } 12821635f6a7SHugh Dickins 1283d9fe526aSHugh Dickins swap = get_swap_page(); 128448f170fbSHugh Dickins if (!swap.val) 128548f170fbSHugh Dickins goto redirty; 1286d9fe526aSHugh Dickins 128737e84351SVladimir Davydov if (mem_cgroup_try_charge_swap(page, swap)) 128837e84351SVladimir Davydov goto free_swap; 128937e84351SVladimir Davydov 1290b1dea800SHugh Dickins /* 1291b1dea800SHugh Dickins * Add inode to shmem_unuse()'s list of swapped-out inodes, 12926922c0c7SHugh Dickins * if it's not already there. Do it now before the page is 12936922c0c7SHugh Dickins * moved to swap cache, when its pagelock no longer protects 1294b1dea800SHugh Dickins * the inode from eviction. But don't unlock the mutex until 12956922c0c7SHugh Dickins * we've incremented swapped, because shmem_unuse_inode() will 12966922c0c7SHugh Dickins * prune a !swapped inode from the swaplist under this mutex. 1297b1dea800SHugh Dickins */ 1298b1dea800SHugh Dickins mutex_lock(&shmem_swaplist_mutex); 129905bf86b4SHugh Dickins if (list_empty(&info->swaplist)) 130005bf86b4SHugh Dickins list_add_tail(&info->swaplist, &shmem_swaplist); 1301b1dea800SHugh Dickins 130248f170fbSHugh Dickins if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 13034595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1304267a4c76SHugh Dickins shmem_recalc_inode(inode); 1305267a4c76SHugh Dickins info->swapped++; 13064595ef88SKirill A. 
Shutemov spin_unlock_irq(&info->lock); 1307267a4c76SHugh Dickins 1308aaa46865SHugh Dickins swap_shmem_alloc(swap); 13096922c0c7SHugh Dickins shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 13106922c0c7SHugh Dickins 13116922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 1312d9fe526aSHugh Dickins BUG_ON(page_mapped(page)); 13139fab5619SHugh Dickins swap_writepage(page, wbc); 13141da177e4SLinus Torvalds return 0; 13151da177e4SLinus Torvalds } 13161da177e4SLinus Torvalds 13176922c0c7SHugh Dickins mutex_unlock(&shmem_swaplist_mutex); 131837e84351SVladimir Davydov free_swap: 13190a31bc97SJohannes Weiner swapcache_free(swap); 13201da177e4SLinus Torvalds redirty: 13211da177e4SLinus Torvalds set_page_dirty(page); 1322d9fe526aSHugh Dickins if (wbc->for_reclaim) 1323d9fe526aSHugh Dickins return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1324d9fe526aSHugh Dickins unlock_page(page); 1325d9fe526aSHugh Dickins return 0; 13261da177e4SLinus Torvalds } 13271da177e4SLinus Torvalds 132875edd345SHugh Dickins #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 132971fe804bSLee Schermerhorn static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1330680d794bSakpm@linux-foundation.org { 1331680d794bSakpm@linux-foundation.org char buffer[64]; 1332680d794bSakpm@linux-foundation.org 133371fe804bSLee Schermerhorn if (!mpol || mpol->mode == MPOL_DEFAULT) 1334095f1fc4SLee Schermerhorn return; /* show nothing */ 1335095f1fc4SLee Schermerhorn 1336a7a88b23SHugh Dickins mpol_to_str(buffer, sizeof(buffer), mpol); 1337095f1fc4SLee Schermerhorn 1338095f1fc4SLee Schermerhorn seq_printf(seq, ",mpol=%s", buffer); 1339680d794bSakpm@linux-foundation.org } 134071fe804bSLee Schermerhorn 134171fe804bSLee Schermerhorn static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 134271fe804bSLee Schermerhorn { 134371fe804bSLee Schermerhorn struct mempolicy *mpol = NULL; 134471fe804bSLee Schermerhorn if (sbinfo->mpol) { 134571fe804bSLee Schermerhorn spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 134671fe804bSLee Schermerhorn mpol = sbinfo->mpol; 134771fe804bSLee Schermerhorn mpol_get(mpol); 134871fe804bSLee Schermerhorn spin_unlock(&sbinfo->stat_lock); 134971fe804bSLee Schermerhorn } 135071fe804bSLee Schermerhorn return mpol; 135171fe804bSLee Schermerhorn } 135275edd345SHugh Dickins #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 135375edd345SHugh Dickins static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 135475edd345SHugh Dickins { 135575edd345SHugh Dickins } 135675edd345SHugh Dickins static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 135775edd345SHugh Dickins { 135875edd345SHugh Dickins return NULL; 135975edd345SHugh Dickins } 136075edd345SHugh Dickins #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 136175edd345SHugh Dickins #ifndef CONFIG_NUMA 136275edd345SHugh Dickins #define vm_policy vm_private_data 136375edd345SHugh Dickins #endif 1364680d794bSakpm@linux-foundation.org 1365800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1366800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1367800d8c63SKirill A. Shutemov { 1368800d8c63SKirill A. Shutemov /* Create a pseudo vma that just contains the policy */ 1369800d8c63SKirill A. Shutemov vma->vm_start = 0; 1370800d8c63SKirill A. Shutemov /* Bias interleave by inode number to distribute better across nodes */ 1371800d8c63SKirill A. Shutemov vma->vm_pgoff = index + info->vfs_inode.i_ino; 1372800d8c63SKirill A. 
Shutemov vma->vm_ops = NULL; 1373800d8c63SKirill A. Shutemov vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1374800d8c63SKirill A. Shutemov } 1375800d8c63SKirill A. Shutemov 1376800d8c63SKirill A. Shutemov static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1377800d8c63SKirill A. Shutemov { 1378800d8c63SKirill A. Shutemov /* Drop reference taken by mpol_shared_policy_lookup() */ 1379800d8c63SKirill A. Shutemov mpol_cond_put(vma->vm_policy); 1380800d8c63SKirill A. Shutemov } 1381800d8c63SKirill A. Shutemov 138241ffe5d5SHugh Dickins static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 138341ffe5d5SHugh Dickins struct shmem_inode_info *info, pgoff_t index) 13841da177e4SLinus Torvalds { 13851da177e4SLinus Torvalds struct vm_area_struct pvma; 138618a2f371SMel Gorman struct page *page; 13871da177e4SLinus Torvalds 1388800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 138918a2f371SMel Gorman page = swapin_readahead(swap, gfp, &pvma, 0); 1390800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 139118a2f371SMel Gorman 1392800d8c63SKirill A. Shutemov return page; 1393800d8c63SKirill A. Shutemov } 139418a2f371SMel Gorman 1395800d8c63SKirill A. Shutemov static struct page *shmem_alloc_hugepage(gfp_t gfp, 1396800d8c63SKirill A. Shutemov struct shmem_inode_info *info, pgoff_t index) 1397800d8c63SKirill A. Shutemov { 1398800d8c63SKirill A. Shutemov struct vm_area_struct pvma; 1399800d8c63SKirill A. Shutemov struct inode *inode = &info->vfs_inode; 1400800d8c63SKirill A. Shutemov struct address_space *mapping = inode->i_mapping; 14014620a06eSGeert Uytterhoeven pgoff_t idx, hindex; 1402800d8c63SKirill A. Shutemov void __rcu **results; 1403800d8c63SKirill A. Shutemov struct page *page; 1404800d8c63SKirill A. Shutemov 1405e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1406800d8c63SKirill A. Shutemov return NULL; 1407800d8c63SKirill A. Shutemov 14084620a06eSGeert Uytterhoeven hindex = round_down(index, HPAGE_PMD_NR); 1409800d8c63SKirill A. Shutemov rcu_read_lock(); 1410800d8c63SKirill A. Shutemov if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx, 1411800d8c63SKirill A. Shutemov hindex, 1) && idx < hindex + HPAGE_PMD_NR) { 1412800d8c63SKirill A. Shutemov rcu_read_unlock(); 1413800d8c63SKirill A. Shutemov return NULL; 1414800d8c63SKirill A. Shutemov } 1415800d8c63SKirill A. Shutemov rcu_read_unlock(); 1416800d8c63SKirill A. Shutemov 1417800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, hindex); 1418800d8c63SKirill A. Shutemov page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1419800d8c63SKirill A. Shutemov HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1420800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 1421800d8c63SKirill A. Shutemov if (page) 1422800d8c63SKirill A. Shutemov prep_transhuge_page(page); 142318a2f371SMel Gorman return page; 142418a2f371SMel Gorman } 142518a2f371SMel Gorman 142618a2f371SMel Gorman static struct page *shmem_alloc_page(gfp_t gfp, 142718a2f371SMel Gorman struct shmem_inode_info *info, pgoff_t index) 142818a2f371SMel Gorman { 142918a2f371SMel Gorman struct vm_area_struct pvma; 143018a2f371SMel Gorman struct page *page; 143118a2f371SMel Gorman 1432800d8c63SKirill A. Shutemov shmem_pseudo_vma_init(&pvma, info, index); 1433800d8c63SKirill A. Shutemov page = alloc_page_vma(gfp, &pvma, 0); 1434800d8c63SKirill A. Shutemov shmem_pseudo_vma_destroy(&pvma); 143518a2f371SMel Gorman 1436800d8c63SKirill A. 
Shutemov return page; 1437800d8c63SKirill A. Shutemov } 1438800d8c63SKirill A. Shutemov 1439800d8c63SKirill A. Shutemov static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 1440800d8c63SKirill A. Shutemov struct shmem_inode_info *info, struct shmem_sb_info *sbinfo, 1441800d8c63SKirill A. Shutemov pgoff_t index, bool huge) 1442800d8c63SKirill A. Shutemov { 1443800d8c63SKirill A. Shutemov struct page *page; 1444800d8c63SKirill A. Shutemov int nr; 1445800d8c63SKirill A. Shutemov int err = -ENOSPC; 1446800d8c63SKirill A. Shutemov 1447e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1448800d8c63SKirill A. Shutemov huge = false; 1449800d8c63SKirill A. Shutemov nr = huge ? HPAGE_PMD_NR : 1; 1450800d8c63SKirill A. Shutemov 1451800d8c63SKirill A. Shutemov if (shmem_acct_block(info->flags, nr)) 1452800d8c63SKirill A. Shutemov goto failed; 1453800d8c63SKirill A. Shutemov if (sbinfo->max_blocks) { 1454800d8c63SKirill A. Shutemov if (percpu_counter_compare(&sbinfo->used_blocks, 1455800d8c63SKirill A. Shutemov sbinfo->max_blocks - nr) > 0) 1456800d8c63SKirill A. Shutemov goto unacct; 1457800d8c63SKirill A. Shutemov percpu_counter_add(&sbinfo->used_blocks, nr); 1458800d8c63SKirill A. Shutemov } 1459800d8c63SKirill A. Shutemov 1460800d8c63SKirill A. Shutemov if (huge) 1461800d8c63SKirill A. Shutemov page = shmem_alloc_hugepage(gfp, info, index); 1462800d8c63SKirill A. Shutemov else 1463800d8c63SKirill A. Shutemov page = shmem_alloc_page(gfp, info, index); 146475edd345SHugh Dickins if (page) { 146575edd345SHugh Dickins __SetPageLocked(page); 146675edd345SHugh Dickins __SetPageSwapBacked(page); 1467800d8c63SKirill A. Shutemov return page; 146875edd345SHugh Dickins } 146918a2f371SMel Gorman 1470800d8c63SKirill A. Shutemov err = -ENOMEM; 1471800d8c63SKirill A. Shutemov if (sbinfo->max_blocks) 1472800d8c63SKirill A. Shutemov percpu_counter_add(&sbinfo->used_blocks, -nr); 1473800d8c63SKirill A. Shutemov unacct: 1474800d8c63SKirill A. Shutemov shmem_unacct_blocks(info->flags, nr); 1475800d8c63SKirill A. Shutemov failed: 1476800d8c63SKirill A. Shutemov return ERR_PTR(err); 14771da177e4SLinus Torvalds } 147871fe804bSLee Schermerhorn 14791da177e4SLinus Torvalds /* 1480bde05d1cSHugh Dickins * When a page is moved from swapcache to shmem filecache (either by the 1481bde05d1cSHugh Dickins * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1482bde05d1cSHugh Dickins * shmem_unuse_inode()), it may have been read in earlier from swap, in 1483bde05d1cSHugh Dickins * ignorance of the mapping it belongs to. If that mapping has special 1484bde05d1cSHugh Dickins * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1485bde05d1cSHugh Dickins * we may need to copy to a suitable page before moving to filecache. 1486bde05d1cSHugh Dickins * 1487bde05d1cSHugh Dickins * In a future release, this may well be extended to respect cpuset and 1488bde05d1cSHugh Dickins * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1489bde05d1cSHugh Dickins * but for now it is a simple matter of zone. 
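 * Example: on a 32-bit machine the page may have been read into
 * ZONE_HIGHMEM, while this mapping's gfp mask demands lowmem; then
 * page_zonenum(page) > gfp_zone(gfp) below, and we copy the page down.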
1490bde05d1cSHugh Dickins */ 1491bde05d1cSHugh Dickins static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1492bde05d1cSHugh Dickins { 1493bde05d1cSHugh Dickins return page_zonenum(page) > gfp_zone(gfp); 1494bde05d1cSHugh Dickins } 1495bde05d1cSHugh Dickins 1496bde05d1cSHugh Dickins static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1497bde05d1cSHugh Dickins struct shmem_inode_info *info, pgoff_t index) 1498bde05d1cSHugh Dickins { 1499bde05d1cSHugh Dickins struct page *oldpage, *newpage; 1500bde05d1cSHugh Dickins struct address_space *swap_mapping; 1501bde05d1cSHugh Dickins pgoff_t swap_index; 1502bde05d1cSHugh Dickins int error; 1503bde05d1cSHugh Dickins 1504bde05d1cSHugh Dickins oldpage = *pagep; 1505bde05d1cSHugh Dickins swap_index = page_private(oldpage); 1506bde05d1cSHugh Dickins swap_mapping = page_mapping(oldpage); 1507bde05d1cSHugh Dickins 1508bde05d1cSHugh Dickins /* 1509bde05d1cSHugh Dickins * We have arrived here because our zones are constrained, so don't 1510bde05d1cSHugh Dickins * limit chance of success by further cpuset and node constraints. 1511bde05d1cSHugh Dickins */ 1512bde05d1cSHugh Dickins gfp &= ~GFP_CONSTRAINT_MASK; 1513bde05d1cSHugh Dickins newpage = shmem_alloc_page(gfp, info, index); 1514bde05d1cSHugh Dickins if (!newpage) 1515bde05d1cSHugh Dickins return -ENOMEM; 1516bde05d1cSHugh Dickins 151709cbfeafSKirill A. Shutemov get_page(newpage); 1518bde05d1cSHugh Dickins copy_highpage(newpage, oldpage); 15190142ef6cSHugh Dickins flush_dcache_page(newpage); 1520bde05d1cSHugh Dickins 15219956edf3SHugh Dickins __SetPageLocked(newpage); 15229956edf3SHugh Dickins __SetPageSwapBacked(newpage); 1523bde05d1cSHugh Dickins SetPageUptodate(newpage); 1524bde05d1cSHugh Dickins set_page_private(newpage, swap_index); 1525bde05d1cSHugh Dickins SetPageSwapCache(newpage); 1526bde05d1cSHugh Dickins 1527bde05d1cSHugh Dickins /* 1528bde05d1cSHugh Dickins * Our caller will very soon move newpage out of swapcache, but it's 1529bde05d1cSHugh Dickins * a nice clean interface for us to replace oldpage by newpage there. 1530bde05d1cSHugh Dickins */ 1531bde05d1cSHugh Dickins spin_lock_irq(&swap_mapping->tree_lock); 1532bde05d1cSHugh Dickins error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, 1533bde05d1cSHugh Dickins newpage); 15340142ef6cSHugh Dickins if (!error) { 153511fb9989SMel Gorman __inc_node_page_state(newpage, NR_FILE_PAGES); 153611fb9989SMel Gorman __dec_node_page_state(oldpage, NR_FILE_PAGES); 15370142ef6cSHugh Dickins } 1538bde05d1cSHugh Dickins spin_unlock_irq(&swap_mapping->tree_lock); 1539bde05d1cSHugh Dickins 15400142ef6cSHugh Dickins if (unlikely(error)) { 15410142ef6cSHugh Dickins /* 15420142ef6cSHugh Dickins * Is this possible? I think not, now that our callers check 15430142ef6cSHugh Dickins * both PageSwapCache and page_private after getting page lock; 15440142ef6cSHugh Dickins * but be defensive. Reverse old to newpage for clear and free. 15450142ef6cSHugh Dickins */ 15460142ef6cSHugh Dickins oldpage = newpage; 15470142ef6cSHugh Dickins } else { 15486a93ca8fSJohannes Weiner mem_cgroup_migrate(oldpage, newpage); 1549bde05d1cSHugh Dickins lru_cache_add_anon(newpage); 15500142ef6cSHugh Dickins *pagep = newpage; 15510142ef6cSHugh Dickins } 1552bde05d1cSHugh Dickins 1553bde05d1cSHugh Dickins ClearPageSwapCache(oldpage); 1554bde05d1cSHugh Dickins set_page_private(oldpage, 0); 1555bde05d1cSHugh Dickins 1556bde05d1cSHugh Dickins unlock_page(oldpage); 155709cbfeafSKirill A. Shutemov put_page(oldpage); 155809cbfeafSKirill A. 
Shutemov put_page(oldpage); 15590142ef6cSHugh Dickins return error; 1560bde05d1cSHugh Dickins } 1561bde05d1cSHugh Dickins 1562bde05d1cSHugh Dickins /* 156368da9f05SHugh Dickins * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 15641da177e4SLinus Torvalds * 15651da177e4SLinus Torvalds * If we allocate a new one we do not mark it dirty. That's up to the 15661da177e4SLinus Torvalds * vm. If we swap it in we mark it dirty since we also free the swap 15679e18eb29SAndres Lagar-Cavilla * entry since a page cannot live in both the swap and page cache. 15689e18eb29SAndres Lagar-Cavilla * 15699e18eb29SAndres Lagar-Cavilla * fault_mm and fault_type are only supplied by shmem_fault: 15709e18eb29SAndres Lagar-Cavilla * otherwise they are NULL. 15711da177e4SLinus Torvalds */ 157241ffe5d5SHugh Dickins static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 15739e18eb29SAndres Lagar-Cavilla struct page **pagep, enum sgp_type sgp, gfp_t gfp, 15749e18eb29SAndres Lagar-Cavilla struct mm_struct *fault_mm, int *fault_type) 15751da177e4SLinus Torvalds { 15761da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 157723f919d4SArnd Bergmann struct shmem_inode_info *info = SHMEM_I(inode); 15781da177e4SLinus Torvalds struct shmem_sb_info *sbinfo; 15799e18eb29SAndres Lagar-Cavilla struct mm_struct *charge_mm; 158000501b53SJohannes Weiner struct mem_cgroup *memcg; 158127ab7006SHugh Dickins struct page *page; 15821da177e4SLinus Torvalds swp_entry_t swap; 1583657e3038SKirill A. Shutemov enum sgp_type sgp_huge = sgp; 1584800d8c63SKirill A. Shutemov pgoff_t hindex = index; 15851da177e4SLinus Torvalds int error; 158654af6042SHugh Dickins int once = 0; 15871635f6a7SHugh Dickins int alloced = 0; 15881da177e4SLinus Torvalds 158909cbfeafSKirill A. Shutemov if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 15901da177e4SLinus Torvalds return -EFBIG; 1591657e3038SKirill A. Shutemov if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) 1592657e3038SKirill A. Shutemov sgp = SGP_CACHE; 15931da177e4SLinus Torvalds repeat: 159454af6042SHugh Dickins swap.val = 0; 15950cd6144aSJohannes Weiner page = find_lock_entry(mapping, index); 159654af6042SHugh Dickins if (radix_tree_exceptional_entry(page)) { 159754af6042SHugh Dickins swap = radix_to_swp_entry(page); 159854af6042SHugh Dickins page = NULL; 159954af6042SHugh Dickins } 160054af6042SHugh Dickins 160175edd345SHugh Dickins if (sgp <= SGP_CACHE && 160209cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 160354af6042SHugh Dickins error = -EINVAL; 1604267a4c76SHugh Dickins goto unlock; 160554af6042SHugh Dickins } 160654af6042SHugh Dickins 160766d2f4d2SHugh Dickins if (page && sgp == SGP_WRITE) 160866d2f4d2SHugh Dickins mark_page_accessed(page); 160966d2f4d2SHugh Dickins 16101635f6a7SHugh Dickins /* fallocated page? */ 16111635f6a7SHugh Dickins if (page && !PageUptodate(page)) { 16121635f6a7SHugh Dickins if (sgp != SGP_READ) 16131635f6a7SHugh Dickins goto clear; 16141635f6a7SHugh Dickins unlock_page(page); 161509cbfeafSKirill A. Shutemov put_page(page); 16161635f6a7SHugh Dickins page = NULL; 16171635f6a7SHugh Dickins } 161854af6042SHugh Dickins if (page || (sgp == SGP_READ && !swap.val)) { 161954af6042SHugh Dickins *pagep = page; 162054af6042SHugh Dickins return 0; 162127ab7006SHugh Dickins } 162227ab7006SHugh Dickins 1623b409f9fcSHugh Dickins /* 162454af6042SHugh Dickins * Fast cache lookup did not find it: 162554af6042SHugh Dickins * bring it back from swap or allocate. 
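 * (A swap entry found in the radix tree leaves swap.val set, taking the
 * swapin path below; otherwise we fall through to allocating a new page.)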
1626b409f9fcSHugh Dickins */ 162754af6042SHugh Dickins sbinfo = SHMEM_SB(inode->i_sb); 16289e18eb29SAndres Lagar-Cavilla charge_mm = fault_mm ? : current->mm; 162927ab7006SHugh Dickins 16301da177e4SLinus Torvalds if (swap.val) { 16311da177e4SLinus Torvalds /* Look it up and read it in.. */ 163227ab7006SHugh Dickins page = lookup_swap_cache(swap); 163327ab7006SHugh Dickins if (!page) { 16349e18eb29SAndres Lagar-Cavilla /* Or update major stats only when swapin succeeds?? */ 16359e18eb29SAndres Lagar-Cavilla if (fault_type) { 163668da9f05SHugh Dickins *fault_type |= VM_FAULT_MAJOR; 16379e18eb29SAndres Lagar-Cavilla count_vm_event(PGMAJFAULT); 16389e18eb29SAndres Lagar-Cavilla mem_cgroup_count_vm_event(fault_mm, PGMAJFAULT); 16399e18eb29SAndres Lagar-Cavilla } 16409e18eb29SAndres Lagar-Cavilla /* Here we actually start the io */ 164141ffe5d5SHugh Dickins page = shmem_swapin(swap, gfp, info, index); 164227ab7006SHugh Dickins if (!page) { 16431da177e4SLinus Torvalds error = -ENOMEM; 164454af6042SHugh Dickins goto failed; 1645285b2c4fSHugh Dickins } 16461da177e4SLinus Torvalds } 16471da177e4SLinus Torvalds 16481da177e4SLinus Torvalds /* We have to do this with page locked to prevent races */ 164954af6042SHugh Dickins lock_page(page); 16500142ef6cSHugh Dickins if (!PageSwapCache(page) || page_private(page) != swap.val || 1651d1899228SHugh Dickins !shmem_confirm_swap(mapping, index, swap)) { 1652bde05d1cSHugh Dickins error = -EEXIST; /* try again */ 1653d1899228SHugh Dickins goto unlock; 1654bde05d1cSHugh Dickins } 165527ab7006SHugh Dickins if (!PageUptodate(page)) { 16561da177e4SLinus Torvalds error = -EIO; 165754af6042SHugh Dickins goto failed; 165854af6042SHugh Dickins } 165954af6042SHugh Dickins wait_on_page_writeback(page); 166054af6042SHugh Dickins 1661bde05d1cSHugh Dickins if (shmem_should_replace_page(page, gfp)) { 1662bde05d1cSHugh Dickins error = shmem_replace_page(&page, gfp, info, index); 1663bde05d1cSHugh Dickins if (error) 166454af6042SHugh Dickins goto failed; 16651da177e4SLinus Torvalds } 16661da177e4SLinus Torvalds 16679e18eb29SAndres Lagar-Cavilla error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, 1668f627c2f5SKirill A. Shutemov false); 1669d1899228SHugh Dickins if (!error) { 167054af6042SHugh Dickins error = shmem_add_to_page_cache(page, mapping, index, 1671fed400a1SWang Sheng-Hui swp_to_radix_entry(swap)); 1672215c02bcSHugh Dickins /* 1673215c02bcSHugh Dickins * We already confirmed swap under page lock, and make 1674215c02bcSHugh Dickins * no memory allocation here, so usually no possibility 1675215c02bcSHugh Dickins * of error; but free_swap_and_cache() only trylocks a 1676215c02bcSHugh Dickins * page, so it is just possible that the entry has been 1677215c02bcSHugh Dickins * truncated or holepunched since swap was confirmed. 1678215c02bcSHugh Dickins * shmem_undo_range() will have done some of the 1679215c02bcSHugh Dickins * unaccounting, now delete_from_swap_cache() will do 168093aa7d95SVladimir Davydov * the rest. 1681215c02bcSHugh Dickins * Reset swap.val? No, leave it so "failed" goes back to 1682215c02bcSHugh Dickins * "repeat": reading a hole and writing should succeed. 1683215c02bcSHugh Dickins */ 168400501b53SJohannes Weiner if (error) { 1685f627c2f5SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, false); 1686215c02bcSHugh Dickins delete_from_swap_cache(page); 1687d1899228SHugh Dickins } 168800501b53SJohannes Weiner } 168954af6042SHugh Dickins if (error) 169054af6042SHugh Dickins goto failed; 169154af6042SHugh Dickins 1692f627c2f5SKirill A. 
Shutemov mem_cgroup_commit_charge(page, memcg, true, false); 169300501b53SJohannes Weiner 16944595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 169554af6042SHugh Dickins info->swapped--; 169654af6042SHugh Dickins shmem_recalc_inode(inode); 16974595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 169827ab7006SHugh Dickins 169966d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 170066d2f4d2SHugh Dickins mark_page_accessed(page); 170166d2f4d2SHugh Dickins 170227ab7006SHugh Dickins delete_from_swap_cache(page); 170327ab7006SHugh Dickins set_page_dirty(page); 170427ab7006SHugh Dickins swap_free(swap); 170527ab7006SHugh Dickins 170654af6042SHugh Dickins } else { 1707800d8c63SKirill A. Shutemov /* shmem_symlink() */ 1708800d8c63SKirill A. Shutemov if (mapping->a_ops != &shmem_aops) 1709800d8c63SKirill A. Shutemov goto alloc_nohuge; 1710657e3038SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 1711800d8c63SKirill A. Shutemov goto alloc_nohuge; 1712800d8c63SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 1713800d8c63SKirill A. Shutemov goto alloc_huge; 1714800d8c63SKirill A. Shutemov switch (sbinfo->huge) { 1715800d8c63SKirill A. Shutemov loff_t i_size; 1716800d8c63SKirill A. Shutemov pgoff_t off; 1717800d8c63SKirill A. Shutemov case SHMEM_HUGE_NEVER: 1718800d8c63SKirill A. Shutemov goto alloc_nohuge; 1719800d8c63SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 1720800d8c63SKirill A. Shutemov off = round_up(index, HPAGE_PMD_NR); 1721800d8c63SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 1722800d8c63SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 1723800d8c63SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 1724800d8c63SKirill A. Shutemov goto alloc_huge; 1725800d8c63SKirill A. Shutemov /* fallthrough */ 1726800d8c63SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 1727657e3038SKirill A. Shutemov if (sgp_huge == SGP_HUGE) 1728657e3038SKirill A. Shutemov goto alloc_huge; 1729657e3038SKirill A. Shutemov /* TODO: implement fadvise() hints */ 1730800d8c63SKirill A. Shutemov goto alloc_nohuge; 173159a16eadSHugh Dickins } 17321da177e4SLinus Torvalds 1733800d8c63SKirill A. Shutemov alloc_huge: 1734800d8c63SKirill A. Shutemov page = shmem_alloc_and_acct_page(gfp, info, sbinfo, 1735800d8c63SKirill A. Shutemov index, true); 1736800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1737800d8c63SKirill A. Shutemov alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, 1738800d8c63SKirill A. Shutemov index, false); 173954af6042SHugh Dickins } 1740800d8c63SKirill A. Shutemov if (IS_ERR(page)) { 1741779750d2SKirill A. Shutemov int retry = 5; 1742800d8c63SKirill A. Shutemov error = PTR_ERR(page); 1743800d8c63SKirill A. Shutemov page = NULL; 1744779750d2SKirill A. Shutemov if (error != -ENOSPC) 1745779750d2SKirill A. Shutemov goto failed; 1746779750d2SKirill A. Shutemov /* 1747779750d2SKirill A. Shutemov * Try to reclaim some space by splitting a huge page 1748779750d2SKirill A. Shutemov * beyond i_size on the filesystem. 1749779750d2SKirill A. Shutemov */ 1750779750d2SKirill A. Shutemov while (retry--) { 1751779750d2SKirill A. Shutemov int ret; 1752779750d2SKirill A. Shutemov ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 1753779750d2SKirill A. Shutemov if (ret == SHRINK_STOP) 1754779750d2SKirill A. Shutemov break; 1755779750d2SKirill A. Shutemov if (ret) 1756779750d2SKirill A. Shutemov goto alloc_nohuge; 1757779750d2SKirill A. Shutemov } 1758800d8c63SKirill A. Shutemov goto failed; 1759800d8c63SKirill A. Shutemov } 1760800d8c63SKirill A.
Shutemov 1761800d8c63SKirill A. Shutemov if (PageTransHuge(page)) 1762800d8c63SKirill A. Shutemov hindex = round_down(index, HPAGE_PMD_NR); 1763800d8c63SKirill A. Shutemov else 1764800d8c63SKirill A. Shutemov hindex = index; 1765800d8c63SKirill A. Shutemov 176666d2f4d2SHugh Dickins if (sgp == SGP_WRITE) 1767eb39d618SHugh Dickins __SetPageReferenced(page); 176866d2f4d2SHugh Dickins 17699e18eb29SAndres Lagar-Cavilla error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, 1770800d8c63SKirill A. Shutemov PageTransHuge(page)); 177154af6042SHugh Dickins if (error) 1772800d8c63SKirill A. Shutemov goto unacct; 1773800d8c63SKirill A. Shutemov error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK, 1774800d8c63SKirill A. Shutemov compound_order(page)); 1775b065b432SHugh Dickins if (!error) { 1776800d8c63SKirill A. Shutemov error = shmem_add_to_page_cache(page, mapping, hindex, 1777fed400a1SWang Sheng-Hui NULL); 1778b065b432SHugh Dickins radix_tree_preload_end(); 1779b065b432SHugh Dickins } 1780b065b432SHugh Dickins if (error) { 1781800d8c63SKirill A. Shutemov mem_cgroup_cancel_charge(page, memcg, 1782800d8c63SKirill A. Shutemov PageTransHuge(page)); 1783800d8c63SKirill A. Shutemov goto unacct; 1784b065b432SHugh Dickins } 1785800d8c63SKirill A. Shutemov mem_cgroup_commit_charge(page, memcg, false, 1786800d8c63SKirill A. Shutemov PageTransHuge(page)); 178754af6042SHugh Dickins lru_cache_add_anon(page); 178854af6042SHugh Dickins 17894595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1790800d8c63SKirill A. Shutemov info->alloced += 1 << compound_order(page); 1791800d8c63SKirill A. Shutemov inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 179254af6042SHugh Dickins shmem_recalc_inode(inode); 17934595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 17941635f6a7SHugh Dickins alloced = true; 179554af6042SHugh Dickins 1796779750d2SKirill A. Shutemov if (PageTransHuge(page) && 1797779750d2SKirill A. Shutemov DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 1798779750d2SKirill A. Shutemov hindex + HPAGE_PMD_NR - 1) { 1799779750d2SKirill A. Shutemov /* 1800779750d2SKirill A. Shutemov * Part of the huge page is beyond i_size: subject 1801779750d2SKirill A. Shutemov * to shrink under memory pressure. 1802779750d2SKirill A. Shutemov */ 1803779750d2SKirill A. Shutemov spin_lock(&sbinfo->shrinklist_lock); 1804779750d2SKirill A. Shutemov if (list_empty(&info->shrinklist)) { 1805779750d2SKirill A. Shutemov list_add_tail(&info->shrinklist, 1806779750d2SKirill A. Shutemov &sbinfo->shrinklist); 1807779750d2SKirill A. Shutemov sbinfo->shrinklist_len++; 1808779750d2SKirill A. Shutemov } 1809779750d2SKirill A. Shutemov spin_unlock(&sbinfo->shrinklist_lock); 1810779750d2SKirill A. Shutemov } 1811779750d2SKirill A. Shutemov 1812ec9516fbSHugh Dickins /* 18131635f6a7SHugh Dickins * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 18141635f6a7SHugh Dickins */ 18151635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 18161635f6a7SHugh Dickins sgp = SGP_WRITE; 18171635f6a7SHugh Dickins clear: 18181635f6a7SHugh Dickins /* 18191635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 18201635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 18211635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1822ec9516fbSHugh Dickins */ 1823800d8c63SKirill A. Shutemov if (sgp != SGP_WRITE && !PageUptodate(page)) { 1824800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 1825800d8c63SKirill A. 
Shutemov int i; 1826800d8c63SKirill A. Shutemov 1827800d8c63SKirill A. Shutemov for (i = 0; i < (1 << compound_order(head)); i++) { 1828800d8c63SKirill A. Shutemov clear_highpage(head + i); 1829800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 1830800d8c63SKirill A. Shutemov } 1831800d8c63SKirill A. Shutemov SetPageUptodate(head); 1832ec9516fbSHugh Dickins } 18331da177e4SLinus Torvalds } 1834bde05d1cSHugh Dickins 183554af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 183675edd345SHugh Dickins if (sgp <= SGP_CACHE && 183709cbfeafSKirill A. Shutemov ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 1838267a4c76SHugh Dickins if (alloced) { 1839267a4c76SHugh Dickins ClearPageDirty(page); 1840267a4c76SHugh Dickins delete_from_page_cache(page); 18414595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 1842267a4c76SHugh Dickins shmem_recalc_inode(inode); 18434595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 1844267a4c76SHugh Dickins } 184554af6042SHugh Dickins error = -EINVAL; 1846267a4c76SHugh Dickins goto unlock; 1847ff36b801SShaohua Li } 1848800d8c63SKirill A. Shutemov *pagep = page + index - hindex; 184954af6042SHugh Dickins return 0; 1850d00806b1SNick Piggin 1851d0217ac0SNick Piggin /* 185254af6042SHugh Dickins * Error recovery. 18531da177e4SLinus Torvalds */ 185454af6042SHugh Dickins unacct: 1855800d8c63SKirill A. Shutemov if (sbinfo->max_blocks) 1856800d8c63SKirill A. Shutemov percpu_counter_sub(&sbinfo->used_blocks, 1857800d8c63SKirill A. Shutemov 1 << compound_order(page)); 1858800d8c63SKirill A. Shutemov shmem_unacct_blocks(info->flags, 1 << compound_order(page)); 1859800d8c63SKirill A. Shutemov 1860800d8c63SKirill A. Shutemov if (PageTransHuge(page)) { 1861800d8c63SKirill A. Shutemov unlock_page(page); 1862800d8c63SKirill A. Shutemov put_page(page); 1863800d8c63SKirill A. Shutemov goto alloc_nohuge; 1864800d8c63SKirill A. Shutemov } 186554af6042SHugh Dickins failed: 1866267a4c76SHugh Dickins if (swap.val && !shmem_confirm_swap(mapping, index, swap)) 186754af6042SHugh Dickins error = -EEXIST; 1868d1899228SHugh Dickins unlock: 186927ab7006SHugh Dickins if (page) { 187054af6042SHugh Dickins unlock_page(page); 187109cbfeafSKirill A. Shutemov put_page(page); 187254af6042SHugh Dickins } 187354af6042SHugh Dickins if (error == -ENOSPC && !once++) { 18744595ef88SKirill A. Shutemov spin_lock_irq(&info->lock); 187554af6042SHugh Dickins shmem_recalc_inode(inode); 18764595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 18771da177e4SLinus Torvalds goto repeat; 1878d8dc74f2SAdrian Bunk } 1879d1899228SHugh Dickins if (error == -EEXIST) /* from above or from radix_tree_insert */ 188054af6042SHugh Dickins goto repeat; 188154af6042SHugh Dickins return error; 18821da177e4SLinus Torvalds } 18831da177e4SLinus Torvalds 188410d20bd2SLinus Torvalds /* 188510d20bd2SLinus Torvalds * This is like autoremove_wake_function, but it removes the wait queue 188610d20bd2SLinus Torvalds * entry unconditionally - even if something else had already woken the 188710d20bd2SLinus Torvalds * target. 
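 * Used by shmem_fault() below when waiting for a hole-punch to finish:
 * the wait queue head lives on the hole-puncher's stack, so entries must
 * not be left queued once their owner has been woken.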
188810d20bd2SLinus Torvalds */ 188910d20bd2SLinus Torvalds static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 189010d20bd2SLinus Torvalds { 189110d20bd2SLinus Torvalds int ret = default_wake_function(wait, mode, sync, key); 189210d20bd2SLinus Torvalds list_del_init(&wait->task_list); 189310d20bd2SLinus Torvalds return ret; 189410d20bd2SLinus Torvalds } 189510d20bd2SLinus Torvalds 18961da177e4SLinus Torvalds static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 18971da177e4SLinus Torvalds { 1898496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 18999e18eb29SAndres Lagar-Cavilla gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 1900657e3038SKirill A. Shutemov enum sgp_type sgp; 19011da177e4SLinus Torvalds int error; 190268da9f05SHugh Dickins int ret = VM_FAULT_LOCKED; 19031da177e4SLinus Torvalds 1904f00cdc6dSHugh Dickins /* 1905f00cdc6dSHugh Dickins * Trinity finds that probing a hole which tmpfs is punching can 1906f00cdc6dSHugh Dickins * prevent the hole-punch from ever completing: which in turn 1907f00cdc6dSHugh Dickins * locks writers out with its hold on i_mutex. So refrain from 19088e205f77SHugh Dickins * faulting pages into the hole while it's being punched. Although 19098e205f77SHugh Dickins * shmem_undo_range() does remove the additions, it may be unable to 19108e205f77SHugh Dickins * keep up, as each new page needs its own unmap_mapping_range() call, 19118e205f77SHugh Dickins * and the i_mmap tree grows ever slower to scan if new vmas are added. 19128e205f77SHugh Dickins * 19138e205f77SHugh Dickins * It does not matter if we sometimes reach this check just before the 19148e205f77SHugh Dickins * hole-punch begins, so that one fault then races with the punch: 19158e205f77SHugh Dickins * we just need to make racing faults a rare case. 19168e205f77SHugh Dickins * 19178e205f77SHugh Dickins * The implementation below would be much simpler if we just used a 19188e205f77SHugh Dickins * standard mutex or completion: but we cannot take i_mutex in fault, 19198e205f77SHugh Dickins * and bloating every shmem inode for this unlikely case would be sad. 
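 * (shmem_fallocate() publishes its on-stack shmem_falloc, including the
 * waitq, via inode->i_private for the duration of the punch; that is
 * what the checks below look for.)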
1920f00cdc6dSHugh Dickins */ 1921f00cdc6dSHugh Dickins if (unlikely(inode->i_private)) { 1922f00cdc6dSHugh Dickins struct shmem_falloc *shmem_falloc; 1923f00cdc6dSHugh Dickins 1924f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 1925f00cdc6dSHugh Dickins shmem_falloc = inode->i_private; 19268e205f77SHugh Dickins if (shmem_falloc && 19278e205f77SHugh Dickins shmem_falloc->waitq && 19288e205f77SHugh Dickins vmf->pgoff >= shmem_falloc->start && 19298e205f77SHugh Dickins vmf->pgoff < shmem_falloc->next) { 19308e205f77SHugh Dickins wait_queue_head_t *shmem_falloc_waitq; 193110d20bd2SLinus Torvalds DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); 19328e205f77SHugh Dickins 19338e205f77SHugh Dickins ret = VM_FAULT_NOPAGE; 1934f00cdc6dSHugh Dickins if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1935f00cdc6dSHugh Dickins !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 19368e205f77SHugh Dickins /* It's polite to up mmap_sem if we can */ 1937f00cdc6dSHugh Dickins up_read(&vma->vm_mm->mmap_sem); 19388e205f77SHugh Dickins ret = VM_FAULT_RETRY; 1939f00cdc6dSHugh Dickins } 19408e205f77SHugh Dickins 19418e205f77SHugh Dickins shmem_falloc_waitq = shmem_falloc->waitq; 19428e205f77SHugh Dickins prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, 19438e205f77SHugh Dickins TASK_UNINTERRUPTIBLE); 19448e205f77SHugh Dickins spin_unlock(&inode->i_lock); 19458e205f77SHugh Dickins schedule(); 19468e205f77SHugh Dickins 19478e205f77SHugh Dickins /* 19488e205f77SHugh Dickins * shmem_falloc_waitq points into the shmem_fallocate() 19498e205f77SHugh Dickins * stack of the hole-punching task: shmem_falloc_waitq 19508e205f77SHugh Dickins * is usually invalid by the time we reach here, but 19518e205f77SHugh Dickins * finish_wait() does not dereference it in that case; 19528e205f77SHugh Dickins * though i_lock needed lest racing with wake_up_all(). 19538e205f77SHugh Dickins */ 19548e205f77SHugh Dickins spin_lock(&inode->i_lock); 19558e205f77SHugh Dickins finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 19568e205f77SHugh Dickins spin_unlock(&inode->i_lock); 19578e205f77SHugh Dickins return ret; 1958f00cdc6dSHugh Dickins } 19598e205f77SHugh Dickins spin_unlock(&inode->i_lock); 1960f00cdc6dSHugh Dickins } 1961f00cdc6dSHugh Dickins 1962657e3038SKirill A. Shutemov sgp = SGP_CACHE; 1963657e3038SKirill A. Shutemov if (vma->vm_flags & VM_HUGEPAGE) 1964657e3038SKirill A. Shutemov sgp = SGP_HUGE; 1965657e3038SKirill A. Shutemov else if (vma->vm_flags & VM_NOHUGEPAGE) 1966657e3038SKirill A. Shutemov sgp = SGP_NOHUGE; 1967657e3038SKirill A. Shutemov 1968657e3038SKirill A. Shutemov error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 19699e18eb29SAndres Lagar-Cavilla gfp, vma->vm_mm, &ret); 19701da177e4SLinus Torvalds if (error) 19711da177e4SLinus Torvalds return ((error == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS); 197268da9f05SHugh Dickins return ret; 19731da177e4SLinus Torvalds } 19741da177e4SLinus Torvalds 1975c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file, 1976c01d5b30SHugh Dickins unsigned long uaddr, unsigned long len, 1977c01d5b30SHugh Dickins unsigned long pgoff, unsigned long flags) 1978c01d5b30SHugh Dickins { 1979c01d5b30SHugh Dickins unsigned long (*get_area)(struct file *, 1980c01d5b30SHugh Dickins unsigned long, unsigned long, unsigned long, unsigned long); 1981c01d5b30SHugh Dickins unsigned long addr; 1982c01d5b30SHugh Dickins unsigned long offset; 1983c01d5b30SHugh Dickins unsigned long inflated_len; 1984c01d5b30SHugh Dickins unsigned long inflated_addr; 1985c01d5b30SHugh Dickins unsigned long inflated_offset; 1986c01d5b30SHugh Dickins 1987c01d5b30SHugh Dickins if (len > TASK_SIZE) 1988c01d5b30SHugh Dickins return -ENOMEM; 1989c01d5b30SHugh Dickins 1990c01d5b30SHugh Dickins get_area = current->mm->get_unmapped_area; 1991c01d5b30SHugh Dickins addr = get_area(file, uaddr, len, pgoff, flags); 1992c01d5b30SHugh Dickins 1993e496cf3dSKirill A. Shutemov if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1994c01d5b30SHugh Dickins return addr; 1995c01d5b30SHugh Dickins if (IS_ERR_VALUE(addr)) 1996c01d5b30SHugh Dickins return addr; 1997c01d5b30SHugh Dickins if (addr & ~PAGE_MASK) 1998c01d5b30SHugh Dickins return addr; 1999c01d5b30SHugh Dickins if (addr > TASK_SIZE - len) 2000c01d5b30SHugh Dickins return addr; 2001c01d5b30SHugh Dickins 2002c01d5b30SHugh Dickins if (shmem_huge == SHMEM_HUGE_DENY) 2003c01d5b30SHugh Dickins return addr; 2004c01d5b30SHugh Dickins if (len < HPAGE_PMD_SIZE) 2005c01d5b30SHugh Dickins return addr; 2006c01d5b30SHugh Dickins if (flags & MAP_FIXED) 2007c01d5b30SHugh Dickins return addr; 2008c01d5b30SHugh Dickins /* 2009c01d5b30SHugh Dickins * Our priority is to support MAP_SHARED mapped hugely; 2010c01d5b30SHugh Dickins * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2011c01d5b30SHugh Dickins * But if caller specified an address hint, respect that as before. 2012c01d5b30SHugh Dickins */ 2013c01d5b30SHugh Dickins if (uaddr) 2014c01d5b30SHugh Dickins return addr; 2015c01d5b30SHugh Dickins 2016c01d5b30SHugh Dickins if (shmem_huge != SHMEM_HUGE_FORCE) { 2017c01d5b30SHugh Dickins struct super_block *sb; 2018c01d5b30SHugh Dickins 2019c01d5b30SHugh Dickins if (file) { 2020c01d5b30SHugh Dickins VM_BUG_ON(file->f_op != &shmem_file_operations); 2021c01d5b30SHugh Dickins sb = file_inode(file)->i_sb; 2022c01d5b30SHugh Dickins } else { 2023c01d5b30SHugh Dickins /* 2024c01d5b30SHugh Dickins * Called directly from mm/mmap.c, or drivers/char/mem.c 2025c01d5b30SHugh Dickins * for "/dev/zero", to create a shared anonymous object. 
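 * (shm_mnt may still be an ERR_PTR here if the internal mount failed
 * to initialize, hence the IS_ERR() check before it is dereferenced.)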
2026c01d5b30SHugh Dickins */ 2027c01d5b30SHugh Dickins if (IS_ERR(shm_mnt)) 2028c01d5b30SHugh Dickins return addr; 2029c01d5b30SHugh Dickins sb = shm_mnt->mnt_sb; 2030c01d5b30SHugh Dickins } 20313089bf61SToshi Kani if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2032c01d5b30SHugh Dickins return addr; 2033c01d5b30SHugh Dickins } 2034c01d5b30SHugh Dickins 2035c01d5b30SHugh Dickins offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2036c01d5b30SHugh Dickins if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2037c01d5b30SHugh Dickins return addr; 2038c01d5b30SHugh Dickins if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2039c01d5b30SHugh Dickins return addr; 2040c01d5b30SHugh Dickins 2041c01d5b30SHugh Dickins inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2042c01d5b30SHugh Dickins if (inflated_len > TASK_SIZE) 2043c01d5b30SHugh Dickins return addr; 2044c01d5b30SHugh Dickins if (inflated_len < len) 2045c01d5b30SHugh Dickins return addr; 2046c01d5b30SHugh Dickins 2047c01d5b30SHugh Dickins inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); 2048c01d5b30SHugh Dickins if (IS_ERR_VALUE(inflated_addr)) 2049c01d5b30SHugh Dickins return addr; 2050c01d5b30SHugh Dickins if (inflated_addr & ~PAGE_MASK) 2051c01d5b30SHugh Dickins return addr; 2052c01d5b30SHugh Dickins 2053c01d5b30SHugh Dickins inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2054c01d5b30SHugh Dickins inflated_addr += offset - inflated_offset; 2055c01d5b30SHugh Dickins if (inflated_offset > offset) 2056c01d5b30SHugh Dickins inflated_addr += HPAGE_PMD_SIZE; 2057c01d5b30SHugh Dickins 2058c01d5b30SHugh Dickins if (inflated_addr > TASK_SIZE - len) 2059c01d5b30SHugh Dickins return addr; 2060c01d5b30SHugh Dickins return inflated_addr; 2061c01d5b30SHugh Dickins } 2062c01d5b30SHugh Dickins 20631da177e4SLinus Torvalds #ifdef CONFIG_NUMA 206441ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 20651da177e4SLinus Torvalds { 2066496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 206741ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 20681da177e4SLinus Torvalds } 20691da177e4SLinus Torvalds 2070d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2071d8dc74f2SAdrian Bunk unsigned long addr) 20721da177e4SLinus Torvalds { 2073496ad9aaSAl Viro struct inode *inode = file_inode(vma->vm_file); 207441ffe5d5SHugh Dickins pgoff_t index; 20751da177e4SLinus Torvalds 207641ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 207741ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 20781da177e4SLinus Torvalds } 20791da177e4SLinus Torvalds #endif 20801da177e4SLinus Torvalds 20811da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user) 20821da177e4SLinus Torvalds { 2083496ad9aaSAl Viro struct inode *inode = file_inode(file); 20841da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 20851da177e4SLinus Torvalds int retval = -ENOMEM; 20861da177e4SLinus Torvalds 20874595ef88SKirill A. 
Shutemov spin_lock_irq(&info->lock); 20881da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 20891da177e4SLinus Torvalds if (!user_shm_lock(inode->i_size, user)) 20901da177e4SLinus Torvalds goto out_nomem; 20911da177e4SLinus Torvalds info->flags |= VM_LOCKED; 209289e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 20931da177e4SLinus Torvalds } 20941da177e4SLinus Torvalds if (!lock && (info->flags & VM_LOCKED) && user) { 20951da177e4SLinus Torvalds user_shm_unlock(inode->i_size, user); 20961da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 209789e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 20981da177e4SLinus Torvalds } 20991da177e4SLinus Torvalds retval = 0; 210089e004eaSLee Schermerhorn 21011da177e4SLinus Torvalds out_nomem: 21024595ef88SKirill A. Shutemov spin_unlock_irq(&info->lock); 21031da177e4SLinus Torvalds return retval; 21041da177e4SLinus Torvalds } 21051da177e4SLinus Torvalds 21069b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 21071da177e4SLinus Torvalds { 21081da177e4SLinus Torvalds file_accessed(file); 21091da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2110e496cf3dSKirill A. Shutemov if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2111f3f0e1d2SKirill A. Shutemov ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2112f3f0e1d2SKirill A. Shutemov (vma->vm_end & HPAGE_PMD_MASK)) { 2113f3f0e1d2SKirill A. Shutemov khugepaged_enter(vma, vma->vm_flags); 2114f3f0e1d2SKirill A. Shutemov } 21151da177e4SLinus Torvalds return 0; 21161da177e4SLinus Torvalds } 21171da177e4SLinus Torvalds 2118454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 211909208d15SAl Viro umode_t mode, dev_t dev, unsigned long flags) 21201da177e4SLinus Torvalds { 21211da177e4SLinus Torvalds struct inode *inode; 21221da177e4SLinus Torvalds struct shmem_inode_info *info; 21231da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 21241da177e4SLinus Torvalds 21255b04c689SPavel Emelyanov if (shmem_reserve_inode(sb)) 21261da177e4SLinus Torvalds return NULL; 21271da177e4SLinus Torvalds 21281da177e4SLinus Torvalds inode = new_inode(sb); 21291da177e4SLinus Torvalds if (inode) { 213085fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 2131454abafeSDmitry Monakhov inode_init_owner(inode, dir, mode); 21321da177e4SLinus Torvalds inode->i_blocks = 0; 2133078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 213491828a40SDavid M. Grimes inode->i_generation = get_seconds(); 21351da177e4SLinus Torvalds info = SHMEM_I(inode); 21361da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 21371da177e4SLinus Torvalds spin_lock_init(&info->lock); 213840e041a2SDavid Herrmann info->seals = F_SEAL_SEAL; 21390b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 2140779750d2SKirill A. 
Shutemov INIT_LIST_HEAD(&info->shrinklist); 21411da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 214238f38657SAristeu Rozanski simple_xattrs_init(&info->xattrs); 214372c04902SAl Viro cache_no_acl(inode); 21441da177e4SLinus Torvalds 21451da177e4SLinus Torvalds switch (mode & S_IFMT) { 21461da177e4SLinus Torvalds default: 214739f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 21481da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 21491da177e4SLinus Torvalds break; 21501da177e4SLinus Torvalds case S_IFREG: 215114fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 21521da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 21531da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 215471fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 215571fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 21561da177e4SLinus Torvalds break; 21571da177e4SLinus Torvalds case S_IFDIR: 2158d8c76e6fSDave Hansen inc_nlink(inode); 21591da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 21601da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 21611da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 21621da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 21631da177e4SLinus Torvalds break; 21641da177e4SLinus Torvalds case S_IFLNK: 21651da177e4SLinus Torvalds /* 21661da177e4SLinus Torvalds * Must not load anything in the rbtree, 21671da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 21681da177e4SLinus Torvalds */ 216971fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 21701da177e4SLinus Torvalds break; 21711da177e4SLinus Torvalds } 21725b04c689SPavel Emelyanov } else 21735b04c689SPavel Emelyanov shmem_free_inode(sb); 21741da177e4SLinus Torvalds return inode; 21751da177e4SLinus Torvalds } 21761da177e4SLinus Torvalds 21770cd6144aSJohannes Weiner bool shmem_mapping(struct address_space *mapping) 21780cd6144aSJohannes Weiner { 2179f8005451SHugh Dickins return mapping->a_ops == &shmem_aops; 21800cd6144aSJohannes Weiner } 21810cd6144aSJohannes Weiner 2182*4c27fe4cSMike Rapoport int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, 2183*4c27fe4cSMike Rapoport pmd_t *dst_pmd, 2184*4c27fe4cSMike Rapoport struct vm_area_struct *dst_vma, 2185*4c27fe4cSMike Rapoport unsigned long dst_addr, 2186*4c27fe4cSMike Rapoport unsigned long src_addr, 2187*4c27fe4cSMike Rapoport struct page **pagep) 2188*4c27fe4cSMike Rapoport { 2189*4c27fe4cSMike Rapoport struct inode *inode = file_inode(dst_vma->vm_file); 2190*4c27fe4cSMike Rapoport struct shmem_inode_info *info = SHMEM_I(inode); 2191*4c27fe4cSMike Rapoport struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 2192*4c27fe4cSMike Rapoport struct address_space *mapping = inode->i_mapping; 2193*4c27fe4cSMike Rapoport gfp_t gfp = mapping_gfp_mask(mapping); 2194*4c27fe4cSMike Rapoport pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 2195*4c27fe4cSMike Rapoport struct mem_cgroup *memcg; 2196*4c27fe4cSMike Rapoport spinlock_t *ptl; 2197*4c27fe4cSMike Rapoport void *page_kaddr; 2198*4c27fe4cSMike Rapoport struct page *page; 2199*4c27fe4cSMike Rapoport pte_t _dst_pte, *dst_pte; 2200*4c27fe4cSMike Rapoport int ret; 2201*4c27fe4cSMike Rapoport 2202*4c27fe4cSMike Rapoport if (!*pagep) { 2203*4c27fe4cSMike Rapoport ret = -ENOMEM; 2204*4c27fe4cSMike Rapoport if (shmem_acct_block(info->flags, 1)) 2205*4c27fe4cSMike Rapoport goto out; 2206*4c27fe4cSMike Rapoport if (sbinfo->max_blocks) { 2207*4c27fe4cSMike Rapoport if 
(percpu_counter_compare(&sbinfo->used_blocks, 2208*4c27fe4cSMike Rapoport sbinfo->max_blocks) >= 0) 2209*4c27fe4cSMike Rapoport goto out_unacct_blocks; 2210*4c27fe4cSMike Rapoport percpu_counter_inc(&sbinfo->used_blocks); 2211*4c27fe4cSMike Rapoport } 2212*4c27fe4cSMike Rapoport 2213*4c27fe4cSMike Rapoport page = shmem_alloc_page(gfp, info, pgoff); 2214*4c27fe4cSMike Rapoport if (!page) 2215*4c27fe4cSMike Rapoport goto out_dec_used_blocks; 2216*4c27fe4cSMike Rapoport 2217*4c27fe4cSMike Rapoport page_kaddr = kmap_atomic(page); 2218*4c27fe4cSMike Rapoport ret = copy_from_user(page_kaddr, (const void __user *)src_addr, 2219*4c27fe4cSMike Rapoport PAGE_SIZE); 2220*4c27fe4cSMike Rapoport kunmap_atomic(page_kaddr); 2221*4c27fe4cSMike Rapoport 2222*4c27fe4cSMike Rapoport /* fallback to copy_from_user outside mmap_sem */ 2223*4c27fe4cSMike Rapoport if (unlikely(ret)) { 2224*4c27fe4cSMike Rapoport *pagep = page; 2225*4c27fe4cSMike Rapoport /* don't free the page */ 2226*4c27fe4cSMike Rapoport return -EFAULT; 2227*4c27fe4cSMike Rapoport } 2228*4c27fe4cSMike Rapoport } else { 2229*4c27fe4cSMike Rapoport page = *pagep; 2230*4c27fe4cSMike Rapoport *pagep = NULL; 2231*4c27fe4cSMike Rapoport } 2232*4c27fe4cSMike Rapoport 2233*4c27fe4cSMike Rapoport ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false); 2234*4c27fe4cSMike Rapoport if (ret) 2235*4c27fe4cSMike Rapoport goto out_release; 2236*4c27fe4cSMike Rapoport 2237*4c27fe4cSMike Rapoport ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK); 2238*4c27fe4cSMike Rapoport if (!ret) { 2239*4c27fe4cSMike Rapoport ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL); 2240*4c27fe4cSMike Rapoport radix_tree_preload_end(); 2241*4c27fe4cSMike Rapoport } 2242*4c27fe4cSMike Rapoport if (ret) 2243*4c27fe4cSMike Rapoport goto out_release_uncharge; 2244*4c27fe4cSMike Rapoport 2245*4c27fe4cSMike Rapoport mem_cgroup_commit_charge(page, memcg, false, false); 2246*4c27fe4cSMike Rapoport 2247*4c27fe4cSMike Rapoport _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 2248*4c27fe4cSMike Rapoport if (dst_vma->vm_flags & VM_WRITE) 2249*4c27fe4cSMike Rapoport _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); 2250*4c27fe4cSMike Rapoport 2251*4c27fe4cSMike Rapoport ret = -EEXIST; 2252*4c27fe4cSMike Rapoport dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); 2253*4c27fe4cSMike Rapoport if (!pte_none(*dst_pte)) 2254*4c27fe4cSMike Rapoport goto out_release_uncharge_unlock; 2255*4c27fe4cSMike Rapoport 2256*4c27fe4cSMike Rapoport __SetPageUptodate(page); 2257*4c27fe4cSMike Rapoport 2258*4c27fe4cSMike Rapoport lru_cache_add_anon(page); 2259*4c27fe4cSMike Rapoport 2260*4c27fe4cSMike Rapoport spin_lock(&info->lock); 2261*4c27fe4cSMike Rapoport info->alloced++; 2262*4c27fe4cSMike Rapoport inode->i_blocks += BLOCKS_PER_PAGE; 2263*4c27fe4cSMike Rapoport shmem_recalc_inode(inode); 2264*4c27fe4cSMike Rapoport spin_unlock(&info->lock); 2265*4c27fe4cSMike Rapoport 2266*4c27fe4cSMike Rapoport inc_mm_counter(dst_mm, mm_counter_file(page)); 2267*4c27fe4cSMike Rapoport page_add_file_rmap(page, false); 2268*4c27fe4cSMike Rapoport set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 2269*4c27fe4cSMike Rapoport 2270*4c27fe4cSMike Rapoport /* No need to invalidate - it was non-present before */ 2271*4c27fe4cSMike Rapoport update_mmu_cache(dst_vma, dst_addr, dst_pte); 2272*4c27fe4cSMike Rapoport unlock_page(page); 2273*4c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 2274*4c27fe4cSMike Rapoport ret = 0; 2275*4c27fe4cSMike Rapoport out: 2276*4c27fe4cSMike Rapoport return ret; 
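/*
 * The error labels below unwind in reverse order of setup: drop the
 * page table lock, cancel the memcg charge, release the page, then
 * undo the used_blocks and shmem_acct_block() accounting taken at the
 * top of this function (the shmem side of userfaultfd's UFFDIO_COPY).
 */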
2277*4c27fe4cSMike Rapoport out_release_uncharge_unlock: 2278*4c27fe4cSMike Rapoport pte_unmap_unlock(dst_pte, ptl); 2279*4c27fe4cSMike Rapoport out_release_uncharge: 2280*4c27fe4cSMike Rapoport mem_cgroup_cancel_charge(page, memcg, false); 2281*4c27fe4cSMike Rapoport out_release: 2282*4c27fe4cSMike Rapoport put_page(page); 2283*4c27fe4cSMike Rapoport out_dec_used_blocks: 2284*4c27fe4cSMike Rapoport if (sbinfo->max_blocks) 2285*4c27fe4cSMike Rapoport percpu_counter_add(&sbinfo->used_blocks, -1); 2286*4c27fe4cSMike Rapoport out_unacct_blocks: 2287*4c27fe4cSMike Rapoport shmem_unacct_blocks(info->flags, 1); 2288*4c27fe4cSMike Rapoport goto out; 2289*4c27fe4cSMike Rapoport } 2290*4c27fe4cSMike Rapoport 22911da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 229292e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 229369f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 22941da177e4SLinus Torvalds 22956d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR 22966d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 22976d9d88d0SJarkko Sakkinen #else 22986d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL 22996d9d88d0SJarkko Sakkinen #endif 23006d9d88d0SJarkko Sakkinen 23011da177e4SLinus Torvalds static int 2302800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 2303800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 2304800d15a5SNick Piggin struct page **pagep, void **fsdata) 23051da177e4SLinus Torvalds { 2306800d15a5SNick Piggin struct inode *inode = mapping->host; 230740e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 230809cbfeafSKirill A. Shutemov pgoff_t index = pos >> PAGE_SHIFT; 230940e041a2SDavid Herrmann 231040e041a2SDavid Herrmann /* i_mutex is held by caller */ 231140e041a2SDavid Herrmann if (unlikely(info->seals)) { 231240e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) 231340e041a2SDavid Herrmann return -EPERM; 231440e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 231540e041a2SDavid Herrmann return -EPERM; 231640e041a2SDavid Herrmann } 231740e041a2SDavid Herrmann 23189e18eb29SAndres Lagar-Cavilla return shmem_getpage(inode, index, pagep, SGP_WRITE); 2319800d15a5SNick Piggin } 2320800d15a5SNick Piggin 2321800d15a5SNick Piggin static int 2322800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 2323800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 2324800d15a5SNick Piggin struct page *page, void *fsdata) 2325800d15a5SNick Piggin { 2326800d15a5SNick Piggin struct inode *inode = mapping->host; 2327800d15a5SNick Piggin 2328800d15a5SNick Piggin if (pos + copied > inode->i_size) 2329800d15a5SNick Piggin i_size_write(inode, pos + copied); 2330800d15a5SNick Piggin 2331ec9516fbSHugh Dickins if (!PageUptodate(page)) { 2332800d8c63SKirill A. Shutemov struct page *head = compound_head(page); 2333800d8c63SKirill A. Shutemov if (PageTransCompound(page)) { 2334800d8c63SKirill A. Shutemov int i; 2335800d8c63SKirill A. Shutemov 2336800d8c63SKirill A. Shutemov for (i = 0; i < HPAGE_PMD_NR; i++) { 2337800d8c63SKirill A. Shutemov if (head + i == page) 2338800d8c63SKirill A. Shutemov continue; 2339800d8c63SKirill A. Shutemov clear_highpage(head + i); 2340800d8c63SKirill A. Shutemov flush_dcache_page(head + i); 2341800d8c63SKirill A. Shutemov } 2342800d8c63SKirill A. Shutemov } 234309cbfeafSKirill A. 
Shutemov if (copied < PAGE_SIZE) { 234409cbfeafSKirill A. Shutemov unsigned from = pos & (PAGE_SIZE - 1); 2345ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 234609cbfeafSKirill A. Shutemov from + copied, PAGE_SIZE); 2347ec9516fbSHugh Dickins } 2348800d8c63SKirill A. Shutemov SetPageUptodate(head); 2349ec9516fbSHugh Dickins } 2350d3602444SHugh Dickins set_page_dirty(page); 23516746aff7SWu Fengguang unlock_page(page); 235209cbfeafSKirill A. Shutemov put_page(page); 2353d3602444SHugh Dickins 2354800d15a5SNick Piggin return copied; 23551da177e4SLinus Torvalds } 23561da177e4SLinus Torvalds 23572ba5bbedSAl Viro static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 23581da177e4SLinus Torvalds { 23596e58e79dSAl Viro struct file *file = iocb->ki_filp; 23606e58e79dSAl Viro struct inode *inode = file_inode(file); 23611da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 236241ffe5d5SHugh Dickins pgoff_t index; 236341ffe5d5SHugh Dickins unsigned long offset; 2364a0ee5ec5SHugh Dickins enum sgp_type sgp = SGP_READ; 2365f7c1d074SGeert Uytterhoeven int error = 0; 2366cb66a7a1SAl Viro ssize_t retval = 0; 23676e58e79dSAl Viro loff_t *ppos = &iocb->ki_pos; 2368a0ee5ec5SHugh Dickins 2369a0ee5ec5SHugh Dickins /* 2370a0ee5ec5SHugh Dickins * Might this read be for a stacking filesystem? Then when reading 2371a0ee5ec5SHugh Dickins * holes of a sparse file, we actually need to allocate those pages, 2372a0ee5ec5SHugh Dickins * and even mark them dirty, so it cannot exceed the max_blocks limit. 2373a0ee5ec5SHugh Dickins */ 2374777eda2cSAl Viro if (!iter_is_iovec(to)) 237575edd345SHugh Dickins sgp = SGP_CACHE; 23761da177e4SLinus Torvalds 237709cbfeafSKirill A. Shutemov index = *ppos >> PAGE_SHIFT; 237809cbfeafSKirill A. Shutemov offset = *ppos & ~PAGE_MASK; 23791da177e4SLinus Torvalds 23801da177e4SLinus Torvalds for (;;) { 23811da177e4SLinus Torvalds struct page *page = NULL; 238241ffe5d5SHugh Dickins pgoff_t end_index; 238341ffe5d5SHugh Dickins unsigned long nr, ret; 23841da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 23851da177e4SLinus Torvalds 238609cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 23871da177e4SLinus Torvalds if (index > end_index) 23881da177e4SLinus Torvalds break; 23891da177e4SLinus Torvalds if (index == end_index) { 239009cbfeafSKirill A. Shutemov nr = i_size & ~PAGE_MASK; 23911da177e4SLinus Torvalds if (nr <= offset) 23921da177e4SLinus Torvalds break; 23931da177e4SLinus Torvalds } 23941da177e4SLinus Torvalds 23959e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, sgp); 23966e58e79dSAl Viro if (error) { 23976e58e79dSAl Viro if (error == -EINVAL) 23986e58e79dSAl Viro error = 0; 23991da177e4SLinus Torvalds break; 24001da177e4SLinus Torvalds } 240175edd345SHugh Dickins if (page) { 240275edd345SHugh Dickins if (sgp == SGP_CACHE) 240375edd345SHugh Dickins set_page_dirty(page); 2404d3602444SHugh Dickins unlock_page(page); 240575edd345SHugh Dickins } 24061da177e4SLinus Torvalds 24071da177e4SLinus Torvalds /* 24081da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 24091b1dcc1bSJes Sorensen * are called without i_mutex protection against truncate 24101da177e4SLinus Torvalds */ 241109cbfeafSKirill A. Shutemov nr = PAGE_SIZE; 24121da177e4SLinus Torvalds i_size = i_size_read(inode); 241309cbfeafSKirill A. Shutemov end_index = i_size >> PAGE_SHIFT; 24141da177e4SLinus Torvalds if (index == end_index) { 241509cbfeafSKirill A. 
Shutemov nr = i_size & ~PAGE_MASK; 24161da177e4SLinus Torvalds if (nr <= offset) { 24171da177e4SLinus Torvalds if (page) 241809cbfeafSKirill A. Shutemov put_page(page); 24191da177e4SLinus Torvalds break; 24201da177e4SLinus Torvalds } 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds nr -= offset; 24231da177e4SLinus Torvalds 24241da177e4SLinus Torvalds if (page) { 24251da177e4SLinus Torvalds /* 24261da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 24271da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 24281da177e4SLinus Torvalds * before reading the page on the kernel side. 24291da177e4SLinus Torvalds */ 24301da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 24311da177e4SLinus Torvalds flush_dcache_page(page); 24321da177e4SLinus Torvalds /* 24331da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 24341da177e4SLinus Torvalds */ 24351da177e4SLinus Torvalds if (!offset) 24361da177e4SLinus Torvalds mark_page_accessed(page); 2437b5810039SNick Piggin } else { 24381da177e4SLinus Torvalds page = ZERO_PAGE(0); 243909cbfeafSKirill A. Shutemov get_page(page); 2440b5810039SNick Piggin } 24411da177e4SLinus Torvalds 24421da177e4SLinus Torvalds /* 24431da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 24441da177e4SLinus Torvalds * now we can copy it to user space... 24451da177e4SLinus Torvalds */ 24462ba5bbedSAl Viro ret = copy_page_to_iter(page, offset, nr, to); 24476e58e79dSAl Viro retval += ret; 24481da177e4SLinus Torvalds offset += ret; 244909cbfeafSKirill A. Shutemov index += offset >> PAGE_SHIFT; 245009cbfeafSKirill A. Shutemov offset &= ~PAGE_MASK; 24511da177e4SLinus Torvalds 245209cbfeafSKirill A. Shutemov put_page(page); 24532ba5bbedSAl Viro if (!iov_iter_count(to)) 24541da177e4SLinus Torvalds break; 24556e58e79dSAl Viro if (ret < nr) { 24566e58e79dSAl Viro error = -EFAULT; 24576e58e79dSAl Viro break; 24586e58e79dSAl Viro } 24591da177e4SLinus Torvalds cond_resched(); 24601da177e4SLinus Torvalds } 24611da177e4SLinus Torvalds 246209cbfeafSKirill A. Shutemov *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 24636e58e79dSAl Viro file_accessed(file); 24646e58e79dSAl Viro return retval ? retval : error; 24651da177e4SLinus Torvalds } 24661da177e4SLinus Torvalds 2467220f2ac9SHugh Dickins /* 2468220f2ac9SHugh Dickins * llseek SEEK_DATA or SEEK_HOLE through the radix_tree. 
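 *
 * A present, Uptodate page (or a swapped-out entry) at an index counts
 * as data; an absent slot, or a !PageUptodate page such as fallocate()
 * leaves behind, counts as part of a hole.  Userspace reaches this
 * through lseek(2); a minimal sketch, error handling omitted:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	   (first data at/after 0)
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   (end of that data run)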
2469220f2ac9SHugh Dickins */ 2470220f2ac9SHugh Dickins static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 2471965c8e59SAndrew Morton pgoff_t index, pgoff_t end, int whence) 2472220f2ac9SHugh Dickins { 2473220f2ac9SHugh Dickins struct page *page; 2474220f2ac9SHugh Dickins struct pagevec pvec; 2475220f2ac9SHugh Dickins pgoff_t indices[PAGEVEC_SIZE]; 2476220f2ac9SHugh Dickins bool done = false; 2477220f2ac9SHugh Dickins int i; 2478220f2ac9SHugh Dickins 2479220f2ac9SHugh Dickins pagevec_init(&pvec, 0); 2480220f2ac9SHugh Dickins pvec.nr = 1; /* start small: we may be there already */ 2481220f2ac9SHugh Dickins while (!done) { 24820cd6144aSJohannes Weiner pvec.nr = find_get_entries(mapping, index, 2483220f2ac9SHugh Dickins pvec.nr, pvec.pages, indices); 2484220f2ac9SHugh Dickins if (!pvec.nr) { 2485965c8e59SAndrew Morton if (whence == SEEK_DATA) 2486220f2ac9SHugh Dickins index = end; 2487220f2ac9SHugh Dickins break; 2488220f2ac9SHugh Dickins } 2489220f2ac9SHugh Dickins for (i = 0; i < pvec.nr; i++, index++) { 2490220f2ac9SHugh Dickins if (index < indices[i]) { 2491965c8e59SAndrew Morton if (whence == SEEK_HOLE) { 2492220f2ac9SHugh Dickins done = true; 2493220f2ac9SHugh Dickins break; 2494220f2ac9SHugh Dickins } 2495220f2ac9SHugh Dickins index = indices[i]; 2496220f2ac9SHugh Dickins } 2497220f2ac9SHugh Dickins page = pvec.pages[i]; 2498220f2ac9SHugh Dickins if (page && !radix_tree_exceptional_entry(page)) { 2499220f2ac9SHugh Dickins if (!PageUptodate(page)) 2500220f2ac9SHugh Dickins page = NULL; 2501220f2ac9SHugh Dickins } 2502220f2ac9SHugh Dickins if (index >= end || 2503965c8e59SAndrew Morton (page && whence == SEEK_DATA) || 2504965c8e59SAndrew Morton (!page && whence == SEEK_HOLE)) { 2505220f2ac9SHugh Dickins done = true; 2506220f2ac9SHugh Dickins break; 2507220f2ac9SHugh Dickins } 2508220f2ac9SHugh Dickins } 25090cd6144aSJohannes Weiner pagevec_remove_exceptionals(&pvec); 2510220f2ac9SHugh Dickins pagevec_release(&pvec); 2511220f2ac9SHugh Dickins pvec.nr = PAGEVEC_SIZE; 2512220f2ac9SHugh Dickins cond_resched(); 2513220f2ac9SHugh Dickins } 2514220f2ac9SHugh Dickins return index; 2515220f2ac9SHugh Dickins } 2516220f2ac9SHugh Dickins 2517965c8e59SAndrew Morton static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 2518220f2ac9SHugh Dickins { 2519220f2ac9SHugh Dickins struct address_space *mapping = file->f_mapping; 2520220f2ac9SHugh Dickins struct inode *inode = mapping->host; 2521220f2ac9SHugh Dickins pgoff_t start, end; 2522220f2ac9SHugh Dickins loff_t new_offset; 2523220f2ac9SHugh Dickins 2524965c8e59SAndrew Morton if (whence != SEEK_DATA && whence != SEEK_HOLE) 2525965c8e59SAndrew Morton return generic_file_llseek_size(file, offset, whence, 2526220f2ac9SHugh Dickins MAX_LFS_FILESIZE, i_size_read(inode)); 25275955102cSAl Viro inode_lock(inode); 2528220f2ac9SHugh Dickins /* We're holding i_mutex so we can access i_size directly */ 2529220f2ac9SHugh Dickins 2530220f2ac9SHugh Dickins if (offset < 0) 2531220f2ac9SHugh Dickins offset = -EINVAL; 2532220f2ac9SHugh Dickins else if (offset >= inode->i_size) 2533220f2ac9SHugh Dickins offset = -ENXIO; 2534220f2ac9SHugh Dickins else { 253509cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 253609cbfeafSKirill A. Shutemov end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 2537965c8e59SAndrew Morton new_offset = shmem_seek_hole_data(mapping, start, end, whence); 253809cbfeafSKirill A. 
Shutemov new_offset <<= PAGE_SHIFT; 2539220f2ac9SHugh Dickins if (new_offset > offset) { 2540220f2ac9SHugh Dickins if (new_offset < inode->i_size) 2541220f2ac9SHugh Dickins offset = new_offset; 2542965c8e59SAndrew Morton else if (whence == SEEK_DATA) 2543220f2ac9SHugh Dickins offset = -ENXIO; 2544220f2ac9SHugh Dickins else 2545220f2ac9SHugh Dickins offset = inode->i_size; 2546220f2ac9SHugh Dickins } 2547220f2ac9SHugh Dickins } 2548220f2ac9SHugh Dickins 2549387aae6fSHugh Dickins if (offset >= 0) 255046a1c2c7SJie Liu offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 25515955102cSAl Viro inode_unlock(inode); 2552220f2ac9SHugh Dickins return offset; 2553220f2ac9SHugh Dickins } 2554220f2ac9SHugh Dickins 255505f65b5cSDavid Herrmann /* 255605f65b5cSDavid Herrmann * We need a tag: a new tag would expand every radix_tree_node by 8 bytes, 255705f65b5cSDavid Herrmann * so reuse a tag which we firmly believe is never set or cleared on shmem. 255805f65b5cSDavid Herrmann */ 255905f65b5cSDavid Herrmann #define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE 256005f65b5cSDavid Herrmann #define LAST_SCAN 4 /* about 150ms max */ 256105f65b5cSDavid Herrmann 256205f65b5cSDavid Herrmann static void shmem_tag_pins(struct address_space *mapping) 256305f65b5cSDavid Herrmann { 256405f65b5cSDavid Herrmann struct radix_tree_iter iter; 256505f65b5cSDavid Herrmann void **slot; 256605f65b5cSDavid Herrmann pgoff_t start; 256705f65b5cSDavid Herrmann struct page *page; 256805f65b5cSDavid Herrmann 256905f65b5cSDavid Herrmann lru_add_drain(); 257005f65b5cSDavid Herrmann start = 0; 257105f65b5cSDavid Herrmann rcu_read_lock(); 257205f65b5cSDavid Herrmann 257305f65b5cSDavid Herrmann radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 257405f65b5cSDavid Herrmann page = radix_tree_deref_slot(slot); 257505f65b5cSDavid Herrmann if (!page || radix_tree_exception(page)) { 25762cf938aaSMatthew Wilcox if (radix_tree_deref_retry(page)) { 25772cf938aaSMatthew Wilcox slot = radix_tree_iter_retry(&iter); 25782cf938aaSMatthew Wilcox continue; 25792cf938aaSMatthew Wilcox } 258005f65b5cSDavid Herrmann } else if (page_count(page) - page_mapcount(page) > 1) { 258105f65b5cSDavid Herrmann spin_lock_irq(&mapping->tree_lock); 258205f65b5cSDavid Herrmann radix_tree_tag_set(&mapping->page_tree, iter.index, 258305f65b5cSDavid Herrmann SHMEM_TAG_PINNED); 258405f65b5cSDavid Herrmann spin_unlock_irq(&mapping->tree_lock); 258505f65b5cSDavid Herrmann } 258605f65b5cSDavid Herrmann 258705f65b5cSDavid Herrmann if (need_resched()) { 2588148deab2SMatthew Wilcox slot = radix_tree_iter_resume(slot, &iter); 258905f65b5cSDavid Herrmann cond_resched_rcu(); 259005f65b5cSDavid Herrmann } 259105f65b5cSDavid Herrmann } 259205f65b5cSDavid Herrmann rcu_read_unlock(); 259305f65b5cSDavid Herrmann } 259405f65b5cSDavid Herrmann 259505f65b5cSDavid Herrmann /* 259605f65b5cSDavid Herrmann * Setting SEAL_WRITE requires us to verify there's no pending writer. However, 259705f65b5cSDavid Herrmann * via get_user_pages(), drivers might have some pending I/O without any active 259805f65b5cSDavid Herrmann * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages 259905f65b5cSDavid Herrmann * and see whether they have an elevated ref-count. If so, we tag them and wait for 260005f65b5cSDavid Herrmann * them to be dropped. 260105f65b5cSDavid Herrmann * The caller must guarantee that no new user will acquire writable references 260205f65b5cSDavid Herrmann * to those pages to avoid races.
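 *
 * Concretely, shmem_tag_pins() above marks every such page with
 * SHMEM_TAG_PINNED; shmem_wait_for_pins() below then rescans only the
 * tagged slots, up to LAST_SCAN times with increasingly long killable
 * sleeps, clearing the tag from each slot whose extra references have
 * been dropped.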
260305f65b5cSDavid Herrmann */ 260440e041a2SDavid Herrmann static int shmem_wait_for_pins(struct address_space *mapping) 260540e041a2SDavid Herrmann { 260605f65b5cSDavid Herrmann struct radix_tree_iter iter; 260705f65b5cSDavid Herrmann void **slot; 260805f65b5cSDavid Herrmann pgoff_t start; 260905f65b5cSDavid Herrmann struct page *page; 261005f65b5cSDavid Herrmann int error, scan; 261105f65b5cSDavid Herrmann 261205f65b5cSDavid Herrmann shmem_tag_pins(mapping); 261305f65b5cSDavid Herrmann 261405f65b5cSDavid Herrmann error = 0; 261505f65b5cSDavid Herrmann for (scan = 0; scan <= LAST_SCAN; scan++) { 261605f65b5cSDavid Herrmann if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED)) 261705f65b5cSDavid Herrmann break; 261805f65b5cSDavid Herrmann 261905f65b5cSDavid Herrmann if (!scan) 262005f65b5cSDavid Herrmann lru_add_drain_all(); 262105f65b5cSDavid Herrmann else if (schedule_timeout_killable((HZ << scan) / 200)) 262205f65b5cSDavid Herrmann scan = LAST_SCAN; 262305f65b5cSDavid Herrmann 262405f65b5cSDavid Herrmann start = 0; 262505f65b5cSDavid Herrmann rcu_read_lock(); 262605f65b5cSDavid Herrmann radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 262705f65b5cSDavid Herrmann start, SHMEM_TAG_PINNED) { 262805f65b5cSDavid Herrmann 262905f65b5cSDavid Herrmann page = radix_tree_deref_slot(slot); 263005f65b5cSDavid Herrmann if (radix_tree_exception(page)) { 26312cf938aaSMatthew Wilcox if (radix_tree_deref_retry(page)) { 26322cf938aaSMatthew Wilcox slot = radix_tree_iter_retry(&iter); 26332cf938aaSMatthew Wilcox continue; 26342cf938aaSMatthew Wilcox } 263505f65b5cSDavid Herrmann 263605f65b5cSDavid Herrmann page = NULL; 263705f65b5cSDavid Herrmann } 263805f65b5cSDavid Herrmann 263905f65b5cSDavid Herrmann if (page && 264005f65b5cSDavid Herrmann page_count(page) - page_mapcount(page) != 1) { 264105f65b5cSDavid Herrmann if (scan < LAST_SCAN) 264205f65b5cSDavid Herrmann goto continue_resched; 264305f65b5cSDavid Herrmann 264405f65b5cSDavid Herrmann /* 264505f65b5cSDavid Herrmann * On the last scan, we clean up all those tags 264605f65b5cSDavid Herrmann * we inserted; but make a note that we still 264705f65b5cSDavid Herrmann * found pages pinned. 
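 * That note becomes the -EBUSY which makes shmem_add_seals()
 * back out of F_SEAL_WRITE and re-allow writable mappings.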
264805f65b5cSDavid Herrmann */ 264905f65b5cSDavid Herrmann error = -EBUSY; 265005f65b5cSDavid Herrmann } 265105f65b5cSDavid Herrmann 265205f65b5cSDavid Herrmann spin_lock_irq(&mapping->tree_lock); 265305f65b5cSDavid Herrmann radix_tree_tag_clear(&mapping->page_tree, 265405f65b5cSDavid Herrmann iter.index, SHMEM_TAG_PINNED); 265505f65b5cSDavid Herrmann spin_unlock_irq(&mapping->tree_lock); 265605f65b5cSDavid Herrmann continue_resched: 265705f65b5cSDavid Herrmann if (need_resched()) { 2658148deab2SMatthew Wilcox slot = radix_tree_iter_resume(slot, &iter); 265905f65b5cSDavid Herrmann cond_resched_rcu(); 266005f65b5cSDavid Herrmann } 266105f65b5cSDavid Herrmann } 266205f65b5cSDavid Herrmann rcu_read_unlock(); 266305f65b5cSDavid Herrmann } 266405f65b5cSDavid Herrmann 266505f65b5cSDavid Herrmann return error; 266640e041a2SDavid Herrmann } 266740e041a2SDavid Herrmann 266840e041a2SDavid Herrmann #define F_ALL_SEALS (F_SEAL_SEAL | \ 266940e041a2SDavid Herrmann F_SEAL_SHRINK | \ 267040e041a2SDavid Herrmann F_SEAL_GROW | \ 267140e041a2SDavid Herrmann F_SEAL_WRITE) 267240e041a2SDavid Herrmann 267340e041a2SDavid Herrmann int shmem_add_seals(struct file *file, unsigned int seals) 267440e041a2SDavid Herrmann { 267540e041a2SDavid Herrmann struct inode *inode = file_inode(file); 267640e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 267740e041a2SDavid Herrmann int error; 267840e041a2SDavid Herrmann 267940e041a2SDavid Herrmann /* 268040e041a2SDavid Herrmann * SEALING 268140e041a2SDavid Herrmann * Sealing allows multiple parties to share a shmem-file but restrict 268240e041a2SDavid Herrmann * access to a specific subset of file operations. Seals can only be 268340e041a2SDavid Herrmann * added, but never removed. This way, mutually untrusted parties can 268440e041a2SDavid Herrmann * share common memory regions with a well-defined policy. A malicious 268540e041a2SDavid Herrmann * peer can thus never perform unwanted operations on a shared object. 268640e041a2SDavid Herrmann * 268740e041a2SDavid Herrmann * Seals are only supported on special shmem-files and always affect 268840e041a2SDavid Herrmann * the whole underlying inode. Once a seal is set, it may prevent some 268940e041a2SDavid Herrmann * kinds of access to the file. Currently, the following seals are 269040e041a2SDavid Herrmann * defined: 269140e041a2SDavid Herrmann * SEAL_SEAL: Prevent further seals from being set on this file 269240e041a2SDavid Herrmann * SEAL_SHRINK: Prevent the file from shrinking 269340e041a2SDavid Herrmann * SEAL_GROW: Prevent the file from growing 269440e041a2SDavid Herrmann * SEAL_WRITE: Prevent write access to the file 269540e041a2SDavid Herrmann * 269640e041a2SDavid Herrmann * As we don't require any trust relationship between two parties, we 269740e041a2SDavid Herrmann * must prevent seals from being removed. Therefore, sealing a file 269840e041a2SDavid Herrmann * only adds a given set of seals to the file, it never touches 269940e041a2SDavid Herrmann * existing seals. Furthermore, the "setting seals"-operation can be 270040e041a2SDavid Herrmann * sealed itself, which basically prevents any further seal from being 270140e041a2SDavid Herrmann * added. 270240e041a2SDavid Herrmann * 270340e041a2SDavid Herrmann * Semantics of sealing are only defined on volatile files. Only 270440e041a2SDavid Herrmann * anonymous shmem files support sealing. More importantly, seals are 270540e041a2SDavid Herrmann * never written to disk. 
Therefore, there's no plan to support it on 270640e041a2SDavid Herrmann * other file types. 270740e041a2SDavid Herrmann */ 270840e041a2SDavid Herrmann 270940e041a2SDavid Herrmann if (file->f_op != &shmem_file_operations) 271040e041a2SDavid Herrmann return -EINVAL; 271140e041a2SDavid Herrmann if (!(file->f_mode & FMODE_WRITE)) 271240e041a2SDavid Herrmann return -EPERM; 271340e041a2SDavid Herrmann if (seals & ~(unsigned int)F_ALL_SEALS) 271440e041a2SDavid Herrmann return -EINVAL; 271540e041a2SDavid Herrmann 27165955102cSAl Viro inode_lock(inode); 271740e041a2SDavid Herrmann 271840e041a2SDavid Herrmann if (info->seals & F_SEAL_SEAL) { 271940e041a2SDavid Herrmann error = -EPERM; 272040e041a2SDavid Herrmann goto unlock; 272140e041a2SDavid Herrmann } 272240e041a2SDavid Herrmann 272340e041a2SDavid Herrmann if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) { 272440e041a2SDavid Herrmann error = mapping_deny_writable(file->f_mapping); 272540e041a2SDavid Herrmann if (error) 272640e041a2SDavid Herrmann goto unlock; 272740e041a2SDavid Herrmann 272840e041a2SDavid Herrmann error = shmem_wait_for_pins(file->f_mapping); 272940e041a2SDavid Herrmann if (error) { 273040e041a2SDavid Herrmann mapping_allow_writable(file->f_mapping); 273140e041a2SDavid Herrmann goto unlock; 273240e041a2SDavid Herrmann } 273340e041a2SDavid Herrmann } 273440e041a2SDavid Herrmann 273540e041a2SDavid Herrmann info->seals |= seals; 273640e041a2SDavid Herrmann error = 0; 273740e041a2SDavid Herrmann 273840e041a2SDavid Herrmann unlock: 27395955102cSAl Viro inode_unlock(inode); 274040e041a2SDavid Herrmann return error; 274140e041a2SDavid Herrmann } 274240e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_add_seals); 274340e041a2SDavid Herrmann 274440e041a2SDavid Herrmann int shmem_get_seals(struct file *file) 274540e041a2SDavid Herrmann { 274640e041a2SDavid Herrmann if (file->f_op != &shmem_file_operations) 274740e041a2SDavid Herrmann return -EINVAL; 274840e041a2SDavid Herrmann 274940e041a2SDavid Herrmann return SHMEM_I(file_inode(file))->seals; 275040e041a2SDavid Herrmann } 275140e041a2SDavid Herrmann EXPORT_SYMBOL_GPL(shmem_get_seals); 275240e041a2SDavid Herrmann 275340e041a2SDavid Herrmann long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg) 275440e041a2SDavid Herrmann { 275540e041a2SDavid Herrmann long error; 275640e041a2SDavid Herrmann 275740e041a2SDavid Herrmann switch (cmd) { 275840e041a2SDavid Herrmann case F_ADD_SEALS: 275940e041a2SDavid Herrmann /* disallow upper 32 bits */ 276040e041a2SDavid Herrmann if (arg > UINT_MAX) 276140e041a2SDavid Herrmann return -EINVAL; 276240e041a2SDavid Herrmann 276340e041a2SDavid Herrmann error = shmem_add_seals(file, arg); 276440e041a2SDavid Herrmann break; 276540e041a2SDavid Herrmann case F_GET_SEALS: 276640e041a2SDavid Herrmann error = shmem_get_seals(file); 276740e041a2SDavid Herrmann break; 276840e041a2SDavid Herrmann default: 276940e041a2SDavid Herrmann error = -EINVAL; 277040e041a2SDavid Herrmann break; 277140e041a2SDavid Herrmann } 277240e041a2SDavid Herrmann 277340e041a2SDavid Herrmann return error; 277440e041a2SDavid Herrmann } 277540e041a2SDavid Herrmann
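/*
 * Seen from userspace, the path above is fcntl(2) on a memfd.  A
 * minimal sketch (assuming memfd_create(2) is available; headers and
 * error handling omitted):
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	write(fd, data, len);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SEAL);
 *
 * fcntl(fd, F_GET_SEALS) now returns F_ALL_SEALS, and a recipient of
 * the fd can mmap() it read-only and rely on the contents never
 * changing underneath it.
 */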
277683e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 277783e4fa9cSHugh Dickins loff_t len) 277883e4fa9cSHugh Dickins { 2779496ad9aaSAl Viro struct inode *inode = file_inode(file); 2780e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 278140e041a2SDavid Herrmann struct shmem_inode_info *info = SHMEM_I(inode); 27821aac1400SHugh Dickins struct shmem_falloc shmem_falloc; 2783e2d12e22SHugh Dickins pgoff_t start, index, end; 2784e2d12e22SHugh Dickins int error; 278583e4fa9cSHugh Dickins 278613ace4d0SHugh Dickins if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 278713ace4d0SHugh Dickins return -EOPNOTSUPP; 278813ace4d0SHugh Dickins 27895955102cSAl Viro inode_lock(inode); 279083e4fa9cSHugh Dickins 279183e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 279283e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 279383e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 279483e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 27958e205f77SHugh Dickins DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 279683e4fa9cSHugh Dickins 279740e041a2SDavid Herrmann /* protected by i_mutex */ 279840e041a2SDavid Herrmann if (info->seals & F_SEAL_WRITE) { 279940e041a2SDavid Herrmann error = -EPERM; 280040e041a2SDavid Herrmann goto out; 280140e041a2SDavid Herrmann } 280240e041a2SDavid Herrmann 28038e205f77SHugh Dickins shmem_falloc.waitq = &shmem_falloc_waitq; 2804f00cdc6dSHugh Dickins shmem_falloc.start = unmap_start >> PAGE_SHIFT; 2805f00cdc6dSHugh Dickins shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 2806f00cdc6dSHugh Dickins spin_lock(&inode->i_lock); 2807f00cdc6dSHugh Dickins inode->i_private = &shmem_falloc; 2808f00cdc6dSHugh Dickins spin_unlock(&inode->i_lock); 2809f00cdc6dSHugh Dickins 281083e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 281183e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 281283e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 281383e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 281483e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 28158e205f77SHugh Dickins 28168e205f77SHugh Dickins spin_lock(&inode->i_lock); 28178e205f77SHugh Dickins inode->i_private = NULL; 28188e205f77SHugh Dickins wake_up_all(&shmem_falloc_waitq); 281910d20bd2SLinus Torvalds WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list)); 28208e205f77SHugh Dickins spin_unlock(&inode->i_lock); 282183e4fa9cSHugh Dickins error = 0; 28228e205f77SHugh Dickins goto out; 282383e4fa9cSHugh Dickins } 282483e4fa9cSHugh Dickins 2825e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 2826e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 2827e2d12e22SHugh Dickins if (error) 2828e2d12e22SHugh Dickins goto out; 2829e2d12e22SHugh Dickins 283040e041a2SDavid Herrmann if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 283140e041a2SDavid Herrmann error = -EPERM; 283240e041a2SDavid Herrmann goto out; 283340e041a2SDavid Herrmann } 283440e041a2SDavid Herrmann 283509cbfeafSKirill A. Shutemov start = offset >> PAGE_SHIFT; 283609cbfeafSKirill A.
Shutemov end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 2837e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 2838e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 2839e2d12e22SHugh Dickins error = -ENOSPC; 2840e2d12e22SHugh Dickins goto out; 2841e2d12e22SHugh Dickins } 2842e2d12e22SHugh Dickins 28438e205f77SHugh Dickins shmem_falloc.waitq = NULL; 28441aac1400SHugh Dickins shmem_falloc.start = start; 28451aac1400SHugh Dickins shmem_falloc.next = start; 28461aac1400SHugh Dickins shmem_falloc.nr_falloced = 0; 28471aac1400SHugh Dickins shmem_falloc.nr_unswapped = 0; 28481aac1400SHugh Dickins spin_lock(&inode->i_lock); 28491aac1400SHugh Dickins inode->i_private = &shmem_falloc; 28501aac1400SHugh Dickins spin_unlock(&inode->i_lock); 28511aac1400SHugh Dickins 2852e2d12e22SHugh Dickins for (index = start; index < end; index++) { 2853e2d12e22SHugh Dickins struct page *page; 2854e2d12e22SHugh Dickins 2855e2d12e22SHugh Dickins /* 2856e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 2857e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 2858e2d12e22SHugh Dickins */ 2859e2d12e22SHugh Dickins if (signal_pending(current)) 2860e2d12e22SHugh Dickins error = -EINTR; 28611aac1400SHugh Dickins else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 28621aac1400SHugh Dickins error = -ENOMEM; 2863e2d12e22SHugh Dickins else 28649e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, index, &page, SGP_FALLOC); 2865e2d12e22SHugh Dickins if (error) { 28661635f6a7SHugh Dickins /* Remove the !PageUptodate pages we added */ 28677f556567SHugh Dickins if (index > start) { 28681635f6a7SHugh Dickins shmem_undo_range(inode, 286909cbfeafSKirill A. Shutemov (loff_t)start << PAGE_SHIFT, 2870b9b4bb26SAnthony Romano ((loff_t)index << PAGE_SHIFT) - 1, true); 28717f556567SHugh Dickins } 28721aac1400SHugh Dickins goto undone; 2873e2d12e22SHugh Dickins } 2874e2d12e22SHugh Dickins 2875e2d12e22SHugh Dickins /* 28761aac1400SHugh Dickins * Inform shmem_writepage() how far we have reached. 28771aac1400SHugh Dickins * No need for lock or barrier: we have the page lock. 28781aac1400SHugh Dickins */ 28791aac1400SHugh Dickins shmem_falloc.next++; 28801aac1400SHugh Dickins if (!PageUptodate(page)) 28811aac1400SHugh Dickins shmem_falloc.nr_falloced++; 28821aac1400SHugh Dickins 28831aac1400SHugh Dickins /* 28841635f6a7SHugh Dickins * If !PageUptodate, leave it that way so that freeable pages 28851635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 28861635f6a7SHugh Dickins * But set_page_dirty so that memory pressure will swap rather 2887e2d12e22SHugh Dickins * than free the pages we are allocating (and SGP_CACHE pages 2888e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 2889e2d12e22SHugh Dickins */ 2890e2d12e22SHugh Dickins set_page_dirty(page); 2891e2d12e22SHugh Dickins unlock_page(page); 289209cbfeafSKirill A. 
Shutemov put_page(page); 2893e2d12e22SHugh Dickins cond_resched(); 2894e2d12e22SHugh Dickins } 2895e2d12e22SHugh Dickins 2896e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 2897e2d12e22SHugh Dickins i_size_write(inode, offset + len); 2898078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 28991aac1400SHugh Dickins undone: 29001aac1400SHugh Dickins spin_lock(&inode->i_lock); 29011aac1400SHugh Dickins inode->i_private = NULL; 29021aac1400SHugh Dickins spin_unlock(&inode->i_lock); 2903e2d12e22SHugh Dickins out: 29045955102cSAl Viro inode_unlock(inode); 290583e4fa9cSHugh Dickins return error; 290683e4fa9cSHugh Dickins } 290783e4fa9cSHugh Dickins 2908726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 29091da177e4SLinus Torvalds { 2910726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 29111da177e4SLinus Torvalds 29121da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 291309cbfeafSKirill A. Shutemov buf->f_bsize = PAGE_SIZE; 29141da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 29150edd73b3SHugh Dickins if (sbinfo->max_blocks) { 29161da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 291741ffe5d5SHugh Dickins buf->f_bavail = 291841ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 291941ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 29200edd73b3SHugh Dickins } 29210edd73b3SHugh Dickins if (sbinfo->max_inodes) { 29221da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 29231da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 29241da177e4SLinus Torvalds } 29251da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 29261da177e4SLinus Torvalds return 0; 29271da177e4SLinus Torvalds } 29281da177e4SLinus Torvalds 29291da177e4SLinus Torvalds /* 29301da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
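 * (shmem_get_inode() charges the inode against sbinfo->max_inodes via
 * shmem_reserve_inode(); shmem_mknod() below then adds security xattrs
 * and POSIX ACLs before instantiating the dentry.)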
29311da177e4SLinus Torvalds */ 29321da177e4SLinus Torvalds static int 29331a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 29341da177e4SLinus Torvalds { 29350b0a0806SHugh Dickins struct inode *inode; 29361da177e4SLinus Torvalds int error = -ENOSPC; 29371da177e4SLinus Torvalds 2938454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 29391da177e4SLinus Torvalds if (inode) { 2940feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2941feda821eSChristoph Hellwig if (error) 2942feda821eSChristoph Hellwig goto out_iput; 29432a7dba39SEric Paris error = security_inode_init_security(inode, dir, 29449d8f13baSMimi Zohar &dentry->d_name, 29456d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 2946feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2947feda821eSChristoph Hellwig goto out_iput; 294837ec43cdSMimi Zohar 2949718deb6bSAl Viro error = 0; 29501da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 2951078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 29521da177e4SLinus Torvalds d_instantiate(dentry, inode); 29531da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 29541da177e4SLinus Torvalds } 29551da177e4SLinus Torvalds return error; 2956feda821eSChristoph Hellwig out_iput: 2957feda821eSChristoph Hellwig iput(inode); 2958feda821eSChristoph Hellwig return error; 29591da177e4SLinus Torvalds } 29601da177e4SLinus Torvalds 296160545d0dSAl Viro static int 296260545d0dSAl Viro shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 296360545d0dSAl Viro { 296460545d0dSAl Viro struct inode *inode; 296560545d0dSAl Viro int error = -ENOSPC; 296660545d0dSAl Viro 296760545d0dSAl Viro inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 296860545d0dSAl Viro if (inode) { 296960545d0dSAl Viro error = security_inode_init_security(inode, dir, 297060545d0dSAl Viro NULL, 297160545d0dSAl Viro shmem_initxattrs, NULL); 2972feda821eSChristoph Hellwig if (error && error != -EOPNOTSUPP) 2973feda821eSChristoph Hellwig goto out_iput; 2974feda821eSChristoph Hellwig error = simple_acl_create(dir, inode); 2975feda821eSChristoph Hellwig if (error) 2976feda821eSChristoph Hellwig goto out_iput; 297760545d0dSAl Viro d_tmpfile(dentry, inode); 297860545d0dSAl Viro } 297960545d0dSAl Viro return error; 2980feda821eSChristoph Hellwig out_iput: 2981feda821eSChristoph Hellwig iput(inode); 2982feda821eSChristoph Hellwig return error; 298360545d0dSAl Viro } 298460545d0dSAl Viro 298518bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 29861da177e4SLinus Torvalds { 29871da177e4SLinus Torvalds int error; 29881da177e4SLinus Torvalds 29891da177e4SLinus Torvalds if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 29901da177e4SLinus Torvalds return error; 2991d8c76e6fSDave Hansen inc_nlink(dir); 29921da177e4SLinus Torvalds return 0; 29931da177e4SLinus Torvalds } 29941da177e4SLinus Torvalds 29954acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 2996ebfc3b49SAl Viro bool excl) 29971da177e4SLinus Torvalds { 29981da177e4SLinus Torvalds return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 29991da177e4SLinus Torvalds } 30001da177e4SLinus Torvalds 30011da177e4SLinus Torvalds /* 30021da177e4SLinus Torvalds * Link a file.. 
30031da177e4SLinus Torvalds */ 30041da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 30051da177e4SLinus Torvalds { 300675c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 30075b04c689SPavel Emelyanov int ret; 30081da177e4SLinus Torvalds 30091da177e4SLinus Torvalds /* 30101da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 30111da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 30121da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 30131da177e4SLinus Torvalds */ 30145b04c689SPavel Emelyanov ret = shmem_reserve_inode(inode->i_sb); 30155b04c689SPavel Emelyanov if (ret) 30165b04c689SPavel Emelyanov goto out; 30171da177e4SLinus Torvalds 30181da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3019078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3020d8c76e6fSDave Hansen inc_nlink(inode); 30217de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 30221da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 30231da177e4SLinus Torvalds d_instantiate(dentry, inode); 30245b04c689SPavel Emelyanov out: 30255b04c689SPavel Emelyanov return ret; 30261da177e4SLinus Torvalds } 30271da177e4SLinus Torvalds 30281da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 30291da177e4SLinus Torvalds { 303075c3cfa8SDavid Howells struct inode *inode = d_inode(dentry); 30311da177e4SLinus Torvalds 30325b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 30335b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 30341da177e4SLinus Torvalds 30351da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 3036078cd827SDeepa Dinamani inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 30379a53c3a7SDave Hansen drop_nlink(inode); 30381da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 30391da177e4SLinus Torvalds return 0; 30401da177e4SLinus Torvalds } 30411da177e4SLinus Torvalds 30421da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 30431da177e4SLinus Torvalds { 30441da177e4SLinus Torvalds if (!simple_empty(dentry)) 30451da177e4SLinus Torvalds return -ENOTEMPTY; 30461da177e4SLinus Torvalds 304775c3cfa8SDavid Howells drop_nlink(d_inode(dentry)); 30489a53c3a7SDave Hansen drop_nlink(dir); 30491da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 30501da177e4SLinus Torvalds } 30511da177e4SLinus Torvalds 305237456771SMiklos Szeredi static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 305337456771SMiklos Szeredi { 3054e36cb0b8SDavid Howells bool old_is_dir = d_is_dir(old_dentry); 3055e36cb0b8SDavid Howells bool new_is_dir = d_is_dir(new_dentry); 305637456771SMiklos Szeredi 305737456771SMiklos Szeredi if (old_dir != new_dir && old_is_dir != new_is_dir) { 305837456771SMiklos Szeredi if (old_is_dir) { 305937456771SMiklos Szeredi drop_nlink(old_dir); 306037456771SMiklos Szeredi inc_nlink(new_dir); 306137456771SMiklos Szeredi } else { 306237456771SMiklos Szeredi drop_nlink(new_dir); 306337456771SMiklos Szeredi inc_nlink(old_dir); 306437456771SMiklos Szeredi } 306537456771SMiklos Szeredi } 306637456771SMiklos Szeredi old_dir->i_ctime = old_dir->i_mtime = 306737456771SMiklos Szeredi new_dir->i_ctime = new_dir->i_mtime = 306875c3cfa8SDavid Howells 
d_inode(old_dentry)->i_ctime = 3069078cd827SDeepa Dinamani d_inode(new_dentry)->i_ctime = current_time(old_dir); 307037456771SMiklos Szeredi 307137456771SMiklos Szeredi return 0; 307237456771SMiklos Szeredi } 307337456771SMiklos Szeredi 307446fdb794SMiklos Szeredi static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) 307546fdb794SMiklos Szeredi { 307646fdb794SMiklos Szeredi struct dentry *whiteout; 307746fdb794SMiklos Szeredi int error; 307846fdb794SMiklos Szeredi 307946fdb794SMiklos Szeredi whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 308046fdb794SMiklos Szeredi if (!whiteout) 308146fdb794SMiklos Szeredi return -ENOMEM; 308246fdb794SMiklos Szeredi 308346fdb794SMiklos Szeredi error = shmem_mknod(old_dir, whiteout, 308446fdb794SMiklos Szeredi S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 308546fdb794SMiklos Szeredi dput(whiteout); 308646fdb794SMiklos Szeredi if (error) 308746fdb794SMiklos Szeredi return error; 308846fdb794SMiklos Szeredi 308946fdb794SMiklos Szeredi /* 309046fdb794SMiklos Szeredi * Cheat and hash the whiteout while the old dentry is still in 309146fdb794SMiklos Szeredi * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 309246fdb794SMiklos Szeredi * 309346fdb794SMiklos Szeredi * d_lookup() will consistently find one of them at this point, 309446fdb794SMiklos Szeredi * not sure which one, but that isn't even important. 309546fdb794SMiklos Szeredi */ 309646fdb794SMiklos Szeredi d_rehash(whiteout); 309746fdb794SMiklos Szeredi return 0; 309846fdb794SMiklos Szeredi } 309946fdb794SMiklos Szeredi 31001da177e4SLinus Torvalds /* 31011da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename; 31021da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 31031da177e4SLinus Torvalds * it exists so that the VFS layer correctly frees it when it 31041da177e4SLinus Torvalds * gets overwritten.
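 *
 * shmem_rename2() also accepts RENAME_NOREPLACE (checked by the VFS
 * before we get here), RENAME_EXCHANGE to swap the two entries, and
 * RENAME_WHITEOUT, which leaves a whiteout behind for union/overlay
 * filesystems.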
31051da177e4SLinus Torvalds */ 31063b69ff51SMiklos Szeredi static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) 31071da177e4SLinus Torvalds { 310875c3cfa8SDavid Howells struct inode *inode = d_inode(old_dentry); 31091da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 31101da177e4SLinus Torvalds 311146fdb794SMiklos Szeredi if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 31123b69ff51SMiklos Szeredi return -EINVAL; 31133b69ff51SMiklos Szeredi 311437456771SMiklos Szeredi if (flags & RENAME_EXCHANGE) 311537456771SMiklos Szeredi return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 311637456771SMiklos Szeredi 31171da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 31181da177e4SLinus Torvalds return -ENOTEMPTY; 31191da177e4SLinus Torvalds 312046fdb794SMiklos Szeredi if (flags & RENAME_WHITEOUT) { 312146fdb794SMiklos Szeredi int error; 312246fdb794SMiklos Szeredi 312346fdb794SMiklos Szeredi error = shmem_whiteout(old_dir, old_dentry); 312446fdb794SMiklos Szeredi if (error) 312546fdb794SMiklos Szeredi return error; 312646fdb794SMiklos Szeredi } 312746fdb794SMiklos Szeredi 312875c3cfa8SDavid Howells if (d_really_is_positive(new_dentry)) { 31291da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 3130b928095bSMiklos Szeredi if (they_are_dirs) { 313175c3cfa8SDavid Howells drop_nlink(d_inode(new_dentry)); 31329a53c3a7SDave Hansen drop_nlink(old_dir); 3133b928095bSMiklos Szeredi } 31341da177e4SLinus Torvalds } else if (they_are_dirs) { 31359a53c3a7SDave Hansen drop_nlink(old_dir); 3136d8c76e6fSDave Hansen inc_nlink(new_dir); 31371da177e4SLinus Torvalds } 31381da177e4SLinus Torvalds 31391da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 31401da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 31411da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 31421da177e4SLinus Torvalds new_dir->i_ctime = new_dir->i_mtime = 3143078cd827SDeepa Dinamani inode->i_ctime = current_time(old_dir); 31441da177e4SLinus Torvalds return 0; 31451da177e4SLinus Torvalds } 31461da177e4SLinus Torvalds 31471da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 31481da177e4SLinus Torvalds { 31491da177e4SLinus Torvalds int error; 31501da177e4SLinus Torvalds int len; 31511da177e4SLinus Torvalds struct inode *inode; 31529276aad6SHugh Dickins struct page *page; 31531da177e4SLinus Torvalds struct shmem_inode_info *info; 31541da177e4SLinus Torvalds 31551da177e4SLinus Torvalds len = strlen(symname) + 1; 315609cbfeafSKirill A. 
Shutemov if (len > PAGE_SIZE) 31571da177e4SLinus Torvalds return -ENAMETOOLONG; 31581da177e4SLinus Torvalds 3159454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 31601da177e4SLinus Torvalds if (!inode) 31611da177e4SLinus Torvalds return -ENOSPC; 31621da177e4SLinus Torvalds 31639d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 31646d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 3165570bc1c2SStephen Smalley if (error) { 3166570bc1c2SStephen Smalley if (error != -EOPNOTSUPP) { 3167570bc1c2SStephen Smalley iput(inode); 3168570bc1c2SStephen Smalley return error; 3169570bc1c2SStephen Smalley } 3170570bc1c2SStephen Smalley error = 0; 3171570bc1c2SStephen Smalley } 3172570bc1c2SStephen Smalley 31731da177e4SLinus Torvalds info = SHMEM_I(inode); 31741da177e4SLinus Torvalds inode->i_size = len-1; 317569f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 31763ed47db3SAl Viro inode->i_link = kmemdup(symname, len, GFP_KERNEL); 31773ed47db3SAl Viro if (!inode->i_link) { 317869f07ec9SHugh Dickins iput(inode); 317969f07ec9SHugh Dickins return -ENOMEM; 318069f07ec9SHugh Dickins } 318169f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 31821da177e4SLinus Torvalds } else { 3183e8ecde25SAl Viro inode_nohighmem(inode); 31849e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_WRITE); 31851da177e4SLinus Torvalds if (error) { 31861da177e4SLinus Torvalds iput(inode); 31871da177e4SLinus Torvalds return error; 31881da177e4SLinus Torvalds } 318914fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 31901da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 319121fc61c7SAl Viro memcpy(page_address(page), symname, len); 3192ec9516fbSHugh Dickins SetPageUptodate(page); 31931da177e4SLinus Torvalds set_page_dirty(page); 31946746aff7SWu Fengguang unlock_page(page); 319509cbfeafSKirill A. 
Shutemov put_page(page); 31961da177e4SLinus Torvalds } 31971da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 3198078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 31991da177e4SLinus Torvalds d_instantiate(dentry, inode); 32001da177e4SLinus Torvalds dget(dentry); 32011da177e4SLinus Torvalds return 0; 32021da177e4SLinus Torvalds } 32031da177e4SLinus Torvalds 3204fceef393SAl Viro static void shmem_put_link(void *arg) 3205fceef393SAl Viro { 3206fceef393SAl Viro mark_page_accessed(arg); 3207fceef393SAl Viro put_page(arg); 3208fceef393SAl Viro } 3209fceef393SAl Viro 32106b255391SAl Viro static const char *shmem_get_link(struct dentry *dentry, 3211fceef393SAl Viro struct inode *inode, 3212fceef393SAl Viro struct delayed_call *done) 32131da177e4SLinus Torvalds { 32141da177e4SLinus Torvalds struct page *page = NULL; 32156b255391SAl Viro int error; 32166a6c9904SAl Viro if (!dentry) { 32176a6c9904SAl Viro page = find_get_page(inode->i_mapping, 0); 32186a6c9904SAl Viro if (!page) 32196b255391SAl Viro return ERR_PTR(-ECHILD); 32206a6c9904SAl Viro if (!PageUptodate(page)) { 32216a6c9904SAl Viro put_page(page); 32226a6c9904SAl Viro return ERR_PTR(-ECHILD); 32236a6c9904SAl Viro } 32246a6c9904SAl Viro } else { 32259e18eb29SAndres Lagar-Cavilla error = shmem_getpage(inode, 0, &page, SGP_READ); 3226680baacbSAl Viro if (error) 3227680baacbSAl Viro return ERR_PTR(error); 3228d3602444SHugh Dickins unlock_page(page); 32291da177e4SLinus Torvalds } 3230fceef393SAl Viro set_delayed_call(done, shmem_put_link, page); 323121fc61c7SAl Viro return page_address(page); 32321da177e4SLinus Torvalds } 32331da177e4SLinus Torvalds 3234b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3235b09e0fa4SEric Paris /* 3236b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 3237b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 3238b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 3239b09e0fa4SEric Paris * filesystem level, though. 3240b09e0fa4SEric Paris */ 3241b09e0fa4SEric Paris 32426d9d88d0SJarkko Sakkinen /* 32436d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 
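 *
 * The LSM hands over an array of (name, value) pairs; each is copied
 * into an in-memory simple_xattr with the "security." prefix prepended
 * to its name, since tmpfs xattrs live in memory only.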
32446d9d88d0SJarkko Sakkinen */ 32456d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 32466d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 32476d9d88d0SJarkko Sakkinen void *fs_info) 32486d9d88d0SJarkko Sakkinen { 32496d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 32506d9d88d0SJarkko Sakkinen const struct xattr *xattr; 325138f38657SAristeu Rozanski struct simple_xattr *new_xattr; 32526d9d88d0SJarkko Sakkinen size_t len; 32536d9d88d0SJarkko Sakkinen 32546d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 325538f38657SAristeu Rozanski new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 32566d9d88d0SJarkko Sakkinen if (!new_xattr) 32576d9d88d0SJarkko Sakkinen return -ENOMEM; 32586d9d88d0SJarkko Sakkinen 32596d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 32606d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 32616d9d88d0SJarkko Sakkinen GFP_KERNEL); 32626d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 32636d9d88d0SJarkko Sakkinen kfree(new_xattr); 32646d9d88d0SJarkko Sakkinen return -ENOMEM; 32656d9d88d0SJarkko Sakkinen } 32666d9d88d0SJarkko Sakkinen 32676d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 32686d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 32696d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 32706d9d88d0SJarkko Sakkinen xattr->name, len); 32716d9d88d0SJarkko Sakkinen 327238f38657SAristeu Rozanski simple_xattr_list_add(&info->xattrs, new_xattr); 32736d9d88d0SJarkko Sakkinen } 32746d9d88d0SJarkko Sakkinen 32756d9d88d0SJarkko Sakkinen return 0; 32766d9d88d0SJarkko Sakkinen } 32776d9d88d0SJarkko Sakkinen 3278aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3279b296821aSAl Viro struct dentry *unused, struct inode *inode, 3280b296821aSAl Viro const char *name, void *buffer, size_t size) 3281aa7c5241SAndreas Gruenbacher { 3282b296821aSAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3283aa7c5241SAndreas Gruenbacher 3284aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3285aa7c5241SAndreas Gruenbacher return simple_xattr_get(&info->xattrs, name, buffer, size); 3286aa7c5241SAndreas Gruenbacher } 3287aa7c5241SAndreas Gruenbacher 3288aa7c5241SAndreas Gruenbacher static int shmem_xattr_handler_set(const struct xattr_handler *handler, 328959301226SAl Viro struct dentry *unused, struct inode *inode, 329059301226SAl Viro const char *name, const void *value, 329159301226SAl Viro size_t size, int flags) 3292aa7c5241SAndreas Gruenbacher { 329359301226SAl Viro struct shmem_inode_info *info = SHMEM_I(inode); 3294aa7c5241SAndreas Gruenbacher 3295aa7c5241SAndreas Gruenbacher name = xattr_full_name(handler, name); 3296aa7c5241SAndreas Gruenbacher return simple_xattr_set(&info->xattrs, name, value, size, flags); 3297aa7c5241SAndreas Gruenbacher } 3298aa7c5241SAndreas Gruenbacher 3299aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_security_xattr_handler = { 3300aa7c5241SAndreas Gruenbacher .prefix = XATTR_SECURITY_PREFIX, 3301aa7c5241SAndreas Gruenbacher .get = shmem_xattr_handler_get, 3302aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3303aa7c5241SAndreas Gruenbacher }; 3304aa7c5241SAndreas Gruenbacher 3305aa7c5241SAndreas Gruenbacher static const struct xattr_handler shmem_trusted_xattr_handler = { 3306aa7c5241SAndreas Gruenbacher .prefix = XATTR_TRUSTED_PREFIX, 3307aa7c5241SAndreas Gruenbacher 
.get = shmem_xattr_handler_get, 3308aa7c5241SAndreas Gruenbacher .set = shmem_xattr_handler_set, 3309aa7c5241SAndreas Gruenbacher }; 3310aa7c5241SAndreas Gruenbacher 3311b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = { 3312b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 3313feda821eSChristoph Hellwig &posix_acl_access_xattr_handler, 3314feda821eSChristoph Hellwig &posix_acl_default_xattr_handler, 3315b09e0fa4SEric Paris #endif 3316aa7c5241SAndreas Gruenbacher &shmem_security_xattr_handler, 3317aa7c5241SAndreas Gruenbacher &shmem_trusted_xattr_handler, 3318b09e0fa4SEric Paris NULL 3319b09e0fa4SEric Paris }; 3320b09e0fa4SEric Paris 3321b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 3322b09e0fa4SEric Paris { 332375c3cfa8SDavid Howells struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 3324786534b9SAndreas Gruenbacher return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 3325b09e0fa4SEric Paris } 3326b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 3327b09e0fa4SEric Paris 332869f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 33296b255391SAl Viro .get_link = simple_get_link, 3330b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3331b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3332b09e0fa4SEric Paris #endif 33331da177e4SLinus Torvalds }; 33341da177e4SLinus Torvalds 333592e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 33366b255391SAl Viro .get_link = shmem_get_link, 3337b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3338b09e0fa4SEric Paris .listxattr = shmem_listxattr, 333939f0247dSAndreas Gruenbacher #endif 3340b09e0fa4SEric Paris }; 334139f0247dSAndreas Gruenbacher 334291828a40SDavid M. Grimes static struct dentry *shmem_get_parent(struct dentry *child) 334391828a40SDavid M. Grimes { 334491828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 334591828a40SDavid M. Grimes } 334691828a40SDavid M. Grimes 334791828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 334891828a40SDavid M. Grimes { 334991828a40SDavid M. Grimes __u32 *fh = vfh; 335091828a40SDavid M. Grimes __u64 inum = fh[2]; 335191828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 335291828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 335391828a40SDavid M. Grimes } 335491828a40SDavid M. Grimes 3355480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 3356480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 335791828a40SDavid M. Grimes { 335891828a40SDavid M. Grimes struct inode *inode; 3359480b116cSChristoph Hellwig struct dentry *dentry = NULL; 336035c2a7f4SHugh Dickins u64 inum; 336191828a40SDavid M. Grimes 3362480b116cSChristoph Hellwig if (fh_len < 3) 3363480b116cSChristoph Hellwig return NULL; 3364480b116cSChristoph Hellwig 336535c2a7f4SHugh Dickins inum = fid->raw[2]; 336635c2a7f4SHugh Dickins inum = (inum << 32) | fid->raw[1]; 336735c2a7f4SHugh Dickins 3368480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 3369480b116cSChristoph Hellwig shmem_match, fid->raw); 337091828a40SDavid M. Grimes if (inode) { 3371480b116cSChristoph Hellwig dentry = d_find_alias(inode); 337291828a40SDavid M. Grimes iput(inode); 337391828a40SDavid M. Grimes } 337491828a40SDavid M. Grimes 3375480b116cSChristoph Hellwig return dentry; 337691828a40SDavid M. Grimes } 337791828a40SDavid M. 
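/*
 * Userspace sketch (not part of shmem.c; "/tmp/x" is an assumed file on
 * a tmpfs mount): shmem_encode_fh() below packs the handle as
 * fh[0] = i_generation, fh[1] = low and fh[2] = high 32 bits of i_ino,
 * which is exactly the layout shmem_match() above checks against.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct file_handle *fhp;
	uint32_t *fh;
	int mount_id;

	/* room for the three 32-bit words tmpfs emits */
	fhp = malloc(sizeof(*fhp) + 3 * sizeof(uint32_t));
	if (!fhp)
		return 1;
	fhp->handle_bytes = 3 * sizeof(uint32_t);

	if (name_to_handle_at(AT_FDCWD, "/tmp/x", fhp, &mount_id, 0)) {
		perror("name_to_handle_at");
		return 1;
	}

	fh = (uint32_t *)fhp->f_handle;
	printf("gen=%u ino=%llu\n", fh[0],
	       ((unsigned long long)fh[2] << 32) | fh[1]);
	return 0;
}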
Grimes 3378b0b0382bSAl Viro static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 3379b0b0382bSAl Viro struct inode *parent) 338091828a40SDavid M. Grimes { 33815fe0c237SAneesh Kumar K.V if (*len < 3) { 33825fe0c237SAneesh Kumar K.V *len = 3; 338394e07a75SNamjae Jeon return FILEID_INVALID; 33845fe0c237SAneesh Kumar K.V } 338591828a40SDavid M. Grimes 33861d3382cbSAl Viro if (inode_unhashed(inode)) { 338791828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 338891828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 338991828a40SDavid M. Grimes * time, we need a lock to ensure we only try 339091828a40SDavid M. Grimes * to do it once 339191828a40SDavid M. Grimes */ 339291828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 339391828a40SDavid M. Grimes spin_lock(&lock); 33941d3382cbSAl Viro if (inode_unhashed(inode)) 339591828a40SDavid M. Grimes __insert_inode_hash(inode, 339691828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 339791828a40SDavid M. Grimes spin_unlock(&lock); 339891828a40SDavid M. Grimes } 339991828a40SDavid M. Grimes 340091828a40SDavid M. Grimes fh[0] = inode->i_generation; 340191828a40SDavid M. Grimes fh[1] = inode->i_ino; 340291828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 340391828a40SDavid M. Grimes 340491828a40SDavid M. Grimes *len = 3; 340591828a40SDavid M. Grimes return 1; 340691828a40SDavid M. Grimes } 340791828a40SDavid M. Grimes 340839655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 340991828a40SDavid M. Grimes .get_parent = shmem_get_parent, 341091828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 3411480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 341291828a40SDavid M. Grimes }; 341391828a40SDavid M. Grimes 3414680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 3415680d794bSakpm@linux-foundation.org bool remount) 34161da177e4SLinus Torvalds { 34171da177e4SLinus Torvalds char *this_char, *value, *rest; 341849cd0a5cSGreg Thelen struct mempolicy *mpol = NULL; 34198751e039SEric W. Biederman uid_t uid; 34208751e039SEric W. Biederman gid_t gid; 34211da177e4SLinus Torvalds 3422b00dc3adSHugh Dickins while (options != NULL) { 3423b00dc3adSHugh Dickins this_char = options; 3424b00dc3adSHugh Dickins for (;;) { 3425b00dc3adSHugh Dickins /* 3426b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 3427b00dc3adSHugh Dickins * mount options form a comma-separated list, 3428b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 
3429b00dc3adSHugh Dickins */ 3430b00dc3adSHugh Dickins options = strchr(options, ','); 3431b00dc3adSHugh Dickins if (options == NULL) 3432b00dc3adSHugh Dickins break; 3433b00dc3adSHugh Dickins options++; 3434b00dc3adSHugh Dickins if (!isdigit(*options)) { 3435b00dc3adSHugh Dickins options[-1] = '\0'; 3436b00dc3adSHugh Dickins break; 3437b00dc3adSHugh Dickins } 3438b00dc3adSHugh Dickins } 34391da177e4SLinus Torvalds if (!*this_char) 34401da177e4SLinus Torvalds continue; 34411da177e4SLinus Torvalds if ((value = strchr(this_char,'=')) != NULL) { 34421da177e4SLinus Torvalds *value++ = 0; 34431da177e4SLinus Torvalds } else { 34441170532bSJoe Perches pr_err("tmpfs: No value for mount option '%s'\n", 34451da177e4SLinus Torvalds this_char); 344649cd0a5cSGreg Thelen goto error; 34471da177e4SLinus Torvalds } 34481da177e4SLinus Torvalds 34491da177e4SLinus Torvalds if (!strcmp(this_char,"size")) { 34501da177e4SLinus Torvalds unsigned long long size; 34511da177e4SLinus Torvalds size = memparse(value,&rest); 34521da177e4SLinus Torvalds if (*rest == '%') { 34531da177e4SLinus Torvalds size <<= PAGE_SHIFT; 34541da177e4SLinus Torvalds size *= totalram_pages; 34551da177e4SLinus Torvalds do_div(size, 100); 34561da177e4SLinus Torvalds rest++; 34571da177e4SLinus Torvalds } 34581da177e4SLinus Torvalds if (*rest) 34591da177e4SLinus Torvalds goto bad_val; 3460680d794bSakpm@linux-foundation.org sbinfo->max_blocks = 346109cbfeafSKirill A. Shutemov DIV_ROUND_UP(size, PAGE_SIZE); 34621da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_blocks")) { 3463680d794bSakpm@linux-foundation.org sbinfo->max_blocks = memparse(value, &rest); 34641da177e4SLinus Torvalds if (*rest) 34651da177e4SLinus Torvalds goto bad_val; 34661da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_inodes")) { 3467680d794bSakpm@linux-foundation.org sbinfo->max_inodes = memparse(value, &rest); 34681da177e4SLinus Torvalds if (*rest) 34691da177e4SLinus Torvalds goto bad_val; 34701da177e4SLinus Torvalds } else if (!strcmp(this_char,"mode")) { 3471680d794bSakpm@linux-foundation.org if (remount) 34721da177e4SLinus Torvalds continue; 3473680d794bSakpm@linux-foundation.org sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 34741da177e4SLinus Torvalds if (*rest) 34751da177e4SLinus Torvalds goto bad_val; 34761da177e4SLinus Torvalds } else if (!strcmp(this_char,"uid")) { 3477680d794bSakpm@linux-foundation.org if (remount) 34781da177e4SLinus Torvalds continue; 34798751e039SEric W. Biederman uid = simple_strtoul(value, &rest, 0); 34801da177e4SLinus Torvalds if (*rest) 34811da177e4SLinus Torvalds goto bad_val; 34828751e039SEric W. Biederman sbinfo->uid = make_kuid(current_user_ns(), uid); 34838751e039SEric W. Biederman if (!uid_valid(sbinfo->uid)) 34848751e039SEric W. Biederman goto bad_val; 34851da177e4SLinus Torvalds } else if (!strcmp(this_char,"gid")) { 3486680d794bSakpm@linux-foundation.org if (remount) 34871da177e4SLinus Torvalds continue; 34888751e039SEric W. Biederman gid = simple_strtoul(value, &rest, 0); 34891da177e4SLinus Torvalds if (*rest) 34901da177e4SLinus Torvalds goto bad_val; 34918751e039SEric W. Biederman sbinfo->gid = make_kgid(current_user_ns(), gid); 34928751e039SEric W. Biederman if (!gid_valid(sbinfo->gid)) 34938751e039SEric W. Biederman goto bad_val; 3494e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 34955a6e75f8SKirill A. Shutemov } else if (!strcmp(this_char, "huge")) { 34965a6e75f8SKirill A. Shutemov int huge; 34975a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(value); 34985a6e75f8SKirill A. 
Shutemov if (huge < 0) 34995a6e75f8SKirill A. Shutemov goto bad_val; 35005a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 35015a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER) 35025a6e75f8SKirill A. Shutemov goto bad_val; 35035a6e75f8SKirill A. Shutemov sbinfo->huge = huge; 35045a6e75f8SKirill A. Shutemov #endif 35055a6e75f8SKirill A. Shutemov #ifdef CONFIG_NUMA 35067339ff83SRobin Holt } else if (!strcmp(this_char,"mpol")) { 350749cd0a5cSGreg Thelen mpol_put(mpol); 350849cd0a5cSGreg Thelen mpol = NULL; 350949cd0a5cSGreg Thelen if (mpol_parse_str(value, &mpol)) 35107339ff83SRobin Holt goto bad_val; 35115a6e75f8SKirill A. Shutemov #endif 35121da177e4SLinus Torvalds } else { 35131170532bSJoe Perches pr_err("tmpfs: Bad mount option %s\n", this_char); 351449cd0a5cSGreg Thelen goto error; 35151da177e4SLinus Torvalds } 35161da177e4SLinus Torvalds } 351749cd0a5cSGreg Thelen sbinfo->mpol = mpol; 35181da177e4SLinus Torvalds return 0; 35191da177e4SLinus Torvalds 35201da177e4SLinus Torvalds bad_val: 35211170532bSJoe Perches pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", 35221da177e4SLinus Torvalds value, this_char); 352349cd0a5cSGreg Thelen error: 352449cd0a5cSGreg Thelen mpol_put(mpol); 35251da177e4SLinus Torvalds return 1; 35261da177e4SLinus Torvalds 35271da177e4SLinus Torvalds } 35281da177e4SLinus Torvalds 35291da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 35301da177e4SLinus Torvalds { 35311da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3532680d794bSakpm@linux-foundation.org struct shmem_sb_info config = *sbinfo; 35330edd73b3SHugh Dickins unsigned long inodes; 35340edd73b3SHugh Dickins int error = -EINVAL; 35351da177e4SLinus Torvalds 35365f00110fSGreg Thelen config.mpol = NULL; 3537680d794bSakpm@linux-foundation.org if (shmem_parse_options(data, &config, true)) 35380edd73b3SHugh Dickins return error; 35390edd73b3SHugh Dickins 35400edd73b3SHugh Dickins spin_lock(&sbinfo->stat_lock); 35410edd73b3SHugh Dickins inodes = sbinfo->max_inodes - sbinfo->free_inodes; 35427e496299STim Chen if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 35430edd73b3SHugh Dickins goto out; 3544680d794bSakpm@linux-foundation.org if (config.max_inodes < inodes) 35450edd73b3SHugh Dickins goto out; 35460edd73b3SHugh Dickins /* 354754af6042SHugh Dickins * Those tests disallow limited->unlimited while any are in use; 35480edd73b3SHugh Dickins * but we must separately disallow unlimited->limited, because 35490edd73b3SHugh Dickins * in that case we have no record of how much is already in use. 35500edd73b3SHugh Dickins */ 3551680d794bSakpm@linux-foundation.org if (config.max_blocks && !sbinfo->max_blocks) 35520edd73b3SHugh Dickins goto out; 3553680d794bSakpm@linux-foundation.org if (config.max_inodes && !sbinfo->max_inodes) 35540edd73b3SHugh Dickins goto out; 35550edd73b3SHugh Dickins 35560edd73b3SHugh Dickins error = 0; 35575a6e75f8SKirill A. Shutemov sbinfo->huge = config.huge; 3558680d794bSakpm@linux-foundation.org sbinfo->max_blocks = config.max_blocks; 3559680d794bSakpm@linux-foundation.org sbinfo->max_inodes = config.max_inodes; 3560680d794bSakpm@linux-foundation.org sbinfo->free_inodes = config.max_inodes - inodes; 356171fe804bSLee Schermerhorn 35625f00110fSGreg Thelen /* 35635f00110fSGreg Thelen * Preserve previous mempolicy unless mpol remount option was specified. 
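/*
 * Userspace sketch (not part of shmem.c): exercising the option parser
 * above via mount(2).  "/mnt/t" is an assumed, pre-existing mountpoint
 * and CAP_SYS_ADMIN is required; "size=50%" takes the totalram_pages
 * branch, and "huge=" would only be accepted on kernels built with
 * CONFIG_TRANSPARENT_HUGE_PAGECACHE.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("tmpfs", "/mnt/t", "tmpfs", 0,
		  "size=50%,nr_inodes=8192,mode=700,uid=1000,gid=1000")) {
		perror("mount");
		return 1;
	}
	/*
	 * Remount goes through shmem_remount_fs(): resizing is fine, but
	 * switching between limited and unlimited is refused.
	 */
	if (mount("tmpfs", "/mnt/t", "tmpfs", MS_REMOUNT, "size=60%"))
		perror("remount");
	return 0;
}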
35645f00110fSGreg Thelen */ 35655f00110fSGreg Thelen if (config.mpol) { 356671fe804bSLee Schermerhorn mpol_put(sbinfo->mpol); 356771fe804bSLee Schermerhorn sbinfo->mpol = config.mpol; /* transfers initial ref */ 35685f00110fSGreg Thelen } 35690edd73b3SHugh Dickins out: 35700edd73b3SHugh Dickins spin_unlock(&sbinfo->stat_lock); 35710edd73b3SHugh Dickins return error; 35721da177e4SLinus Torvalds } 3573680d794bSakpm@linux-foundation.org 357434c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root) 3575680d794bSakpm@linux-foundation.org { 357634c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 3577680d794bSakpm@linux-foundation.org 3578680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks()) 3579680d794bSakpm@linux-foundation.org seq_printf(seq, ",size=%luk", 358009cbfeafSKirill A. Shutemov sbinfo->max_blocks << (PAGE_SHIFT - 10)); 3581680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes()) 3582680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 3583680d794bSakpm@linux-foundation.org if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 358409208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode); 35858751e039SEric W. Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 35868751e039SEric W. Biederman seq_printf(seq, ",uid=%u", 35878751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid)); 35888751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 35898751e039SEric W. Biederman seq_printf(seq, ",gid=%u", 35908751e039SEric W. Biederman from_kgid_munged(&init_user_ns, sbinfo->gid)); 3591e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 35925a6e75f8SKirill A. Shutemov /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 35935a6e75f8SKirill A. Shutemov if (sbinfo->huge) 35945a6e75f8SKirill A. Shutemov seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 35955a6e75f8SKirill A. 
Shutemov #endif 359671fe804bSLee Schermerhorn shmem_show_mpol(seq, sbinfo->mpol); 3597680d794bSakpm@linux-foundation.org return 0; 3598680d794bSakpm@linux-foundation.org } 35999183df25SDavid Herrmann 36009183df25SDavid Herrmann #define MFD_NAME_PREFIX "memfd:" 36019183df25SDavid Herrmann #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1) 36029183df25SDavid Herrmann #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN) 36039183df25SDavid Herrmann 36049183df25SDavid Herrmann #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING) 36059183df25SDavid Herrmann 36069183df25SDavid Herrmann SYSCALL_DEFINE2(memfd_create, 36079183df25SDavid Herrmann const char __user *, uname, 36089183df25SDavid Herrmann unsigned int, flags) 36099183df25SDavid Herrmann { 36109183df25SDavid Herrmann struct shmem_inode_info *info; 36119183df25SDavid Herrmann struct file *file; 36129183df25SDavid Herrmann int fd, error; 36139183df25SDavid Herrmann char *name; 36149183df25SDavid Herrmann long len; 36159183df25SDavid Herrmann 36169183df25SDavid Herrmann if (flags & ~(unsigned int)MFD_ALL_FLAGS) 36179183df25SDavid Herrmann return -EINVAL; 36189183df25SDavid Herrmann 36199183df25SDavid Herrmann /* length includes terminating zero */ 36209183df25SDavid Herrmann len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1); 36219183df25SDavid Herrmann if (len <= 0) 36229183df25SDavid Herrmann return -EFAULT; 36239183df25SDavid Herrmann if (len > MFD_NAME_MAX_LEN + 1) 36249183df25SDavid Herrmann return -EINVAL; 36259183df25SDavid Herrmann 36269183df25SDavid Herrmann name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY); 36279183df25SDavid Herrmann if (!name) 36289183df25SDavid Herrmann return -ENOMEM; 36299183df25SDavid Herrmann 36309183df25SDavid Herrmann strcpy(name, MFD_NAME_PREFIX); 36319183df25SDavid Herrmann if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) { 36329183df25SDavid Herrmann error = -EFAULT; 36339183df25SDavid Herrmann goto err_name; 36349183df25SDavid Herrmann } 36359183df25SDavid Herrmann 36369183df25SDavid Herrmann /* terminating-zero may have changed after strnlen_user() returned */ 36379183df25SDavid Herrmann if (name[len + MFD_NAME_PREFIX_LEN - 1]) { 36389183df25SDavid Herrmann error = -EFAULT; 36399183df25SDavid Herrmann goto err_name; 36409183df25SDavid Herrmann } 36419183df25SDavid Herrmann 36429183df25SDavid Herrmann fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? 
O_CLOEXEC : 0); 36439183df25SDavid Herrmann if (fd < 0) { 36449183df25SDavid Herrmann error = fd; 36459183df25SDavid Herrmann goto err_name; 36469183df25SDavid Herrmann } 36479183df25SDavid Herrmann 36489183df25SDavid Herrmann file = shmem_file_setup(name, 0, VM_NORESERVE); 36499183df25SDavid Herrmann if (IS_ERR(file)) { 36509183df25SDavid Herrmann error = PTR_ERR(file); 36519183df25SDavid Herrmann goto err_fd; 36529183df25SDavid Herrmann } 36539183df25SDavid Herrmann info = SHMEM_I(file_inode(file)); 36549183df25SDavid Herrmann file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; 36559183df25SDavid Herrmann file->f_flags |= O_RDWR | O_LARGEFILE; 36569183df25SDavid Herrmann if (flags & MFD_ALLOW_SEALING) 36579183df25SDavid Herrmann info->seals &= ~F_SEAL_SEAL; 36589183df25SDavid Herrmann 36599183df25SDavid Herrmann fd_install(fd, file); 36609183df25SDavid Herrmann kfree(name); 36619183df25SDavid Herrmann return fd; 36629183df25SDavid Herrmann 36639183df25SDavid Herrmann err_fd: 36649183df25SDavid Herrmann put_unused_fd(fd); 36659183df25SDavid Herrmann err_name: 36669183df25SDavid Herrmann kfree(name); 36679183df25SDavid Herrmann return error; 36689183df25SDavid Herrmann } 36699183df25SDavid Herrmann 3670680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */ 36711da177e4SLinus Torvalds 36721da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb) 36731da177e4SLinus Torvalds { 3674602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 3675602586a8SHugh Dickins 3676602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks); 367749cd0a5cSGreg Thelen mpol_put(sbinfo->mpol); 3678602586a8SHugh Dickins kfree(sbinfo); 36791da177e4SLinus Torvalds sb->s_fs_info = NULL; 36801da177e4SLinus Torvalds } 36811da177e4SLinus Torvalds 36822b2af54aSKay Sievers int shmem_fill_super(struct super_block *sb, void *data, int silent) 36831da177e4SLinus Torvalds { 36841da177e4SLinus Torvalds struct inode *inode; 36850edd73b3SHugh Dickins struct shmem_sb_info *sbinfo; 3686680d794bSakpm@linux-foundation.org int err = -ENOMEM; 3687680d794bSakpm@linux-foundation.org 3688680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */ 3689425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 3690680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL); 3691680d794bSakpm@linux-foundation.org if (!sbinfo) 3692680d794bSakpm@linux-foundation.org return -ENOMEM; 3693680d794bSakpm@linux-foundation.org 3694680d794bSakpm@linux-foundation.org sbinfo->mode = S_IRWXUGO | S_ISVTX; 369576aac0e9SDavid Howells sbinfo->uid = current_fsuid(); 369676aac0e9SDavid Howells sbinfo->gid = current_fsgid(); 3697680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo; 36981da177e4SLinus Torvalds 36990edd73b3SHugh Dickins #ifdef CONFIG_TMPFS 37001da177e4SLinus Torvalds /* 37011da177e4SLinus Torvalds * Per default we only allow half of the physical ram per 37021da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem; 37031da177e4SLinus Torvalds * but the internal instance is left unlimited. 
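/*
 * Userspace sketch (not part of shmem.c): the intended use of the
 * memfd_create() syscall defined above.  Assumes a libc that wraps the
 * syscall (otherwise go through syscall(2)); the descriptor behaves
 * like an unlinked tmpfs file and appears as "memfd:example" in
 * /proc/<pid>/maps.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("memfd");
		return 1;
	}
	/* MFD_ALLOW_SEALING cleared F_SEAL_SEAL, so seals may be added */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0) {
		perror("F_ADD_SEALS");
		return 1;
	}
	return 0;
}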
37041da177e4SLinus Torvalds */ 3705ca4e0519SAl Viro if (!(sb->s_flags & MS_KERNMOUNT)) { 3706680d794bSakpm@linux-foundation.org sbinfo->max_blocks = shmem_default_max_blocks(); 3707680d794bSakpm@linux-foundation.org sbinfo->max_inodes = shmem_default_max_inodes(); 3708680d794bSakpm@linux-foundation.org if (shmem_parse_options(data, sbinfo, false)) { 3709680d794bSakpm@linux-foundation.org err = -EINVAL; 3710680d794bSakpm@linux-foundation.org goto failed; 3711680d794bSakpm@linux-foundation.org } 3712ca4e0519SAl Viro } else { 3713ca4e0519SAl Viro sb->s_flags |= MS_NOUSER; 37141da177e4SLinus Torvalds } 371591828a40SDavid M. Grimes sb->s_export_op = &shmem_export_ops; 37162f6e38f3SHugh Dickins sb->s_flags |= MS_NOSEC; 37170edd73b3SHugh Dickins #else 37180edd73b3SHugh Dickins sb->s_flags |= MS_NOUSER; 37190edd73b3SHugh Dickins #endif 37201da177e4SLinus Torvalds 37211da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 3722908c7f19STejun Heo if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 3723602586a8SHugh Dickins goto failed; 3724680d794bSakpm@linux-foundation.org sbinfo->free_inodes = sbinfo->max_inodes; 3725779750d2SKirill A. Shutemov spin_lock_init(&sbinfo->shrinklist_lock); 3726779750d2SKirill A. Shutemov INIT_LIST_HEAD(&sbinfo->shrinklist); 37271da177e4SLinus Torvalds 3728285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE; 372909cbfeafSKirill A. Shutemov sb->s_blocksize = PAGE_SIZE; 373009cbfeafSKirill A. Shutemov sb->s_blocksize_bits = PAGE_SHIFT; 37311da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC; 37321da177e4SLinus Torvalds sb->s_op = &shmem_ops; 3733cfd95a9cSRobin H. Johnson sb->s_time_gran = 1; 3734b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 373539f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers; 3736b09e0fa4SEric Paris #endif 3737b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 373839f0247dSAndreas Gruenbacher sb->s_flags |= MS_POSIXACL; 373939f0247dSAndreas Gruenbacher #endif 37400edd73b3SHugh Dickins 3741454abafeSDmitry Monakhov inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 37421da177e4SLinus Torvalds if (!inode) 37431da177e4SLinus Torvalds goto failed; 3744680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid; 3745680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid; 3746318ceed0SAl Viro sb->s_root = d_make_root(inode); 3747318ceed0SAl Viro if (!sb->s_root) 374848fde701SAl Viro goto failed; 37491da177e4SLinus Torvalds return 0; 37501da177e4SLinus Torvalds 37511da177e4SLinus Torvalds failed: 37521da177e4SLinus Torvalds shmem_put_super(sb); 37531da177e4SLinus Torvalds return err; 37541da177e4SLinus Torvalds } 37551da177e4SLinus Torvalds 3756fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep; 37571da177e4SLinus Torvalds 37581da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb) 37591da177e4SLinus Torvalds { 376041ffe5d5SHugh Dickins struct shmem_inode_info *info; 376141ffe5d5SHugh Dickins info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 376241ffe5d5SHugh Dickins if (!info) 37631da177e4SLinus Torvalds return NULL; 376441ffe5d5SHugh Dickins return &info->vfs_inode; 37651da177e4SLinus Torvalds } 37661da177e4SLinus Torvalds 376741ffe5d5SHugh Dickins static void shmem_destroy_callback(struct rcu_head *head) 3768fa0d7e3dSNick Piggin { 3769fa0d7e3dSNick Piggin struct inode *inode = container_of(head, struct inode, i_rcu); 377084e710daSAl Viro if (S_ISLNK(inode->i_mode)) 37713ed47db3SAl Viro kfree(inode->i_link); 3772fa0d7e3dSNick Piggin 
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 3773fa0d7e3dSNick Piggin } 3774fa0d7e3dSNick Piggin 37751da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode) 37761da177e4SLinus Torvalds { 377709208d15SAl Viro if (S_ISREG(inode->i_mode)) 37781da177e4SLinus Torvalds mpol_free_shared_policy(&SHMEM_I(inode)->policy); 377941ffe5d5SHugh Dickins call_rcu(&inode->i_rcu, shmem_destroy_callback); 37801da177e4SLinus Torvalds } 37811da177e4SLinus Torvalds 378241ffe5d5SHugh Dickins static void shmem_init_inode(void *foo) 37831da177e4SLinus Torvalds { 378441ffe5d5SHugh Dickins struct shmem_inode_info *info = foo; 378541ffe5d5SHugh Dickins inode_init_once(&info->vfs_inode); 37861da177e4SLinus Torvalds } 37871da177e4SLinus Torvalds 378841ffe5d5SHugh Dickins static int shmem_init_inodecache(void) 37891da177e4SLinus Torvalds { 37901da177e4SLinus Torvalds shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 37911da177e4SLinus Torvalds sizeof(struct shmem_inode_info), 37925d097056SVladimir Davydov 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 37931da177e4SLinus Torvalds return 0; 37941da177e4SLinus Torvalds } 37951da177e4SLinus Torvalds 379641ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void) 37971da177e4SLinus Torvalds { 37981a1d92c1SAlexey Dobriyan kmem_cache_destroy(shmem_inode_cachep); 37991da177e4SLinus Torvalds } 38001da177e4SLinus Torvalds 3801f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = { 38021da177e4SLinus Torvalds .writepage = shmem_writepage, 380376719325SKen Chen .set_page_dirty = __set_page_dirty_no_writeback, 38041da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3805800d15a5SNick Piggin .write_begin = shmem_write_begin, 3806800d15a5SNick Piggin .write_end = shmem_write_end, 38071da177e4SLinus Torvalds #endif 38081c93923cSAndrew Morton #ifdef CONFIG_MIGRATION 3809304dbdb7SLee Schermerhorn .migratepage = migrate_page, 38101c93923cSAndrew Morton #endif 3811aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 38121da177e4SLinus Torvalds }; 38131da177e4SLinus Torvalds 381415ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = { 38151da177e4SLinus Torvalds .mmap = shmem_mmap, 3816c01d5b30SHugh Dickins .get_unmapped_area = shmem_get_unmapped_area, 38171da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 3818220f2ac9SHugh Dickins .llseek = shmem_file_llseek, 38192ba5bbedSAl Viro .read_iter = shmem_file_read_iter, 38208174202bSAl Viro .write_iter = generic_file_write_iter, 38211b061d92SChristoph Hellwig .fsync = noop_fsync, 382282c156f8SAl Viro .splice_read = generic_file_splice_read, 3823f6cb85d0SAl Viro .splice_write = iter_file_splice_write, 382483e4fa9cSHugh Dickins .fallocate = shmem_fallocate, 38251da177e4SLinus Torvalds #endif 38261da177e4SLinus Torvalds }; 38271da177e4SLinus Torvalds 382892e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = { 382944a30220SYu Zhao .getattr = shmem_getattr, 383094c1e62dSHugh Dickins .setattr = shmem_setattr, 3831b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3832b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3833feda821eSChristoph Hellwig .set_acl = simple_set_acl, 3834b09e0fa4SEric Paris #endif 38351da177e4SLinus Torvalds }; 38361da177e4SLinus Torvalds 383792e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = { 38381da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 38391da177e4SLinus Torvalds .create = shmem_create, 38401da177e4SLinus Torvalds .lookup = simple_lookup, 
38411da177e4SLinus Torvalds .link = shmem_link, 38421da177e4SLinus Torvalds .unlink = shmem_unlink, 38431da177e4SLinus Torvalds .symlink = shmem_symlink, 38441da177e4SLinus Torvalds .mkdir = shmem_mkdir, 38451da177e4SLinus Torvalds .rmdir = shmem_rmdir, 38461da177e4SLinus Torvalds .mknod = shmem_mknod, 38472773bf00SMiklos Szeredi .rename = shmem_rename2, 384860545d0dSAl Viro .tmpfile = shmem_tmpfile, 38491da177e4SLinus Torvalds #endif 3850b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3851b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3852b09e0fa4SEric Paris #endif 385339f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 385494c1e62dSHugh Dickins .setattr = shmem_setattr, 3855feda821eSChristoph Hellwig .set_acl = simple_set_acl, 385639f0247dSAndreas Gruenbacher #endif 385739f0247dSAndreas Gruenbacher }; 385839f0247dSAndreas Gruenbacher 385992e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = { 3860b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 3861b09e0fa4SEric Paris .listxattr = shmem_listxattr, 3862b09e0fa4SEric Paris #endif 386339f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 386494c1e62dSHugh Dickins .setattr = shmem_setattr, 3865feda821eSChristoph Hellwig .set_acl = simple_set_acl, 386639f0247dSAndreas Gruenbacher #endif 38671da177e4SLinus Torvalds }; 38681da177e4SLinus Torvalds 3869759b9775SHugh Dickins static const struct super_operations shmem_ops = { 38701da177e4SLinus Torvalds .alloc_inode = shmem_alloc_inode, 38711da177e4SLinus Torvalds .destroy_inode = shmem_destroy_inode, 38721da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 38731da177e4SLinus Torvalds .statfs = shmem_statfs, 38741da177e4SLinus Torvalds .remount_fs = shmem_remount_fs, 3875680d794bSakpm@linux-foundation.org .show_options = shmem_show_options, 38761da177e4SLinus Torvalds #endif 38771f895f75SAl Viro .evict_inode = shmem_evict_inode, 38781da177e4SLinus Torvalds .drop_inode = generic_delete_inode, 38791da177e4SLinus Torvalds .put_super = shmem_put_super, 3880779750d2SKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3881779750d2SKirill A. Shutemov .nr_cached_objects = shmem_unused_huge_count, 3882779750d2SKirill A. Shutemov .free_cached_objects = shmem_unused_huge_scan, 3883779750d2SKirill A. Shutemov #endif 38841da177e4SLinus Torvalds }; 38851da177e4SLinus Torvalds 3886f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = { 388754cb8821SNick Piggin .fault = shmem_fault, 3888d7c17551SNing Qu .map_pages = filemap_map_pages, 38891da177e4SLinus Torvalds #ifdef CONFIG_NUMA 38901da177e4SLinus Torvalds .set_policy = shmem_set_policy, 38911da177e4SLinus Torvalds .get_policy = shmem_get_policy, 38921da177e4SLinus Torvalds #endif 38931da177e4SLinus Torvalds }; 38941da177e4SLinus Torvalds 38953c26ff6eSAl Viro static struct dentry *shmem_mount(struct file_system_type *fs_type, 38963c26ff6eSAl Viro int flags, const char *dev_name, void *data) 38971da177e4SLinus Torvalds { 38983c26ff6eSAl Viro return mount_nodev(fs_type, flags, data, shmem_fill_super); 38991da177e4SLinus Torvalds } 39001da177e4SLinus Torvalds 390141ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 39021da177e4SLinus Torvalds .owner = THIS_MODULE, 39031da177e4SLinus Torvalds .name = "tmpfs", 39043c26ff6eSAl Viro .mount = shmem_mount, 39051da177e4SLinus Torvalds .kill_sb = kill_litter_super, 39062b8576cbSEric W. 
Biederman .fs_flags = FS_USERNS_MOUNT, 39071da177e4SLinus Torvalds }; 39081da177e4SLinus Torvalds 390941ffe5d5SHugh Dickins int __init shmem_init(void) 39101da177e4SLinus Torvalds { 39111da177e4SLinus Torvalds int error; 39121da177e4SLinus Torvalds 391316203a7aSRob Landley /* If rootfs called this, don't re-init */ 391416203a7aSRob Landley if (shmem_inode_cachep) 391516203a7aSRob Landley return 0; 391616203a7aSRob Landley 391741ffe5d5SHugh Dickins error = shmem_init_inodecache(); 39181da177e4SLinus Torvalds if (error) 39191da177e4SLinus Torvalds goto out3; 39201da177e4SLinus Torvalds 392141ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type); 39221da177e4SLinus Torvalds if (error) { 39231170532bSJoe Perches pr_err("Could not register tmpfs\n"); 39241da177e4SLinus Torvalds goto out2; 39251da177e4SLinus Torvalds } 392695dc112aSGreg Kroah-Hartman 3927ca4e0519SAl Viro shm_mnt = kern_mount(&shmem_fs_type); 39281da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) { 39291da177e4SLinus Torvalds error = PTR_ERR(shm_mnt); 39301170532bSJoe Perches pr_err("Could not kern_mount tmpfs\n"); 39311da177e4SLinus Torvalds goto out1; 39321da177e4SLinus Torvalds } 39335a6e75f8SKirill A. Shutemov 3934e496cf3dSKirill A. Shutemov #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 39355a6e75f8SKirill A. Shutemov if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) 39365a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 39375a6e75f8SKirill A. Shutemov else 39385a6e75f8SKirill A. Shutemov shmem_huge = 0; /* just in case it was patched */ 39395a6e75f8SKirill A. Shutemov #endif 39401da177e4SLinus Torvalds return 0; 39411da177e4SLinus Torvalds 39421da177e4SLinus Torvalds out1: 394341ffe5d5SHugh Dickins unregister_filesystem(&shmem_fs_type); 39441da177e4SLinus Torvalds out2: 394541ffe5d5SHugh Dickins shmem_destroy_inodecache(); 39461da177e4SLinus Torvalds out3: 39471da177e4SLinus Torvalds shm_mnt = ERR_PTR(error); 39481da177e4SLinus Torvalds return error; 39491da177e4SLinus Torvalds } 3950853ac43aSMatt Mackall 3951e496cf3dSKirill A. Shutemov #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) 39525a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_show(struct kobject *kobj, 39535a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, char *buf) 39545a6e75f8SKirill A. Shutemov { 39555a6e75f8SKirill A. Shutemov int values[] = { 39565a6e75f8SKirill A. Shutemov SHMEM_HUGE_ALWAYS, 39575a6e75f8SKirill A. Shutemov SHMEM_HUGE_WITHIN_SIZE, 39585a6e75f8SKirill A. Shutemov SHMEM_HUGE_ADVISE, 39595a6e75f8SKirill A. Shutemov SHMEM_HUGE_NEVER, 39605a6e75f8SKirill A. Shutemov SHMEM_HUGE_DENY, 39615a6e75f8SKirill A. Shutemov SHMEM_HUGE_FORCE, 39625a6e75f8SKirill A. Shutemov }; 39635a6e75f8SKirill A. Shutemov int i, count; 39645a6e75f8SKirill A. Shutemov 39655a6e75f8SKirill A. Shutemov for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) { 39665a6e75f8SKirill A. Shutemov const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s "; 39675a6e75f8SKirill A. Shutemov 39685a6e75f8SKirill A. Shutemov count += sprintf(buf + count, fmt, 39695a6e75f8SKirill A. Shutemov shmem_format_huge(values[i])); 39705a6e75f8SKirill A. Shutemov } 39715a6e75f8SKirill A. Shutemov buf[count - 1] = '\n'; 39725a6e75f8SKirill A. Shutemov return count; 39735a6e75f8SKirill A. Shutemov } 39745a6e75f8SKirill A. Shutemov 39755a6e75f8SKirill A. Shutemov static ssize_t shmem_enabled_store(struct kobject *kobj, 39765a6e75f8SKirill A. Shutemov struct kobj_attribute *attr, const char *buf, size_t count) 39775a6e75f8SKirill A. 
Shutemov { 39785a6e75f8SKirill A. Shutemov char tmp[16]; 39795a6e75f8SKirill A. Shutemov int huge; 39805a6e75f8SKirill A. Shutemov 39815a6e75f8SKirill A. Shutemov if (count + 1 > sizeof(tmp)) 39825a6e75f8SKirill A. Shutemov return -EINVAL; 39835a6e75f8SKirill A. Shutemov memcpy(tmp, buf, count); 39845a6e75f8SKirill A. Shutemov tmp[count] = '\0'; 39855a6e75f8SKirill A. Shutemov if (count && tmp[count - 1] == '\n') 39865a6e75f8SKirill A. Shutemov tmp[count - 1] = '\0'; 39875a6e75f8SKirill A. Shutemov 39885a6e75f8SKirill A. Shutemov huge = shmem_parse_huge(tmp); 39895a6e75f8SKirill A. Shutemov if (huge == -EINVAL) 39905a6e75f8SKirill A. Shutemov return -EINVAL; 39915a6e75f8SKirill A. Shutemov if (!has_transparent_hugepage() && 39925a6e75f8SKirill A. Shutemov huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 39935a6e75f8SKirill A. Shutemov return -EINVAL; 39945a6e75f8SKirill A. Shutemov 39955a6e75f8SKirill A. Shutemov shmem_huge = huge; 39965a6e75f8SKirill A. Shutemov if (shmem_huge < SHMEM_HUGE_DENY) 39975a6e75f8SKirill A. Shutemov SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 39985a6e75f8SKirill A. Shutemov return count; 39995a6e75f8SKirill A. Shutemov } 40005a6e75f8SKirill A. Shutemov 40015a6e75f8SKirill A. Shutemov struct kobj_attribute shmem_enabled_attr = 40025a6e75f8SKirill A. Shutemov __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 40033b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 4004f3f0e1d2SKirill A. Shutemov 40053b33719cSArnd Bergmann #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 4006f3f0e1d2SKirill A. Shutemov bool shmem_huge_enabled(struct vm_area_struct *vma) 4007f3f0e1d2SKirill A. Shutemov { 4008f3f0e1d2SKirill A. Shutemov struct inode *inode = file_inode(vma->vm_file); 4009f3f0e1d2SKirill A. Shutemov struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 4010f3f0e1d2SKirill A. Shutemov loff_t i_size; 4011f3f0e1d2SKirill A. Shutemov pgoff_t off; 4012f3f0e1d2SKirill A. Shutemov 4013f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_FORCE) 4014f3f0e1d2SKirill A. Shutemov return true; 4015f3f0e1d2SKirill A. Shutemov if (shmem_huge == SHMEM_HUGE_DENY) 4016f3f0e1d2SKirill A. Shutemov return false; 4017f3f0e1d2SKirill A. Shutemov switch (sbinfo->huge) { 4018f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_NEVER: 4019f3f0e1d2SKirill A. Shutemov return false; 4020f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ALWAYS: 4021f3f0e1d2SKirill A. Shutemov return true; 4022f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_WITHIN_SIZE: 4023f3f0e1d2SKirill A. Shutemov off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); 4024f3f0e1d2SKirill A. Shutemov i_size = round_up(i_size_read(inode), PAGE_SIZE); 4025f3f0e1d2SKirill A. Shutemov if (i_size >= HPAGE_PMD_SIZE && 4026f3f0e1d2SKirill A. Shutemov i_size >> PAGE_SHIFT >= off) 4027f3f0e1d2SKirill A. Shutemov return true; 4028f3f0e1d2SKirill A. Shutemov case SHMEM_HUGE_ADVISE: 4029f3f0e1d2SKirill A. Shutemov /* TODO: implement fadvise() hints */ 4030f3f0e1d2SKirill A. Shutemov return (vma->vm_flags & VM_HUGEPAGE); 4031f3f0e1d2SKirill A. Shutemov default: 4032f3f0e1d2SKirill A. Shutemov VM_BUG_ON(1); 4033f3f0e1d2SKirill A. Shutemov return false; 4034f3f0e1d2SKirill A. Shutemov } 4035f3f0e1d2SKirill A. Shutemov } 40363b33719cSArnd Bergmann #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 40375a6e75f8SKirill A. 
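/*
 * Userspace sketch (not part of shmem.c): poking the sysfs knob served
 * by shmem_enabled_show()/shmem_enabled_store() above.  The path is
 * where the THP code registers shmem_enabled_attr; "force" and "deny"
 * are the two values that override per-mount "huge=" settings, as
 * shmem_huge_enabled() shows.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static const char path[] =
	"/sys/kernel/mm/transparent_hugepage/shmem_enabled";

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* current value shown [bracketed] */
	}
	lseek(fd, 0, SEEK_SET);
	/* the store hook strips the trailing '\n', then shmem_parse_huge() */
	if (write(fd, "within_size\n", 12) != 12)
		perror("write");
	close(fd);
	return 0;
}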
Shutemov
4038853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */
4039853ac43aSMatt Mackall 
4040853ac43aSMatt Mackall /*
4041853ac43aSMatt Mackall  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4042853ac43aSMatt Mackall  *
4043853ac43aSMatt Mackall  * This is intended for small systems where the benefits of the full
4044853ac43aSMatt Mackall  * shmem code (swap-backed and resource-limited) are outweighed by
4045853ac43aSMatt Mackall  * their complexity. On systems without swap this code should be
4046853ac43aSMatt Mackall  * effectively equivalent, but much lighter weight.
4047853ac43aSMatt Mackall  */
4048853ac43aSMatt Mackall 
404941ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = {
4050853ac43aSMatt Mackall 	.name		= "tmpfs",
40513c26ff6eSAl Viro 	.mount		= ramfs_mount,
4052853ac43aSMatt Mackall 	.kill_sb	= kill_litter_super,
40532b8576cbSEric W. Biederman 	.fs_flags	= FS_USERNS_MOUNT,
4054853ac43aSMatt Mackall };
4055853ac43aSMatt Mackall 
405641ffe5d5SHugh Dickins int __init shmem_init(void)
4057853ac43aSMatt Mackall {
405841ffe5d5SHugh Dickins 	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4059853ac43aSMatt Mackall 
406041ffe5d5SHugh Dickins 	shm_mnt = kern_mount(&shmem_fs_type);
4061853ac43aSMatt Mackall 	BUG_ON(IS_ERR(shm_mnt));
4062853ac43aSMatt Mackall 
4063853ac43aSMatt Mackall 	return 0;
4064853ac43aSMatt Mackall }
4065853ac43aSMatt Mackall 
406641ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page)
4067853ac43aSMatt Mackall {
4068853ac43aSMatt Mackall 	return 0;
4069853ac43aSMatt Mackall }
4070853ac43aSMatt Mackall 
40713f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user)
40723f96b79aSHugh Dickins {
40733f96b79aSHugh Dickins 	return 0;
40743f96b79aSHugh Dickins }
40753f96b79aSHugh Dickins 
407624513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping)
407724513264SHugh Dickins {
407824513264SHugh Dickins }
407924513264SHugh Dickins 
4080c01d5b30SHugh Dickins #ifdef CONFIG_MMU
4081c01d5b30SHugh Dickins unsigned long shmem_get_unmapped_area(struct file *file,
4082c01d5b30SHugh Dickins 				      unsigned long addr, unsigned long len,
4083c01d5b30SHugh Dickins 				      unsigned long pgoff, unsigned long flags)
4084c01d5b30SHugh Dickins {
4085c01d5b30SHugh Dickins 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4086c01d5b30SHugh Dickins }
4087c01d5b30SHugh Dickins #endif
4088c01d5b30SHugh Dickins 
408941ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
409094c1e62dSHugh Dickins {
409141ffe5d5SHugh Dickins 	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
409294c1e62dSHugh Dickins }
409394c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range);
409494c1e62dSHugh Dickins 
4095853ac43aSMatt Mackall #define shmem_vm_ops				generic_file_vm_ops
40960b0a0806SHugh Dickins #define shmem_file_operations			ramfs_file_operations
4097454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
40980b0a0806SHugh Dickins #define shmem_acct_size(flags, size)		0
40990b0a0806SHugh Dickins #define shmem_unacct_size(flags, size)		do {} while (0)
4100853ac43aSMatt Mackall 
4101853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */
4102853ac43aSMatt Mackall 
4103853ac43aSMatt Mackall /* common code */
41041da177e4SLinus Torvalds 
410519938e35SRasmus Villemoes static const struct dentry_operations anon_ops = {
4106118b2302SAl Viro 	.d_dname = simple_dname
41073451538aSAl Viro };
41083451538aSAl Viro 
4109c7277090SEric Paris static struct
file *__shmem_file_setup(const char *name, loff_t size,
4110c7277090SEric Paris 				unsigned long flags, unsigned int i_flags)
41111da177e4SLinus Torvalds {
41126b4d0b27SAl Viro 	struct file *res;
41131da177e4SLinus Torvalds 	struct inode *inode;
41142c48b9c4SAl Viro 	struct path path;
41153451538aSAl Viro 	struct super_block *sb;
41161da177e4SLinus Torvalds 	struct qstr this;
41171da177e4SLinus Torvalds 
41181da177e4SLinus Torvalds 	if (IS_ERR(shm_mnt))
41196b4d0b27SAl Viro 		return ERR_CAST(shm_mnt);
41201da177e4SLinus Torvalds 
4121285b2c4fSHugh Dickins 	if (size < 0 || size > MAX_LFS_FILESIZE)
41221da177e4SLinus Torvalds 		return ERR_PTR(-EINVAL);
41231da177e4SLinus Torvalds 
41241da177e4SLinus Torvalds 	if (shmem_acct_size(flags, size))
41251da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
41261da177e4SLinus Torvalds 
41276b4d0b27SAl Viro 	res = ERR_PTR(-ENOMEM);
41281da177e4SLinus Torvalds 	this.name = name;
41291da177e4SLinus Torvalds 	this.len = strlen(name);
41301da177e4SLinus Torvalds 	this.hash = 0; /* will go */
41313451538aSAl Viro 	sb = shm_mnt->mnt_sb;
413266ee4b88SKonstantin Khlebnikov 	path.mnt = mntget(shm_mnt);
41333451538aSAl Viro 	path.dentry = d_alloc_pseudo(sb, &this);
41342c48b9c4SAl Viro 	if (!path.dentry)
41351da177e4SLinus Torvalds 		goto put_memory;
41363451538aSAl Viro 	d_set_d_op(path.dentry, &anon_ops);
41371da177e4SLinus Torvalds 
41386b4d0b27SAl Viro 	res = ERR_PTR(-ENOSPC);
41393451538aSAl Viro 	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
41401da177e4SLinus Torvalds 	if (!inode)
414166ee4b88SKonstantin Khlebnikov 		goto put_memory;
41421da177e4SLinus Torvalds 
4143c7277090SEric Paris 	inode->i_flags |= i_flags;
41442c48b9c4SAl Viro 	d_instantiate(path.dentry, inode);
41451da177e4SLinus Torvalds 	inode->i_size = size;
41466d6b77f1SMiklos Szeredi 	clear_nlink(inode);	/* It is unlinked */
414726567cdbSAl Viro 	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
414826567cdbSAl Viro 	if (IS_ERR(res))
414966ee4b88SKonstantin Khlebnikov 		goto put_path;
41504b42af81SAl Viro 
41516b4d0b27SAl Viro 	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
41524b42af81SAl Viro 		  &shmem_file_operations);
41536b4d0b27SAl Viro 	if (IS_ERR(res))
415466ee4b88SKonstantin Khlebnikov 		goto put_path;
41554b42af81SAl Viro 
41566b4d0b27SAl Viro 	return res;
41571da177e4SLinus Torvalds 
41581da177e4SLinus Torvalds put_memory:
41591da177e4SLinus Torvalds 	shmem_unacct_size(flags, size);
416066ee4b88SKonstantin Khlebnikov put_path:
416166ee4b88SKonstantin Khlebnikov 	path_put(&path);
41626b4d0b27SAl Viro 	return res;
41631da177e4SLinus Torvalds }
4164c7277090SEric Paris 
4165c7277090SEric Paris /**
4166c7277090SEric Paris  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4167c7277090SEric Paris  * kernel internal.  There will be NO LSM permission checks against the
4168c7277090SEric Paris  * underlying inode.  So users of this interface must do LSM checks at a
4169e1832f29SStephen Smalley  * higher layer.  The users are the big_key and shm implementations.  LSM
4170e1832f29SStephen Smalley  * checks are provided at the key or shm level rather than the inode.
4171c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4172c7277090SEric Paris  * @size: size to be set for the file
4173c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4174c7277090SEric Paris  */
4175c7277090SEric Paris struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4176c7277090SEric Paris {
4177c7277090SEric Paris 	return __shmem_file_setup(name, size, flags, S_PRIVATE);
4178c7277090SEric Paris }
4179c7277090SEric Paris 
4180c7277090SEric Paris /**
4181c7277090SEric Paris  * shmem_file_setup - get an unlinked file living in tmpfs
4182c7277090SEric Paris  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4183c7277090SEric Paris  * @size: size to be set for the file
4184c7277090SEric Paris  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4185c7277090SEric Paris  */
4186c7277090SEric Paris struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4187c7277090SEric Paris {
4188c7277090SEric Paris 	return __shmem_file_setup(name, size, flags, 0);
4189c7277090SEric Paris }
4190395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup);
41911da177e4SLinus Torvalds 
419246711810SRandy Dunlap /**
41931da177e4SLinus Torvalds  * shmem_zero_setup - setup a shared anonymous mapping
41941da177e4SLinus Torvalds  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
41951da177e4SLinus Torvalds  */
41961da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma)
41971da177e4SLinus Torvalds {
41981da177e4SLinus Torvalds 	struct file *file;
41991da177e4SLinus Torvalds 	loff_t size = vma->vm_end - vma->vm_start;
42001da177e4SLinus Torvalds 
420166fc1303SHugh Dickins 	/*
420266fc1303SHugh Dickins 	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
420366fc1303SHugh Dickins 	 * between XFS directory reading and selinux: since this file is only
420466fc1303SHugh Dickins 	 * accessible to the user through its mapping, use S_PRIVATE flag to
420566fc1303SHugh Dickins 	 * bypass file security, in the same way as shmem_kernel_file_setup().
420666fc1303SHugh Dickins 	 */
420766fc1303SHugh Dickins 	file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
42081da177e4SLinus Torvalds 	if (IS_ERR(file))
42091da177e4SLinus Torvalds 		return PTR_ERR(file);
42101da177e4SLinus Torvalds 
42111da177e4SLinus Torvalds 	if (vma->vm_file)
42121da177e4SLinus Torvalds 		fput(vma->vm_file);
42131da177e4SLinus Torvalds 	vma->vm_file = file;
42141da177e4SLinus Torvalds 	vma->vm_ops = &shmem_vm_ops;
4215f3f0e1d2SKirill A. Shutemov 
4216e496cf3dSKirill A. Shutemov 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4217f3f0e1d2SKirill A. Shutemov 	    ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4218f3f0e1d2SKirill A. Shutemov 	    (vma->vm_end & HPAGE_PMD_MASK)) {
4219f3f0e1d2SKirill A. Shutemov 		khugepaged_enter(vma, vma->vm_flags);
4220f3f0e1d2SKirill A. Shutemov 	}
4221f3f0e1d2SKirill A. Shutemov 
42221da177e4SLinus Torvalds 	return 0;
42231da177e4SLinus Torvalds }
4224d9d90e5eSHugh Dickins 
4225d9d90e5eSHugh Dickins /**
4226d9d90e5eSHugh Dickins  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4227d9d90e5eSHugh Dickins  * @mapping: the page's address_space
4228d9d90e5eSHugh Dickins  * @index: the page index
4229d9d90e5eSHugh Dickins  * @gfp: the page allocator flags to use if allocating
4230d9d90e5eSHugh Dickins  *
4231d9d90e5eSHugh Dickins  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4232d9d90e5eSHugh Dickins  * with any new page allocations done using the specified allocation flags.
4233d9d90e5eSHugh Dickins  * But read_cache_page_gfp() uses the ->readpage() method, which does not
4234d9d90e5eSHugh Dickins  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4235d9d90e5eSHugh Dickins  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4236d9d90e5eSHugh Dickins  *
423768da9f05SHugh Dickins  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
423868da9f05SHugh Dickins  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4239d9d90e5eSHugh Dickins  */
4240d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4241d9d90e5eSHugh Dickins 					 pgoff_t index, gfp_t gfp)
4242d9d90e5eSHugh Dickins {
424368da9f05SHugh Dickins #ifdef CONFIG_SHMEM
424468da9f05SHugh Dickins 	struct inode *inode = mapping->host;
42459276aad6SHugh Dickins 	struct page *page;
424668da9f05SHugh Dickins 	int error;
424768da9f05SHugh Dickins 
424868da9f05SHugh Dickins 	BUG_ON(mapping->a_ops != &shmem_aops);
42499e18eb29SAndres Lagar-Cavilla 	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
42509e18eb29SAndres Lagar-Cavilla 				  gfp, NULL, NULL);
425168da9f05SHugh Dickins 	if (error)
425268da9f05SHugh Dickins 		page = ERR_PTR(error);
425368da9f05SHugh Dickins 	else
425468da9f05SHugh Dickins 		unlock_page(page);
425568da9f05SHugh Dickins 	return page;
425668da9f05SHugh Dickins #else
425768da9f05SHugh Dickins 	/*
425868da9f05SHugh Dickins 	 * The tiny !SHMEM case uses ramfs without swap
425968da9f05SHugh Dickins 	 */
4260d9d90e5eSHugh Dickins 	return read_cache_page_gfp(mapping, index, gfp);
426168da9f05SHugh Dickins #endif
4262d9d90e5eSHugh Dickins }
4263d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
4264
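/*
 * In-kernel sketch (hypothetical caller, not part of shmem.c): the
 * pattern the drm/i915 and ttm users mentioned above follow -- back an
 * object with an unlinked tmpfs file from shmem_file_setup(), then pull
 * pages through shmem_read_mapping_page_gfp() so pages that have gone
 * to swapcache are found again.  Function name and sizes are invented.
 */
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/string.h>

static int example_scribble_on_tmpfs(void)
{
	struct file *filp;
	struct page *page;
	void *kaddr;

	filp = shmem_file_setup("example-buf", 16 * PAGE_SIZE, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	/* returned unlocked, holding a page reference */
	page = shmem_read_mapping_page_gfp(file_inode(filp)->i_mapping,
					   0, GFP_KERNEL);
	if (IS_ERR(page)) {
		fput(filp);
		return PTR_ERR(page);
	}

	kaddr = kmap(page);
	memset(kaddr, 0, PAGE_SIZE);	/* scribble on the first page */
	kunmap(page);
	set_page_dirty(page);
	put_page(page);			/* drop the lookup reference */

	fput(filp);	/* file is unlinked: final fput releases everything */
	return 0;
}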