/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
							 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
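	/* Similarly zero the head of a final partial page, if the hole ends mid-page */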
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON(PageWriteback(page));
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
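
/*
 * Note how a shrinking truncate proceeds below: i_size is reduced first,
 * the range beyond it is unmapped and removed, then unmapped once more to
 * catch private pages COWed while the range was being removed.
 */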
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation; but the inode might already be freed by now,
		 * and we cannot refer to inode or mapping or info to check.
		 * However, we do hold page lock on the PageSwapCache page,
		 * so can check if that still has our reference remaining.
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: it will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page)))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 */
	if (!PageUptodate(page)) {
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;
	VM_BUG_ON(shmem_should_replace_page(newpage, gfp));

	*pagep = newpage;
	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);

	VM_BUG_ON(!PageLocked(oldpage));
	__set_page_locked(newpage);
	VM_BUG_ON(!PageUptodate(oldpage));
	SetPageUptodate(newpage);
	VM_BUG_ON(!PageSwapBacked(oldpage));
	SetPageSwapBacked(newpage);
	VM_BUG_ON(!swap_index);
	set_page_private(newpage, swap_index);
	VM_BUG_ON(!PageSwapCache(oldpage));
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	spin_unlock_irq(&swap_mapping->tree_lock);
	BUG_ON(error);

	mem_cgroup_replace_page_cache(oldpage, newpage);
	lru_cache_add_anon(newpage);

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return 0;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page->mapping) {
			error = -EEXIST;	/* try again */
			goto failed;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
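		/* Also enforce the filesystem-wide block limit, if one was set */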
error = -ENOSPC; 112054af6042SHugh Dickins goto unacct; 112154af6042SHugh Dickins } 11227e496299STim Chen percpu_counter_inc(&sbinfo->used_blocks); 112359a16eadSHugh Dickins } 11241da177e4SLinus Torvalds 112554af6042SHugh Dickins page = shmem_alloc_page(gfp, info, index); 112654af6042SHugh Dickins if (!page) { 112754af6042SHugh Dickins error = -ENOMEM; 112854af6042SHugh Dickins goto decused; 112954af6042SHugh Dickins } 113054af6042SHugh Dickins 113154af6042SHugh Dickins SetPageSwapBacked(page); 113254af6042SHugh Dickins __set_page_locked(page); 1133aa3b1895SHugh Dickins error = mem_cgroup_cache_charge(page, current->mm, 1134aa3b1895SHugh Dickins gfp & GFP_RECLAIM_MASK); 1135aa3b1895SHugh Dickins if (!error) 113654af6042SHugh Dickins error = shmem_add_to_page_cache(page, mapping, index, 113754af6042SHugh Dickins gfp, NULL); 113854af6042SHugh Dickins if (error) 113954af6042SHugh Dickins goto decused; 114054af6042SHugh Dickins lru_cache_add_anon(page); 114154af6042SHugh Dickins 114254af6042SHugh Dickins spin_lock(&info->lock); 11431da177e4SLinus Torvalds info->alloced++; 114454af6042SHugh Dickins inode->i_blocks += BLOCKS_PER_PAGE; 114554af6042SHugh Dickins shmem_recalc_inode(inode); 114659a16eadSHugh Dickins spin_unlock(&info->lock); 1147*1635f6a7SHugh Dickins alloced = true; 114854af6042SHugh Dickins 1149ec9516fbSHugh Dickins /* 1150*1635f6a7SHugh Dickins * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 1151*1635f6a7SHugh Dickins */ 1152*1635f6a7SHugh Dickins if (sgp == SGP_FALLOC) 1153*1635f6a7SHugh Dickins sgp = SGP_WRITE; 1154*1635f6a7SHugh Dickins clear: 1155*1635f6a7SHugh Dickins /* 1156*1635f6a7SHugh Dickins * Let SGP_WRITE caller clear ends if write does not fill page; 1157*1635f6a7SHugh Dickins * but SGP_FALLOC on a page fallocated earlier must initialize 1158*1635f6a7SHugh Dickins * it now, lest undo on failure cancel our earlier guarantee. 1159ec9516fbSHugh Dickins */ 1160ec9516fbSHugh Dickins if (sgp != SGP_WRITE) { 116127ab7006SHugh Dickins clear_highpage(page); 116227ab7006SHugh Dickins flush_dcache_page(page); 116327ab7006SHugh Dickins SetPageUptodate(page); 1164ec9516fbSHugh Dickins } 1165a0ee5ec5SHugh Dickins if (sgp == SGP_DIRTY) 116627ab7006SHugh Dickins set_page_dirty(page); 11671da177e4SLinus Torvalds } 1168bde05d1cSHugh Dickins 116954af6042SHugh Dickins /* Perhaps the file has been truncated since we checked */ 1170*1635f6a7SHugh Dickins if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 117154af6042SHugh Dickins ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 117254af6042SHugh Dickins error = -EINVAL; 1173*1635f6a7SHugh Dickins if (alloced) 117454af6042SHugh Dickins goto trunc; 1175*1635f6a7SHugh Dickins else 1176*1635f6a7SHugh Dickins goto failed; 1177ff36b801SShaohua Li } 117854af6042SHugh Dickins *pagep = page; 117954af6042SHugh Dickins return 0; 1180d00806b1SNick Piggin 1181d0217ac0SNick Piggin /* 118254af6042SHugh Dickins * Error recovery. 
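 *
 * The labels below unwind in reverse order of setup, each falling
 * through to the next (informal summary):
 *
 *	trunc:   back out a newly allocated page that already made it
 *	         into the page cache (alloced and i_blocks rolled back)
 *	decused: give back the sbinfo->used_blocks charge, if limited
 *	unacct:  release the shmem_acct_block() accounting
 *	failed:  common exit; unlock/release the page, and possibly
 *	         retry on -ENOSPC or a changed swap entry (-EEXIST)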
11831da177e4SLinus Torvalds */ 118454af6042SHugh Dickins trunc: 1185*1635f6a7SHugh Dickins info = SHMEM_I(inode); 118654af6042SHugh Dickins ClearPageDirty(page); 118754af6042SHugh Dickins delete_from_page_cache(page); 118854af6042SHugh Dickins spin_lock(&info->lock); 118954af6042SHugh Dickins info->alloced--; 119054af6042SHugh Dickins inode->i_blocks -= BLOCKS_PER_PAGE; 11911da177e4SLinus Torvalds spin_unlock(&info->lock); 119254af6042SHugh Dickins decused: 1193*1635f6a7SHugh Dickins sbinfo = SHMEM_SB(inode->i_sb); 119454af6042SHugh Dickins if (sbinfo->max_blocks) 119554af6042SHugh Dickins percpu_counter_add(&sbinfo->used_blocks, -1); 119654af6042SHugh Dickins unacct: 119754af6042SHugh Dickins shmem_unacct_blocks(info->flags, 1); 119854af6042SHugh Dickins failed: 119954af6042SHugh Dickins if (swap.val && error != -EINVAL) { 120054af6042SHugh Dickins struct page *test = find_get_page(mapping, index); 120154af6042SHugh Dickins if (test && !radix_tree_exceptional_entry(test)) 120254af6042SHugh Dickins page_cache_release(test); 120354af6042SHugh Dickins /* Have another try if the entry has changed */ 120454af6042SHugh Dickins if (test != swp_to_radix_entry(swap)) 120554af6042SHugh Dickins error = -EEXIST; 120654af6042SHugh Dickins } 120727ab7006SHugh Dickins if (page) { 120854af6042SHugh Dickins unlock_page(page); 12091da177e4SLinus Torvalds page_cache_release(page); 121054af6042SHugh Dickins } 121154af6042SHugh Dickins if (error == -ENOSPC && !once++) { 121254af6042SHugh Dickins info = SHMEM_I(inode); 121354af6042SHugh Dickins spin_lock(&info->lock); 121454af6042SHugh Dickins shmem_recalc_inode(inode); 121554af6042SHugh Dickins spin_unlock(&info->lock); 12161da177e4SLinus Torvalds goto repeat; 1217d8dc74f2SAdrian Bunk } 121854af6042SHugh Dickins if (error == -EEXIST) 121954af6042SHugh Dickins goto repeat; 122054af6042SHugh Dickins return error; 12211da177e4SLinus Torvalds } 12221da177e4SLinus Torvalds 12231da177e4SLinus Torvalds static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 12241da177e4SLinus Torvalds { 12251da177e4SLinus Torvalds struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 12261da177e4SLinus Torvalds int error; 122768da9f05SHugh Dickins int ret = VM_FAULT_LOCKED; 12281da177e4SLinus Torvalds 12291da177e4SLinus Torvalds error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 12301da177e4SLinus Torvalds if (error) 12311da177e4SLinus Torvalds return ((error == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS); 123268da9f05SHugh Dickins 1233456f998eSYing Han if (ret & VM_FAULT_MAJOR) { 1234456f998eSYing Han count_vm_event(PGMAJFAULT); 1235456f998eSYing Han mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); 1236456f998eSYing Han } 123768da9f05SHugh Dickins return ret; 12381da177e4SLinus Torvalds } 12391da177e4SLinus Torvalds 12401da177e4SLinus Torvalds #ifdef CONFIG_NUMA 124141ffe5d5SHugh Dickins static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 12421da177e4SLinus Torvalds { 124341ffe5d5SHugh Dickins struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 124441ffe5d5SHugh Dickins return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 12451da177e4SLinus Torvalds } 12461da177e4SLinus Torvalds 1247d8dc74f2SAdrian Bunk static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 1248d8dc74f2SAdrian Bunk unsigned long addr) 12491da177e4SLinus Torvalds { 125041ffe5d5SHugh Dickins struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 125141ffe5d5SHugh Dickins pgoff_t index; 12521da177e4SLinus Torvalds 125341ffe5d5SHugh Dickins index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 125441ffe5d5SHugh Dickins return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 12551da177e4SLinus Torvalds } 12561da177e4SLinus Torvalds #endif 12571da177e4SLinus Torvalds 12581da177e4SLinus Torvalds int shmem_lock(struct file *file, int lock, struct user_struct *user) 12591da177e4SLinus Torvalds { 1260d3ac7f89SJosef "Jeff" Sipek struct inode *inode = file->f_path.dentry->d_inode; 12611da177e4SLinus Torvalds struct shmem_inode_info *info = SHMEM_I(inode); 12621da177e4SLinus Torvalds int retval = -ENOMEM; 12631da177e4SLinus Torvalds 12641da177e4SLinus Torvalds spin_lock(&info->lock); 12651da177e4SLinus Torvalds if (lock && !(info->flags & VM_LOCKED)) { 12661da177e4SLinus Torvalds if (!user_shm_lock(inode->i_size, user)) 12671da177e4SLinus Torvalds goto out_nomem; 12681da177e4SLinus Torvalds info->flags |= VM_LOCKED; 126989e004eaSLee Schermerhorn mapping_set_unevictable(file->f_mapping); 12701da177e4SLinus Torvalds } 12711da177e4SLinus Torvalds if (!lock && (info->flags & VM_LOCKED) && user) { 12721da177e4SLinus Torvalds user_shm_unlock(inode->i_size, user); 12731da177e4SLinus Torvalds info->flags &= ~VM_LOCKED; 127489e004eaSLee Schermerhorn mapping_clear_unevictable(file->f_mapping); 12751da177e4SLinus Torvalds } 12761da177e4SLinus Torvalds retval = 0; 127789e004eaSLee Schermerhorn 12781da177e4SLinus Torvalds out_nomem: 12791da177e4SLinus Torvalds spin_unlock(&info->lock); 12801da177e4SLinus Torvalds return retval; 12811da177e4SLinus Torvalds } 12821da177e4SLinus Torvalds 12839b83a6a8SAdrian Bunk static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 12841da177e4SLinus Torvalds { 12851da177e4SLinus Torvalds file_accessed(file); 12861da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 1287d0217ac0SNick Piggin vma->vm_flags |= VM_CAN_NONLINEAR; 12881da177e4SLinus Torvalds return 0; 12891da177e4SLinus Torvalds } 12901da177e4SLinus Torvalds 1291454abafeSDmitry Monakhov static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 129209208d15SAl Viro umode_t mode, dev_t dev, unsigned long flags) 12931da177e4SLinus Torvalds { 12941da177e4SLinus Torvalds struct inode *inode; 12951da177e4SLinus Torvalds struct shmem_inode_info *info; 12961da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 12971da177e4SLinus Torvalds 12985b04c689SPavel Emelyanov if 
(shmem_reserve_inode(sb)) 12991da177e4SLinus Torvalds return NULL; 13001da177e4SLinus Torvalds 13011da177e4SLinus Torvalds inode = new_inode(sb); 13021da177e4SLinus Torvalds if (inode) { 130385fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 1304454abafeSDmitry Monakhov inode_init_owner(inode, dir, mode); 13051da177e4SLinus Torvalds inode->i_blocks = 0; 13061da177e4SLinus Torvalds inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; 13071da177e4SLinus Torvalds inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 130891828a40SDavid M. Grimes inode->i_generation = get_seconds(); 13091da177e4SLinus Torvalds info = SHMEM_I(inode); 13101da177e4SLinus Torvalds memset(info, 0, (char *)inode - (char *)info); 13111da177e4SLinus Torvalds spin_lock_init(&info->lock); 13120b0a0806SHugh Dickins info->flags = flags & VM_NORESERVE; 13131da177e4SLinus Torvalds INIT_LIST_HEAD(&info->swaplist); 1314b09e0fa4SEric Paris INIT_LIST_HEAD(&info->xattr_list); 131572c04902SAl Viro cache_no_acl(inode); 13161da177e4SLinus Torvalds 13171da177e4SLinus Torvalds switch (mode & S_IFMT) { 13181da177e4SLinus Torvalds default: 131939f0247dSAndreas Gruenbacher inode->i_op = &shmem_special_inode_operations; 13201da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 13211da177e4SLinus Torvalds break; 13221da177e4SLinus Torvalds case S_IFREG: 132314fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 13241da177e4SLinus Torvalds inode->i_op = &shmem_inode_operations; 13251da177e4SLinus Torvalds inode->i_fop = &shmem_file_operations; 132671fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, 132771fe804bSLee Schermerhorn shmem_get_sbmpol(sbinfo)); 13281da177e4SLinus Torvalds break; 13291da177e4SLinus Torvalds case S_IFDIR: 1330d8c76e6fSDave Hansen inc_nlink(inode); 13311da177e4SLinus Torvalds /* Some things misbehave if size == 0 on a directory */ 13321da177e4SLinus Torvalds inode->i_size = 2 * BOGO_DIRENT_SIZE; 13331da177e4SLinus Torvalds inode->i_op = &shmem_dir_inode_operations; 13341da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 13351da177e4SLinus Torvalds break; 13361da177e4SLinus Torvalds case S_IFLNK: 13371da177e4SLinus Torvalds /* 13381da177e4SLinus Torvalds * Must not load anything in the rbtree, 13391da177e4SLinus Torvalds * mpol_free_shared_policy will not be called. 
13401da177e4SLinus Torvalds */ 134171fe804bSLee Schermerhorn mpol_shared_policy_init(&info->policy, NULL); 13421da177e4SLinus Torvalds break; 13431da177e4SLinus Torvalds } 13445b04c689SPavel Emelyanov } else 13455b04c689SPavel Emelyanov shmem_free_inode(sb); 13461da177e4SLinus Torvalds return inode; 13471da177e4SLinus Torvalds } 13481da177e4SLinus Torvalds 13491da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 135092e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations; 135169f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations; 13521da177e4SLinus Torvalds 13536d9d88d0SJarkko Sakkinen #ifdef CONFIG_TMPFS_XATTR 13546d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 13556d9d88d0SJarkko Sakkinen #else 13566d9d88d0SJarkko Sakkinen #define shmem_initxattrs NULL 13576d9d88d0SJarkko Sakkinen #endif 13586d9d88d0SJarkko Sakkinen 13591da177e4SLinus Torvalds static int 1360800d15a5SNick Piggin shmem_write_begin(struct file *file, struct address_space *mapping, 1361800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 1362800d15a5SNick Piggin struct page **pagep, void **fsdata) 13631da177e4SLinus Torvalds { 1364800d15a5SNick Piggin struct inode *inode = mapping->host; 1365800d15a5SNick Piggin pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1366800d15a5SNick Piggin return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); 1367800d15a5SNick Piggin } 1368800d15a5SNick Piggin 1369800d15a5SNick Piggin static int 1370800d15a5SNick Piggin shmem_write_end(struct file *file, struct address_space *mapping, 1371800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 1372800d15a5SNick Piggin struct page *page, void *fsdata) 1373800d15a5SNick Piggin { 1374800d15a5SNick Piggin struct inode *inode = mapping->host; 1375800d15a5SNick Piggin 1376800d15a5SNick Piggin if (pos + copied > inode->i_size) 1377800d15a5SNick Piggin i_size_write(inode, pos + copied); 1378800d15a5SNick Piggin 1379ec9516fbSHugh Dickins if (!PageUptodate(page)) { 1380ec9516fbSHugh Dickins if (copied < PAGE_CACHE_SIZE) { 1381ec9516fbSHugh Dickins unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1382ec9516fbSHugh Dickins zero_user_segments(page, 0, from, 1383ec9516fbSHugh Dickins from + copied, PAGE_CACHE_SIZE); 1384ec9516fbSHugh Dickins } 1385ec9516fbSHugh Dickins SetPageUptodate(page); 1386ec9516fbSHugh Dickins } 1387d3602444SHugh Dickins set_page_dirty(page); 13886746aff7SWu Fengguang unlock_page(page); 1389d3602444SHugh Dickins page_cache_release(page); 1390d3602444SHugh Dickins 1391800d15a5SNick Piggin return copied; 13921da177e4SLinus Torvalds } 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) 13951da177e4SLinus Torvalds { 1396d3ac7f89SJosef "Jeff" Sipek struct inode *inode = filp->f_path.dentry->d_inode; 13971da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 139841ffe5d5SHugh Dickins pgoff_t index; 139941ffe5d5SHugh Dickins unsigned long offset; 1400a0ee5ec5SHugh Dickins enum sgp_type sgp = SGP_READ; 1401a0ee5ec5SHugh Dickins 1402a0ee5ec5SHugh Dickins /* 1403a0ee5ec5SHugh Dickins * Might this read be for a stacking filesystem? Then when reading 1404a0ee5ec5SHugh Dickins * holes of a sparse file, we actually need to allocate those pages, 1405a0ee5ec5SHugh Dickins * and even mark them dirty, so it cannot exceed the max_blocks limit. 
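 *
 * A minimal sketch of the case being detected (lower_file, buf and
 * count are illustrative names, not from this file): a stacking
 * filesystem reading into a kernel buffer does roughly
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	err = vfs_read(lower_file, buf, count, &pos);
 *	set_fs(old_fs);
 *
 * and that KERNEL_DS setting is what switches this read to SGP_DIRTY.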
1406a0ee5ec5SHugh Dickins */ 1407a0ee5ec5SHugh Dickins if (segment_eq(get_fs(), KERNEL_DS)) 1408a0ee5ec5SHugh Dickins sgp = SGP_DIRTY; 14091da177e4SLinus Torvalds 14101da177e4SLinus Torvalds index = *ppos >> PAGE_CACHE_SHIFT; 14111da177e4SLinus Torvalds offset = *ppos & ~PAGE_CACHE_MASK; 14121da177e4SLinus Torvalds 14131da177e4SLinus Torvalds for (;;) { 14141da177e4SLinus Torvalds struct page *page = NULL; 141541ffe5d5SHugh Dickins pgoff_t end_index; 141641ffe5d5SHugh Dickins unsigned long nr, ret; 14171da177e4SLinus Torvalds loff_t i_size = i_size_read(inode); 14181da177e4SLinus Torvalds 14191da177e4SLinus Torvalds end_index = i_size >> PAGE_CACHE_SHIFT; 14201da177e4SLinus Torvalds if (index > end_index) 14211da177e4SLinus Torvalds break; 14221da177e4SLinus Torvalds if (index == end_index) { 14231da177e4SLinus Torvalds nr = i_size & ~PAGE_CACHE_MASK; 14241da177e4SLinus Torvalds if (nr <= offset) 14251da177e4SLinus Torvalds break; 14261da177e4SLinus Torvalds } 14271da177e4SLinus Torvalds 1428a0ee5ec5SHugh Dickins desc->error = shmem_getpage(inode, index, &page, sgp, NULL); 14291da177e4SLinus Torvalds if (desc->error) { 14301da177e4SLinus Torvalds if (desc->error == -EINVAL) 14311da177e4SLinus Torvalds desc->error = 0; 14321da177e4SLinus Torvalds break; 14331da177e4SLinus Torvalds } 1434d3602444SHugh Dickins if (page) 1435d3602444SHugh Dickins unlock_page(page); 14361da177e4SLinus Torvalds 14371da177e4SLinus Torvalds /* 14381da177e4SLinus Torvalds * We must evaluate after, since reads (unlike writes) 14391b1dcc1bSJes Sorensen * are called without i_mutex protection against truncate 14401da177e4SLinus Torvalds */ 14411da177e4SLinus Torvalds nr = PAGE_CACHE_SIZE; 14421da177e4SLinus Torvalds i_size = i_size_read(inode); 14431da177e4SLinus Torvalds end_index = i_size >> PAGE_CACHE_SHIFT; 14441da177e4SLinus Torvalds if (index == end_index) { 14451da177e4SLinus Torvalds nr = i_size & ~PAGE_CACHE_MASK; 14461da177e4SLinus Torvalds if (nr <= offset) { 14471da177e4SLinus Torvalds if (page) 14481da177e4SLinus Torvalds page_cache_release(page); 14491da177e4SLinus Torvalds break; 14501da177e4SLinus Torvalds } 14511da177e4SLinus Torvalds } 14521da177e4SLinus Torvalds nr -= offset; 14531da177e4SLinus Torvalds 14541da177e4SLinus Torvalds if (page) { 14551da177e4SLinus Torvalds /* 14561da177e4SLinus Torvalds * If users can be writing to this page using arbitrary 14571da177e4SLinus Torvalds * virtual addresses, take care about potential aliasing 14581da177e4SLinus Torvalds * before reading the page on the kernel side. 14591da177e4SLinus Torvalds */ 14601da177e4SLinus Torvalds if (mapping_writably_mapped(mapping)) 14611da177e4SLinus Torvalds flush_dcache_page(page); 14621da177e4SLinus Torvalds /* 14631da177e4SLinus Torvalds * Mark the page accessed if we read the beginning. 14641da177e4SLinus Torvalds */ 14651da177e4SLinus Torvalds if (!offset) 14661da177e4SLinus Torvalds mark_page_accessed(page); 1467b5810039SNick Piggin } else { 14681da177e4SLinus Torvalds page = ZERO_PAGE(0); 1469b5810039SNick Piggin page_cache_get(page); 1470b5810039SNick Piggin } 14711da177e4SLinus Torvalds 14721da177e4SLinus Torvalds /* 14731da177e4SLinus Torvalds * Ok, we have the page, and it's up-to-date, so 14741da177e4SLinus Torvalds * now we can copy it to user space... 14751da177e4SLinus Torvalds * 14761da177e4SLinus Torvalds * The actor routine returns how many bytes were actually used.. 14771da177e4SLinus Torvalds * NOTE! 
This may not be the same as how much of a user buffer 14781da177e4SLinus Torvalds * we filled up (we may be padding etc), so we can only update 14791da177e4SLinus Torvalds * "pos" here (the actor routine has to update the user buffer 14801da177e4SLinus Torvalds * pointers and the remaining count). 14811da177e4SLinus Torvalds */ 14821da177e4SLinus Torvalds ret = actor(desc, page, offset, nr); 14831da177e4SLinus Torvalds offset += ret; 14841da177e4SLinus Torvalds index += offset >> PAGE_CACHE_SHIFT; 14851da177e4SLinus Torvalds offset &= ~PAGE_CACHE_MASK; 14861da177e4SLinus Torvalds 14871da177e4SLinus Torvalds page_cache_release(page); 14881da177e4SLinus Torvalds if (ret != nr || !desc->count) 14891da177e4SLinus Torvalds break; 14901da177e4SLinus Torvalds 14911da177e4SLinus Torvalds cond_resched(); 14921da177e4SLinus Torvalds } 14931da177e4SLinus Torvalds 14941da177e4SLinus Torvalds *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 14951da177e4SLinus Torvalds file_accessed(filp); 14961da177e4SLinus Torvalds } 14971da177e4SLinus Torvalds 1498bcd78e49SHugh Dickins static ssize_t shmem_file_aio_read(struct kiocb *iocb, 1499bcd78e49SHugh Dickins const struct iovec *iov, unsigned long nr_segs, loff_t pos) 15001da177e4SLinus Torvalds { 1501bcd78e49SHugh Dickins struct file *filp = iocb->ki_filp; 1502bcd78e49SHugh Dickins ssize_t retval; 1503bcd78e49SHugh Dickins unsigned long seg; 1504bcd78e49SHugh Dickins size_t count; 1505bcd78e49SHugh Dickins loff_t *ppos = &iocb->ki_pos; 1506bcd78e49SHugh Dickins 1507bcd78e49SHugh Dickins retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); 1508bcd78e49SHugh Dickins if (retval) 1509bcd78e49SHugh Dickins return retval; 1510bcd78e49SHugh Dickins 1511bcd78e49SHugh Dickins for (seg = 0; seg < nr_segs; seg++) { 15121da177e4SLinus Torvalds read_descriptor_t desc; 15131da177e4SLinus Torvalds 15141da177e4SLinus Torvalds desc.written = 0; 1515bcd78e49SHugh Dickins desc.arg.buf = iov[seg].iov_base; 1516bcd78e49SHugh Dickins desc.count = iov[seg].iov_len; 1517bcd78e49SHugh Dickins if (desc.count == 0) 1518bcd78e49SHugh Dickins continue; 15191da177e4SLinus Torvalds desc.error = 0; 15201da177e4SLinus Torvalds do_shmem_file_read(filp, ppos, &desc, file_read_actor); 1521bcd78e49SHugh Dickins retval += desc.written; 1522bcd78e49SHugh Dickins if (desc.error) { 1523bcd78e49SHugh Dickins retval = retval ?: desc.error; 1524bcd78e49SHugh Dickins break; 1525bcd78e49SHugh Dickins } 1526bcd78e49SHugh Dickins if (desc.count > 0) 1527bcd78e49SHugh Dickins break; 1528bcd78e49SHugh Dickins } 1529bcd78e49SHugh Dickins return retval; 15301da177e4SLinus Torvalds } 15311da177e4SLinus Torvalds 1532708e3508SHugh Dickins static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, 1533708e3508SHugh Dickins struct pipe_inode_info *pipe, size_t len, 1534708e3508SHugh Dickins unsigned int flags) 1535708e3508SHugh Dickins { 1536708e3508SHugh Dickins struct address_space *mapping = in->f_mapping; 153771f0e07aSHugh Dickins struct inode *inode = mapping->host; 1538708e3508SHugh Dickins unsigned int loff, nr_pages, req_pages; 1539708e3508SHugh Dickins struct page *pages[PIPE_DEF_BUFFERS]; 1540708e3508SHugh Dickins struct partial_page partial[PIPE_DEF_BUFFERS]; 1541708e3508SHugh Dickins struct page *page; 1542708e3508SHugh Dickins pgoff_t index, end_index; 1543708e3508SHugh Dickins loff_t isize, left; 1544708e3508SHugh Dickins int error, page_nr; 1545708e3508SHugh Dickins struct splice_pipe_desc spd = { 1546708e3508SHugh Dickins .pages = pages, 1547708e3508SHugh Dickins 
.partial = partial, 1548708e3508SHugh Dickins .flags = flags, 1549708e3508SHugh Dickins .ops = &page_cache_pipe_buf_ops, 1550708e3508SHugh Dickins .spd_release = spd_release_page, 1551708e3508SHugh Dickins }; 1552708e3508SHugh Dickins 155371f0e07aSHugh Dickins isize = i_size_read(inode); 1554708e3508SHugh Dickins if (unlikely(*ppos >= isize)) 1555708e3508SHugh Dickins return 0; 1556708e3508SHugh Dickins 1557708e3508SHugh Dickins left = isize - *ppos; 1558708e3508SHugh Dickins if (unlikely(left < len)) 1559708e3508SHugh Dickins len = left; 1560708e3508SHugh Dickins 1561708e3508SHugh Dickins if (splice_grow_spd(pipe, &spd)) 1562708e3508SHugh Dickins return -ENOMEM; 1563708e3508SHugh Dickins 1564708e3508SHugh Dickins index = *ppos >> PAGE_CACHE_SHIFT; 1565708e3508SHugh Dickins loff = *ppos & ~PAGE_CACHE_MASK; 1566708e3508SHugh Dickins req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1567708e3508SHugh Dickins nr_pages = min(req_pages, pipe->buffers); 1568708e3508SHugh Dickins 1569708e3508SHugh Dickins spd.nr_pages = find_get_pages_contig(mapping, index, 1570708e3508SHugh Dickins nr_pages, spd.pages); 1571708e3508SHugh Dickins index += spd.nr_pages; 1572708e3508SHugh Dickins error = 0; 157371f0e07aSHugh Dickins 1574708e3508SHugh Dickins while (spd.nr_pages < nr_pages) { 157571f0e07aSHugh Dickins error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL); 157671f0e07aSHugh Dickins if (error) 1577708e3508SHugh Dickins break; 1578708e3508SHugh Dickins unlock_page(page); 1579708e3508SHugh Dickins spd.pages[spd.nr_pages++] = page; 1580708e3508SHugh Dickins index++; 1581708e3508SHugh Dickins } 1582708e3508SHugh Dickins 1583708e3508SHugh Dickins index = *ppos >> PAGE_CACHE_SHIFT; 1584708e3508SHugh Dickins nr_pages = spd.nr_pages; 1585708e3508SHugh Dickins spd.nr_pages = 0; 158671f0e07aSHugh Dickins 1587708e3508SHugh Dickins for (page_nr = 0; page_nr < nr_pages; page_nr++) { 1588708e3508SHugh Dickins unsigned int this_len; 1589708e3508SHugh Dickins 1590708e3508SHugh Dickins if (!len) 1591708e3508SHugh Dickins break; 1592708e3508SHugh Dickins 1593708e3508SHugh Dickins this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 1594708e3508SHugh Dickins page = spd.pages[page_nr]; 1595708e3508SHugh Dickins 159671f0e07aSHugh Dickins if (!PageUptodate(page) || page->mapping != mapping) { 159771f0e07aSHugh Dickins error = shmem_getpage(inode, index, &page, 159871f0e07aSHugh Dickins SGP_CACHE, NULL); 159971f0e07aSHugh Dickins if (error) 1600708e3508SHugh Dickins break; 160171f0e07aSHugh Dickins unlock_page(page); 1602708e3508SHugh Dickins page_cache_release(spd.pages[page_nr]); 1603708e3508SHugh Dickins spd.pages[page_nr] = page; 1604708e3508SHugh Dickins } 1605708e3508SHugh Dickins 160671f0e07aSHugh Dickins isize = i_size_read(inode); 1607708e3508SHugh Dickins end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1608708e3508SHugh Dickins if (unlikely(!isize || index > end_index)) 1609708e3508SHugh Dickins break; 1610708e3508SHugh Dickins 1611708e3508SHugh Dickins if (end_index == index) { 1612708e3508SHugh Dickins unsigned int plen; 1613708e3508SHugh Dickins 1614708e3508SHugh Dickins plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1615708e3508SHugh Dickins if (plen <= loff) 1616708e3508SHugh Dickins break; 1617708e3508SHugh Dickins 1618708e3508SHugh Dickins this_len = min(this_len, plen - loff); 1619708e3508SHugh Dickins len = this_len; 1620708e3508SHugh Dickins } 1621708e3508SHugh Dickins 1622708e3508SHugh Dickins spd.partial[page_nr].offset = loff; 1623708e3508SHugh Dickins 
spd.partial[page_nr].len = this_len; 1624708e3508SHugh Dickins len -= this_len; 1625708e3508SHugh Dickins loff = 0; 1626708e3508SHugh Dickins spd.nr_pages++; 1627708e3508SHugh Dickins index++; 1628708e3508SHugh Dickins } 1629708e3508SHugh Dickins 1630708e3508SHugh Dickins while (page_nr < nr_pages) 1631708e3508SHugh Dickins page_cache_release(spd.pages[page_nr++]); 1632708e3508SHugh Dickins 1633708e3508SHugh Dickins if (spd.nr_pages) 1634708e3508SHugh Dickins error = splice_to_pipe(pipe, &spd); 1635708e3508SHugh Dickins 1636708e3508SHugh Dickins splice_shrink_spd(pipe, &spd); 1637708e3508SHugh Dickins 1638708e3508SHugh Dickins if (error > 0) { 1639708e3508SHugh Dickins *ppos += error; 1640708e3508SHugh Dickins file_accessed(in); 1641708e3508SHugh Dickins } 1642708e3508SHugh Dickins return error; 1643708e3508SHugh Dickins } 1644708e3508SHugh Dickins 164583e4fa9cSHugh Dickins static long shmem_fallocate(struct file *file, int mode, loff_t offset, 164683e4fa9cSHugh Dickins loff_t len) 164783e4fa9cSHugh Dickins { 164883e4fa9cSHugh Dickins struct inode *inode = file->f_path.dentry->d_inode; 1649e2d12e22SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1650e2d12e22SHugh Dickins pgoff_t start, index, end; 1651e2d12e22SHugh Dickins int error; 165283e4fa9cSHugh Dickins 165383e4fa9cSHugh Dickins mutex_lock(&inode->i_mutex); 165483e4fa9cSHugh Dickins 165583e4fa9cSHugh Dickins if (mode & FALLOC_FL_PUNCH_HOLE) { 165683e4fa9cSHugh Dickins struct address_space *mapping = file->f_mapping; 165783e4fa9cSHugh Dickins loff_t unmap_start = round_up(offset, PAGE_SIZE); 165883e4fa9cSHugh Dickins loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 165983e4fa9cSHugh Dickins 166083e4fa9cSHugh Dickins if ((u64)unmap_end > (u64)unmap_start) 166183e4fa9cSHugh Dickins unmap_mapping_range(mapping, unmap_start, 166283e4fa9cSHugh Dickins 1 + unmap_end - unmap_start, 0); 166383e4fa9cSHugh Dickins shmem_truncate_range(inode, offset, offset + len - 1); 166483e4fa9cSHugh Dickins /* No need to unmap again: hole-punching leaves COWed pages */ 166583e4fa9cSHugh Dickins error = 0; 1666e2d12e22SHugh Dickins goto out; 166783e4fa9cSHugh Dickins } 166883e4fa9cSHugh Dickins 1669e2d12e22SHugh Dickins /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1670e2d12e22SHugh Dickins error = inode_newsize_ok(inode, offset + len); 1671e2d12e22SHugh Dickins if (error) 1672e2d12e22SHugh Dickins goto out; 1673e2d12e22SHugh Dickins 1674e2d12e22SHugh Dickins start = offset >> PAGE_CACHE_SHIFT; 1675e2d12e22SHugh Dickins end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1676e2d12e22SHugh Dickins /* Try to avoid a swapstorm if len is impossible to satisfy */ 1677e2d12e22SHugh Dickins if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 1678e2d12e22SHugh Dickins error = -ENOSPC; 1679e2d12e22SHugh Dickins goto out; 1680e2d12e22SHugh Dickins } 1681e2d12e22SHugh Dickins 1682e2d12e22SHugh Dickins for (index = start; index < end; index++) { 1683e2d12e22SHugh Dickins struct page *page; 1684e2d12e22SHugh Dickins 1685e2d12e22SHugh Dickins /* 1686e2d12e22SHugh Dickins * Good, the fallocate(2) manpage permits EINTR: we may have 1687e2d12e22SHugh Dickins * been interrupted because we are using up too much memory. 
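 *
 * Userspace is expected to retry in that case, for example
 * (illustrative sketch; fd and len are whatever the caller passed):
 *
 *	while (fallocate(fd, 0, 0, len) == -1 && errno == EINTR)
 *		continue;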
1688e2d12e22SHugh Dickins */ 1689e2d12e22SHugh Dickins if (signal_pending(current)) 1690e2d12e22SHugh Dickins error = -EINTR; 1691e2d12e22SHugh Dickins else 1692*1635f6a7SHugh Dickins error = shmem_getpage(inode, index, &page, SGP_FALLOC, 1693e2d12e22SHugh Dickins NULL); 1694e2d12e22SHugh Dickins if (error) { 1695*1635f6a7SHugh Dickins /* Remove the !PageUptodate pages we added */ 1696*1635f6a7SHugh Dickins shmem_undo_range(inode, 1697*1635f6a7SHugh Dickins (loff_t)start << PAGE_CACHE_SHIFT, 1698*1635f6a7SHugh Dickins (loff_t)index << PAGE_CACHE_SHIFT, true); 1699e2d12e22SHugh Dickins goto ctime; 1700e2d12e22SHugh Dickins } 1701e2d12e22SHugh Dickins 1702e2d12e22SHugh Dickins /* 1703*1635f6a7SHugh Dickins * If !PageUptodate, leave it that way so that freeable pages 1704*1635f6a7SHugh Dickins * can be recognized if we need to rollback on error later. 1705*1635f6a7SHugh Dickins * But set_page_dirty so that memory pressure will swap rather 1706e2d12e22SHugh Dickins * than free the pages we are allocating (and SGP_CACHE pages 1707e2d12e22SHugh Dickins * might still be clean: we now need to mark those dirty too). 1708e2d12e22SHugh Dickins */ 1709e2d12e22SHugh Dickins set_page_dirty(page); 1710e2d12e22SHugh Dickins unlock_page(page); 1711e2d12e22SHugh Dickins page_cache_release(page); 1712e2d12e22SHugh Dickins cond_resched(); 1713e2d12e22SHugh Dickins } 1714e2d12e22SHugh Dickins 1715e2d12e22SHugh Dickins if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 1716e2d12e22SHugh Dickins i_size_write(inode, offset + len); 1717e2d12e22SHugh Dickins ctime: 1718e2d12e22SHugh Dickins inode->i_ctime = CURRENT_TIME; 1719e2d12e22SHugh Dickins out: 172083e4fa9cSHugh Dickins mutex_unlock(&inode->i_mutex); 172183e4fa9cSHugh Dickins return error; 172283e4fa9cSHugh Dickins } 172383e4fa9cSHugh Dickins 1724726c3342SDavid Howells static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 17251da177e4SLinus Torvalds { 1726726c3342SDavid Howells struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 17271da177e4SLinus Torvalds 17281da177e4SLinus Torvalds buf->f_type = TMPFS_MAGIC; 17291da177e4SLinus Torvalds buf->f_bsize = PAGE_CACHE_SIZE; 17301da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 17310edd73b3SHugh Dickins if (sbinfo->max_blocks) { 17321da177e4SLinus Torvalds buf->f_blocks = sbinfo->max_blocks; 173341ffe5d5SHugh Dickins buf->f_bavail = 173441ffe5d5SHugh Dickins buf->f_bfree = sbinfo->max_blocks - 173541ffe5d5SHugh Dickins percpu_counter_sum(&sbinfo->used_blocks); 17360edd73b3SHugh Dickins } 17370edd73b3SHugh Dickins if (sbinfo->max_inodes) { 17381da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 17391da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 17401da177e4SLinus Torvalds } 17411da177e4SLinus Torvalds /* else leave those fields 0 like simple_statfs */ 17421da177e4SLinus Torvalds return 0; 17431da177e4SLinus Torvalds } 17441da177e4SLinus Torvalds 17451da177e4SLinus Torvalds /* 17461da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
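 *
 * For example (paths and modes purely illustrative), both of
 *
 *	fd = open("/dev/shm/file", O_CREAT | O_RDWR, 0644);
 *	mknod("/dev/shm/fifo", S_IFIFO | 0600, 0);
 *
 * reach shmem_mknod() below, via the ->create and ->mknod methods.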
17471da177e4SLinus Torvalds */ 17481da177e4SLinus Torvalds static int 17491a67aafbSAl Viro shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 17501da177e4SLinus Torvalds { 17510b0a0806SHugh Dickins struct inode *inode; 17521da177e4SLinus Torvalds int error = -ENOSPC; 17531da177e4SLinus Torvalds 1754454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 17551da177e4SLinus Torvalds if (inode) { 17562a7dba39SEric Paris error = security_inode_init_security(inode, dir, 17579d8f13baSMimi Zohar &dentry->d_name, 17586d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 1759570bc1c2SStephen Smalley if (error) { 1760570bc1c2SStephen Smalley if (error != -EOPNOTSUPP) { 1761570bc1c2SStephen Smalley iput(inode); 1762570bc1c2SStephen Smalley return error; 1763570bc1c2SStephen Smalley } 176439f0247dSAndreas Gruenbacher } 17651c7c474cSChristoph Hellwig #ifdef CONFIG_TMPFS_POSIX_ACL 17661c7c474cSChristoph Hellwig error = generic_acl_init(inode, dir); 176739f0247dSAndreas Gruenbacher if (error) { 176839f0247dSAndreas Gruenbacher iput(inode); 176939f0247dSAndreas Gruenbacher return error; 1770570bc1c2SStephen Smalley } 1771718deb6bSAl Viro #else 1772718deb6bSAl Viro error = 0; 17731c7c474cSChristoph Hellwig #endif 17741da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 17751da177e4SLinus Torvalds dir->i_ctime = dir->i_mtime = CURRENT_TIME; 17761da177e4SLinus Torvalds d_instantiate(dentry, inode); 17771da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 17781da177e4SLinus Torvalds } 17791da177e4SLinus Torvalds return error; 17801da177e4SLinus Torvalds } 17811da177e4SLinus Torvalds 178218bb1db3SAl Viro static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 17831da177e4SLinus Torvalds { 17841da177e4SLinus Torvalds int error; 17851da177e4SLinus Torvalds 17861da177e4SLinus Torvalds if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 17871da177e4SLinus Torvalds return error; 1788d8c76e6fSDave Hansen inc_nlink(dir); 17891da177e4SLinus Torvalds return 0; 17901da177e4SLinus Torvalds } 17911da177e4SLinus Torvalds 17924acdaf27SAl Viro static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 17931da177e4SLinus Torvalds struct nameidata *nd) 17941da177e4SLinus Torvalds { 17951da177e4SLinus Torvalds return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 17961da177e4SLinus Torvalds } 17971da177e4SLinus Torvalds 17981da177e4SLinus Torvalds /* 17991da177e4SLinus Torvalds * Link a file.. 18001da177e4SLinus Torvalds */ 18011da177e4SLinus Torvalds static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 18021da177e4SLinus Torvalds { 18031da177e4SLinus Torvalds struct inode *inode = old_dentry->d_inode; 18045b04c689SPavel Emelyanov int ret; 18051da177e4SLinus Torvalds 18061da177e4SLinus Torvalds /* 18071da177e4SLinus Torvalds * No ordinary (disk based) filesystem counts links as inodes; 18081da177e4SLinus Torvalds * but each new link needs a new dentry, pinning lowmem, and 18091da177e4SLinus Torvalds * tmpfs dentries cannot be pruned until they are unlinked. 
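 *
 * So link("/dev/shm/a", "/dev/shm/b") (paths illustrative) still
 * charges one entry from the same free_inodes pool that a brand new
 * file would use, and shmem_reserve_inode() below can fail with
 * -ENOSPC even though no inode is actually allocated here.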
18101da177e4SLinus Torvalds */ 18115b04c689SPavel Emelyanov ret = shmem_reserve_inode(inode->i_sb); 18125b04c689SPavel Emelyanov if (ret) 18135b04c689SPavel Emelyanov goto out; 18141da177e4SLinus Torvalds 18151da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 18161da177e4SLinus Torvalds inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1817d8c76e6fSDave Hansen inc_nlink(inode); 18187de9c6eeSAl Viro ihold(inode); /* New dentry reference */ 18191da177e4SLinus Torvalds dget(dentry); /* Extra pinning count for the created dentry */ 18201da177e4SLinus Torvalds d_instantiate(dentry, inode); 18215b04c689SPavel Emelyanov out: 18225b04c689SPavel Emelyanov return ret; 18231da177e4SLinus Torvalds } 18241da177e4SLinus Torvalds 18251da177e4SLinus Torvalds static int shmem_unlink(struct inode *dir, struct dentry *dentry) 18261da177e4SLinus Torvalds { 18271da177e4SLinus Torvalds struct inode *inode = dentry->d_inode; 18281da177e4SLinus Torvalds 18295b04c689SPavel Emelyanov if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 18305b04c689SPavel Emelyanov shmem_free_inode(inode->i_sb); 18311da177e4SLinus Torvalds 18321da177e4SLinus Torvalds dir->i_size -= BOGO_DIRENT_SIZE; 18331da177e4SLinus Torvalds inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 18349a53c3a7SDave Hansen drop_nlink(inode); 18351da177e4SLinus Torvalds dput(dentry); /* Undo the count from "create" - this does all the work */ 18361da177e4SLinus Torvalds return 0; 18371da177e4SLinus Torvalds } 18381da177e4SLinus Torvalds 18391da177e4SLinus Torvalds static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 18401da177e4SLinus Torvalds { 18411da177e4SLinus Torvalds if (!simple_empty(dentry)) 18421da177e4SLinus Torvalds return -ENOTEMPTY; 18431da177e4SLinus Torvalds 18449a53c3a7SDave Hansen drop_nlink(dentry->d_inode); 18459a53c3a7SDave Hansen drop_nlink(dir); 18461da177e4SLinus Torvalds return shmem_unlink(dir, dentry); 18471da177e4SLinus Torvalds } 18481da177e4SLinus Torvalds 18491da177e4SLinus Torvalds /* 18501da177e4SLinus Torvalds * The VFS layer already does all the dentry stuff for rename, 18511da177e4SLinus Torvalds * we just have to decrement the usage count for the target if 18521da177e4SLinus Torvalds * it exists so that the VFS layer correctly free's it when it 18531da177e4SLinus Torvalds * gets overwritten. 
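 *
 * Concretely, rename("/dev/shm/a", "/dev/shm/b") with "b" already
 * existing (paths illustrative) first shmem_unlink()s "b" below, so
 * the displaced target loses a link and its pinned dentry reference.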
18541da177e4SLinus Torvalds */ 18551da177e4SLinus Torvalds static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 18561da177e4SLinus Torvalds { 18571da177e4SLinus Torvalds struct inode *inode = old_dentry->d_inode; 18581da177e4SLinus Torvalds int they_are_dirs = S_ISDIR(inode->i_mode); 18591da177e4SLinus Torvalds 18601da177e4SLinus Torvalds if (!simple_empty(new_dentry)) 18611da177e4SLinus Torvalds return -ENOTEMPTY; 18621da177e4SLinus Torvalds 18631da177e4SLinus Torvalds if (new_dentry->d_inode) { 18641da177e4SLinus Torvalds (void) shmem_unlink(new_dir, new_dentry); 18651da177e4SLinus Torvalds if (they_are_dirs) 18669a53c3a7SDave Hansen drop_nlink(old_dir); 18671da177e4SLinus Torvalds } else if (they_are_dirs) { 18689a53c3a7SDave Hansen drop_nlink(old_dir); 1869d8c76e6fSDave Hansen inc_nlink(new_dir); 18701da177e4SLinus Torvalds } 18711da177e4SLinus Torvalds 18721da177e4SLinus Torvalds old_dir->i_size -= BOGO_DIRENT_SIZE; 18731da177e4SLinus Torvalds new_dir->i_size += BOGO_DIRENT_SIZE; 18741da177e4SLinus Torvalds old_dir->i_ctime = old_dir->i_mtime = 18751da177e4SLinus Torvalds new_dir->i_ctime = new_dir->i_mtime = 18761da177e4SLinus Torvalds inode->i_ctime = CURRENT_TIME; 18771da177e4SLinus Torvalds return 0; 18781da177e4SLinus Torvalds } 18791da177e4SLinus Torvalds 18801da177e4SLinus Torvalds static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 18811da177e4SLinus Torvalds { 18821da177e4SLinus Torvalds int error; 18831da177e4SLinus Torvalds int len; 18841da177e4SLinus Torvalds struct inode *inode; 18859276aad6SHugh Dickins struct page *page; 18861da177e4SLinus Torvalds char *kaddr; 18871da177e4SLinus Torvalds struct shmem_inode_info *info; 18881da177e4SLinus Torvalds 18891da177e4SLinus Torvalds len = strlen(symname) + 1; 18901da177e4SLinus Torvalds if (len > PAGE_CACHE_SIZE) 18911da177e4SLinus Torvalds return -ENAMETOOLONG; 18921da177e4SLinus Torvalds 1893454abafeSDmitry Monakhov inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 18941da177e4SLinus Torvalds if (!inode) 18951da177e4SLinus Torvalds return -ENOSPC; 18961da177e4SLinus Torvalds 18979d8f13baSMimi Zohar error = security_inode_init_security(inode, dir, &dentry->d_name, 18986d9d88d0SJarkko Sakkinen shmem_initxattrs, NULL); 1899570bc1c2SStephen Smalley if (error) { 1900570bc1c2SStephen Smalley if (error != -EOPNOTSUPP) { 1901570bc1c2SStephen Smalley iput(inode); 1902570bc1c2SStephen Smalley return error; 1903570bc1c2SStephen Smalley } 1904570bc1c2SStephen Smalley error = 0; 1905570bc1c2SStephen Smalley } 1906570bc1c2SStephen Smalley 19071da177e4SLinus Torvalds info = SHMEM_I(inode); 19081da177e4SLinus Torvalds inode->i_size = len-1; 190969f07ec9SHugh Dickins if (len <= SHORT_SYMLINK_LEN) { 191069f07ec9SHugh Dickins info->symlink = kmemdup(symname, len, GFP_KERNEL); 191169f07ec9SHugh Dickins if (!info->symlink) { 191269f07ec9SHugh Dickins iput(inode); 191369f07ec9SHugh Dickins return -ENOMEM; 191469f07ec9SHugh Dickins } 191569f07ec9SHugh Dickins inode->i_op = &shmem_short_symlink_operations; 19161da177e4SLinus Torvalds } else { 19171da177e4SLinus Torvalds error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 19181da177e4SLinus Torvalds if (error) { 19191da177e4SLinus Torvalds iput(inode); 19201da177e4SLinus Torvalds return error; 19211da177e4SLinus Torvalds } 192214fcc23fSHugh Dickins inode->i_mapping->a_ops = &shmem_aops; 19231da177e4SLinus Torvalds inode->i_op = &shmem_symlink_inode_operations; 
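		/*
		 * The page obtained above may be a highmem page, so map it
		 * transiently while copying in the target string; len already
		 * includes the trailing NUL.
		 */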
19249b04c5feSCong Wang kaddr = kmap_atomic(page); 19251da177e4SLinus Torvalds memcpy(kaddr, symname, len); 19269b04c5feSCong Wang kunmap_atomic(kaddr); 1927ec9516fbSHugh Dickins SetPageUptodate(page); 19281da177e4SLinus Torvalds set_page_dirty(page); 19296746aff7SWu Fengguang unlock_page(page); 19301da177e4SLinus Torvalds page_cache_release(page); 19311da177e4SLinus Torvalds } 19321da177e4SLinus Torvalds dir->i_size += BOGO_DIRENT_SIZE; 19331da177e4SLinus Torvalds dir->i_ctime = dir->i_mtime = CURRENT_TIME; 19341da177e4SLinus Torvalds d_instantiate(dentry, inode); 19351da177e4SLinus Torvalds dget(dentry); 19361da177e4SLinus Torvalds return 0; 19371da177e4SLinus Torvalds } 19381da177e4SLinus Torvalds 193969f07ec9SHugh Dickins static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd) 19401da177e4SLinus Torvalds { 194169f07ec9SHugh Dickins nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink); 1942cc314eefSLinus Torvalds return NULL; 19431da177e4SLinus Torvalds } 19441da177e4SLinus Torvalds 1945cc314eefSLinus Torvalds static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 19461da177e4SLinus Torvalds { 19471da177e4SLinus Torvalds struct page *page = NULL; 194841ffe5d5SHugh Dickins int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 194941ffe5d5SHugh Dickins nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); 1950d3602444SHugh Dickins if (page) 1951d3602444SHugh Dickins unlock_page(page); 1952cc314eefSLinus Torvalds return page; 19531da177e4SLinus Torvalds } 19541da177e4SLinus Torvalds 1955cc314eefSLinus Torvalds static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 19561da177e4SLinus Torvalds { 19571da177e4SLinus Torvalds if (!IS_ERR(nd_get_link(nd))) { 1958cc314eefSLinus Torvalds struct page *page = cookie; 19591da177e4SLinus Torvalds kunmap(page); 19601da177e4SLinus Torvalds mark_page_accessed(page); 19611da177e4SLinus Torvalds page_cache_release(page); 19621da177e4SLinus Torvalds } 19631da177e4SLinus Torvalds } 19641da177e4SLinus Torvalds 1965b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 1966b09e0fa4SEric Paris /* 1967b09e0fa4SEric Paris * Superblocks without xattr inode operations may get some security.* xattr 1968b09e0fa4SEric Paris * support from the LSM "for free". As soon as we have any other xattrs 1969b09e0fa4SEric Paris * like ACLs, we also need to implement the security.* handlers at 1970b09e0fa4SEric Paris * filesystem level, though. 1971b09e0fa4SEric Paris */ 1972b09e0fa4SEric Paris 19736d9d88d0SJarkko Sakkinen /* 19746d9d88d0SJarkko Sakkinen * Allocate new xattr and copy in the value; but leave the name to callers. 19756d9d88d0SJarkko Sakkinen */ 19766d9d88d0SJarkko Sakkinen static struct shmem_xattr *shmem_xattr_alloc(const void *value, size_t size) 19776d9d88d0SJarkko Sakkinen { 19786d9d88d0SJarkko Sakkinen struct shmem_xattr *new_xattr; 19796d9d88d0SJarkko Sakkinen size_t len; 19806d9d88d0SJarkko Sakkinen 19816d9d88d0SJarkko Sakkinen /* wrap around? 
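 *	   e.g. size == (size_t)-1 would make the sum below wrap to
 *	   sizeof(*new_xattr) - 1, which the "len <= sizeof(*new_xattr)"
 *	   check rejects.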
*/ 19826d9d88d0SJarkko Sakkinen len = sizeof(*new_xattr) + size; 19836d9d88d0SJarkko Sakkinen if (len <= sizeof(*new_xattr)) 19846d9d88d0SJarkko Sakkinen return NULL; 19856d9d88d0SJarkko Sakkinen 19866d9d88d0SJarkko Sakkinen new_xattr = kmalloc(len, GFP_KERNEL); 19876d9d88d0SJarkko Sakkinen if (!new_xattr) 19886d9d88d0SJarkko Sakkinen return NULL; 19896d9d88d0SJarkko Sakkinen 19906d9d88d0SJarkko Sakkinen new_xattr->size = size; 19916d9d88d0SJarkko Sakkinen memcpy(new_xattr->value, value, size); 19926d9d88d0SJarkko Sakkinen return new_xattr; 19936d9d88d0SJarkko Sakkinen } 19946d9d88d0SJarkko Sakkinen 19956d9d88d0SJarkko Sakkinen /* 19966d9d88d0SJarkko Sakkinen * Callback for security_inode_init_security() for acquiring xattrs. 19976d9d88d0SJarkko Sakkinen */ 19986d9d88d0SJarkko Sakkinen static int shmem_initxattrs(struct inode *inode, 19996d9d88d0SJarkko Sakkinen const struct xattr *xattr_array, 20006d9d88d0SJarkko Sakkinen void *fs_info) 20016d9d88d0SJarkko Sakkinen { 20026d9d88d0SJarkko Sakkinen struct shmem_inode_info *info = SHMEM_I(inode); 20036d9d88d0SJarkko Sakkinen const struct xattr *xattr; 20046d9d88d0SJarkko Sakkinen struct shmem_xattr *new_xattr; 20056d9d88d0SJarkko Sakkinen size_t len; 20066d9d88d0SJarkko Sakkinen 20076d9d88d0SJarkko Sakkinen for (xattr = xattr_array; xattr->name != NULL; xattr++) { 20086d9d88d0SJarkko Sakkinen new_xattr = shmem_xattr_alloc(xattr->value, xattr->value_len); 20096d9d88d0SJarkko Sakkinen if (!new_xattr) 20106d9d88d0SJarkko Sakkinen return -ENOMEM; 20116d9d88d0SJarkko Sakkinen 20126d9d88d0SJarkko Sakkinen len = strlen(xattr->name) + 1; 20136d9d88d0SJarkko Sakkinen new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 20146d9d88d0SJarkko Sakkinen GFP_KERNEL); 20156d9d88d0SJarkko Sakkinen if (!new_xattr->name) { 20166d9d88d0SJarkko Sakkinen kfree(new_xattr); 20176d9d88d0SJarkko Sakkinen return -ENOMEM; 20186d9d88d0SJarkko Sakkinen } 20196d9d88d0SJarkko Sakkinen 20206d9d88d0SJarkko Sakkinen memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 20216d9d88d0SJarkko Sakkinen XATTR_SECURITY_PREFIX_LEN); 20226d9d88d0SJarkko Sakkinen memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 20236d9d88d0SJarkko Sakkinen xattr->name, len); 20246d9d88d0SJarkko Sakkinen 20256d9d88d0SJarkko Sakkinen spin_lock(&info->lock); 20266d9d88d0SJarkko Sakkinen list_add(&new_xattr->list, &info->xattr_list); 20276d9d88d0SJarkko Sakkinen spin_unlock(&info->lock); 20286d9d88d0SJarkko Sakkinen } 20296d9d88d0SJarkko Sakkinen 20306d9d88d0SJarkko Sakkinen return 0; 20316d9d88d0SJarkko Sakkinen } 20326d9d88d0SJarkko Sakkinen 2033b09e0fa4SEric Paris static int shmem_xattr_get(struct dentry *dentry, const char *name, 2034b09e0fa4SEric Paris void *buffer, size_t size) 2035b09e0fa4SEric Paris { 2036b09e0fa4SEric Paris struct shmem_inode_info *info; 2037b09e0fa4SEric Paris struct shmem_xattr *xattr; 2038b09e0fa4SEric Paris int ret = -ENODATA; 2039b09e0fa4SEric Paris 2040b09e0fa4SEric Paris info = SHMEM_I(dentry->d_inode); 2041b09e0fa4SEric Paris 2042b09e0fa4SEric Paris spin_lock(&info->lock); 2043b09e0fa4SEric Paris list_for_each_entry(xattr, &info->xattr_list, list) { 2044b09e0fa4SEric Paris if (strcmp(name, xattr->name)) 2045b09e0fa4SEric Paris continue; 2046b09e0fa4SEric Paris 2047b09e0fa4SEric Paris ret = xattr->size; 2048b09e0fa4SEric Paris if (buffer) { 2049b09e0fa4SEric Paris if (size < xattr->size) 2050b09e0fa4SEric Paris ret = -ERANGE; 2051b09e0fa4SEric Paris else 2052b09e0fa4SEric Paris memcpy(buffer, xattr->value, xattr->size); 2053b09e0fa4SEric Paris } 2054b09e0fa4SEric Paris 
break; 2055b09e0fa4SEric Paris } 2056b09e0fa4SEric Paris spin_unlock(&info->lock); 2057b09e0fa4SEric Paris return ret; 2058b09e0fa4SEric Paris } 2059b09e0fa4SEric Paris 20606d9d88d0SJarkko Sakkinen static int shmem_xattr_set(struct inode *inode, const char *name, 2061b09e0fa4SEric Paris const void *value, size_t size, int flags) 2062b09e0fa4SEric Paris { 2063b09e0fa4SEric Paris struct shmem_inode_info *info = SHMEM_I(inode); 2064b09e0fa4SEric Paris struct shmem_xattr *xattr; 2065b09e0fa4SEric Paris struct shmem_xattr *new_xattr = NULL; 2066b09e0fa4SEric Paris int err = 0; 2067b09e0fa4SEric Paris 2068b09e0fa4SEric Paris /* value == NULL means remove */ 2069b09e0fa4SEric Paris if (value) { 20706d9d88d0SJarkko Sakkinen new_xattr = shmem_xattr_alloc(value, size); 2071b09e0fa4SEric Paris if (!new_xattr) 2072b09e0fa4SEric Paris return -ENOMEM; 2073b09e0fa4SEric Paris 2074b09e0fa4SEric Paris new_xattr->name = kstrdup(name, GFP_KERNEL); 2075b09e0fa4SEric Paris if (!new_xattr->name) { 2076b09e0fa4SEric Paris kfree(new_xattr); 2077b09e0fa4SEric Paris return -ENOMEM; 2078b09e0fa4SEric Paris } 2079b09e0fa4SEric Paris } 2080b09e0fa4SEric Paris 2081b09e0fa4SEric Paris spin_lock(&info->lock); 2082b09e0fa4SEric Paris list_for_each_entry(xattr, &info->xattr_list, list) { 2083b09e0fa4SEric Paris if (!strcmp(name, xattr->name)) { 2084b09e0fa4SEric Paris if (flags & XATTR_CREATE) { 2085b09e0fa4SEric Paris xattr = new_xattr; 2086b09e0fa4SEric Paris err = -EEXIST; 2087b09e0fa4SEric Paris } else if (new_xattr) { 2088b09e0fa4SEric Paris list_replace(&xattr->list, &new_xattr->list); 2089b09e0fa4SEric Paris } else { 2090b09e0fa4SEric Paris list_del(&xattr->list); 2091b09e0fa4SEric Paris } 2092b09e0fa4SEric Paris goto out; 2093b09e0fa4SEric Paris } 2094b09e0fa4SEric Paris } 2095b09e0fa4SEric Paris if (flags & XATTR_REPLACE) { 2096b09e0fa4SEric Paris xattr = new_xattr; 2097b09e0fa4SEric Paris err = -ENODATA; 2098b09e0fa4SEric Paris } else { 2099b09e0fa4SEric Paris list_add(&new_xattr->list, &info->xattr_list); 2100b09e0fa4SEric Paris xattr = NULL; 2101b09e0fa4SEric Paris } 2102b09e0fa4SEric Paris out: 2103b09e0fa4SEric Paris spin_unlock(&info->lock); 2104b09e0fa4SEric Paris if (xattr) 2105b09e0fa4SEric Paris kfree(xattr->name); 2106b09e0fa4SEric Paris kfree(xattr); 2107b09e0fa4SEric Paris return err; 2108b09e0fa4SEric Paris } 2109b09e0fa4SEric Paris 2110b09e0fa4SEric Paris static const struct xattr_handler *shmem_xattr_handlers[] = { 2111b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 2112b09e0fa4SEric Paris &generic_acl_access_handler, 2113b09e0fa4SEric Paris &generic_acl_default_handler, 2114b09e0fa4SEric Paris #endif 2115b09e0fa4SEric Paris NULL 2116b09e0fa4SEric Paris }; 2117b09e0fa4SEric Paris 2118b09e0fa4SEric Paris static int shmem_xattr_validate(const char *name) 2119b09e0fa4SEric Paris { 2120b09e0fa4SEric Paris struct { const char *prefix; size_t len; } arr[] = { 2121b09e0fa4SEric Paris { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, 2122b09e0fa4SEric Paris { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } 2123b09e0fa4SEric Paris }; 2124b09e0fa4SEric Paris int i; 2125b09e0fa4SEric Paris 2126b09e0fa4SEric Paris for (i = 0; i < ARRAY_SIZE(arr); i++) { 2127b09e0fa4SEric Paris size_t preflen = arr[i].len; 2128b09e0fa4SEric Paris if (strncmp(name, arr[i].prefix, preflen) == 0) { 2129b09e0fa4SEric Paris if (!name[preflen]) 2130b09e0fa4SEric Paris return -EINVAL; 2131b09e0fa4SEric Paris return 0; 2132b09e0fa4SEric Paris } 2133b09e0fa4SEric Paris } 2134b09e0fa4SEric Paris return -EOPNOTSUPP; 
2135b09e0fa4SEric Paris } 2136b09e0fa4SEric Paris 2137b09e0fa4SEric Paris static ssize_t shmem_getxattr(struct dentry *dentry, const char *name, 2138b09e0fa4SEric Paris void *buffer, size_t size) 2139b09e0fa4SEric Paris { 2140b09e0fa4SEric Paris int err; 2141b09e0fa4SEric Paris 2142b09e0fa4SEric Paris /* 2143b09e0fa4SEric Paris * If this is a request for a synthetic attribute in the system.* 2144b09e0fa4SEric Paris * namespace use the generic infrastructure to resolve a handler 2145b09e0fa4SEric Paris * for it via sb->s_xattr. 2146b09e0fa4SEric Paris */ 2147b09e0fa4SEric Paris if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2148b09e0fa4SEric Paris return generic_getxattr(dentry, name, buffer, size); 2149b09e0fa4SEric Paris 2150b09e0fa4SEric Paris err = shmem_xattr_validate(name); 2151b09e0fa4SEric Paris if (err) 2152b09e0fa4SEric Paris return err; 2153b09e0fa4SEric Paris 2154b09e0fa4SEric Paris return shmem_xattr_get(dentry, name, buffer, size); 2155b09e0fa4SEric Paris } 2156b09e0fa4SEric Paris 2157b09e0fa4SEric Paris static int shmem_setxattr(struct dentry *dentry, const char *name, 2158b09e0fa4SEric Paris const void *value, size_t size, int flags) 2159b09e0fa4SEric Paris { 2160b09e0fa4SEric Paris int err; 2161b09e0fa4SEric Paris 2162b09e0fa4SEric Paris /* 2163b09e0fa4SEric Paris * If this is a request for a synthetic attribute in the system.* 2164b09e0fa4SEric Paris * namespace use the generic infrastructure to resolve a handler 2165b09e0fa4SEric Paris * for it via sb->s_xattr. 2166b09e0fa4SEric Paris */ 2167b09e0fa4SEric Paris if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2168b09e0fa4SEric Paris return generic_setxattr(dentry, name, value, size, flags); 2169b09e0fa4SEric Paris 2170b09e0fa4SEric Paris err = shmem_xattr_validate(name); 2171b09e0fa4SEric Paris if (err) 2172b09e0fa4SEric Paris return err; 2173b09e0fa4SEric Paris 2174b09e0fa4SEric Paris if (size == 0) 2175b09e0fa4SEric Paris value = ""; /* empty EA, do not remove */ 2176b09e0fa4SEric Paris 21776d9d88d0SJarkko Sakkinen return shmem_xattr_set(dentry->d_inode, name, value, size, flags); 2178b09e0fa4SEric Paris 2179b09e0fa4SEric Paris } 2180b09e0fa4SEric Paris 2181b09e0fa4SEric Paris static int shmem_removexattr(struct dentry *dentry, const char *name) 2182b09e0fa4SEric Paris { 2183b09e0fa4SEric Paris int err; 2184b09e0fa4SEric Paris 2185b09e0fa4SEric Paris /* 2186b09e0fa4SEric Paris * If this is a request for a synthetic attribute in the system.* 2187b09e0fa4SEric Paris * namespace use the generic infrastructure to resolve a handler 2188b09e0fa4SEric Paris * for it via sb->s_xattr. 
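 *
 * For example (attribute names illustrative):
 *	"system.posix_acl_access" is routed through sb->s_xattr,
 *	"security.selinux" and "trusted.foo" fall through to
 *	shmem_xattr_set(..., XATTR_REPLACE) below, and
 *	"user.foo" gets -EOPNOTSUPP from shmem_xattr_validate().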
2189b09e0fa4SEric Paris */ 2190b09e0fa4SEric Paris if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2191b09e0fa4SEric Paris return generic_removexattr(dentry, name); 2192b09e0fa4SEric Paris 2193b09e0fa4SEric Paris err = shmem_xattr_validate(name); 2194b09e0fa4SEric Paris if (err) 2195b09e0fa4SEric Paris return err; 2196b09e0fa4SEric Paris 21976d9d88d0SJarkko Sakkinen return shmem_xattr_set(dentry->d_inode, name, NULL, 0, XATTR_REPLACE); 2198b09e0fa4SEric Paris } 2199b09e0fa4SEric Paris 2200b09e0fa4SEric Paris static bool xattr_is_trusted(const char *name) 2201b09e0fa4SEric Paris { 2202b09e0fa4SEric Paris return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); 2203b09e0fa4SEric Paris } 2204b09e0fa4SEric Paris 2205b09e0fa4SEric Paris static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 2206b09e0fa4SEric Paris { 2207b09e0fa4SEric Paris bool trusted = capable(CAP_SYS_ADMIN); 2208b09e0fa4SEric Paris struct shmem_xattr *xattr; 2209b09e0fa4SEric Paris struct shmem_inode_info *info; 2210b09e0fa4SEric Paris size_t used = 0; 2211b09e0fa4SEric Paris 2212b09e0fa4SEric Paris info = SHMEM_I(dentry->d_inode); 2213b09e0fa4SEric Paris 2214b09e0fa4SEric Paris spin_lock(&info->lock); 2215b09e0fa4SEric Paris list_for_each_entry(xattr, &info->xattr_list, list) { 2216b09e0fa4SEric Paris size_t len; 2217b09e0fa4SEric Paris 2218b09e0fa4SEric Paris /* skip "trusted." attributes for unprivileged callers */ 2219b09e0fa4SEric Paris if (!trusted && xattr_is_trusted(xattr->name)) 2220b09e0fa4SEric Paris continue; 2221b09e0fa4SEric Paris 2222b09e0fa4SEric Paris len = strlen(xattr->name) + 1; 2223b09e0fa4SEric Paris used += len; 2224b09e0fa4SEric Paris if (buffer) { 2225b09e0fa4SEric Paris if (size < used) { 2226b09e0fa4SEric Paris used = -ERANGE; 2227b09e0fa4SEric Paris break; 2228b09e0fa4SEric Paris } 2229b09e0fa4SEric Paris memcpy(buffer, xattr->name, len); 2230b09e0fa4SEric Paris buffer += len; 2231b09e0fa4SEric Paris } 2232b09e0fa4SEric Paris } 2233b09e0fa4SEric Paris spin_unlock(&info->lock); 2234b09e0fa4SEric Paris 2235b09e0fa4SEric Paris return used; 2236b09e0fa4SEric Paris } 2237b09e0fa4SEric Paris #endif /* CONFIG_TMPFS_XATTR */ 2238b09e0fa4SEric Paris 223969f07ec9SHugh Dickins static const struct inode_operations shmem_short_symlink_operations = { 22401da177e4SLinus Torvalds .readlink = generic_readlink, 224169f07ec9SHugh Dickins .follow_link = shmem_follow_short_symlink, 2242b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 2243b09e0fa4SEric Paris .setxattr = shmem_setxattr, 2244b09e0fa4SEric Paris .getxattr = shmem_getxattr, 2245b09e0fa4SEric Paris .listxattr = shmem_listxattr, 2246b09e0fa4SEric Paris .removexattr = shmem_removexattr, 2247b09e0fa4SEric Paris #endif 22481da177e4SLinus Torvalds }; 22491da177e4SLinus Torvalds 225092e1d5beSArjan van de Ven static const struct inode_operations shmem_symlink_inode_operations = { 22511da177e4SLinus Torvalds .readlink = generic_readlink, 22521da177e4SLinus Torvalds .follow_link = shmem_follow_link, 22531da177e4SLinus Torvalds .put_link = shmem_put_link, 2254b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 2255b09e0fa4SEric Paris .setxattr = shmem_setxattr, 2256b09e0fa4SEric Paris .getxattr = shmem_getxattr, 2257b09e0fa4SEric Paris .listxattr = shmem_listxattr, 2258b09e0fa4SEric Paris .removexattr = shmem_removexattr, 225939f0247dSAndreas Gruenbacher #endif 2260b09e0fa4SEric Paris }; 226139f0247dSAndreas Gruenbacher 226291828a40SDavid M. 
Grimes static struct dentry *shmem_get_parent(struct dentry *child) 226391828a40SDavid M. Grimes { 226491828a40SDavid M. Grimes return ERR_PTR(-ESTALE); 226591828a40SDavid M. Grimes } 226691828a40SDavid M. Grimes 226791828a40SDavid M. Grimes static int shmem_match(struct inode *ino, void *vfh) 226891828a40SDavid M. Grimes { 226991828a40SDavid M. Grimes __u32 *fh = vfh; 227091828a40SDavid M. Grimes __u64 inum = fh[2]; 227191828a40SDavid M. Grimes inum = (inum << 32) | fh[1]; 227291828a40SDavid M. Grimes return ino->i_ino == inum && fh[0] == ino->i_generation; 227391828a40SDavid M. Grimes } 227491828a40SDavid M. Grimes 2275480b116cSChristoph Hellwig static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 2276480b116cSChristoph Hellwig struct fid *fid, int fh_len, int fh_type) 227791828a40SDavid M. Grimes { 227891828a40SDavid M. Grimes struct inode *inode; 2279480b116cSChristoph Hellwig struct dentry *dentry = NULL; 2280480b116cSChristoph Hellwig u64 inum = fid->raw[2]; 2281480b116cSChristoph Hellwig inum = (inum << 32) | fid->raw[1]; 228291828a40SDavid M. Grimes 2283480b116cSChristoph Hellwig if (fh_len < 3) 2284480b116cSChristoph Hellwig return NULL; 2285480b116cSChristoph Hellwig 2286480b116cSChristoph Hellwig inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 2287480b116cSChristoph Hellwig shmem_match, fid->raw); 228891828a40SDavid M. Grimes if (inode) { 2289480b116cSChristoph Hellwig dentry = d_find_alias(inode); 229091828a40SDavid M. Grimes iput(inode); 229191828a40SDavid M. Grimes } 229291828a40SDavid M. Grimes 2293480b116cSChristoph Hellwig return dentry; 229491828a40SDavid M. Grimes } 229591828a40SDavid M. Grimes 229691828a40SDavid M. Grimes static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, 229791828a40SDavid M. Grimes int connectable) 229891828a40SDavid M. Grimes { 229991828a40SDavid M. Grimes struct inode *inode = dentry->d_inode; 230091828a40SDavid M. Grimes 23015fe0c237SAneesh Kumar K.V if (*len < 3) { 23025fe0c237SAneesh Kumar K.V *len = 3; 230391828a40SDavid M. Grimes return 255; 23045fe0c237SAneesh Kumar K.V } 230591828a40SDavid M. Grimes 23061d3382cbSAl Viro if (inode_unhashed(inode)) { 230791828a40SDavid M. Grimes /* Unfortunately insert_inode_hash is not idempotent, 230891828a40SDavid M. Grimes * so as we hash inodes here rather than at creation 230991828a40SDavid M. Grimes * time, we need a lock to ensure we only try 231091828a40SDavid M. Grimes * to do it once 231191828a40SDavid M. Grimes */ 231291828a40SDavid M. Grimes static DEFINE_SPINLOCK(lock); 231391828a40SDavid M. Grimes spin_lock(&lock); 23141d3382cbSAl Viro if (inode_unhashed(inode)) 231591828a40SDavid M. Grimes __insert_inode_hash(inode, 231691828a40SDavid M. Grimes inode->i_ino + inode->i_generation); 231791828a40SDavid M. Grimes spin_unlock(&lock); 231891828a40SDavid M. Grimes } 231991828a40SDavid M. Grimes 232091828a40SDavid M. Grimes fh[0] = inode->i_generation; 232191828a40SDavid M. Grimes fh[1] = inode->i_ino; 232291828a40SDavid M. Grimes fh[2] = ((__u64)inode->i_ino) >> 32; 232391828a40SDavid M. Grimes 232491828a40SDavid M. Grimes *len = 3; 232591828a40SDavid M. Grimes return 1; 232691828a40SDavid M. Grimes } 232791828a40SDavid M. Grimes 232839655164SChristoph Hellwig static const struct export_operations shmem_export_ops = { 232991828a40SDavid M. Grimes .get_parent = shmem_get_parent, 233091828a40SDavid M. Grimes .encode_fh = shmem_encode_fh, 2331480b116cSChristoph Hellwig .fh_to_dentry = shmem_fh_to_dentry, 233291828a40SDavid M. Grimes }; 233391828a40SDavid M. 
Grimes 2334680d794bSakpm@linux-foundation.org static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 2335680d794bSakpm@linux-foundation.org bool remount) 23361da177e4SLinus Torvalds { 23371da177e4SLinus Torvalds char *this_char, *value, *rest; 23388751e039SEric W. Biederman uid_t uid; 23398751e039SEric W. Biederman gid_t gid; 23401da177e4SLinus Torvalds 2341b00dc3adSHugh Dickins while (options != NULL) { 2342b00dc3adSHugh Dickins this_char = options; 2343b00dc3adSHugh Dickins for (;;) { 2344b00dc3adSHugh Dickins /* 2345b00dc3adSHugh Dickins * NUL-terminate this option: unfortunately, 2346b00dc3adSHugh Dickins * mount options form a comma-separated list, 2347b00dc3adSHugh Dickins * but mpol's nodelist may also contain commas. 2348b00dc3adSHugh Dickins */ 2349b00dc3adSHugh Dickins options = strchr(options, ','); 2350b00dc3adSHugh Dickins if (options == NULL) 2351b00dc3adSHugh Dickins break; 2352b00dc3adSHugh Dickins options++; 2353b00dc3adSHugh Dickins if (!isdigit(*options)) { 2354b00dc3adSHugh Dickins options[-1] = '\0'; 2355b00dc3adSHugh Dickins break; 2356b00dc3adSHugh Dickins } 2357b00dc3adSHugh Dickins } 23581da177e4SLinus Torvalds if (!*this_char) 23591da177e4SLinus Torvalds continue; 23601da177e4SLinus Torvalds if ((value = strchr(this_char,'=')) != NULL) { 23611da177e4SLinus Torvalds *value++ = 0; 23621da177e4SLinus Torvalds } else { 23631da177e4SLinus Torvalds printk(KERN_ERR 23641da177e4SLinus Torvalds "tmpfs: No value for mount option '%s'\n", 23651da177e4SLinus Torvalds this_char); 23661da177e4SLinus Torvalds return 1; 23671da177e4SLinus Torvalds } 23681da177e4SLinus Torvalds 23691da177e4SLinus Torvalds if (!strcmp(this_char,"size")) { 23701da177e4SLinus Torvalds unsigned long long size; 23711da177e4SLinus Torvalds size = memparse(value,&rest); 23721da177e4SLinus Torvalds if (*rest == '%') { 23731da177e4SLinus Torvalds size <<= PAGE_SHIFT; 23741da177e4SLinus Torvalds size *= totalram_pages; 23751da177e4SLinus Torvalds do_div(size, 100); 23761da177e4SLinus Torvalds rest++; 23771da177e4SLinus Torvalds } 23781da177e4SLinus Torvalds if (*rest) 23791da177e4SLinus Torvalds goto bad_val; 2380680d794bSakpm@linux-foundation.org sbinfo->max_blocks = 2381680d794bSakpm@linux-foundation.org DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 23821da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_blocks")) { 2383680d794bSakpm@linux-foundation.org sbinfo->max_blocks = memparse(value, &rest); 23841da177e4SLinus Torvalds if (*rest) 23851da177e4SLinus Torvalds goto bad_val; 23861da177e4SLinus Torvalds } else if (!strcmp(this_char,"nr_inodes")) { 2387680d794bSakpm@linux-foundation.org sbinfo->max_inodes = memparse(value, &rest); 23881da177e4SLinus Torvalds if (*rest) 23891da177e4SLinus Torvalds goto bad_val; 23901da177e4SLinus Torvalds } else if (!strcmp(this_char,"mode")) { 2391680d794bSakpm@linux-foundation.org if (remount) 23921da177e4SLinus Torvalds continue; 2393680d794bSakpm@linux-foundation.org sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 23941da177e4SLinus Torvalds if (*rest) 23951da177e4SLinus Torvalds goto bad_val; 23961da177e4SLinus Torvalds } else if (!strcmp(this_char,"uid")) { 2397680d794bSakpm@linux-foundation.org if (remount) 23981da177e4SLinus Torvalds continue; 23998751e039SEric W. Biederman uid = simple_strtoul(value, &rest, 0); 24001da177e4SLinus Torvalds if (*rest) 24011da177e4SLinus Torvalds goto bad_val; 24028751e039SEric W. Biederman sbinfo->uid = make_kuid(current_user_ns(), uid); 24038751e039SEric W. 
Biederman if (!uid_valid(sbinfo->uid)) 24048751e039SEric W. Biederman goto bad_val; 24051da177e4SLinus Torvalds } else if (!strcmp(this_char,"gid")) { 2406680d794bSakpm@linux-foundation.org if (remount) 24071da177e4SLinus Torvalds continue; 24088751e039SEric W. Biederman gid = simple_strtoul(value, &rest, 0); 24091da177e4SLinus Torvalds if (*rest) 24101da177e4SLinus Torvalds goto bad_val; 24118751e039SEric W. Biederman sbinfo->gid = make_kgid(current_user_ns(), gid); 24128751e039SEric W. Biederman if (!gid_valid(sbinfo->gid)) 24138751e039SEric W. Biederman goto bad_val; 24147339ff83SRobin Holt } else if (!strcmp(this_char,"mpol")) { 241571fe804bSLee Schermerhorn if (mpol_parse_str(value, &sbinfo->mpol, 1)) 24167339ff83SRobin Holt goto bad_val; 24171da177e4SLinus Torvalds } else { 24181da177e4SLinus Torvalds printk(KERN_ERR "tmpfs: Bad mount option %s\n", 24191da177e4SLinus Torvalds this_char); 24201da177e4SLinus Torvalds return 1; 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds } 24231da177e4SLinus Torvalds return 0; 24241da177e4SLinus Torvalds 24251da177e4SLinus Torvalds bad_val: 24261da177e4SLinus Torvalds printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", 24271da177e4SLinus Torvalds value, this_char); 24281da177e4SLinus Torvalds return 1; 24291da177e4SLinus Torvalds 24301da177e4SLinus Torvalds } 24311da177e4SLinus Torvalds 24321da177e4SLinus Torvalds static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 24331da177e4SLinus Torvalds { 24341da177e4SLinus Torvalds struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2435680d794bSakpm@linux-foundation.org struct shmem_sb_info config = *sbinfo; 24360edd73b3SHugh Dickins unsigned long inodes; 24370edd73b3SHugh Dickins int error = -EINVAL; 24381da177e4SLinus Torvalds 2439680d794bSakpm@linux-foundation.org if (shmem_parse_options(data, &config, true)) 24400edd73b3SHugh Dickins return error; 24410edd73b3SHugh Dickins 24420edd73b3SHugh Dickins spin_lock(&sbinfo->stat_lock); 24430edd73b3SHugh Dickins inodes = sbinfo->max_inodes - sbinfo->free_inodes; 24447e496299STim Chen if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 24450edd73b3SHugh Dickins goto out; 2446680d794bSakpm@linux-foundation.org if (config.max_inodes < inodes) 24470edd73b3SHugh Dickins goto out; 24480edd73b3SHugh Dickins /* 244954af6042SHugh Dickins * Those tests disallow limited->unlimited while any are in use; 24500edd73b3SHugh Dickins * but we must separately disallow unlimited->limited, because 24510edd73b3SHugh Dickins * in that case we have no record of how much is already in use. 
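 * For example, an instance originally mounted without limits cannot later be
 * remounted with a nonzero nr_blocks or nr_inodes, since nothing was counted.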
24520edd73b3SHugh Dickins */ 2453680d794bSakpm@linux-foundation.org if (config.max_blocks && !sbinfo->max_blocks) 24540edd73b3SHugh Dickins goto out; 2455680d794bSakpm@linux-foundation.org if (config.max_inodes && !sbinfo->max_inodes) 24560edd73b3SHugh Dickins goto out; 24570edd73b3SHugh Dickins 24580edd73b3SHugh Dickins error = 0; 2459680d794bSakpm@linux-foundation.org sbinfo->max_blocks = config.max_blocks; 2460680d794bSakpm@linux-foundation.org sbinfo->max_inodes = config.max_inodes; 2461680d794bSakpm@linux-foundation.org sbinfo->free_inodes = config.max_inodes - inodes; 246271fe804bSLee Schermerhorn 246371fe804bSLee Schermerhorn mpol_put(sbinfo->mpol); 246471fe804bSLee Schermerhorn sbinfo->mpol = config.mpol; /* transfers initial ref */ 24650edd73b3SHugh Dickins out: 24660edd73b3SHugh Dickins spin_unlock(&sbinfo->stat_lock); 24670edd73b3SHugh Dickins return error; 24681da177e4SLinus Torvalds } 2469680d794bSakpm@linux-foundation.org 247034c80b1dSAl Viro static int shmem_show_options(struct seq_file *seq, struct dentry *root) 2471680d794bSakpm@linux-foundation.org { 247234c80b1dSAl Viro struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 2473680d794bSakpm@linux-foundation.org 2474680d794bSakpm@linux-foundation.org if (sbinfo->max_blocks != shmem_default_max_blocks()) 2475680d794bSakpm@linux-foundation.org seq_printf(seq, ",size=%luk", 2476680d794bSakpm@linux-foundation.org sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2477680d794bSakpm@linux-foundation.org if (sbinfo->max_inodes != shmem_default_max_inodes()) 2478680d794bSakpm@linux-foundation.org seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2479680d794bSakpm@linux-foundation.org if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 248009208d15SAl Viro seq_printf(seq, ",mode=%03ho", sbinfo->mode); 24818751e039SEric W. Biederman if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 24828751e039SEric W. Biederman seq_printf(seq, ",uid=%u", 24838751e039SEric W. Biederman from_kuid_munged(&init_user_ns, sbinfo->uid)); 24848751e039SEric W. Biederman if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 24858751e039SEric W. Biederman seq_printf(seq, ",gid=%u", 24868751e039SEric W. 
Biederman from_kgid_munged(&init_user_ns, sbinfo->gid)); 248771fe804bSLee Schermerhorn shmem_show_mpol(seq, sbinfo->mpol); 2488680d794bSakpm@linux-foundation.org return 0; 2489680d794bSakpm@linux-foundation.org } 2490680d794bSakpm@linux-foundation.org #endif /* CONFIG_TMPFS */ 24911da177e4SLinus Torvalds 24921da177e4SLinus Torvalds static void shmem_put_super(struct super_block *sb) 24931da177e4SLinus Torvalds { 2494602586a8SHugh Dickins struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2495602586a8SHugh Dickins 2496602586a8SHugh Dickins percpu_counter_destroy(&sbinfo->used_blocks); 2497602586a8SHugh Dickins kfree(sbinfo); 24981da177e4SLinus Torvalds sb->s_fs_info = NULL; 24991da177e4SLinus Torvalds } 25001da177e4SLinus Torvalds 25012b2af54aSKay Sievers int shmem_fill_super(struct super_block *sb, void *data, int silent) 25021da177e4SLinus Torvalds { 25031da177e4SLinus Torvalds struct inode *inode; 25040edd73b3SHugh Dickins struct shmem_sb_info *sbinfo; 2505680d794bSakpm@linux-foundation.org int err = -ENOMEM; 2506680d794bSakpm@linux-foundation.org 2507680d794bSakpm@linux-foundation.org /* Round up to L1_CACHE_BYTES to resist false sharing */ 2508425fbf04SPekka Enberg sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 2509680d794bSakpm@linux-foundation.org L1_CACHE_BYTES), GFP_KERNEL); 2510680d794bSakpm@linux-foundation.org if (!sbinfo) 2511680d794bSakpm@linux-foundation.org return -ENOMEM; 2512680d794bSakpm@linux-foundation.org 2513680d794bSakpm@linux-foundation.org sbinfo->mode = S_IRWXUGO | S_ISVTX; 251476aac0e9SDavid Howells sbinfo->uid = current_fsuid(); 251576aac0e9SDavid Howells sbinfo->gid = current_fsgid(); 2516680d794bSakpm@linux-foundation.org sb->s_fs_info = sbinfo; 25171da177e4SLinus Torvalds 25180edd73b3SHugh Dickins #ifdef CONFIG_TMPFS 25191da177e4SLinus Torvalds /* 25201da177e4SLinus Torvalds * Per default we only allow half of the physical ram per 25211da177e4SLinus Torvalds * tmpfs instance, limiting inodes to one per page of lowmem; 25221da177e4SLinus Torvalds * but the internal instance is left unlimited. 25231da177e4SLinus Torvalds */ 25241da177e4SLinus Torvalds if (!(sb->s_flags & MS_NOUSER)) { 2525680d794bSakpm@linux-foundation.org sbinfo->max_blocks = shmem_default_max_blocks(); 2526680d794bSakpm@linux-foundation.org sbinfo->max_inodes = shmem_default_max_inodes(); 2527680d794bSakpm@linux-foundation.org if (shmem_parse_options(data, sbinfo, false)) { 2528680d794bSakpm@linux-foundation.org err = -EINVAL; 2529680d794bSakpm@linux-foundation.org goto failed; 2530680d794bSakpm@linux-foundation.org } 25311da177e4SLinus Torvalds } 253291828a40SDavid M. Grimes sb->s_export_op = &shmem_export_ops; 25332f6e38f3SHugh Dickins sb->s_flags |= MS_NOSEC; 25340edd73b3SHugh Dickins #else 25350edd73b3SHugh Dickins sb->s_flags |= MS_NOUSER; 25360edd73b3SHugh Dickins #endif 25371da177e4SLinus Torvalds 25381da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 2539602586a8SHugh Dickins if (percpu_counter_init(&sbinfo->used_blocks, 0)) 2540602586a8SHugh Dickins goto failed; 2541680d794bSakpm@linux-foundation.org sbinfo->free_inodes = sbinfo->max_inodes; 25421da177e4SLinus Torvalds 2543285b2c4fSHugh Dickins sb->s_maxbytes = MAX_LFS_FILESIZE; 25441da177e4SLinus Torvalds sb->s_blocksize = PAGE_CACHE_SIZE; 25451da177e4SLinus Torvalds sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 25461da177e4SLinus Torvalds sb->s_magic = TMPFS_MAGIC; 25471da177e4SLinus Torvalds sb->s_op = &shmem_ops; 2548cfd95a9cSRobin H. 
Johnson sb->s_time_gran = 1; 2549b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 255039f0247dSAndreas Gruenbacher sb->s_xattr = shmem_xattr_handlers; 2551b09e0fa4SEric Paris #endif 2552b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_POSIX_ACL 255339f0247dSAndreas Gruenbacher sb->s_flags |= MS_POSIXACL; 255439f0247dSAndreas Gruenbacher #endif 25550edd73b3SHugh Dickins 2556454abafeSDmitry Monakhov inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 25571da177e4SLinus Torvalds if (!inode) 25581da177e4SLinus Torvalds goto failed; 2559680d794bSakpm@linux-foundation.org inode->i_uid = sbinfo->uid; 2560680d794bSakpm@linux-foundation.org inode->i_gid = sbinfo->gid; 2561318ceed0SAl Viro sb->s_root = d_make_root(inode); 2562318ceed0SAl Viro if (!sb->s_root) 256348fde701SAl Viro goto failed; 25641da177e4SLinus Torvalds return 0; 25651da177e4SLinus Torvalds 25661da177e4SLinus Torvalds failed: 25671da177e4SLinus Torvalds shmem_put_super(sb); 25681da177e4SLinus Torvalds return err; 25691da177e4SLinus Torvalds } 25701da177e4SLinus Torvalds 2571fcc234f8SPekka Enberg static struct kmem_cache *shmem_inode_cachep; 25721da177e4SLinus Torvalds 25731da177e4SLinus Torvalds static struct inode *shmem_alloc_inode(struct super_block *sb) 25741da177e4SLinus Torvalds { 257541ffe5d5SHugh Dickins struct shmem_inode_info *info; 257641ffe5d5SHugh Dickins info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 257741ffe5d5SHugh Dickins if (!info) 25781da177e4SLinus Torvalds return NULL; 257941ffe5d5SHugh Dickins return &info->vfs_inode; 25801da177e4SLinus Torvalds } 25811da177e4SLinus Torvalds 258241ffe5d5SHugh Dickins static void shmem_destroy_callback(struct rcu_head *head) 2583fa0d7e3dSNick Piggin { 2584fa0d7e3dSNick Piggin struct inode *inode = container_of(head, struct inode, i_rcu); 2585fa0d7e3dSNick Piggin kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 2586fa0d7e3dSNick Piggin } 2587fa0d7e3dSNick Piggin 25881da177e4SLinus Torvalds static void shmem_destroy_inode(struct inode *inode) 25891da177e4SLinus Torvalds { 259009208d15SAl Viro if (S_ISREG(inode->i_mode)) 25911da177e4SLinus Torvalds mpol_free_shared_policy(&SHMEM_I(inode)->policy); 259241ffe5d5SHugh Dickins call_rcu(&inode->i_rcu, shmem_destroy_callback); 25931da177e4SLinus Torvalds } 25941da177e4SLinus Torvalds 259541ffe5d5SHugh Dickins static void shmem_init_inode(void *foo) 25961da177e4SLinus Torvalds { 259741ffe5d5SHugh Dickins struct shmem_inode_info *info = foo; 259841ffe5d5SHugh Dickins inode_init_once(&info->vfs_inode); 25991da177e4SLinus Torvalds } 26001da177e4SLinus Torvalds 260141ffe5d5SHugh Dickins static int shmem_init_inodecache(void) 26021da177e4SLinus Torvalds { 26031da177e4SLinus Torvalds shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 26041da177e4SLinus Torvalds sizeof(struct shmem_inode_info), 260541ffe5d5SHugh Dickins 0, SLAB_PANIC, shmem_init_inode); 26061da177e4SLinus Torvalds return 0; 26071da177e4SLinus Torvalds } 26081da177e4SLinus Torvalds 260941ffe5d5SHugh Dickins static void shmem_destroy_inodecache(void) 26101da177e4SLinus Torvalds { 26111a1d92c1SAlexey Dobriyan kmem_cache_destroy(shmem_inode_cachep); 26121da177e4SLinus Torvalds } 26131da177e4SLinus Torvalds 2614f5e54d6eSChristoph Hellwig static const struct address_space_operations shmem_aops = { 26151da177e4SLinus Torvalds .writepage = shmem_writepage, 261676719325SKen Chen .set_page_dirty = __set_page_dirty_no_writeback, 26171da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 2618800d15a5SNick Piggin .write_begin = shmem_write_begin, 2619800d15a5SNick 
Piggin .write_end = shmem_write_end, 26201da177e4SLinus Torvalds #endif 2621304dbdb7SLee Schermerhorn .migratepage = migrate_page, 2622aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 26231da177e4SLinus Torvalds }; 26241da177e4SLinus Torvalds 262515ad7cdcSHelge Deller static const struct file_operations shmem_file_operations = { 26261da177e4SLinus Torvalds .mmap = shmem_mmap, 26271da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 26281da177e4SLinus Torvalds .llseek = generic_file_llseek, 2629bcd78e49SHugh Dickins .read = do_sync_read, 26305402b976SHugh Dickins .write = do_sync_write, 2631bcd78e49SHugh Dickins .aio_read = shmem_file_aio_read, 26325402b976SHugh Dickins .aio_write = generic_file_aio_write, 26331b061d92SChristoph Hellwig .fsync = noop_fsync, 2634708e3508SHugh Dickins .splice_read = shmem_file_splice_read, 2635ae976416SHugh Dickins .splice_write = generic_file_splice_write, 263683e4fa9cSHugh Dickins .fallocate = shmem_fallocate, 26371da177e4SLinus Torvalds #endif 26381da177e4SLinus Torvalds }; 26391da177e4SLinus Torvalds 264092e1d5beSArjan van de Ven static const struct inode_operations shmem_inode_operations = { 264194c1e62dSHugh Dickins .setattr = shmem_setattr, 2642b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 2643b09e0fa4SEric Paris .setxattr = shmem_setxattr, 2644b09e0fa4SEric Paris .getxattr = shmem_getxattr, 2645b09e0fa4SEric Paris .listxattr = shmem_listxattr, 2646b09e0fa4SEric Paris .removexattr = shmem_removexattr, 2647b09e0fa4SEric Paris #endif 26481da177e4SLinus Torvalds }; 26491da177e4SLinus Torvalds 265092e1d5beSArjan van de Ven static const struct inode_operations shmem_dir_inode_operations = { 26511da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 26521da177e4SLinus Torvalds .create = shmem_create, 26531da177e4SLinus Torvalds .lookup = simple_lookup, 26541da177e4SLinus Torvalds .link = shmem_link, 26551da177e4SLinus Torvalds .unlink = shmem_unlink, 26561da177e4SLinus Torvalds .symlink = shmem_symlink, 26571da177e4SLinus Torvalds .mkdir = shmem_mkdir, 26581da177e4SLinus Torvalds .rmdir = shmem_rmdir, 26591da177e4SLinus Torvalds .mknod = shmem_mknod, 26601da177e4SLinus Torvalds .rename = shmem_rename, 26611da177e4SLinus Torvalds #endif 2662b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 2663b09e0fa4SEric Paris .setxattr = shmem_setxattr, 2664b09e0fa4SEric Paris .getxattr = shmem_getxattr, 2665b09e0fa4SEric Paris .listxattr = shmem_listxattr, 2666b09e0fa4SEric Paris .removexattr = shmem_removexattr, 2667b09e0fa4SEric Paris #endif 266839f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 266994c1e62dSHugh Dickins .setattr = shmem_setattr, 267039f0247dSAndreas Gruenbacher #endif 267139f0247dSAndreas Gruenbacher }; 267239f0247dSAndreas Gruenbacher 267392e1d5beSArjan van de Ven static const struct inode_operations shmem_special_inode_operations = { 2674b09e0fa4SEric Paris #ifdef CONFIG_TMPFS_XATTR 2675b09e0fa4SEric Paris .setxattr = shmem_setxattr, 2676b09e0fa4SEric Paris .getxattr = shmem_getxattr, 2677b09e0fa4SEric Paris .listxattr = shmem_listxattr, 2678b09e0fa4SEric Paris .removexattr = shmem_removexattr, 2679b09e0fa4SEric Paris #endif 268039f0247dSAndreas Gruenbacher #ifdef CONFIG_TMPFS_POSIX_ACL 268194c1e62dSHugh Dickins .setattr = shmem_setattr, 268239f0247dSAndreas Gruenbacher #endif 26831da177e4SLinus Torvalds }; 26841da177e4SLinus Torvalds 2685759b9775SHugh Dickins static const struct super_operations shmem_ops = { 26861da177e4SLinus Torvalds .alloc_inode = shmem_alloc_inode, 26871da177e4SLinus Torvalds .destroy_inode = shmem_destroy_inode, 
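	/* statfs, remount and show_options below apply only to the user-mountable tmpfs interface */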
26881da177e4SLinus Torvalds #ifdef CONFIG_TMPFS 26891da177e4SLinus Torvalds .statfs = shmem_statfs, 26901da177e4SLinus Torvalds .remount_fs = shmem_remount_fs, 2691680d794bSakpm@linux-foundation.org .show_options = shmem_show_options, 26921da177e4SLinus Torvalds #endif 26931f895f75SAl Viro .evict_inode = shmem_evict_inode, 26941da177e4SLinus Torvalds .drop_inode = generic_delete_inode, 26951da177e4SLinus Torvalds .put_super = shmem_put_super, 26961da177e4SLinus Torvalds }; 26971da177e4SLinus Torvalds 2698f0f37e2fSAlexey Dobriyan static const struct vm_operations_struct shmem_vm_ops = { 269954cb8821SNick Piggin .fault = shmem_fault, 27001da177e4SLinus Torvalds #ifdef CONFIG_NUMA 27011da177e4SLinus Torvalds .set_policy = shmem_set_policy, 27021da177e4SLinus Torvalds .get_policy = shmem_get_policy, 27031da177e4SLinus Torvalds #endif 27041da177e4SLinus Torvalds }; 27051da177e4SLinus Torvalds 27063c26ff6eSAl Viro static struct dentry *shmem_mount(struct file_system_type *fs_type, 27073c26ff6eSAl Viro int flags, const char *dev_name, void *data) 27081da177e4SLinus Torvalds { 27093c26ff6eSAl Viro return mount_nodev(fs_type, flags, data, shmem_fill_super); 27101da177e4SLinus Torvalds } 27111da177e4SLinus Torvalds 271241ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 27131da177e4SLinus Torvalds .owner = THIS_MODULE, 27141da177e4SLinus Torvalds .name = "tmpfs", 27153c26ff6eSAl Viro .mount = shmem_mount, 27161da177e4SLinus Torvalds .kill_sb = kill_litter_super, 27171da177e4SLinus Torvalds }; 27181da177e4SLinus Torvalds 271941ffe5d5SHugh Dickins int __init shmem_init(void) 27201da177e4SLinus Torvalds { 27211da177e4SLinus Torvalds int error; 27221da177e4SLinus Torvalds 2723e0bf68ddSPeter Zijlstra error = bdi_init(&shmem_backing_dev_info); 2724e0bf68ddSPeter Zijlstra if (error) 2725e0bf68ddSPeter Zijlstra goto out4; 2726e0bf68ddSPeter Zijlstra 272741ffe5d5SHugh Dickins error = shmem_init_inodecache(); 27281da177e4SLinus Torvalds if (error) 27291da177e4SLinus Torvalds goto out3; 27301da177e4SLinus Torvalds 273141ffe5d5SHugh Dickins error = register_filesystem(&shmem_fs_type); 27321da177e4SLinus Torvalds if (error) { 27331da177e4SLinus Torvalds printk(KERN_ERR "Could not register tmpfs\n"); 27341da177e4SLinus Torvalds goto out2; 27351da177e4SLinus Torvalds } 273695dc112aSGreg Kroah-Hartman 273741ffe5d5SHugh Dickins shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER, 273841ffe5d5SHugh Dickins shmem_fs_type.name, NULL); 27391da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) { 27401da177e4SLinus Torvalds error = PTR_ERR(shm_mnt); 27411da177e4SLinus Torvalds printk(KERN_ERR "Could not kern_mount tmpfs\n"); 27421da177e4SLinus Torvalds goto out1; 27431da177e4SLinus Torvalds } 27441da177e4SLinus Torvalds return 0; 27451da177e4SLinus Torvalds 27461da177e4SLinus Torvalds out1: 274741ffe5d5SHugh Dickins unregister_filesystem(&shmem_fs_type); 27481da177e4SLinus Torvalds out2: 274941ffe5d5SHugh Dickins shmem_destroy_inodecache(); 27501da177e4SLinus Torvalds out3: 2751e0bf68ddSPeter Zijlstra bdi_destroy(&shmem_backing_dev_info); 2752e0bf68ddSPeter Zijlstra out4: 27531da177e4SLinus Torvalds shm_mnt = ERR_PTR(error); 27541da177e4SLinus Torvalds return error; 27551da177e4SLinus Torvalds } 2756853ac43aSMatt Mackall 2757853ac43aSMatt Mackall #else /* !CONFIG_SHMEM */ 2758853ac43aSMatt Mackall 2759853ac43aSMatt Mackall /* 2760853ac43aSMatt Mackall * tiny-shmem: simple shmemfs and tmpfs using ramfs code 2761853ac43aSMatt Mackall * 2762853ac43aSMatt Mackall * This is intended for small system where the benefits 
of the full 2763853ac43aSMatt Mackall * shmem code (swap-backed and resource-limited) are outweighed by 2764853ac43aSMatt Mackall * their complexity. On systems without swap this code should be 2765853ac43aSMatt Mackall * effectively equivalent, but much lighter weight. 2766853ac43aSMatt Mackall */ 2767853ac43aSMatt Mackall 2768853ac43aSMatt Mackall #include <linux/ramfs.h> 2769853ac43aSMatt Mackall 277041ffe5d5SHugh Dickins static struct file_system_type shmem_fs_type = { 2771853ac43aSMatt Mackall .name = "tmpfs", 27723c26ff6eSAl Viro .mount = ramfs_mount, 2773853ac43aSMatt Mackall .kill_sb = kill_litter_super, 2774853ac43aSMatt Mackall }; 2775853ac43aSMatt Mackall 277641ffe5d5SHugh Dickins int __init shmem_init(void) 2777853ac43aSMatt Mackall { 277841ffe5d5SHugh Dickins BUG_ON(register_filesystem(&shmem_fs_type) != 0); 2779853ac43aSMatt Mackall 278041ffe5d5SHugh Dickins shm_mnt = kern_mount(&shmem_fs_type); 2781853ac43aSMatt Mackall BUG_ON(IS_ERR(shm_mnt)); 2782853ac43aSMatt Mackall 2783853ac43aSMatt Mackall return 0; 2784853ac43aSMatt Mackall } 2785853ac43aSMatt Mackall 278641ffe5d5SHugh Dickins int shmem_unuse(swp_entry_t swap, struct page *page) 2787853ac43aSMatt Mackall { 2788853ac43aSMatt Mackall return 0; 2789853ac43aSMatt Mackall } 2790853ac43aSMatt Mackall 27913f96b79aSHugh Dickins int shmem_lock(struct file *file, int lock, struct user_struct *user) 27923f96b79aSHugh Dickins { 27933f96b79aSHugh Dickins return 0; 27943f96b79aSHugh Dickins } 27953f96b79aSHugh Dickins 279624513264SHugh Dickins void shmem_unlock_mapping(struct address_space *mapping) 279724513264SHugh Dickins { 279824513264SHugh Dickins } 279924513264SHugh Dickins 280041ffe5d5SHugh Dickins void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 280194c1e62dSHugh Dickins { 280241ffe5d5SHugh Dickins truncate_inode_pages_range(inode->i_mapping, lstart, lend); 280394c1e62dSHugh Dickins } 280494c1e62dSHugh Dickins EXPORT_SYMBOL_GPL(shmem_truncate_range); 280594c1e62dSHugh Dickins 2806853ac43aSMatt Mackall #define shmem_vm_ops generic_file_vm_ops 28070b0a0806SHugh Dickins #define shmem_file_operations ramfs_file_operations 2808454abafeSDmitry Monakhov #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 28090b0a0806SHugh Dickins #define shmem_acct_size(flags, size) 0 28100b0a0806SHugh Dickins #define shmem_unacct_size(flags, size) do {} while (0) 2811853ac43aSMatt Mackall 2812853ac43aSMatt Mackall #endif /* CONFIG_SHMEM */ 2813853ac43aSMatt Mackall 2814853ac43aSMatt Mackall /* common code */ 28151da177e4SLinus Torvalds 281646711810SRandy Dunlap /** 28171da177e4SLinus Torvalds * shmem_file_setup - get an unlinked file living in tmpfs 28181da177e4SLinus Torvalds * @name: name for dentry (to be seen in /proc/<pid>/maps 28191da177e4SLinus Torvalds * @size: size to be set for the file 28200b0a0806SHugh Dickins * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 28211da177e4SLinus Torvalds */ 2822168f5ac6SSergei Trofimovich struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 28231da177e4SLinus Torvalds { 28241da177e4SLinus Torvalds int error; 28251da177e4SLinus Torvalds struct file *file; 28261da177e4SLinus Torvalds struct inode *inode; 28272c48b9c4SAl Viro struct path path; 28282c48b9c4SAl Viro struct dentry *root; 28291da177e4SLinus Torvalds struct qstr this; 28301da177e4SLinus Torvalds 28311da177e4SLinus Torvalds if (IS_ERR(shm_mnt)) 28321da177e4SLinus Torvalds return (void *)shm_mnt; 28331da177e4SLinus Torvalds 
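	/* Reject bogus sizes, then pre-account the entire object size
	 * unless VM_NORESERVE was passed in flags (see @flags above). */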
2834285b2c4fSHugh Dickins if (size < 0 || size > MAX_LFS_FILESIZE) 28351da177e4SLinus Torvalds return ERR_PTR(-EINVAL); 28361da177e4SLinus Torvalds 28371da177e4SLinus Torvalds if (shmem_acct_size(flags, size)) 28381da177e4SLinus Torvalds return ERR_PTR(-ENOMEM); 28391da177e4SLinus Torvalds 28401da177e4SLinus Torvalds error = -ENOMEM; 28411da177e4SLinus Torvalds this.name = name; 28421da177e4SLinus Torvalds this.len = strlen(name); 28431da177e4SLinus Torvalds this.hash = 0; /* will go */ 28441da177e4SLinus Torvalds root = shm_mnt->mnt_root; 28452c48b9c4SAl Viro path.dentry = d_alloc(root, &this); 28462c48b9c4SAl Viro if (!path.dentry) 28471da177e4SLinus Torvalds goto put_memory; 28482c48b9c4SAl Viro path.mnt = mntget(shm_mnt); 28491da177e4SLinus Torvalds 28501da177e4SLinus Torvalds error = -ENOSPC; 2851454abafeSDmitry Monakhov inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 28521da177e4SLinus Torvalds if (!inode) 28534b42af81SAl Viro goto put_dentry; 28541da177e4SLinus Torvalds 28552c48b9c4SAl Viro d_instantiate(path.dentry, inode); 28561da177e4SLinus Torvalds inode->i_size = size; 28576d6b77f1SMiklos Szeredi clear_nlink(inode); /* It is unlinked */ 2858853ac43aSMatt Mackall #ifndef CONFIG_MMU 2859853ac43aSMatt Mackall error = ramfs_nommu_expand_for_mapping(inode, size); 2860853ac43aSMatt Mackall if (error) 28614b42af81SAl Viro goto put_dentry; 2862853ac43aSMatt Mackall #endif 28634b42af81SAl Viro 28644b42af81SAl Viro error = -ENFILE; 28652c48b9c4SAl Viro file = alloc_file(&path, FMODE_WRITE | FMODE_READ, 28664b42af81SAl Viro &shmem_file_operations); 28674b42af81SAl Viro if (!file) 28684b42af81SAl Viro goto put_dentry; 28694b42af81SAl Viro 28701da177e4SLinus Torvalds return file; 28711da177e4SLinus Torvalds 28721da177e4SLinus Torvalds put_dentry: 28732c48b9c4SAl Viro path_put(&path); 28741da177e4SLinus Torvalds put_memory: 28751da177e4SLinus Torvalds shmem_unacct_size(flags, size); 28761da177e4SLinus Torvalds return ERR_PTR(error); 28771da177e4SLinus Torvalds } 2878395e0ddcSKeith Packard EXPORT_SYMBOL_GPL(shmem_file_setup); 28791da177e4SLinus Torvalds 288046711810SRandy Dunlap /** 28811da177e4SLinus Torvalds * shmem_zero_setup - setup a shared anonymous mapping 28821da177e4SLinus Torvalds * @vma: the vma to be mmapped is prepared by do_mmap_pgoff 28831da177e4SLinus Torvalds */ 28841da177e4SLinus Torvalds int shmem_zero_setup(struct vm_area_struct *vma) 28851da177e4SLinus Torvalds { 28861da177e4SLinus Torvalds struct file *file; 28871da177e4SLinus Torvalds loff_t size = vma->vm_end - vma->vm_start; 28881da177e4SLinus Torvalds 28891da177e4SLinus Torvalds file = shmem_file_setup("dev/zero", size, vma->vm_flags); 28901da177e4SLinus Torvalds if (IS_ERR(file)) 28911da177e4SLinus Torvalds return PTR_ERR(file); 28921da177e4SLinus Torvalds 28931da177e4SLinus Torvalds if (vma->vm_file) 28941da177e4SLinus Torvalds fput(vma->vm_file); 28951da177e4SLinus Torvalds vma->vm_file = file; 28961da177e4SLinus Torvalds vma->vm_ops = &shmem_vm_ops; 2897bee4c36aSHugh Dickins vma->vm_flags |= VM_CAN_NONLINEAR; 28981da177e4SLinus Torvalds return 0; 28991da177e4SLinus Torvalds } 2900d9d90e5eSHugh Dickins 2901d9d90e5eSHugh Dickins /** 2902d9d90e5eSHugh Dickins * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 
2903d9d90e5eSHugh Dickins * @mapping: the page's address_space 2904d9d90e5eSHugh Dickins * @index: the page index 2905d9d90e5eSHugh Dickins * @gfp: the page allocator flags to use if allocating 2906d9d90e5eSHugh Dickins * 2907d9d90e5eSHugh Dickins * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 2908d9d90e5eSHugh Dickins * with any new page allocations done using the specified allocation flags. 2909d9d90e5eSHugh Dickins * But read_cache_page_gfp() uses the ->readpage() method: which does not 2910d9d90e5eSHugh Dickins * suit tmpfs, since it may have pages in swapcache, and needs to find those 2911d9d90e5eSHugh Dickins * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 2912d9d90e5eSHugh Dickins * 291368da9f05SHugh Dickins * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 291468da9f05SHugh Dickins * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 2915d9d90e5eSHugh Dickins */ 2916d9d90e5eSHugh Dickins struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 2917d9d90e5eSHugh Dickins pgoff_t index, gfp_t gfp) 2918d9d90e5eSHugh Dickins { 291968da9f05SHugh Dickins #ifdef CONFIG_SHMEM 292068da9f05SHugh Dickins struct inode *inode = mapping->host; 29219276aad6SHugh Dickins struct page *page; 292268da9f05SHugh Dickins int error; 292368da9f05SHugh Dickins 292468da9f05SHugh Dickins BUG_ON(mapping->a_ops != &shmem_aops); 292568da9f05SHugh Dickins error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); 292668da9f05SHugh Dickins if (error) 292768da9f05SHugh Dickins page = ERR_PTR(error); 292868da9f05SHugh Dickins else 292968da9f05SHugh Dickins unlock_page(page); 293068da9f05SHugh Dickins return page; 293168da9f05SHugh Dickins #else 293268da9f05SHugh Dickins /* 293368da9f05SHugh Dickins * The tiny !SHMEM case uses ramfs without swap 293468da9f05SHugh Dickins */ 2935d9d90e5eSHugh Dickins return read_cache_page_gfp(mapping, index, gfp); 293668da9f05SHugh Dickins #endif 2937d9d90e5eSHugh Dickins } 2938d9d90e5eSHugh Dickins EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 2939
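/*
 * Illustrative sketch (editor's addition, not part of shmem.c): how a driver,
 * in the spirit of the i915/ttm usage noted above, might back an object with
 * tmpfs pages via shmem_file_setup() and shmem_read_mapping_page_gfp().
 * The function name, parameters and "example-obj" name below are hypothetical;
 * error handling is kept minimal.
 */
static struct file *example_get_shmem_pages(struct page **pages,
					    pgoff_t nr_pages, loff_t size)
{
	struct file *filp;
	gfp_t gfp;
	pgoff_t i;

	/* Unlinked tmpfs file; VM_NORESERVE skips up-front accounting */
	filp = shmem_file_setup("example-obj", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return filp;

	/* Mix in NORETRY/NOWARN, as i915 does, to avoid needless OOMs */
	gfp = mapping_gfp_mask(filp->f_mapping) |
	      __GFP_NORETRY | __GFP_NOWARN;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = shmem_read_mapping_page_gfp(filp->f_mapping, i, gfp);
		if (IS_ERR(pages[i])) {
			long err = PTR_ERR(pages[i]);

			while (i--)
				page_cache_release(pages[i]);
			fput(filp);
			return ERR_PTR(err);
		}
	}
	/* Caller keeps the file; drop the page and file references when done */
	return filp;
}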