/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
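 *
 * For example, assuming a 64-bit architecture with 4K base pages
 * (PAGE_SHIFT of 12, BITS_PER_LONG of 64), the mask covers the top
 * 13 bits of vm_pgoff: if any of those bits were set, shifting the
 * offset left by PAGE_SHIFT to form a byte offset would reach or
 * overflow the loff_t sign bit.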
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to generic_file_buffered_read(), but we can't
 * use that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash = 0;

			index = page->index;
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				hash = hugetlb_fault_mutex_hash(mapping, index);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
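	 *
	 * For example, assuming a 2MB huge page size, punching a hole at
	 * offset 1MB with length 4MB rounds to hole_start = 2MB and
	 * hole_end = 4MB: only the single fully covered huge page is
	 * removed, and the partially covered pages at either end are kept.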
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_rwsem */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
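	 *
	 * For example, assuming a 2MB huge page size, a request with
	 * offset 1MB and len 3MB gives start = 0 and end = 2, so the two
	 * huge pages covering file offsets 0 through 4MB - 1 are
	 * preallocated.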
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.
		 * However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page() due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

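	/* Size updates, if any, are done; copy the remaining attributes. */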
	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_page_subpool(page)) {
		hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
		hugetlb_set_page_subpool(page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
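	/* Report the generic VFS limit for file name length. */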
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
11374742a35dSMike Kravetz */ 11384742a35dSMike Kravetz mpol_shared_policy_init(&p->policy, NULL); 11394742a35dSMike Kravetz 114096527980SChristoph Hellwig return &p->vfs_inode; 11411da177e4SLinus Torvalds } 11421da177e4SLinus Torvalds 1143b62de322SAl Viro static void hugetlbfs_free_inode(struct inode *inode) 1144fa0d7e3dSNick Piggin { 1145fa0d7e3dSNick Piggin kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); 1146fa0d7e3dSNick Piggin } 1147fa0d7e3dSNick Piggin 11481da177e4SLinus Torvalds static void hugetlbfs_destroy_inode(struct inode *inode) 11491da177e4SLinus Torvalds { 115096527980SChristoph Hellwig hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); 11511da177e4SLinus Torvalds mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); 11521da177e4SLinus Torvalds } 11531da177e4SLinus Torvalds 1154f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops = { 1155800d15a5SNick Piggin .write_begin = hugetlbfs_write_begin, 1156800d15a5SNick Piggin .write_end = hugetlbfs_write_end, 115746de8b97SMatthew Wilcox (Oracle) .dirty_folio = noop_dirty_folio, 1158290408d4SNaoya Horiguchi .migratepage = hugetlbfs_migrate_page, 115978bb9203SNaoya Horiguchi .error_remove_page = hugetlbfs_error_remove_page, 11601da177e4SLinus Torvalds }; 11611da177e4SLinus Torvalds 116296527980SChristoph Hellwig 116351cc5068SAlexey Dobriyan static void init_once(void *foo) 116496527980SChristoph Hellwig { 116596527980SChristoph Hellwig struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 116696527980SChristoph Hellwig 116796527980SChristoph Hellwig inode_init_once(&ei->vfs_inode); 116896527980SChristoph Hellwig } 116996527980SChristoph Hellwig 11704b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations = { 117134d0640eSAl Viro .read_iter = hugetlbfs_read_iter, 11721da177e4SLinus Torvalds .mmap = hugetlbfs_file_mmap, 11731b061d92SChristoph Hellwig .fsync = noop_fsync, 11741da177e4SLinus Torvalds .get_unmapped_area = hugetlb_get_unmapped_area, 11756038f373SArnd Bergmann .llseek = default_llseek, 117670c3547eSMike Kravetz .fallocate = hugetlbfs_fallocate, 11771da177e4SLinus Torvalds }; 11781da177e4SLinus Torvalds 117992e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations = { 11801da177e4SLinus Torvalds .create = hugetlbfs_create, 11811da177e4SLinus Torvalds .lookup = simple_lookup, 11821da177e4SLinus Torvalds .link = simple_link, 11831da177e4SLinus Torvalds .unlink = simple_unlink, 11841da177e4SLinus Torvalds .symlink = hugetlbfs_symlink, 11851da177e4SLinus Torvalds .mkdir = hugetlbfs_mkdir, 11861da177e4SLinus Torvalds .rmdir = simple_rmdir, 11871da177e4SLinus Torvalds .mknod = hugetlbfs_mknod, 11881da177e4SLinus Torvalds .rename = simple_rename, 11891da177e4SLinus Torvalds .setattr = hugetlbfs_setattr, 11901ab5b82fSPiotr Sarna .tmpfile = hugetlbfs_tmpfile, 11911da177e4SLinus Torvalds }; 11921da177e4SLinus Torvalds 119392e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations = { 11941da177e4SLinus Torvalds .setattr = hugetlbfs_setattr, 11951da177e4SLinus Torvalds }; 11961da177e4SLinus Torvalds 1197ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops = { 11981da177e4SLinus Torvalds .alloc_inode = hugetlbfs_alloc_inode, 1199b62de322SAl Viro .free_inode = hugetlbfs_free_inode, 12001da177e4SLinus Torvalds .destroy_inode = hugetlbfs_destroy_inode, 12012bbbda30SAl Viro .evict_inode = hugetlbfs_evict_inode, 12021da177e4SLinus Torvalds .statfs = 
hugetlbfs_statfs, 12031da177e4SLinus Torvalds .put_super = hugetlbfs_put_super, 12044a25220dSDavid Howells .show_options = hugetlbfs_show_options, 12051da177e4SLinus Torvalds }; 12061da177e4SLinus Torvalds 12077ca02d0aSMike Kravetz /* 12087ca02d0aSMike Kravetz * Convert size option passed from command line to number of huge pages 12097ca02d0aSMike Kravetz * in the pool specified by hstate. Size option could be in bytes 12107ca02d0aSMike Kravetz * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). 12117ca02d0aSMike Kravetz */ 12124a25220dSDavid Howells static long 12137ca02d0aSMike Kravetz hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, 12144a25220dSDavid Howells enum hugetlbfs_size_type val_type) 12157ca02d0aSMike Kravetz { 12167ca02d0aSMike Kravetz if (val_type == NO_SIZE) 12177ca02d0aSMike Kravetz return -1; 12187ca02d0aSMike Kravetz 12197ca02d0aSMike Kravetz if (val_type == SIZE_PERCENT) { 12207ca02d0aSMike Kravetz size_opt <<= huge_page_shift(h); 12217ca02d0aSMike Kravetz size_opt *= h->max_huge_pages; 12227ca02d0aSMike Kravetz do_div(size_opt, 100); 12237ca02d0aSMike Kravetz } 12247ca02d0aSMike Kravetz 12257ca02d0aSMike Kravetz size_opt >>= huge_page_shift(h); 12267ca02d0aSMike Kravetz return size_opt; 12277ca02d0aSMike Kravetz } 12287ca02d0aSMike Kravetz 122932021982SDavid Howells /* 123032021982SDavid Howells * Parse one mount parameter. 123132021982SDavid Howells */ 123232021982SDavid Howells static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) 12331da177e4SLinus Torvalds { 123432021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 123532021982SDavid Howells struct fs_parse_result result; 123632021982SDavid Howells char *rest; 123732021982SDavid Howells unsigned long ps; 123832021982SDavid Howells int opt; 12391da177e4SLinus Torvalds 1240d7167b14SAl Viro opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); 124132021982SDavid Howells if (opt < 0) 124232021982SDavid Howells return opt; 124332021982SDavid Howells 124432021982SDavid Howells switch (opt) { 124532021982SDavid Howells case Opt_uid: 124632021982SDavid Howells ctx->uid = make_kuid(current_user_ns(), result.uint_32); 124732021982SDavid Howells if (!uid_valid(ctx->uid)) 124832021982SDavid Howells goto bad_val; 12491da177e4SLinus Torvalds return 0; 12501da177e4SLinus Torvalds 1251e73a75faSRandy Dunlap case Opt_gid: 125232021982SDavid Howells ctx->gid = make_kgid(current_user_ns(), result.uint_32); 125332021982SDavid Howells if (!gid_valid(ctx->gid)) 1254e73a75faSRandy Dunlap goto bad_val; 125532021982SDavid Howells return 0; 1256e73a75faSRandy Dunlap 1257e73a75faSRandy Dunlap case Opt_mode: 125832021982SDavid Howells ctx->mode = result.uint_32 & 01777U; 125932021982SDavid Howells return 0; 1260e73a75faSRandy Dunlap 126132021982SDavid Howells case Opt_size: 1262e73a75faSRandy Dunlap /* memparse() will accept a K/M/G without a digit */ 126332021982SDavid Howells if (!isdigit(param->string[0])) 1264e73a75faSRandy Dunlap goto bad_val; 126532021982SDavid Howells ctx->max_size_opt = memparse(param->string, &rest); 126632021982SDavid Howells ctx->max_val_type = SIZE_STD; 1267a137e1ccSAndi Kleen if (*rest == '%') 126832021982SDavid Howells ctx->max_val_type = SIZE_PERCENT; 126932021982SDavid Howells return 0; 12701da177e4SLinus Torvalds 1271e73a75faSRandy Dunlap case Opt_nr_inodes: 1272e73a75faSRandy Dunlap /* memparse() will accept a K/M/G without a digit */ 127332021982SDavid Howells if (!isdigit(param->string[0])) 
1274e73a75faSRandy Dunlap goto bad_val; 127532021982SDavid Howells ctx->nr_inodes = memparse(param->string, &rest); 127632021982SDavid Howells return 0; 1277e73a75faSRandy Dunlap 127832021982SDavid Howells case Opt_pagesize: 127932021982SDavid Howells ps = memparse(param->string, &rest); 128032021982SDavid Howells ctx->hstate = size_to_hstate(ps); 128132021982SDavid Howells if (!ctx->hstate) { 128232021982SDavid Howells pr_err("Unsupported page size %lu MB\n", ps >> 20); 1283a137e1ccSAndi Kleen return -EINVAL; 1284a137e1ccSAndi Kleen } 128532021982SDavid Howells return 0; 1286a137e1ccSAndi Kleen 128732021982SDavid Howells case Opt_min_size: 12887ca02d0aSMike Kravetz /* memparse() will accept a K/M/G without a digit */ 128932021982SDavid Howells if (!isdigit(param->string[0])) 12907ca02d0aSMike Kravetz goto bad_val; 129132021982SDavid Howells ctx->min_size_opt = memparse(param->string, &rest); 129232021982SDavid Howells ctx->min_val_type = SIZE_STD; 12937ca02d0aSMike Kravetz if (*rest == '%') 129432021982SDavid Howells ctx->min_val_type = SIZE_PERCENT; 129532021982SDavid Howells return 0; 12967ca02d0aSMike Kravetz 1297e73a75faSRandy Dunlap default: 1298b4c07bceSLee Schermerhorn return -EINVAL; 1299e73a75faSRandy Dunlap } 130032021982SDavid Howells 130132021982SDavid Howells bad_val: 1302b5db30cfSAl Viro return invalfc(fc, "Bad value '%s' for mount option '%s'\n", 130332021982SDavid Howells param->string, param->key); 13041da177e4SLinus Torvalds } 1305a137e1ccSAndi Kleen 13067ca02d0aSMike Kravetz /* 130732021982SDavid Howells * Validate the parsed options. 130832021982SDavid Howells */ 130932021982SDavid Howells static int hugetlbfs_validate(struct fs_context *fc) 131032021982SDavid Howells { 131132021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 131232021982SDavid Howells 131332021982SDavid Howells /* 13147ca02d0aSMike Kravetz * Use huge page pool size (in hstate) to convert the size 13157ca02d0aSMike Kravetz * options to number of huge pages. If NO_SIZE, -1 is returned. 
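 *
 * For example, with 2 MB huge pages, "size=1G" (SIZE_STD) converts to
 * 512 huge pages, while "size=50%" (SIZE_PERCENT) converts to
 * h->max_huge_pages / 2.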
13167ca02d0aSMike Kravetz */ 131732021982SDavid Howells ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate, 131832021982SDavid Howells ctx->max_size_opt, 131932021982SDavid Howells ctx->max_val_type); 132032021982SDavid Howells ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate, 132132021982SDavid Howells ctx->min_size_opt, 132232021982SDavid Howells ctx->min_val_type); 13237ca02d0aSMike Kravetz 13247ca02d0aSMike Kravetz /* 13257ca02d0aSMike Kravetz * If max_size was specified, then min_size must not be larger 13267ca02d0aSMike Kravetz */ 132732021982SDavid Howells if (ctx->max_val_type > NO_SIZE && 132832021982SDavid Howells ctx->min_hpages > ctx->max_hpages) { 132932021982SDavid Howells pr_err("Minimum size can not be greater than maximum size\n"); 13307ca02d0aSMike Kravetz return -EINVAL; 1331a137e1ccSAndi Kleen } 1332a137e1ccSAndi Kleen 13331da177e4SLinus Torvalds return 0; 13341da177e4SLinus Torvalds } 13351da177e4SLinus Torvalds 13361da177e4SLinus Torvalds static int 133732021982SDavid Howells hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) 13381da177e4SLinus Torvalds { 133932021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 13401da177e4SLinus Torvalds struct hugetlbfs_sb_info *sbinfo; 13411da177e4SLinus Torvalds 13421da177e4SLinus Torvalds sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); 13431da177e4SLinus Torvalds if (!sbinfo) 13441da177e4SLinus Torvalds return -ENOMEM; 13451da177e4SLinus Torvalds sb->s_fs_info = sbinfo; 13461da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 134732021982SDavid Howells sbinfo->hstate = ctx->hstate; 134832021982SDavid Howells sbinfo->max_inodes = ctx->nr_inodes; 134932021982SDavid Howells sbinfo->free_inodes = ctx->nr_inodes; 135090481622SDavid Gibson sbinfo->spool = NULL; 135132021982SDavid Howells sbinfo->uid = ctx->uid; 135232021982SDavid Howells sbinfo->gid = ctx->gid; 135332021982SDavid Howells sbinfo->mode = ctx->mode; 13544a25220dSDavid Howells 13557ca02d0aSMike Kravetz /* 13567ca02d0aSMike Kravetz * Allocate and initialize subpool if maximum or minimum size is 13571935ebd3SMiaohe Lin * specified. Any needed reservations (for minimum size) are taken 13587ca02d0aSMike Kravetz * when the subpool is created. 13597ca02d0aSMike Kravetz */ 136032021982SDavid Howells if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { 136132021982SDavid Howells sbinfo->spool = hugepage_new_subpool(ctx->hstate, 136232021982SDavid Howells ctx->max_hpages, 136332021982SDavid Howells ctx->min_hpages); 136490481622SDavid Gibson if (!sbinfo->spool) 136590481622SDavid Gibson goto out_free; 136690481622SDavid Gibson } 13671da177e4SLinus Torvalds sb->s_maxbytes = MAX_LFS_FILESIZE; 136832021982SDavid Howells sb->s_blocksize = huge_page_size(ctx->hstate); 136932021982SDavid Howells sb->s_blocksize_bits = huge_page_shift(ctx->hstate); 13701da177e4SLinus Torvalds sb->s_magic = HUGETLBFS_MAGIC; 13711da177e4SLinus Torvalds sb->s_op = &hugetlbfs_ops; 13721da177e4SLinus Torvalds sb->s_time_gran = 1; 137315568299SMike Kravetz 137415568299SMike Kravetz /* 137515568299SMike Kravetz * Due to the special and limited functionality of hugetlbfs, it does 137615568299SMike Kravetz * not work well as a stacking filesystem.
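 *
 * Setting s_stack_depth to FILESYSTEM_MAX_STACK_DEPTH below means that
 * any filesystem trying to stack on top of hugetlbfs (overlayfs,
 * ecryptfs, ...) would exceed the allowed stacking depth and refuse to
 * mount.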
137715568299SMike Kravetz */ 137815568299SMike Kravetz sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; 137932021982SDavid Howells sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); 138048fde701SAl Viro if (!sb->s_root) 13811da177e4SLinus Torvalds goto out_free; 13821da177e4SLinus Torvalds return 0; 13831da177e4SLinus Torvalds out_free: 138490481622SDavid Gibson kfree(sbinfo->spool); 13851da177e4SLinus Torvalds kfree(sbinfo); 13861da177e4SLinus Torvalds return -ENOMEM; 13871da177e4SLinus Torvalds } 13881da177e4SLinus Torvalds 138932021982SDavid Howells static int hugetlbfs_get_tree(struct fs_context *fc) 13901da177e4SLinus Torvalds { 139132021982SDavid Howells int err = hugetlbfs_validate(fc); 139232021982SDavid Howells if (err) 139332021982SDavid Howells return err; 13942ac295d4SAl Viro return get_tree_nodev(fc, hugetlbfs_fill_super); 139532021982SDavid Howells } 139632021982SDavid Howells 139732021982SDavid Howells static void hugetlbfs_fs_context_free(struct fs_context *fc) 139832021982SDavid Howells { 139932021982SDavid Howells kfree(fc->fs_private); 140032021982SDavid Howells } 140132021982SDavid Howells 140232021982SDavid Howells static const struct fs_context_operations hugetlbfs_fs_context_ops = { 140332021982SDavid Howells .free = hugetlbfs_fs_context_free, 140432021982SDavid Howells .parse_param = hugetlbfs_parse_param, 140532021982SDavid Howells .get_tree = hugetlbfs_get_tree, 140632021982SDavid Howells }; 140732021982SDavid Howells 140832021982SDavid Howells static int hugetlbfs_init_fs_context(struct fs_context *fc) 140932021982SDavid Howells { 141032021982SDavid Howells struct hugetlbfs_fs_context *ctx; 141132021982SDavid Howells 141232021982SDavid Howells ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL); 141332021982SDavid Howells if (!ctx) 141432021982SDavid Howells return -ENOMEM; 141532021982SDavid Howells 141632021982SDavid Howells ctx->max_hpages = -1; /* No limit on size by default */ 141732021982SDavid Howells ctx->nr_inodes = -1; /* No limit on number of inodes by default */ 141832021982SDavid Howells ctx->uid = current_fsuid(); 141932021982SDavid Howells ctx->gid = current_fsgid(); 142032021982SDavid Howells ctx->mode = 0755; 142132021982SDavid Howells ctx->hstate = &default_hstate; 142232021982SDavid Howells ctx->min_hpages = -1; /* No default minimum size */ 142332021982SDavid Howells ctx->max_val_type = NO_SIZE; 142432021982SDavid Howells ctx->min_val_type = NO_SIZE; 142532021982SDavid Howells fc->fs_private = ctx; 142632021982SDavid Howells fc->ops = &hugetlbfs_fs_context_ops; 142732021982SDavid Howells return 0; 14281da177e4SLinus Torvalds } 14291da177e4SLinus Torvalds 14301da177e4SLinus Torvalds static struct file_system_type hugetlbfs_fs_type = { 14311da177e4SLinus Torvalds .name = "hugetlbfs", 143232021982SDavid Howells .init_fs_context = hugetlbfs_init_fs_context, 1433d7167b14SAl Viro .parameters = hugetlb_fs_parameters, 14341da177e4SLinus Torvalds .kill_sb = kill_litter_super, 14351da177e4SLinus Torvalds }; 14361da177e4SLinus Torvalds 143742d7395fSAndi Kleen static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; 14381da177e4SLinus Torvalds 1439ef1ff6b8SFrom: Mel Gorman static int can_do_hugetlb_shm(void) 14401da177e4SLinus Torvalds { 1441a0eb3a05SEric W. Biederman kgid_t shm_group; 1442a0eb3a05SEric W. Biederman shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); 1443a0eb3a05SEric W. 
Biederman return capable(CAP_IPC_LOCK) || in_group_p(shm_group); 14441da177e4SLinus Torvalds } 14451da177e4SLinus Torvalds 144642d7395fSAndi Kleen static int get_hstate_idx(int page_size_log) 144742d7395fSAndi Kleen { 1448af73e4d9SNaoya Horiguchi struct hstate *h = hstate_sizelog(page_size_log); 144942d7395fSAndi Kleen 145042d7395fSAndi Kleen if (!h) 145142d7395fSAndi Kleen return -1; 145204adbc3fSMiaohe Lin return hstate_index(h); 145342d7395fSAndi Kleen } 145442d7395fSAndi Kleen 1455af73e4d9SNaoya Horiguchi /* 1456af73e4d9SNaoya Horiguchi * Note that size should be aligned to proper hugepage size in caller side, 1457af73e4d9SNaoya Horiguchi * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. 1458af73e4d9SNaoya Horiguchi */ 1459af73e4d9SNaoya Horiguchi struct file *hugetlb_file_setup(const char *name, size_t size, 146083c1fd76Szhangyiru vm_flags_t acctflag, int creat_flags, 146183c1fd76Szhangyiru int page_size_log) 14621da177e4SLinus Torvalds { 14631da177e4SLinus Torvalds struct inode *inode; 1464e68375c8SAl Viro struct vfsmount *mnt; 146542d7395fSAndi Kleen int hstate_idx; 1466e68375c8SAl Viro struct file *file; 146742d7395fSAndi Kleen 146842d7395fSAndi Kleen hstate_idx = get_hstate_idx(page_size_log); 146942d7395fSAndi Kleen if (hstate_idx < 0) 147042d7395fSAndi Kleen return ERR_PTR(-ENODEV); 14711da177e4SLinus Torvalds 1472e68375c8SAl Viro mnt = hugetlbfs_vfsmount[hstate_idx]; 1473e68375c8SAl Viro if (!mnt) 14745bc98594SAkinobu Mita return ERR_PTR(-ENOENT); 14755bc98594SAkinobu Mita 1476ef1ff6b8SFrom: Mel Gorman if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { 147783c1fd76Szhangyiru struct ucounts *ucounts = current_ucounts(); 147883c1fd76Szhangyiru 147983c1fd76Szhangyiru if (user_shm_lock(size, ucounts)) { 148083c1fd76Szhangyiru pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n", 148121a3c273SDavid Rientjes current->comm, current->pid); 148283c1fd76Szhangyiru user_shm_unlock(size, ucounts); 14832584e517SRavikiran G Thirumalai } 148483c1fd76Szhangyiru return ERR_PTR(-EPERM); 1485353d5c30SHugh Dickins } 14861da177e4SLinus Torvalds 148739b65252SAnatol Pomozov file = ERR_PTR(-ENOSPC); 1488e68375c8SAl Viro inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0); 14891da177e4SLinus Torvalds if (!inode) 1490e68375c8SAl Viro goto out; 1491e1832f29SStephen Smalley if (creat_flags == HUGETLB_SHMFS_INODE) 1492e1832f29SStephen Smalley inode->i_flags |= S_PRIVATE; 14931da177e4SLinus Torvalds 14941da177e4SLinus Torvalds inode->i_size = size; 14956d6b77f1SMiklos Szeredi clear_nlink(inode); 1496ce8d2cdfSDave Hansen 149733b8f84aSMike Kravetz if (!hugetlb_reserve_pages(inode, 0, 1498e68375c8SAl Viro size >> huge_page_shift(hstate_inode(inode)), NULL, 1499e68375c8SAl Viro acctflag)) 1500e68375c8SAl Viro file = ERR_PTR(-ENOMEM); 1501e68375c8SAl Viro else 1502e68375c8SAl Viro file = alloc_file_pseudo(inode, mnt, name, O_RDWR, 1503ce8d2cdfSDave Hansen &hugetlbfs_file_operations); 1504e68375c8SAl Viro if (!IS_ERR(file)) 15051da177e4SLinus Torvalds return file; 15061da177e4SLinus Torvalds 1507b45b5bd6SDavid Gibson iput(inode); 1508e68375c8SAl Viro out: 150939b65252SAnatol Pomozov return file; 15101da177e4SLinus Torvalds } 15111da177e4SLinus Torvalds 151232021982SDavid Howells static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) 151332021982SDavid Howells { 151432021982SDavid Howells struct fs_context *fc; 151532021982SDavid Howells struct vfsmount *mnt; 151632021982SDavid Howells 151732021982SDavid Howells fc = 
fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT); 151832021982SDavid Howells if (IS_ERR(fc)) { 151932021982SDavid Howells mnt = ERR_CAST(fc); 152032021982SDavid Howells } else { 152132021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 152232021982SDavid Howells ctx->hstate = h; 152332021982SDavid Howells mnt = fc_mount(fc); 152432021982SDavid Howells put_fs_context(fc); 152532021982SDavid Howells } 152632021982SDavid Howells if (IS_ERR(mnt)) 1527a25fddceSMiaohe Lin pr_err("Cannot mount internal hugetlbfs for page size %luK", 1528a25fddceSMiaohe Lin huge_page_size(h) >> 10); 152932021982SDavid Howells return mnt; 153032021982SDavid Howells } 153132021982SDavid Howells 15321da177e4SLinus Torvalds static int __init init_hugetlbfs_fs(void) 15331da177e4SLinus Torvalds { 153432021982SDavid Howells struct vfsmount *mnt; 153542d7395fSAndi Kleen struct hstate *h; 15361da177e4SLinus Torvalds int error; 153742d7395fSAndi Kleen int i; 15381da177e4SLinus Torvalds 1539457c1b27SNishanth Aravamudan if (!hugepages_supported()) { 15409b857d26SAndrew Morton pr_info("disabling because there are no supported hugepage sizes\n"); 1541457c1b27SNishanth Aravamudan return -ENOTSUPP; 1542457c1b27SNishanth Aravamudan } 1543457c1b27SNishanth Aravamudan 1544d1d5e05fSHillf Danton error = -ENOMEM; 15451da177e4SLinus Torvalds hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", 15461da177e4SLinus Torvalds sizeof(struct hugetlbfs_inode_info), 15475d097056SVladimir Davydov 0, SLAB_ACCOUNT, init_once); 15481da177e4SLinus Torvalds if (hugetlbfs_inode_cachep == NULL) 15498fc312b3SMike Kravetz goto out; 15501da177e4SLinus Torvalds 15511da177e4SLinus Torvalds error = register_filesystem(&hugetlbfs_fs_type); 15521da177e4SLinus Torvalds if (error) 15538fc312b3SMike Kravetz goto out_free; 15541da177e4SLinus Torvalds 15558fc312b3SMike Kravetz /* default hstate mount is required */ 15563b2275a8SMiaohe Lin mnt = mount_one_hugetlbfs(&default_hstate); 15578fc312b3SMike Kravetz if (IS_ERR(mnt)) { 15588fc312b3SMike Kravetz error = PTR_ERR(mnt); 15598fc312b3SMike Kravetz goto out_unreg; 15608fc312b3SMike Kravetz } 15618fc312b3SMike Kravetz hugetlbfs_vfsmount[default_hstate_idx] = mnt; 15628fc312b3SMike Kravetz 15638fc312b3SMike Kravetz /* other hstates are optional */ 156442d7395fSAndi Kleen i = 0; 156542d7395fSAndi Kleen for_each_hstate(h) { 156615f0ec94SJan Stancek if (i == default_hstate_idx) { 156715f0ec94SJan Stancek i++; 15688fc312b3SMike Kravetz continue; 156915f0ec94SJan Stancek } 15708fc312b3SMike Kravetz 157132021982SDavid Howells mnt = mount_one_hugetlbfs(h); 15728fc312b3SMike Kravetz if (IS_ERR(mnt)) 15738fc312b3SMike Kravetz hugetlbfs_vfsmount[i] = NULL; 15748fc312b3SMike Kravetz else 157532021982SDavid Howells hugetlbfs_vfsmount[i] = mnt; 157642d7395fSAndi Kleen i++; 157742d7395fSAndi Kleen } 157832021982SDavid Howells 157942d7395fSAndi Kleen return 0; 15801da177e4SLinus Torvalds 15818fc312b3SMike Kravetz out_unreg: 15828fc312b3SMike Kravetz (void)unregister_filesystem(&hugetlbfs_fs_type); 15838fc312b3SMike Kravetz out_free: 15841da177e4SLinus Torvalds kmem_cache_destroy(hugetlbfs_inode_cachep); 15858fc312b3SMike Kravetz out: 15861da177e4SLinus Torvalds return error; 15871da177e4SLinus Torvalds } 15883e89e1c5SPaul Gortmaker fs_initcall(init_hugetlbfs_fs) 1589
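/*
 * Illustrative user-space sketch (example only, with arbitrary sizes):
 * hugetlb_file_setup() above is what backs SysV shared memory segments
 * created with SHM_HUGETLB, roughly:
 *
 *	int id = shmget(IPC_PRIVATE, 8UL * 1024 * 1024,
 *			SHM_HUGETLB | IPC_CREAT | 0600);
 *	void *addr = shmat(id, NULL, 0);
 *
 * That caller passes creat_flags == HUGETLB_SHMFS_INODE, so the
 * CAP_IPC_LOCK / sysctl_hugetlb_shm_group check in can_do_hugetlb_shm()
 * applies, and the file lives on one of the internal mounts set up by
 * init_hugetlbfs_fs().  Explicit mounts instead use the options handled
 * by hugetlbfs_parse_param(), e.g.:
 *
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M none /mnt/huge
 */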