/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
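/*
 * Illustrative only (not part of the kernel build): the parameter table
 * below maps directly to the options a user can pass at mount time.  A
 * minimal userspace sketch, assuming 2M huge pages are available and
 * /mnt/huge already exists:
 *
 *	#include <sys/mount.h>
 *
 *	mount("none", "/mnt/huge", "hugetlbfs", 0,
 *	      "pagesize=2M,size=1G,min_size=512M,mode=0770");
 *
 * "size" and "min_size" accept K/M/G suffixes or a trailing '%' of the
 * huge page pool, as parsed in hugetlbfs_parse_param() below.
 */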
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
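/*
 * Worked example (illustrative, assuming the common PAGE_SHIFT == 12,
 * BITS_PER_LONG == 64 case): PGOFF_LOFFT_MAX is ((1UL << 13) - 1) << 51,
 * i.e. the top 13 bits of an unsigned long.  A vm_pgoff with any of those
 * bits set would, after the << PAGE_SHIFT conversion to a byte offset,
 * either overflow the 63 value bits of loff_t or set its sign bit, so
 * such offsets are rejected up front in hugetlbfs_file_mmap().
 */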
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
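/*
 * Userspace sketch (illustrative only): mapping a file on the mount from
 * the earlier example.  The mmap() itself reserves the pages and, for a
 * writable shared mapping, grows i_size as seen above, so no prior
 * ftruncate() is needed.  HPAGE_2M is a hypothetical constant for a 2M
 * huge page size.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	#define HPAGE_2M (2UL * 1024 * 1024)
 *
 *	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 4 * HPAGE_2M, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The length and file offset must be multiples of the huge page size,
 * otherwise the mmap() fails with EINVAL per the checks above and in
 * generic_hugetlb_get_unmapped_area() below.
 */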
/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = copy_page_to_iter(page, offset, nr, to);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
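/*
 * Userspace sketch (illustrative only): read() works on hugetlbfs even
 * though write() does not (see the -EINVAL stubs below), and holes read
 * back as zeroes via the iov_iter_zero() path above:
 *
 *	char buf[4096];
 *	ssize_t n = pread(fd, buf, sizeof(buf), 0);
 *
 * Within i_size, n == 4096 and any range that was never faulted in or
 * allocated comes back zero-filled.
 */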
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
				     NULL, zap_flags);
	}
}
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  Region/reserve map entries for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = start;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index;
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				hash = hugetlb_fault_mutex_hash(mapping, index);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If folio is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the folio.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(folio_mapped(folio))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h),
					ZAP_FLAG_DROP_MARKER);
				i_mmap_unlock_write(mapping);
			}

			folio_lock(folio);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count may need
			 * to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(&folio->page));
			remove_huge_page(&folio->page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			folio_unlock(folio);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
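/*
 * Userspace sketch (illustrative only): truncation reaches this code via
 * hugetlbfs_setattr() -> hugetlb_vmtruncate() below.  The new size must be
 * huge page aligned, otherwise setattr returns -EINVAL:
 *
 *	ftruncate(fd, 0);		succeeds, releases all pages
 *	ftruncate(fd, HPAGE_2M / 2);	fails with EINVAL on a 2M hstate
 *
 * (HPAGE_2M is the hypothetical constant from the mmap example above.)
 */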
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (!folio)
		return;
	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}
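/*
 * Worked example (illustrative, 2M huge pages): punching offset = 1M,
 * len = 4M gives hole_start = round_up(1M, 2M) = 2M and hole_end =
 * round_down(5M, 2M) = 4M.  Bytes 1M-2M and 4M-5M are zeroed in place by
 * hugetlbfs_zero_partial_page(), and only the full page at 2M-4M is
 * removed from the file.  From userspace:
 *
 *	#include <fcntl.h>
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  1 * 1024 * 1024, 4 * 1024 * 1024);
 *
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE per the
 * fallocate(2) contract; the mode check in hugetlbfs_fallocate() below
 * accepts only these two flags.
 */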
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}
		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page() because the page was locked by
		 * huge_add_to_page_cache(); put_page() drops the extra
		 * reference taken by alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
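/*
 * Userspace sketch (illustrative only): preallocating huge pages without
 * touching them, e.g. to fail early instead of at fault time when the
 * pool is too small:
 *
 *	if (fallocate(fd, 0, 0, 8 * HPAGE_2M) < 0)
 *		perror("fallocate");	fails if the pool cannot satisfy it
 *
 * Unlike mmap() followed by touching the pages, this populates the page
 * cache directly and, with mode == 0, also extends i_size as seen at the
 * end of the function above.
 */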
static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
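/*
 * Userspace sketch (illustrative only): the F_SEAL_* checks above and in
 * hugetlbfs_punch_hole()/hugetlbfs_fallocate() matter mainly for hugetlb
 * memfds, which share this inode code:
 *
 *	int fd = memfd_create("ring", MFD_HUGETLB | MFD_ALLOW_SEALING);
 *	ftruncate(fd, 2 * HPAGE_2M);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 *	ftruncate(fd, 4 * HPAGE_2M);	now fails with EPERM
 *
 * Plain hugetlbfs files get info->seals = F_SEAL_SEAL at creation (see
 * hugetlbfs_get_inode() below), so further seals cannot be added to them.
 */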
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}
/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	dir->i_ctime = dir->i_mtime = current_time(dir);
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	dir->i_ctime = dir->i_mtime = current_time(dir);
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_page_subpool(&src->page)) {
		hugetlb_set_page_subpool(&dst->page,
					hugetlb_page_subpool(&src->page));
		hugetlb_set_page_subpool(&src->page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}
/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
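/*
 * Example output (illustrative): for the mount sketched near the top of
 * this file, the /proc/mounts line would look roughly like
 *
 *	none /mnt/huge hugetlbfs rw,relatime,mode=770,pagesize=2M,size=1073741824,min_size=536870912 0 0
 *
 * Default uid/gid/mode and unlimited nr_inodes are omitted; size and
 * min_size are printed in bytes (512 pages << 21 == 1073741824) while
 * pagesize gets a K/M suffix, per the seq_printf() calls above.
 */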
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.dirty_folio		= noop_dirty_folio,
	.migrate_folio		= hugetlbfs_migrate_folio,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

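/*
 * Worked example (illustrative): with 2MB huge pages and a pool of
 * max_huge_pages = 512, "size=50%" arrives here as size_opt = 50 with
 * val_type == SIZE_PERCENT.  It is scaled to bytes (50 << 21),
 * multiplied by 512, divided by 100, then shifted back down by
 * huge_page_shift, yielding 256 huge pages.
 */
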
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
}

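/*
 * Illustrative mount invocation exercising the parser above; each
 * option is handed to hugetlbfs_parse_param() one at a time (values
 * are examples only):
 *
 *   mount -t hugetlbfs \
 *         -o uid=1000,gid=1000,mode=0700,pagesize=2M,size=1G,min_size=512M,nr_inodes=64 \
 *         none /mnt/huge
 */
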
/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

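/*
 * For instance (illustrative): "size=1G,min_size=2G" with 2MB pages
 * converts to max_hpages = 512 and min_hpages = 1024, so the check
 * above rejects the mount with -EINVAL.
 */
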
static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

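/*
 * Resulting superblock geometry, by way of example (values depend on
 * the hstate actually selected): with the common x86-64 2MB default,
 * s_blocksize ends up as 2097152 and s_blocksize_bits as 21.
 */
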
static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops = &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return hstate_index(h);
}

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, int creat_flags,
				int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		struct ucounts *ucounts = current_ucounts();

		if (user_shm_lock(size, ucounts)) {
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
			user_shm_unlock(size, ucounts);
		}
		return ERR_PTR(-EPERM);
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					 &hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	return file;
}

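/*
 * hugetlb_file_setup() backs the huge-page variants of the anonymous
 * shared-memory interfaces.  Illustrative userspace triggers (not part
 * of this file):
 *
 *   shmget(key, 2 * 1024 * 1024, IPC_CREAT | SHM_HUGETLB | 0600);
 *
 *   mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *        MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * Both paths arrive here with a size already rounded up to the huge
 * page size, as the comment above requires.
 */
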
static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
		       huge_page_size(h) / SZ_1K);
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)
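
/*
 * The vfsmounts created above are kernel-internal (SB_KERNMOUNT): they
 * never appear in any mount namespace and exist so hugetlb_file_setup()
 * has a superblock per hstate to allocate inodes from.  A user-visible
 * instance still has to be mounted explicitly, e.g. (illustrative):
 *
 *   mount -t hugetlbfs none /dev/hugepages
 */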