11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * hugetlbpage-backed filesystem. Based on ramfs. 31da177e4SLinus Torvalds * 46d49e352SNadia Yvette Chambers * Nadia Yvette Chambers, 2002 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Copyright (C) 2002 Linus Torvalds. 73e89e1c5SPaul Gortmaker * License: GPL 81da177e4SLinus Torvalds */ 91da177e4SLinus Torvalds 109b857d26SAndrew Morton #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 119b857d26SAndrew Morton 121da177e4SLinus Torvalds #include <linux/thread_info.h> 131da177e4SLinus Torvalds #include <asm/current.h> 14174cd4b1SIngo Molnar #include <linux/sched/signal.h> /* remove ASAP */ 1570c3547eSMike Kravetz #include <linux/falloc.h> 161da177e4SLinus Torvalds #include <linux/fs.h> 171da177e4SLinus Torvalds #include <linux/mount.h> 181da177e4SLinus Torvalds #include <linux/file.h> 19e73a75faSRandy Dunlap #include <linux/kernel.h> 201da177e4SLinus Torvalds #include <linux/writeback.h> 211da177e4SLinus Torvalds #include <linux/pagemap.h> 221da177e4SLinus Torvalds #include <linux/highmem.h> 231da177e4SLinus Torvalds #include <linux/init.h> 241da177e4SLinus Torvalds #include <linux/string.h> 2516f7e0feSRandy Dunlap #include <linux/capability.h> 26e73a75faSRandy Dunlap #include <linux/ctype.h> 271da177e4SLinus Torvalds #include <linux/backing-dev.h> 281da177e4SLinus Torvalds #include <linux/hugetlb.h> 291da177e4SLinus Torvalds #include <linux/pagevec.h> 3032021982SDavid Howells #include <linux/fs_parser.h> 31036e0856SBenjamin Herrenschmidt #include <linux/mman.h> 321da177e4SLinus Torvalds #include <linux/slab.h> 331da177e4SLinus Torvalds #include <linux/dnotify.h> 341da177e4SLinus Torvalds #include <linux/statfs.h> 351da177e4SLinus Torvalds #include <linux/security.h> 361fd7317dSNick Black #include <linux/magic.h> 37290408d4SNaoya Horiguchi #include <linux/migrate.h> 3834d0640eSAl Viro #include <linux/uio.h> 391da177e4SLinus Torvalds 407c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 4188590253SShijie Hu #include <linux/sched/mm.h> 421da177e4SLinus Torvalds 43ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops; 44f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops; 454b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations; 4692e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations; 4792e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations; 481da177e4SLinus Torvalds 4932021982SDavid Howells enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT }; 5032021982SDavid Howells 5132021982SDavid Howells struct hugetlbfs_fs_context { 524a25220dSDavid Howells struct hstate *hstate; 5332021982SDavid Howells unsigned long long max_size_opt; 5432021982SDavid Howells unsigned long long min_size_opt; 554a25220dSDavid Howells long max_hpages; 564a25220dSDavid Howells long nr_inodes; 574a25220dSDavid Howells long min_hpages; 5832021982SDavid Howells enum hugetlbfs_size_type max_val_type; 5932021982SDavid Howells enum hugetlbfs_size_type min_val_type; 60a0eb3a05SEric W. Biederman kuid_t uid; 61a0eb3a05SEric W. 
Biederman kgid_t gid; 62a1d776eeSDavid Gibson umode_t mode; 63a1d776eeSDavid Gibson }; 64a1d776eeSDavid Gibson 651da177e4SLinus Torvalds int sysctl_hugetlb_shm_group; 661da177e4SLinus Torvalds 6732021982SDavid Howells enum hugetlb_param { 6832021982SDavid Howells Opt_gid, 6932021982SDavid Howells Opt_min_size, 7032021982SDavid Howells Opt_mode, 7132021982SDavid Howells Opt_nr_inodes, 7232021982SDavid Howells Opt_pagesize, 7332021982SDavid Howells Opt_size, 7432021982SDavid Howells Opt_uid, 75e73a75faSRandy Dunlap }; 76e73a75faSRandy Dunlap 77d7167b14SAl Viro static const struct fs_parameter_spec hugetlb_fs_parameters[] = { 7832021982SDavid Howells fsparam_u32 ("gid", Opt_gid), 7932021982SDavid Howells fsparam_string("min_size", Opt_min_size), 80e0f7e2b2SMike Kravetz fsparam_u32oct("mode", Opt_mode), 8132021982SDavid Howells fsparam_string("nr_inodes", Opt_nr_inodes), 8232021982SDavid Howells fsparam_string("pagesize", Opt_pagesize), 8332021982SDavid Howells fsparam_string("size", Opt_size), 8432021982SDavid Howells fsparam_u32 ("uid", Opt_uid), 8532021982SDavid Howells {} 8632021982SDavid Howells }; 8732021982SDavid Howells 8870c3547eSMike Kravetz #ifdef CONFIG_NUMA 8970c3547eSMike Kravetz static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma, 9070c3547eSMike Kravetz struct inode *inode, pgoff_t index) 9170c3547eSMike Kravetz { 9270c3547eSMike Kravetz vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy, 9370c3547eSMike Kravetz index); 9470c3547eSMike Kravetz } 9570c3547eSMike Kravetz 9670c3547eSMike Kravetz static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma) 9770c3547eSMike Kravetz { 9870c3547eSMike Kravetz mpol_cond_put(vma->vm_policy); 9970c3547eSMike Kravetz } 10070c3547eSMike Kravetz #else 10170c3547eSMike Kravetz static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma, 10270c3547eSMike Kravetz struct inode *inode, pgoff_t index) 10370c3547eSMike Kravetz { 10470c3547eSMike Kravetz } 10570c3547eSMike Kravetz 10670c3547eSMike Kravetz static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma) 10770c3547eSMike Kravetz { 10870c3547eSMike Kravetz } 10970c3547eSMike Kravetz #endif 11070c3547eSMike Kravetz 11163489f8eSMike Kravetz /* 11263489f8eSMike Kravetz * Mask used when checking the page offset value passed in via system 11363489f8eSMike Kravetz * calls. This value will be converted to a loff_t which is signed. 11463489f8eSMike Kravetz * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the 11563489f8eSMike Kravetz * value. The extra bit (- 1 in the shift value) is to take the sign 11663489f8eSMike Kravetz * bit into account. 11763489f8eSMike Kravetz */ 11863489f8eSMike Kravetz #define PGOFF_LOFFT_MAX \ 11963489f8eSMike Kravetz (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1))) 12063489f8eSMike Kravetz 1211da177e4SLinus Torvalds static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) 1221da177e4SLinus Torvalds { 123496ad9aaSAl Viro struct inode *inode = file_inode(file); 12422247efdSPeter Xu struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); 1251da177e4SLinus Torvalds loff_t len, vma_len; 1261da177e4SLinus Torvalds int ret; 127a5516438SAndi Kleen struct hstate *h = hstate_file(file); 1281da177e4SLinus Torvalds 12968589bc3SHugh Dickins /* 130dec4ad86SDavid Gibson * vma address alignment (but not the pgoff alignment) has 131dec4ad86SDavid Gibson * already been checked by prepare_hugepage_range. 
If you add 132dec4ad86SDavid Gibson * any error returns here, do so after setting VM_HUGETLB, so 133dec4ad86SDavid Gibson * is_vm_hugetlb_page tests below unmap_region go the right 13445e55300SPeter Collingbourne * way when do_mmap unwinds (may be important on powerpc 135dec4ad86SDavid Gibson * and ia64). 13668589bc3SHugh Dickins */ 137a2fce914SNaoya Horiguchi vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; 13868589bc3SHugh Dickins vma->vm_ops = &hugetlb_vm_ops; 1391da177e4SLinus Torvalds 14022247efdSPeter Xu ret = seal_check_future_write(info->seals, vma); 14122247efdSPeter Xu if (ret) 14222247efdSPeter Xu return ret; 14322247efdSPeter Xu 144045c7a3fSMike Kravetz /* 14563489f8eSMike Kravetz * page based offset in vm_pgoff could be sufficiently large to 1465df63c2aSMike Kravetz * overflow a loff_t when converted to byte offset. This can 1475df63c2aSMike Kravetz * only happen on architectures where sizeof(loff_t) == 1485df63c2aSMike Kravetz * sizeof(unsigned long). So, only check in those instances. 149045c7a3fSMike Kravetz */ 1505df63c2aSMike Kravetz if (sizeof(unsigned long) == sizeof(loff_t)) { 15163489f8eSMike Kravetz if (vma->vm_pgoff & PGOFF_LOFFT_MAX) 152045c7a3fSMike Kravetz return -EINVAL; 1535df63c2aSMike Kravetz } 154045c7a3fSMike Kravetz 15563489f8eSMike Kravetz /* must be huge page aligned */ 1562b37c35eSBecky Bruce if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) 157dec4ad86SDavid Gibson return -EINVAL; 158dec4ad86SDavid Gibson 1591da177e4SLinus Torvalds vma_len = (loff_t)(vma->vm_end - vma->vm_start); 160045c7a3fSMike Kravetz len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 161045c7a3fSMike Kravetz /* check for overflow */ 162045c7a3fSMike Kravetz if (len < vma_len) 163045c7a3fSMike Kravetz return -EINVAL; 1641da177e4SLinus Torvalds 1655955102cSAl Viro inode_lock(inode); 1661da177e4SLinus Torvalds file_accessed(file); 1671da177e4SLinus Torvalds 1681da177e4SLinus Torvalds ret = -ENOMEM; 16933b8f84aSMike Kravetz if (!hugetlb_reserve_pages(inode, 170a5516438SAndi Kleen vma->vm_pgoff >> huge_page_order(h), 1715a6fe125SMel Gorman len >> huge_page_shift(h), vma, 1725a6fe125SMel Gorman vma->vm_flags)) 173b45b5bd6SDavid Gibson goto out; 174b45b5bd6SDavid Gibson 1754c887265SAdam Litke ret = 0; 176b6174df5SZhang, Yanmin if (vma->vm_flags & VM_WRITE && inode->i_size < len) 177045c7a3fSMike Kravetz i_size_write(inode, len); 1781da177e4SLinus Torvalds out: 1795955102cSAl Viro inode_unlock(inode); 1801da177e4SLinus Torvalds 1811da177e4SLinus Torvalds return ret; 1821da177e4SLinus Torvalds } 1831da177e4SLinus Torvalds 1841da177e4SLinus Torvalds /* 1853e4e28c5SMichel Lespinasse * Called under mmap_write_lock(mm). 
1861da177e4SLinus Torvalds  */
1871da177e4SLinus Torvalds 
1881da177e4SLinus Torvalds static unsigned long
18988590253SShijie Hu hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
19088590253SShijie Hu 		unsigned long len, unsigned long pgoff, unsigned long flags)
19188590253SShijie Hu {
19288590253SShijie Hu 	struct hstate *h = hstate_file(file);
19388590253SShijie Hu 	struct vm_unmapped_area_info info;
19488590253SShijie Hu 
19588590253SShijie Hu 	info.flags = 0;
19688590253SShijie Hu 	info.length = len;
19788590253SShijie Hu 	info.low_limit = current->mm->mmap_base;
1982cb4de08SChristophe Leroy 	info.high_limit = arch_get_mmap_end(addr, len, flags);
19988590253SShijie Hu 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
20088590253SShijie Hu 	info.align_offset = 0;
20188590253SShijie Hu 	return vm_unmapped_area(&info);
20288590253SShijie Hu }
20388590253SShijie Hu 
20488590253SShijie Hu static unsigned long
20588590253SShijie Hu hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
20688590253SShijie Hu 		unsigned long len, unsigned long pgoff, unsigned long flags)
20788590253SShijie Hu {
20888590253SShijie Hu 	struct hstate *h = hstate_file(file);
20988590253SShijie Hu 	struct vm_unmapped_area_info info;
21088590253SShijie Hu 
21188590253SShijie Hu 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
21288590253SShijie Hu 	info.length = len;
21388590253SShijie Hu 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
2145f24d5a5SChristophe Leroy 	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
21588590253SShijie Hu 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
21688590253SShijie Hu 	info.align_offset = 0;
21788590253SShijie Hu 	addr = vm_unmapped_area(&info);
21888590253SShijie Hu 
21988590253SShijie Hu 	/*
22088590253SShijie Hu 	 * A failed mmap() very likely causes application failure,
22188590253SShijie Hu 	 * so fall back to the bottom-up function here. This scenario
22288590253SShijie Hu 	 * can happen with large stack limits and large mmap()
22388590253SShijie Hu 	 * allocations.
22488590253SShijie Hu */ 22588590253SShijie Hu if (unlikely(offset_in_page(addr))) { 22688590253SShijie Hu VM_BUG_ON(addr != -ENOMEM); 22788590253SShijie Hu info.flags = 0; 22888590253SShijie Hu info.low_limit = current->mm->mmap_base; 2292cb4de08SChristophe Leroy info.high_limit = arch_get_mmap_end(addr, len, flags); 23088590253SShijie Hu addr = vm_unmapped_area(&info); 23188590253SShijie Hu } 23288590253SShijie Hu 23388590253SShijie Hu return addr; 23488590253SShijie Hu } 23588590253SShijie Hu 2364b439e25SChristophe Leroy unsigned long 2374b439e25SChristophe Leroy generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 2384b439e25SChristophe Leroy unsigned long len, unsigned long pgoff, 2394b439e25SChristophe Leroy unsigned long flags) 2401da177e4SLinus Torvalds { 2411da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 2421da177e4SLinus Torvalds struct vm_area_struct *vma; 243a5516438SAndi Kleen struct hstate *h = hstate_file(file); 2442cb4de08SChristophe Leroy const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 2451da177e4SLinus Torvalds 246a5516438SAndi Kleen if (len & ~huge_page_mask(h)) 2471da177e4SLinus Torvalds return -EINVAL; 2481da177e4SLinus Torvalds if (len > TASK_SIZE) 2491da177e4SLinus Torvalds return -ENOMEM; 2501da177e4SLinus Torvalds 251036e0856SBenjamin Herrenschmidt if (flags & MAP_FIXED) { 252a5516438SAndi Kleen if (prepare_hugepage_range(file, addr, len)) 253036e0856SBenjamin Herrenschmidt return -EINVAL; 254036e0856SBenjamin Herrenschmidt return addr; 255036e0856SBenjamin Herrenschmidt } 256036e0856SBenjamin Herrenschmidt 2571da177e4SLinus Torvalds if (addr) { 258a5516438SAndi Kleen addr = ALIGN(addr, huge_page_size(h)); 2591da177e4SLinus Torvalds vma = find_vma(mm, addr); 2605f24d5a5SChristophe Leroy if (mmap_end - len >= addr && 2611be7107fSHugh Dickins (!vma || addr + len <= vm_start_gap(vma))) 2621da177e4SLinus Torvalds return addr; 2631da177e4SLinus Torvalds } 2641da177e4SLinus Torvalds 26588590253SShijie Hu /* 26688590253SShijie Hu * Use mm->get_unmapped_area value as a hint to use topdown routine. 26788590253SShijie Hu * If architectures have special needs, they should define their own 26888590253SShijie Hu * version of hugetlb_get_unmapped_area. 26988590253SShijie Hu */ 27088590253SShijie Hu if (mm->get_unmapped_area == arch_get_unmapped_area_topdown) 27188590253SShijie Hu return hugetlb_get_unmapped_area_topdown(file, addr, len, 27288590253SShijie Hu pgoff, flags); 27388590253SShijie Hu return hugetlb_get_unmapped_area_bottomup(file, addr, len, 27488590253SShijie Hu pgoff, flags); 2751da177e4SLinus Torvalds } 2764b439e25SChristophe Leroy 2774b439e25SChristophe Leroy #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA 2784b439e25SChristophe Leroy static unsigned long 2794b439e25SChristophe Leroy hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 2804b439e25SChristophe Leroy unsigned long len, unsigned long pgoff, 2814b439e25SChristophe Leroy unsigned long flags) 2824b439e25SChristophe Leroy { 2834b439e25SChristophe Leroy return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); 2844b439e25SChristophe Leroy } 2851da177e4SLinus Torvalds #endif 2861da177e4SLinus Torvalds 287e63e1e5aSBadari Pulavarty /* 288e63e1e5aSBadari Pulavarty * Support for read() - Find the page attached to f_mapping and copy out the 289c7e285e3SMiaohe Lin * data. Its *very* similar to generic_file_buffered_read(), we can't use that 290ea1754a0SKirill A. Shutemov * since it has PAGE_SIZE assumptions. 
291e63e1e5aSBadari Pulavarty */ 29234d0640eSAl Viro static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) 293e63e1e5aSBadari Pulavarty { 29434d0640eSAl Viro struct file *file = iocb->ki_filp; 29534d0640eSAl Viro struct hstate *h = hstate_file(file); 29634d0640eSAl Viro struct address_space *mapping = file->f_mapping; 297e63e1e5aSBadari Pulavarty struct inode *inode = mapping->host; 29834d0640eSAl Viro unsigned long index = iocb->ki_pos >> huge_page_shift(h); 29934d0640eSAl Viro unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); 300e63e1e5aSBadari Pulavarty unsigned long end_index; 301e63e1e5aSBadari Pulavarty loff_t isize; 302e63e1e5aSBadari Pulavarty ssize_t retval = 0; 303e63e1e5aSBadari Pulavarty 30434d0640eSAl Viro while (iov_iter_count(to)) { 305e63e1e5aSBadari Pulavarty struct page *page; 30634d0640eSAl Viro size_t nr, copied; 307e63e1e5aSBadari Pulavarty 308e63e1e5aSBadari Pulavarty /* nr is the maximum number of bytes to copy from this page */ 309a5516438SAndi Kleen nr = huge_page_size(h); 310a05b0855SAneesh Kumar K.V isize = i_size_read(inode); 311a05b0855SAneesh Kumar K.V if (!isize) 31234d0640eSAl Viro break; 313a05b0855SAneesh Kumar K.V end_index = (isize - 1) >> huge_page_shift(h); 314e63e1e5aSBadari Pulavarty if (index > end_index) 31534d0640eSAl Viro break; 31634d0640eSAl Viro if (index == end_index) { 317a5516438SAndi Kleen nr = ((isize - 1) & ~huge_page_mask(h)) + 1; 318a05b0855SAneesh Kumar K.V if (nr <= offset) 31934d0640eSAl Viro break; 320e63e1e5aSBadari Pulavarty } 321e63e1e5aSBadari Pulavarty nr = nr - offset; 322e63e1e5aSBadari Pulavarty 323e63e1e5aSBadari Pulavarty /* Find the page */ 324a05b0855SAneesh Kumar K.V page = find_lock_page(mapping, index); 325e63e1e5aSBadari Pulavarty if (unlikely(page == NULL)) { 326e63e1e5aSBadari Pulavarty /* 327e63e1e5aSBadari Pulavarty * We have a HOLE, zero out the user-buffer for the 328e63e1e5aSBadari Pulavarty * length of the hole or request. 329e63e1e5aSBadari Pulavarty */ 33034d0640eSAl Viro copied = iov_iter_zero(nr, to); 331e63e1e5aSBadari Pulavarty } else { 332a05b0855SAneesh Kumar K.V unlock_page(page); 333a05b0855SAneesh Kumar K.V 334e63e1e5aSBadari Pulavarty /* 335e63e1e5aSBadari Pulavarty * We have the page, copy it to user space buffer. 336e63e1e5aSBadari Pulavarty */ 337*c7d57ab1SAl Viro copied = copy_page_to_iter(page, offset, nr, to); 33809cbfeafSKirill A. 
Shutemov put_page(page); 339e63e1e5aSBadari Pulavarty } 34034d0640eSAl Viro offset += copied; 34134d0640eSAl Viro retval += copied; 34234d0640eSAl Viro if (copied != nr && iov_iter_count(to)) { 34334d0640eSAl Viro if (!retval) 34434d0640eSAl Viro retval = -EFAULT; 345e63e1e5aSBadari Pulavarty break; 346e63e1e5aSBadari Pulavarty } 34734d0640eSAl Viro index += offset >> huge_page_shift(h); 34834d0640eSAl Viro offset &= ~huge_page_mask(h); 34934d0640eSAl Viro } 35034d0640eSAl Viro iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; 351e63e1e5aSBadari Pulavarty return retval; 352e63e1e5aSBadari Pulavarty } 353e63e1e5aSBadari Pulavarty 354800d15a5SNick Piggin static int hugetlbfs_write_begin(struct file *file, 355800d15a5SNick Piggin struct address_space *mapping, 3569d6b0cd7SMatthew Wilcox (Oracle) loff_t pos, unsigned len, 357800d15a5SNick Piggin struct page **pagep, void **fsdata) 3581da177e4SLinus Torvalds { 3591da177e4SLinus Torvalds return -EINVAL; 3601da177e4SLinus Torvalds } 3611da177e4SLinus Torvalds 362800d15a5SNick Piggin static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, 363800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 364800d15a5SNick Piggin struct page *page, void *fsdata) 3651da177e4SLinus Torvalds { 366800d15a5SNick Piggin BUG(); 3671da177e4SLinus Torvalds return -EINVAL; 3681da177e4SLinus Torvalds } 3691da177e4SLinus Torvalds 370b5cec28dSMike Kravetz static void remove_huge_page(struct page *page) 3711da177e4SLinus Torvalds { 372b9ea2515SKonstantin Khlebnikov ClearPageDirty(page); 3731da177e4SLinus Torvalds ClearPageUptodate(page); 374bd65cb86SMinchan Kim delete_from_page_cache(page); 3751da177e4SLinus Torvalds } 3761da177e4SLinus Torvalds 3774aae8d1cSMike Kravetz static void 37805e90bd0SPeter Xu hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end, 37905e90bd0SPeter Xu zap_flags_t zap_flags) 3804aae8d1cSMike Kravetz { 3814aae8d1cSMike Kravetz struct vm_area_struct *vma; 3824aae8d1cSMike Kravetz 3834aae8d1cSMike Kravetz /* 384d6aba4c8SSean Christopherson * end == 0 indicates that the entire range after start should be 385d6aba4c8SSean Christopherson * unmapped. Note, end is exclusive, whereas the interval tree takes 386d6aba4c8SSean Christopherson * an inclusive "last". 3874aae8d1cSMike Kravetz */ 388d6aba4c8SSean Christopherson vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) { 3894aae8d1cSMike Kravetz unsigned long v_offset; 3904aae8d1cSMike Kravetz unsigned long v_end; 3914aae8d1cSMike Kravetz 3924aae8d1cSMike Kravetz /* 3934aae8d1cSMike Kravetz * Can the expression below overflow on 32-bit arches? 3944aae8d1cSMike Kravetz * No, because the interval tree returns us only those vmas 3954aae8d1cSMike Kravetz * which overlap the truncated area starting at pgoff, 3964aae8d1cSMike Kravetz * and no vma on a 32-bit arch can span beyond the 4GB. 
3974aae8d1cSMike Kravetz 		 */
3984aae8d1cSMike Kravetz 		if (vma->vm_pgoff < start)
3994aae8d1cSMike Kravetz 			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
4004aae8d1cSMike Kravetz 		else
4014aae8d1cSMike Kravetz 			v_offset = 0;
4024aae8d1cSMike Kravetz 
4034aae8d1cSMike Kravetz 		if (!end)
4044aae8d1cSMike Kravetz 			v_end = vma->vm_end;
4054aae8d1cSMike Kravetz 		else {
4064aae8d1cSMike Kravetz 			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
4074aae8d1cSMike Kravetz 				+ vma->vm_start;
4084aae8d1cSMike Kravetz 			if (v_end > vma->vm_end)
4094aae8d1cSMike Kravetz 				v_end = vma->vm_end;
4104aae8d1cSMike Kravetz 		}
4114aae8d1cSMike Kravetz 
4124aae8d1cSMike Kravetz 		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
41305e90bd0SPeter Xu 				     NULL, zap_flags);
4144aae8d1cSMike Kravetz 	}
4154aae8d1cSMike Kravetz }
416b5cec28dSMike Kravetz 
417b5cec28dSMike Kravetz /*
418b5cec28dSMike Kravetz * remove_inode_hugepages handles two distinct cases: truncation and hole
419b5cec28dSMike Kravetz * punch. There are subtle differences in operation for each case.
4204aae8d1cSMike Kravetz *
421b5cec28dSMike Kravetz * truncation is indicated by end of range being LLONG_MAX
422b5cec28dSMike Kravetz * In this case, we first scan the range and release found pages.
4231935ebd3SMiaohe Lin * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
424e7c58097SMike Kravetz * maps and global counts. Page faults cannot race with truncation
42587bf91d3SMike Kravetz * in this routine. hugetlb_no_page() holds i_mmap_rwsem and prevents
42687bf91d3SMike Kravetz * page faults in the truncated range by checking i_size. i_size is
42787bf91d3SMike Kravetz * modified while holding i_mmap_rwsem.
428b5cec28dSMike Kravetz * hole punch is indicated if end is not LLONG_MAX
429b5cec28dSMike Kravetz * In the hole punch case we scan the range and release found pages.
4301935ebd3SMiaohe Lin * Only when releasing a page is the associated region/reserve map
4311935ebd3SMiaohe Lin * deleted. The region/reserve map for ranges without associated
432e7c58097SMike Kravetz * pages is not modified. Page faults can race with hole punch.
433e7c58097SMike Kravetz * This is indicated if we find a mapped page.
434b5cec28dSMike Kravetz * Note: If the passed end of range value is beyond the end of file, but
435b5cec28dSMike Kravetz * not LLONG_MAX, this routine still performs a hole punch operation.
436b5cec28dSMike Kravetz */ 437b5cec28dSMike Kravetz static void remove_inode_hugepages(struct inode *inode, loff_t lstart, 438b5cec28dSMike Kravetz loff_t lend) 4391da177e4SLinus Torvalds { 440a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 441b45b5bd6SDavid Gibson struct address_space *mapping = &inode->i_data; 442a5516438SAndi Kleen const pgoff_t start = lstart >> huge_page_shift(h); 443b5cec28dSMike Kravetz const pgoff_t end = lend >> huge_page_shift(h); 4441508062eSMatthew Wilcox (Oracle) struct folio_batch fbatch; 445d72dc8a2SJan Kara pgoff_t next, index; 446a43a8c39SChen, Kenneth W int i, freed = 0; 447b5cec28dSMike Kravetz bool truncate_op = (lend == LLONG_MAX); 4481da177e4SLinus Torvalds 4491508062eSMatthew Wilcox (Oracle) folio_batch_init(&fbatch); 4501da177e4SLinus Torvalds next = start; 4511508062eSMatthew Wilcox (Oracle) while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { 4521508062eSMatthew Wilcox (Oracle) for (i = 0; i < folio_batch_count(&fbatch); ++i) { 4531508062eSMatthew Wilcox (Oracle) struct folio *folio = fbatch.folios[i]; 454d4241a04SMiaohe Lin u32 hash = 0; 455b5cec28dSMike Kravetz 4561508062eSMatthew Wilcox (Oracle) index = folio->index; 45787bf91d3SMike Kravetz if (!truncate_op) { 45887bf91d3SMike Kravetz /* 45987bf91d3SMike Kravetz * Only need to hold the fault mutex in the 46087bf91d3SMike Kravetz * hole punch case. This prevents races with 46187bf91d3SMike Kravetz * page faults. Races are not possible in the 46287bf91d3SMike Kravetz * case of truncation. 46387bf91d3SMike Kravetz */ 464d4241a04SMiaohe Lin hash = hugetlb_fault_mutex_hash(mapping, index); 465e7c58097SMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]); 46687bf91d3SMike Kravetz } 467e7c58097SMike Kravetz 468b5cec28dSMike Kravetz /* 4691508062eSMatthew Wilcox (Oracle) * If folio is mapped, it was faulted in after being 470e7c58097SMike Kravetz * unmapped in caller. Unmap (again) now after taking 471e7c58097SMike Kravetz * the fault mutex. The mutex will prevent faults 4721508062eSMatthew Wilcox (Oracle) * until we finish removing the folio. 473e7c58097SMike Kravetz * 474e7c58097SMike Kravetz * This race can only happen in the hole punch case. 475e7c58097SMike Kravetz * Getting here in a truncate operation is a bug. 476b5cec28dSMike Kravetz */ 4771508062eSMatthew Wilcox (Oracle) if (unlikely(folio_mapped(folio))) { 478e7c58097SMike Kravetz BUG_ON(truncate_op); 479e7c58097SMike Kravetz 480c0d0381aSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 481e7c58097SMike Kravetz i_mmap_lock_write(mapping); 482c0d0381aSMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]); 483e7c58097SMike Kravetz hugetlb_vmdelete_list(&mapping->i_mmap, 484e7c58097SMike Kravetz index * pages_per_huge_page(h), 48505e90bd0SPeter Xu (index + 1) * pages_per_huge_page(h), 48605e90bd0SPeter Xu ZAP_FLAG_DROP_MARKER); 487e7c58097SMike Kravetz i_mmap_unlock_write(mapping); 488e7c58097SMike Kravetz } 4894aae8d1cSMike Kravetz 4901508062eSMatthew Wilcox (Oracle) folio_lock(folio); 4914aae8d1cSMike Kravetz /* 4924aae8d1cSMike Kravetz * We must free the huge page and remove from page 4934aae8d1cSMike Kravetz * cache (remove_huge_page) BEFORE removing the 4944aae8d1cSMike Kravetz * region/reserve map (hugetlb_unreserve_pages). In 4954aae8d1cSMike Kravetz * rare out of memory conditions, removal of the 49672e2936cSzhong jiang * region/reserve map could fail. Correspondingly, 49772e2936cSzhong jiang * the subpool and global reserve usage count can need 49872e2936cSzhong jiang * to be adjusted. 
4994aae8d1cSMike Kravetz */ 5001508062eSMatthew Wilcox (Oracle) VM_BUG_ON(HPageRestoreReserve(&folio->page)); 5011508062eSMatthew Wilcox (Oracle) remove_huge_page(&folio->page); 502b5cec28dSMike Kravetz freed++; 503b5cec28dSMike Kravetz if (!truncate_op) { 5044aae8d1cSMike Kravetz if (unlikely(hugetlb_unreserve_pages(inode, 505d72dc8a2SJan Kara index, index + 1, 1))) 50672e2936cSzhong jiang hugetlb_fix_reserve_counts(inode); 507b5cec28dSMike Kravetz } 508b5cec28dSMike Kravetz 5091508062eSMatthew Wilcox (Oracle) folio_unlock(folio); 51087bf91d3SMike Kravetz if (!truncate_op) 511e7c58097SMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5121da177e4SLinus Torvalds } 5131508062eSMatthew Wilcox (Oracle) folio_batch_release(&fbatch); 5141817889eSMike Kravetz cond_resched(); 5151da177e4SLinus Torvalds } 516b5cec28dSMike Kravetz 517b5cec28dSMike Kravetz if (truncate_op) 518b5cec28dSMike Kravetz (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed); 5191da177e4SLinus Torvalds } 5201da177e4SLinus Torvalds 5212bbbda30SAl Viro static void hugetlbfs_evict_inode(struct inode *inode) 5221da177e4SLinus Torvalds { 5239119a41eSJoonsoo Kim struct resv_map *resv_map; 5249119a41eSJoonsoo Kim 525b5cec28dSMike Kravetz remove_inode_hugepages(inode, 0, LLONG_MAX); 526f27a5136SMike Kravetz 527f27a5136SMike Kravetz /* 528f27a5136SMike Kravetz * Get the resv_map from the address space embedded in the inode. 529f27a5136SMike Kravetz * This is the address space which points to any resv_map allocated 530f27a5136SMike Kravetz * at inode creation time. If this is a device special inode, 531f27a5136SMike Kravetz * i_mapping may not point to the original address space. 532f27a5136SMike Kravetz */ 533f27a5136SMike Kravetz resv_map = (struct resv_map *)(&inode->i_data)->private_data; 534f27a5136SMike Kravetz /* Only regular and link inodes have associated reserve maps */ 5359119a41eSJoonsoo Kim if (resv_map) 5369119a41eSJoonsoo Kim resv_map_release(&resv_map->refs); 537dbd5768fSJan Kara clear_inode(inode); 538149f4211SChristoph Hellwig } 539149f4211SChristoph Hellwig 540e5d319deSMiaohe Lin static void hugetlb_vmtruncate(struct inode *inode, loff_t offset) 5411da177e4SLinus Torvalds { 542856fc295SHugh Dickins pgoff_t pgoff; 5431da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 544a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 5451da177e4SLinus Torvalds 546a5516438SAndi Kleen BUG_ON(offset & ~huge_page_mask(h)); 547856fc295SHugh Dickins pgoff = offset >> PAGE_SHIFT; 5481da177e4SLinus Torvalds 54983cde9e8SDavidlohr Bueso i_mmap_lock_write(mapping); 55087bf91d3SMike Kravetz i_size_write(inode, offset); 551f808c13fSDavidlohr Bueso if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) 55205e90bd0SPeter Xu hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0, 55305e90bd0SPeter Xu ZAP_FLAG_DROP_MARKER); 554c86aa7bbSMike Kravetz i_mmap_unlock_write(mapping); 555e7c58097SMike Kravetz remove_inode_hugepages(inode, offset, LLONG_MAX); 5561da177e4SLinus Torvalds } 5571da177e4SLinus Torvalds 55868d32527SMike Kravetz static void hugetlbfs_zero_partial_page(struct hstate *h, 55968d32527SMike Kravetz struct address_space *mapping, 56068d32527SMike Kravetz loff_t start, 56168d32527SMike Kravetz loff_t end) 56268d32527SMike Kravetz { 56368d32527SMike Kravetz pgoff_t idx = start >> huge_page_shift(h); 56468d32527SMike Kravetz struct folio *folio; 56568d32527SMike Kravetz 56668d32527SMike Kravetz folio = filemap_lock_folio(mapping, idx); 56768d32527SMike Kravetz if (!folio) 56868d32527SMike Kravetz 
return; 56968d32527SMike Kravetz 57068d32527SMike Kravetz start = start & ~huge_page_mask(h); 57168d32527SMike Kravetz end = end & ~huge_page_mask(h); 57268d32527SMike Kravetz if (!end) 57368d32527SMike Kravetz end = huge_page_size(h); 57468d32527SMike Kravetz 57568d32527SMike Kravetz folio_zero_segment(folio, (size_t)start, (size_t)end); 57668d32527SMike Kravetz 57768d32527SMike Kravetz folio_unlock(folio); 57868d32527SMike Kravetz folio_put(folio); 57968d32527SMike Kravetz } 58068d32527SMike Kravetz 58170c3547eSMike Kravetz static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) 58270c3547eSMike Kravetz { 58368d32527SMike Kravetz struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); 58468d32527SMike Kravetz struct address_space *mapping = inode->i_mapping; 58570c3547eSMike Kravetz struct hstate *h = hstate_inode(inode); 58670c3547eSMike Kravetz loff_t hpage_size = huge_page_size(h); 58770c3547eSMike Kravetz loff_t hole_start, hole_end; 58870c3547eSMike Kravetz 58970c3547eSMike Kravetz /* 59068d32527SMike Kravetz * hole_start and hole_end indicate the full pages within the hole. 59170c3547eSMike Kravetz */ 59270c3547eSMike Kravetz hole_start = round_up(offset, hpage_size); 59370c3547eSMike Kravetz hole_end = round_down(offset + len, hpage_size); 59470c3547eSMike Kravetz 5955955102cSAl Viro inode_lock(inode); 596ff62a342SMarc-André Lureau 597398c0da7SMiaohe Lin /* protected by i_rwsem */ 598ab3948f5SJoel Fernandes (Google) if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 599ff62a342SMarc-André Lureau inode_unlock(inode); 600ff62a342SMarc-André Lureau return -EPERM; 601ff62a342SMarc-André Lureau } 602ff62a342SMarc-André Lureau 60370c3547eSMike Kravetz i_mmap_lock_write(mapping); 60468d32527SMike Kravetz 60568d32527SMike Kravetz /* If range starts before first full page, zero partial page. */ 60668d32527SMike Kravetz if (offset < hole_start) 60768d32527SMike Kravetz hugetlbfs_zero_partial_page(h, mapping, 60868d32527SMike Kravetz offset, min(offset + len, hole_start)); 60968d32527SMike Kravetz 61068d32527SMike Kravetz /* Unmap users of full pages in the hole. */ 61168d32527SMike Kravetz if (hole_end > hole_start) { 612f808c13fSDavidlohr Bueso if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) 61370c3547eSMike Kravetz hugetlb_vmdelete_list(&mapping->i_mmap, 61470c3547eSMike Kravetz hole_start >> PAGE_SHIFT, 61505e90bd0SPeter Xu hole_end >> PAGE_SHIFT, 0); 61670c3547eSMike Kravetz } 61770c3547eSMike Kravetz 61868d32527SMike Kravetz /* If range extends beyond last full page, zero partial page. */ 61968d32527SMike Kravetz if ((offset + len) > hole_end && (offset + len) > hole_start) 62068d32527SMike Kravetz hugetlbfs_zero_partial_page(h, mapping, 62168d32527SMike Kravetz hole_end, offset + len); 62268d32527SMike Kravetz 62368d32527SMike Kravetz i_mmap_unlock_write(mapping); 62468d32527SMike Kravetz 62568d32527SMike Kravetz /* Remove full pages from the file. 
*/ 62668d32527SMike Kravetz if (hole_end > hole_start) 62768d32527SMike Kravetz remove_inode_hugepages(inode, hole_start, hole_end); 62868d32527SMike Kravetz 62968d32527SMike Kravetz inode_unlock(inode); 63068d32527SMike Kravetz 63170c3547eSMike Kravetz return 0; 63270c3547eSMike Kravetz } 63370c3547eSMike Kravetz 63470c3547eSMike Kravetz static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, 63570c3547eSMike Kravetz loff_t len) 63670c3547eSMike Kravetz { 63770c3547eSMike Kravetz struct inode *inode = file_inode(file); 638ff62a342SMarc-André Lureau struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); 63970c3547eSMike Kravetz struct address_space *mapping = inode->i_mapping; 64070c3547eSMike Kravetz struct hstate *h = hstate_inode(inode); 64170c3547eSMike Kravetz struct vm_area_struct pseudo_vma; 64270c3547eSMike Kravetz struct mm_struct *mm = current->mm; 64370c3547eSMike Kravetz loff_t hpage_size = huge_page_size(h); 64470c3547eSMike Kravetz unsigned long hpage_shift = huge_page_shift(h); 64570c3547eSMike Kravetz pgoff_t start, index, end; 64670c3547eSMike Kravetz int error; 64770c3547eSMike Kravetz u32 hash; 64870c3547eSMike Kravetz 64970c3547eSMike Kravetz if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 65070c3547eSMike Kravetz return -EOPNOTSUPP; 65170c3547eSMike Kravetz 65270c3547eSMike Kravetz if (mode & FALLOC_FL_PUNCH_HOLE) 65370c3547eSMike Kravetz return hugetlbfs_punch_hole(inode, offset, len); 65470c3547eSMike Kravetz 65570c3547eSMike Kravetz /* 65670c3547eSMike Kravetz * Default preallocate case. 65770c3547eSMike Kravetz * For this range, start is rounded down and end is rounded up 65870c3547eSMike Kravetz * as well as being converted to page offsets. 65970c3547eSMike Kravetz */ 66070c3547eSMike Kravetz start = offset >> hpage_shift; 66170c3547eSMike Kravetz end = (offset + len + hpage_size - 1) >> hpage_shift; 66270c3547eSMike Kravetz 6635955102cSAl Viro inode_lock(inode); 66470c3547eSMike Kravetz 66570c3547eSMike Kravetz /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 66670c3547eSMike Kravetz error = inode_newsize_ok(inode, offset + len); 66770c3547eSMike Kravetz if (error) 66870c3547eSMike Kravetz goto out; 66970c3547eSMike Kravetz 670ff62a342SMarc-André Lureau if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 671ff62a342SMarc-André Lureau error = -EPERM; 672ff62a342SMarc-André Lureau goto out; 673ff62a342SMarc-André Lureau } 674ff62a342SMarc-André Lureau 67570c3547eSMike Kravetz /* 67670c3547eSMike Kravetz * Initialize a pseudo vma as this is required by the huge page 67770c3547eSMike Kravetz * allocation routines. If NUMA is configured, use page index 67870c3547eSMike Kravetz * as input to create an allocation policy. 67970c3547eSMike Kravetz */ 6802c4541e2SKirill A. Shutemov vma_init(&pseudo_vma, mm); 68170c3547eSMike Kravetz pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 68270c3547eSMike Kravetz pseudo_vma.vm_file = file; 68370c3547eSMike Kravetz 68470c3547eSMike Kravetz for (index = start; index < end; index++) { 68570c3547eSMike Kravetz /* 68670c3547eSMike Kravetz * This is supposed to be the vaddr where the page is being 68770c3547eSMike Kravetz * faulted in, but we have no vaddr here. 
68870c3547eSMike Kravetz */ 68970c3547eSMike Kravetz struct page *page; 69070c3547eSMike Kravetz unsigned long addr; 69170c3547eSMike Kravetz 69270c3547eSMike Kravetz cond_resched(); 69370c3547eSMike Kravetz 69470c3547eSMike Kravetz /* 69570c3547eSMike Kravetz * fallocate(2) manpage permits EINTR; we may have been 69670c3547eSMike Kravetz * interrupted because we are using up too much memory. 69770c3547eSMike Kravetz */ 69870c3547eSMike Kravetz if (signal_pending(current)) { 69970c3547eSMike Kravetz error = -EINTR; 70070c3547eSMike Kravetz break; 70170c3547eSMike Kravetz } 70270c3547eSMike Kravetz 70370c3547eSMike Kravetz /* Set numa allocation policy based on index */ 70470c3547eSMike Kravetz hugetlb_set_vma_policy(&pseudo_vma, inode, index); 70570c3547eSMike Kravetz 70670c3547eSMike Kravetz /* addr is the offset within the file (zero based) */ 70770c3547eSMike Kravetz addr = index * hpage_size; 70870c3547eSMike Kravetz 70987bf91d3SMike Kravetz /* 71087bf91d3SMike Kravetz * fault mutex taken here, protects against fault path 71187bf91d3SMike Kravetz * and hole punch. inode_lock previously taken protects 71287bf91d3SMike Kravetz * against truncation. 71387bf91d3SMike Kravetz */ 714188b04a7SWei Yang hash = hugetlb_fault_mutex_hash(mapping, index); 71570c3547eSMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]); 71670c3547eSMike Kravetz 71770c3547eSMike Kravetz /* See if already present in mapping to avoid alloc/free */ 71870c3547eSMike Kravetz page = find_get_page(mapping, index); 71970c3547eSMike Kravetz if (page) { 72070c3547eSMike Kravetz put_page(page); 72170c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 72270c3547eSMike Kravetz hugetlb_drop_vma_policy(&pseudo_vma); 72370c3547eSMike Kravetz continue; 72470c3547eSMike Kravetz } 72570c3547eSMike Kravetz 72688ce3fefSMiaohe Lin /* 72788ce3fefSMiaohe Lin * Allocate page without setting the avoid_reserve argument. 72888ce3fefSMiaohe Lin * There certainly are no reserves associated with the 72988ce3fefSMiaohe Lin * pseudo_vma. However, there could be shared mappings with 73088ce3fefSMiaohe Lin * reserves for the file at the inode level. If we fallocate 73188ce3fefSMiaohe Lin * pages in these areas, we need to consume the reserves 73288ce3fefSMiaohe Lin * to keep reservation accounting consistent. 
73388ce3fefSMiaohe Lin */ 73488ce3fefSMiaohe Lin page = alloc_huge_page(&pseudo_vma, addr, 0); 73570c3547eSMike Kravetz hugetlb_drop_vma_policy(&pseudo_vma); 73670c3547eSMike Kravetz if (IS_ERR(page)) { 73770c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 73870c3547eSMike Kravetz error = PTR_ERR(page); 73970c3547eSMike Kravetz goto out; 74070c3547eSMike Kravetz } 74170c3547eSMike Kravetz clear_huge_page(page, addr, pages_per_huge_page(h)); 74270c3547eSMike Kravetz __SetPageUptodate(page); 74370c3547eSMike Kravetz error = huge_add_to_page_cache(page, mapping, index); 74470c3547eSMike Kravetz if (unlikely(error)) { 745846be085SMike Kravetz restore_reserve_on_error(h, &pseudo_vma, addr, page); 74670c3547eSMike Kravetz put_page(page); 74770c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 74870c3547eSMike Kravetz goto out; 74970c3547eSMike Kravetz } 75070c3547eSMike Kravetz 75170c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 75270c3547eSMike Kravetz 7538f251a3dSMike Kravetz SetHPageMigratable(page); 75470c3547eSMike Kravetz /* 755d9ef44deSMatthew Wilcox (Oracle) * unlock_page because locked by huge_add_to_page_cache() 756585fc0d2SMuchun Song * put_page() due to reference from alloc_huge_page() 75770c3547eSMike Kravetz */ 75870c3547eSMike Kravetz unlock_page(page); 75972639e6dSNadav Amit put_page(page); 76070c3547eSMike Kravetz } 76170c3547eSMike Kravetz 76270c3547eSMike Kravetz if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 76370c3547eSMike Kravetz i_size_write(inode, offset + len); 764078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 76570c3547eSMike Kravetz out: 7665955102cSAl Viro inode_unlock(inode); 76770c3547eSMike Kravetz return error; 76870c3547eSMike Kravetz } 76970c3547eSMike Kravetz 770549c7297SChristian Brauner static int hugetlbfs_setattr(struct user_namespace *mnt_userns, 771549c7297SChristian Brauner struct dentry *dentry, struct iattr *attr) 7721da177e4SLinus Torvalds { 7732b0143b5SDavid Howells struct inode *inode = d_inode(dentry); 774a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 7751da177e4SLinus Torvalds int error; 7761da177e4SLinus Torvalds unsigned int ia_valid = attr->ia_valid; 777ff62a342SMarc-André Lureau struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); 7781da177e4SLinus Torvalds 7792f221d6fSChristian Brauner error = setattr_prepare(&init_user_ns, dentry, attr); 7801da177e4SLinus Torvalds if (error) 7811025774cSChristoph Hellwig return error; 7821da177e4SLinus Torvalds 7831da177e4SLinus Torvalds if (ia_valid & ATTR_SIZE) { 784ff62a342SMarc-André Lureau loff_t oldsize = inode->i_size; 785ff62a342SMarc-André Lureau loff_t newsize = attr->ia_size; 786ff62a342SMarc-André Lureau 787ff62a342SMarc-André Lureau if (newsize & ~huge_page_mask(h)) 7881025774cSChristoph Hellwig return -EINVAL; 789398c0da7SMiaohe Lin /* protected by i_rwsem */ 790ff62a342SMarc-André Lureau if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 791ff62a342SMarc-André Lureau (newsize > oldsize && (info->seals & F_SEAL_GROW))) 792ff62a342SMarc-André Lureau return -EPERM; 793e5d319deSMiaohe Lin hugetlb_vmtruncate(inode, newsize); 7941da177e4SLinus Torvalds } 7951da177e4SLinus Torvalds 7962f221d6fSChristian Brauner setattr_copy(&init_user_ns, inode, attr); 7971025774cSChristoph Hellwig mark_inode_dirty(inode); 7981025774cSChristoph Hellwig return 0; 7991025774cSChristoph Hellwig } 8001025774cSChristoph Hellwig 8017d54fa64SAl Viro static struct inode *hugetlbfs_get_root(struct super_block 
*sb, 80232021982SDavid Howells struct hugetlbfs_fs_context *ctx) 8031da177e4SLinus Torvalds { 8041da177e4SLinus Torvalds struct inode *inode; 8051da177e4SLinus Torvalds 8061da177e4SLinus Torvalds inode = new_inode(sb); 8071da177e4SLinus Torvalds if (inode) { 80885fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 80932021982SDavid Howells inode->i_mode = S_IFDIR | ctx->mode; 81032021982SDavid Howells inode->i_uid = ctx->uid; 81132021982SDavid Howells inode->i_gid = ctx->gid; 812078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 8137d54fa64SAl Viro inode->i_op = &hugetlbfs_dir_inode_operations; 8147d54fa64SAl Viro inode->i_fop = &simple_dir_operations; 8157d54fa64SAl Viro /* directory inodes start off with i_nlink == 2 (for "." entry) */ 8167d54fa64SAl Viro inc_nlink(inode); 81765ed7601SAneesh Kumar K.V lockdep_annotate_inode_mutex_key(inode); 8187d54fa64SAl Viro } 8197d54fa64SAl Viro return inode; 8207d54fa64SAl Viro } 8217d54fa64SAl Viro 822b610ded7SMichal Hocko /* 823c8c06efaSDavidlohr Bueso * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never 824b610ded7SMichal Hocko * be taken from reclaim -- unlike regular filesystems. This needs an 82588f306b6SKirill A. Shutemov * annotation because huge_pmd_share() does an allocation under hugetlb's 826c8c06efaSDavidlohr Bueso * i_mmap_rwsem. 827b610ded7SMichal Hocko */ 828c8c06efaSDavidlohr Bueso static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; 829b610ded7SMichal Hocko 8307d54fa64SAl Viro static struct inode *hugetlbfs_get_inode(struct super_block *sb, 8317d54fa64SAl Viro struct inode *dir, 83218df2252SAl Viro umode_t mode, dev_t dev) 8337d54fa64SAl Viro { 8347d54fa64SAl Viro struct inode *inode; 83558b6e5e8SMike Kravetz struct resv_map *resv_map = NULL; 8369119a41eSJoonsoo Kim 83758b6e5e8SMike Kravetz /* 83858b6e5e8SMike Kravetz * Reserve maps are only needed for inodes that can have associated 83958b6e5e8SMike Kravetz * page allocations. 
84058b6e5e8SMike Kravetz */ 84158b6e5e8SMike Kravetz if (S_ISREG(mode) || S_ISLNK(mode)) { 8429119a41eSJoonsoo Kim resv_map = resv_map_alloc(); 8439119a41eSJoonsoo Kim if (!resv_map) 8449119a41eSJoonsoo Kim return NULL; 84558b6e5e8SMike Kravetz } 8467d54fa64SAl Viro 8477d54fa64SAl Viro inode = new_inode(sb); 8487d54fa64SAl Viro if (inode) { 849ff62a342SMarc-André Lureau struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); 850ff62a342SMarc-André Lureau 8517d54fa64SAl Viro inode->i_ino = get_next_ino(); 85221cb47beSChristian Brauner inode_init_owner(&init_user_ns, inode, dir, mode); 853c8c06efaSDavidlohr Bueso lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, 854c8c06efaSDavidlohr Bueso &hugetlbfs_i_mmap_rwsem_key); 8551da177e4SLinus Torvalds inode->i_mapping->a_ops = &hugetlbfs_aops; 856078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 8579119a41eSJoonsoo Kim inode->i_mapping->private_data = resv_map; 858ff62a342SMarc-André Lureau info->seals = F_SEAL_SEAL; 8591da177e4SLinus Torvalds switch (mode & S_IFMT) { 8601da177e4SLinus Torvalds default: 8611da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 8621da177e4SLinus Torvalds break; 8631da177e4SLinus Torvalds case S_IFREG: 8641da177e4SLinus Torvalds inode->i_op = &hugetlbfs_inode_operations; 8651da177e4SLinus Torvalds inode->i_fop = &hugetlbfs_file_operations; 8661da177e4SLinus Torvalds break; 8671da177e4SLinus Torvalds case S_IFDIR: 8681da177e4SLinus Torvalds inode->i_op = &hugetlbfs_dir_inode_operations; 8691da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 8701da177e4SLinus Torvalds 8711da177e4SLinus Torvalds /* directory inodes start off with i_nlink == 2 (for "." entry) */ 872d8c76e6fSDave Hansen inc_nlink(inode); 8731da177e4SLinus Torvalds break; 8741da177e4SLinus Torvalds case S_IFLNK: 8751da177e4SLinus Torvalds inode->i_op = &page_symlink_inode_operations; 87621fc61c7SAl Viro inode_nohighmem(inode); 8771da177e4SLinus Torvalds break; 8781da177e4SLinus Torvalds } 879e096d0c7SJosh Boyer lockdep_annotate_inode_mutex_key(inode); 88058b6e5e8SMike Kravetz } else { 88158b6e5e8SMike Kravetz if (resv_map) 8829119a41eSJoonsoo Kim kref_put(&resv_map->refs, resv_map_release); 88358b6e5e8SMike Kravetz } 8849119a41eSJoonsoo Kim 8851da177e4SLinus Torvalds return inode; 8861da177e4SLinus Torvalds } 8871da177e4SLinus Torvalds 8881da177e4SLinus Torvalds /* 8891da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 
8901da177e4SLinus Torvalds */ 8911ab5b82fSPiotr Sarna static int do_hugetlbfs_mknod(struct inode *dir, 8921ab5b82fSPiotr Sarna struct dentry *dentry, 8931ab5b82fSPiotr Sarna umode_t mode, 8941ab5b82fSPiotr Sarna dev_t dev, 8951ab5b82fSPiotr Sarna bool tmpfile) 8961da177e4SLinus Torvalds { 8971da177e4SLinus Torvalds struct inode *inode; 8981da177e4SLinus Torvalds int error = -ENOSPC; 8991da177e4SLinus Torvalds 9007d54fa64SAl Viro inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev); 9011da177e4SLinus Torvalds if (inode) { 902078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 9031ab5b82fSPiotr Sarna if (tmpfile) { 9041ab5b82fSPiotr Sarna d_tmpfile(dentry, inode); 9051ab5b82fSPiotr Sarna } else { 9061da177e4SLinus Torvalds d_instantiate(dentry, inode); 9071da177e4SLinus Torvalds dget(dentry);/* Extra count - pin the dentry in core */ 9081ab5b82fSPiotr Sarna } 9091da177e4SLinus Torvalds error = 0; 9101da177e4SLinus Torvalds } 9111da177e4SLinus Torvalds return error; 9121da177e4SLinus Torvalds } 9131da177e4SLinus Torvalds 914549c7297SChristian Brauner static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir, 9151ab5b82fSPiotr Sarna struct dentry *dentry, umode_t mode, dev_t dev) 9161ab5b82fSPiotr Sarna { 9171ab5b82fSPiotr Sarna return do_hugetlbfs_mknod(dir, dentry, mode, dev, false); 9181ab5b82fSPiotr Sarna } 9191ab5b82fSPiotr Sarna 920549c7297SChristian Brauner static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir, 921549c7297SChristian Brauner struct dentry *dentry, umode_t mode) 9221da177e4SLinus Torvalds { 923549c7297SChristian Brauner int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry, 924549c7297SChristian Brauner mode | S_IFDIR, 0); 9251da177e4SLinus Torvalds if (!retval) 926d8c76e6fSDave Hansen inc_nlink(dir); 9271da177e4SLinus Torvalds return retval; 9281da177e4SLinus Torvalds } 9291da177e4SLinus Torvalds 930549c7297SChristian Brauner static int hugetlbfs_create(struct user_namespace *mnt_userns, 931549c7297SChristian Brauner struct inode *dir, struct dentry *dentry, 932549c7297SChristian Brauner umode_t mode, bool excl) 9331da177e4SLinus Torvalds { 934549c7297SChristian Brauner return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0); 9351da177e4SLinus Torvalds } 9361da177e4SLinus Torvalds 937549c7297SChristian Brauner static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns, 938549c7297SChristian Brauner struct inode *dir, struct dentry *dentry, 939549c7297SChristian Brauner umode_t mode) 9401ab5b82fSPiotr Sarna { 9411ab5b82fSPiotr Sarna return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true); 9421ab5b82fSPiotr Sarna } 9431ab5b82fSPiotr Sarna 944549c7297SChristian Brauner static int hugetlbfs_symlink(struct user_namespace *mnt_userns, 945549c7297SChristian Brauner struct inode *dir, struct dentry *dentry, 946549c7297SChristian Brauner const char *symname) 9471da177e4SLinus Torvalds { 9481da177e4SLinus Torvalds struct inode *inode; 9491da177e4SLinus Torvalds int error = -ENOSPC; 9501da177e4SLinus Torvalds 9517d54fa64SAl Viro inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); 9521da177e4SLinus Torvalds if (inode) { 9531da177e4SLinus Torvalds int l = strlen(symname)+1; 9541da177e4SLinus Torvalds error = page_symlink(inode, symname, l); 9551da177e4SLinus Torvalds if (!error) { 9561da177e4SLinus Torvalds d_instantiate(dentry, inode); 9571da177e4SLinus Torvalds dget(dentry); 9581da177e4SLinus Torvalds } else 9591da177e4SLinus Torvalds iput(inode); 9601da177e4SLinus 
Torvalds } 961078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 9621da177e4SLinus Torvalds 9631da177e4SLinus Torvalds return error; 9641da177e4SLinus Torvalds } 9651da177e4SLinus Torvalds 966b890ec2aSMatthew Wilcox (Oracle) #ifdef CONFIG_MIGRATION 967b890ec2aSMatthew Wilcox (Oracle) static int hugetlbfs_migrate_folio(struct address_space *mapping, 968b890ec2aSMatthew Wilcox (Oracle) struct folio *dst, struct folio *src, 969a6bc32b8SMel Gorman enum migrate_mode mode) 970290408d4SNaoya Horiguchi { 971290408d4SNaoya Horiguchi int rc; 972290408d4SNaoya Horiguchi 973b890ec2aSMatthew Wilcox (Oracle) rc = migrate_huge_page_move_mapping(mapping, dst, src); 97478bd5209SRafael Aquini if (rc != MIGRATEPAGE_SUCCESS) 975290408d4SNaoya Horiguchi return rc; 976cb6acd01SMike Kravetz 977b890ec2aSMatthew Wilcox (Oracle) if (hugetlb_page_subpool(&src->page)) { 978b890ec2aSMatthew Wilcox (Oracle) hugetlb_set_page_subpool(&dst->page, 979b890ec2aSMatthew Wilcox (Oracle) hugetlb_page_subpool(&src->page)); 980b890ec2aSMatthew Wilcox (Oracle) hugetlb_set_page_subpool(&src->page, NULL); 981cb6acd01SMike Kravetz } 982cb6acd01SMike Kravetz 9832916ecc0SJérôme Glisse if (mode != MIGRATE_SYNC_NO_COPY) 984b890ec2aSMatthew Wilcox (Oracle) folio_migrate_copy(dst, src); 9852916ecc0SJérôme Glisse else 986b890ec2aSMatthew Wilcox (Oracle) folio_migrate_flags(dst, src); 987290408d4SNaoya Horiguchi 98878bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 989290408d4SNaoya Horiguchi } 990b890ec2aSMatthew Wilcox (Oracle) #else 991b890ec2aSMatthew Wilcox (Oracle) #define hugetlbfs_migrate_folio NULL 992b890ec2aSMatthew Wilcox (Oracle) #endif 993290408d4SNaoya Horiguchi 99478bb9203SNaoya Horiguchi static int hugetlbfs_error_remove_page(struct address_space *mapping, 99578bb9203SNaoya Horiguchi struct page *page) 99678bb9203SNaoya Horiguchi { 99778bb9203SNaoya Horiguchi struct inode *inode = mapping->host; 998ab615a5bSMike Kravetz pgoff_t index = page->index; 99978bb9203SNaoya Horiguchi 100078bb9203SNaoya Horiguchi remove_huge_page(page); 1001ab615a5bSMike Kravetz if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1))) 100278bb9203SNaoya Horiguchi hugetlb_fix_reserve_counts(inode); 1003ab615a5bSMike Kravetz 100478bb9203SNaoya Horiguchi return 0; 100578bb9203SNaoya Horiguchi } 100678bb9203SNaoya Horiguchi 10074a25220dSDavid Howells /* 10084a25220dSDavid Howells * Display the mount options in /proc/mounts. 
10094a25220dSDavid Howells */ 10104a25220dSDavid Howells static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root) 10114a25220dSDavid Howells { 10124a25220dSDavid Howells struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb); 10134a25220dSDavid Howells struct hugepage_subpool *spool = sbinfo->spool; 10144a25220dSDavid Howells unsigned long hpage_size = huge_page_size(sbinfo->hstate); 10154a25220dSDavid Howells unsigned hpage_shift = huge_page_shift(sbinfo->hstate); 10164a25220dSDavid Howells char mod; 10174a25220dSDavid Howells 10184a25220dSDavid Howells if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 10194a25220dSDavid Howells seq_printf(m, ",uid=%u", 10204a25220dSDavid Howells from_kuid_munged(&init_user_ns, sbinfo->uid)); 10214a25220dSDavid Howells if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 10224a25220dSDavid Howells seq_printf(m, ",gid=%u", 10234a25220dSDavid Howells from_kgid_munged(&init_user_ns, sbinfo->gid)); 10244a25220dSDavid Howells if (sbinfo->mode != 0755) 10254a25220dSDavid Howells seq_printf(m, ",mode=%o", sbinfo->mode); 10264a25220dSDavid Howells if (sbinfo->max_inodes != -1) 10274a25220dSDavid Howells seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes); 10284a25220dSDavid Howells 10294a25220dSDavid Howells hpage_size /= 1024; 10304a25220dSDavid Howells mod = 'K'; 10314a25220dSDavid Howells if (hpage_size >= 1024) { 10324a25220dSDavid Howells hpage_size /= 1024; 10334a25220dSDavid Howells mod = 'M'; 10344a25220dSDavid Howells } 10354a25220dSDavid Howells seq_printf(m, ",pagesize=%lu%c", hpage_size, mod); 10364a25220dSDavid Howells if (spool) { 10374a25220dSDavid Howells if (spool->max_hpages != -1) 10384a25220dSDavid Howells seq_printf(m, ",size=%llu", 10394a25220dSDavid Howells (unsigned long long)spool->max_hpages << hpage_shift); 10404a25220dSDavid Howells if (spool->min_hpages != -1) 10414a25220dSDavid Howells seq_printf(m, ",min_size=%llu", 10424a25220dSDavid Howells (unsigned long long)spool->min_hpages << hpage_shift); 10434a25220dSDavid Howells } 10444a25220dSDavid Howells return 0; 10454a25220dSDavid Howells } 10464a25220dSDavid Howells 1047726c3342SDavid Howells static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) 10481da177e4SLinus Torvalds { 1049726c3342SDavid Howells struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); 10502b0143b5SDavid Howells struct hstate *h = hstate_inode(d_inode(dentry)); 10511da177e4SLinus Torvalds 10521da177e4SLinus Torvalds buf->f_type = HUGETLBFS_MAGIC; 1053a5516438SAndi Kleen buf->f_bsize = huge_page_size(h); 10541da177e4SLinus Torvalds if (sbinfo) { 10551da177e4SLinus Torvalds spin_lock(&sbinfo->stat_lock); 105674a8a65cSDavid Gibson /* If no limits set, just report 0 for max/free/used 105774a8a65cSDavid Gibson * blocks, like simple_statfs() */ 105890481622SDavid Gibson if (sbinfo->spool) { 105990481622SDavid Gibson long free_pages; 106090481622SDavid Gibson 10614b25f030SMina Almasry spin_lock_irq(&sbinfo->spool->lock); 106290481622SDavid Gibson buf->f_blocks = sbinfo->spool->max_hpages; 106390481622SDavid Gibson free_pages = sbinfo->spool->max_hpages 106490481622SDavid Gibson - sbinfo->spool->used_hpages; 106590481622SDavid Gibson buf->f_bavail = buf->f_bfree = free_pages; 10664b25f030SMina Almasry spin_unlock_irq(&sbinfo->spool->lock); 10671da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 10681da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 106974a8a65cSDavid Gibson } 10701da177e4SLinus Torvalds spin_unlock(&sbinfo->stat_lock); 10711da177e4SLinus Torvalds } 
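	/*
	 * Note: f_bsize was set to huge_page_size(h) above, so the
	 * f_blocks/f_bfree/f_bavail values filled in from the subpool
	 * are in units of huge pages, not base pages.
	 */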
10721da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 10731da177e4SLinus Torvalds return 0; 10741da177e4SLinus Torvalds } 10751da177e4SLinus Torvalds 10761da177e4SLinus Torvalds static void hugetlbfs_put_super(struct super_block *sb) 10771da177e4SLinus Torvalds { 10781da177e4SLinus Torvalds struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); 10791da177e4SLinus Torvalds 10801da177e4SLinus Torvalds if (sbi) { 10811da177e4SLinus Torvalds sb->s_fs_info = NULL; 108290481622SDavid Gibson 108390481622SDavid Gibson if (sbi->spool) 108490481622SDavid Gibson hugepage_put_subpool(sbi->spool); 108590481622SDavid Gibson 10861da177e4SLinus Torvalds kfree(sbi); 10871da177e4SLinus Torvalds } 10881da177e4SLinus Torvalds } 10891da177e4SLinus Torvalds 109096527980SChristoph Hellwig static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) 109196527980SChristoph Hellwig { 109296527980SChristoph Hellwig if (sbinfo->free_inodes >= 0) { 109396527980SChristoph Hellwig spin_lock(&sbinfo->stat_lock); 109496527980SChristoph Hellwig if (unlikely(!sbinfo->free_inodes)) { 109596527980SChristoph Hellwig spin_unlock(&sbinfo->stat_lock); 109696527980SChristoph Hellwig return 0; 109796527980SChristoph Hellwig } 109896527980SChristoph Hellwig sbinfo->free_inodes--; 109996527980SChristoph Hellwig spin_unlock(&sbinfo->stat_lock); 110096527980SChristoph Hellwig } 110196527980SChristoph Hellwig 110296527980SChristoph Hellwig return 1; 110396527980SChristoph Hellwig } 110496527980SChristoph Hellwig 110596527980SChristoph Hellwig static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) 110696527980SChristoph Hellwig { 110796527980SChristoph Hellwig if (sbinfo->free_inodes >= 0) { 110896527980SChristoph Hellwig spin_lock(&sbinfo->stat_lock); 110996527980SChristoph Hellwig sbinfo->free_inodes++; 111096527980SChristoph Hellwig spin_unlock(&sbinfo->stat_lock); 111196527980SChristoph Hellwig } 111296527980SChristoph Hellwig } 111396527980SChristoph Hellwig 111496527980SChristoph Hellwig 1115e18b890bSChristoph Lameter static struct kmem_cache *hugetlbfs_inode_cachep; 11161da177e4SLinus Torvalds 11171da177e4SLinus Torvalds static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) 11181da177e4SLinus Torvalds { 111996527980SChristoph Hellwig struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); 11201da177e4SLinus Torvalds struct hugetlbfs_inode_info *p; 11211da177e4SLinus Torvalds 112296527980SChristoph Hellwig if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) 11231da177e4SLinus Torvalds return NULL; 1124fd60b288SMuchun Song p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL); 112596527980SChristoph Hellwig if (unlikely(!p)) { 112696527980SChristoph Hellwig hugetlbfs_inc_free_inodes(sbinfo); 112796527980SChristoph Hellwig return NULL; 11281da177e4SLinus Torvalds } 11294742a35dSMike Kravetz 11304742a35dSMike Kravetz /* 11314742a35dSMike Kravetz * Any time after allocation, hugetlbfs_destroy_inode can be called 11324742a35dSMike Kravetz * for the inode. mpol_free_shared_policy is unconditionally called 11334742a35dSMike Kravetz * as part of hugetlbfs_destroy_inode. So, initialize policy here 11344742a35dSMike Kravetz * in case of a quick call to destroy. 11354742a35dSMike Kravetz * 11364742a35dSMike Kravetz * Note that the policy is initialized even if we are creating a 11374742a35dSMike Kravetz * private inode. This simplifies hugetlbfs_destroy_inode. 
11384742a35dSMike Kravetz */ 11394742a35dSMike Kravetz mpol_shared_policy_init(&p->policy, NULL); 11404742a35dSMike Kravetz 114196527980SChristoph Hellwig return &p->vfs_inode; 11421da177e4SLinus Torvalds } 11431da177e4SLinus Torvalds 1144b62de322SAl Viro static void hugetlbfs_free_inode(struct inode *inode) 1145fa0d7e3dSNick Piggin { 1146fa0d7e3dSNick Piggin kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); 1147fa0d7e3dSNick Piggin } 1148fa0d7e3dSNick Piggin 11491da177e4SLinus Torvalds static void hugetlbfs_destroy_inode(struct inode *inode) 11501da177e4SLinus Torvalds { 115196527980SChristoph Hellwig hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); 11521da177e4SLinus Torvalds mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); 11531da177e4SLinus Torvalds } 11541da177e4SLinus Torvalds 1155f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops = { 1156800d15a5SNick Piggin .write_begin = hugetlbfs_write_begin, 1157800d15a5SNick Piggin .write_end = hugetlbfs_write_end, 115846de8b97SMatthew Wilcox (Oracle) .dirty_folio = noop_dirty_folio, 1159b890ec2aSMatthew Wilcox (Oracle) .migrate_folio = hugetlbfs_migrate_folio, 116078bb9203SNaoya Horiguchi .error_remove_page = hugetlbfs_error_remove_page, 11611da177e4SLinus Torvalds }; 11621da177e4SLinus Torvalds 116396527980SChristoph Hellwig 116451cc5068SAlexey Dobriyan static void init_once(void *foo) 116596527980SChristoph Hellwig { 116696527980SChristoph Hellwig struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 116796527980SChristoph Hellwig 116896527980SChristoph Hellwig inode_init_once(&ei->vfs_inode); 116996527980SChristoph Hellwig } 117096527980SChristoph Hellwig 11714b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations = { 117234d0640eSAl Viro .read_iter = hugetlbfs_read_iter, 11731da177e4SLinus Torvalds .mmap = hugetlbfs_file_mmap, 11741b061d92SChristoph Hellwig .fsync = noop_fsync, 11751da177e4SLinus Torvalds .get_unmapped_area = hugetlb_get_unmapped_area, 11766038f373SArnd Bergmann .llseek = default_llseek, 117770c3547eSMike Kravetz .fallocate = hugetlbfs_fallocate, 11781da177e4SLinus Torvalds }; 11791da177e4SLinus Torvalds 118092e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations = { 11811da177e4SLinus Torvalds .create = hugetlbfs_create, 11821da177e4SLinus Torvalds .lookup = simple_lookup, 11831da177e4SLinus Torvalds .link = simple_link, 11841da177e4SLinus Torvalds .unlink = simple_unlink, 11851da177e4SLinus Torvalds .symlink = hugetlbfs_symlink, 11861da177e4SLinus Torvalds .mkdir = hugetlbfs_mkdir, 11871da177e4SLinus Torvalds .rmdir = simple_rmdir, 11881da177e4SLinus Torvalds .mknod = hugetlbfs_mknod, 11891da177e4SLinus Torvalds .rename = simple_rename, 11901da177e4SLinus Torvalds .setattr = hugetlbfs_setattr, 11911ab5b82fSPiotr Sarna .tmpfile = hugetlbfs_tmpfile, 11921da177e4SLinus Torvalds }; 11931da177e4SLinus Torvalds 119492e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations = { 11951da177e4SLinus Torvalds .setattr = hugetlbfs_setattr, 11961da177e4SLinus Torvalds }; 11971da177e4SLinus Torvalds 1198ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops = { 11991da177e4SLinus Torvalds .alloc_inode = hugetlbfs_alloc_inode, 1200b62de322SAl Viro .free_inode = hugetlbfs_free_inode, 12011da177e4SLinus Torvalds .destroy_inode = hugetlbfs_destroy_inode, 12022bbbda30SAl Viro .evict_inode = hugetlbfs_evict_inode, 12031da177e4SLinus Torvalds .statfs = 
hugetlbfs_statfs, 12041da177e4SLinus Torvalds .put_super = hugetlbfs_put_super, 12054a25220dSDavid Howells .show_options = hugetlbfs_show_options, 12061da177e4SLinus Torvalds }; 12071da177e4SLinus Torvalds 12087ca02d0aSMike Kravetz /* 12097ca02d0aSMike Kravetz * Convert size option passed from command line to number of huge pages 12107ca02d0aSMike Kravetz * in the pool specified by hstate. Size option could be in bytes 12117ca02d0aSMike Kravetz * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). 12127ca02d0aSMike Kravetz */ 12134a25220dSDavid Howells static long 12147ca02d0aSMike Kravetz hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, 12154a25220dSDavid Howells enum hugetlbfs_size_type val_type) 12167ca02d0aSMike Kravetz { 12177ca02d0aSMike Kravetz if (val_type == NO_SIZE) 12187ca02d0aSMike Kravetz return -1; 12197ca02d0aSMike Kravetz 12207ca02d0aSMike Kravetz if (val_type == SIZE_PERCENT) { 12217ca02d0aSMike Kravetz size_opt <<= huge_page_shift(h); 12227ca02d0aSMike Kravetz size_opt *= h->max_huge_pages; 12237ca02d0aSMike Kravetz do_div(size_opt, 100); 12247ca02d0aSMike Kravetz } 12257ca02d0aSMike Kravetz 12267ca02d0aSMike Kravetz size_opt >>= huge_page_shift(h); 12277ca02d0aSMike Kravetz return size_opt; 12287ca02d0aSMike Kravetz } 12297ca02d0aSMike Kravetz 123032021982SDavid Howells /* 123132021982SDavid Howells * Parse one mount parameter. 123232021982SDavid Howells */ 123332021982SDavid Howells static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) 12341da177e4SLinus Torvalds { 123532021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 123632021982SDavid Howells struct fs_parse_result result; 123732021982SDavid Howells char *rest; 123832021982SDavid Howells unsigned long ps; 123932021982SDavid Howells int opt; 12401da177e4SLinus Torvalds 1241d7167b14SAl Viro opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); 124232021982SDavid Howells if (opt < 0) 124332021982SDavid Howells return opt; 124432021982SDavid Howells 124532021982SDavid Howells switch (opt) { 124632021982SDavid Howells case Opt_uid: 124732021982SDavid Howells ctx->uid = make_kuid(current_user_ns(), result.uint_32); 124832021982SDavid Howells if (!uid_valid(ctx->uid)) 124932021982SDavid Howells goto bad_val; 12501da177e4SLinus Torvalds return 0; 12511da177e4SLinus Torvalds 1252e73a75faSRandy Dunlap case Opt_gid: 125332021982SDavid Howells ctx->gid = make_kgid(current_user_ns(), result.uint_32); 125432021982SDavid Howells if (!gid_valid(ctx->gid)) 1255e73a75faSRandy Dunlap goto bad_val; 125632021982SDavid Howells return 0; 1257e73a75faSRandy Dunlap 1258e73a75faSRandy Dunlap case Opt_mode: 125932021982SDavid Howells ctx->mode = result.uint_32 & 01777U; 126032021982SDavid Howells return 0; 1261e73a75faSRandy Dunlap 126232021982SDavid Howells case Opt_size: 1263e73a75faSRandy Dunlap /* memparse() will accept a K/M/G without a digit */ 126432021982SDavid Howells if (!isdigit(param->string[0])) 1265e73a75faSRandy Dunlap goto bad_val; 126632021982SDavid Howells ctx->max_size_opt = memparse(param->string, &rest); 126732021982SDavid Howells ctx->max_val_type = SIZE_STD; 1268a137e1ccSAndi Kleen if (*rest == '%') 126932021982SDavid Howells ctx->max_val_type = SIZE_PERCENT; 127032021982SDavid Howells return 0; 12711da177e4SLinus Torvalds 1272e73a75faSRandy Dunlap case Opt_nr_inodes: 1273e73a75faSRandy Dunlap /* memparse() will accept a K/M/G without a digit */ 127432021982SDavid Howells if (!isdigit(param->string[0])) 
1275e73a75faSRandy Dunlap goto bad_val; 127632021982SDavid Howells ctx->nr_inodes = memparse(param->string, &rest); 127732021982SDavid Howells return 0; 1278e73a75faSRandy Dunlap 127932021982SDavid Howells case Opt_pagesize: 128032021982SDavid Howells ps = memparse(param->string, &rest); 128132021982SDavid Howells ctx->hstate = size_to_hstate(ps); 128232021982SDavid Howells if (!ctx->hstate) { 128332021982SDavid Howells pr_err("Unsupported page size %lu MB\n", ps >> 20); 1284a137e1ccSAndi Kleen return -EINVAL; 1285a137e1ccSAndi Kleen } 128632021982SDavid Howells return 0; 1287a137e1ccSAndi Kleen 128832021982SDavid Howells case Opt_min_size: 12897ca02d0aSMike Kravetz /* memparse() will accept a K/M/G without a digit */ 129032021982SDavid Howells if (!isdigit(param->string[0])) 12917ca02d0aSMike Kravetz goto bad_val; 129232021982SDavid Howells ctx->min_size_opt = memparse(param->string, &rest); 129332021982SDavid Howells ctx->min_val_type = SIZE_STD; 12947ca02d0aSMike Kravetz if (*rest == '%') 129532021982SDavid Howells ctx->min_val_type = SIZE_PERCENT; 129632021982SDavid Howells return 0; 12977ca02d0aSMike Kravetz 1298e73a75faSRandy Dunlap default: 1299b4c07bceSLee Schermerhorn return -EINVAL; 1300e73a75faSRandy Dunlap } 130132021982SDavid Howells 130232021982SDavid Howells bad_val: 1303b5db30cfSAl Viro return invalfc(fc, "Bad value '%s' for mount option '%s'\n", 130432021982SDavid Howells param->string, param->key); 13051da177e4SLinus Torvalds } 1306a137e1ccSAndi Kleen 13077ca02d0aSMike Kravetz /* 130832021982SDavid Howells * Validate the parsed options. 130932021982SDavid Howells */ 131032021982SDavid Howells static int hugetlbfs_validate(struct fs_context *fc) 131132021982SDavid Howells { 131232021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 131332021982SDavid Howells 131432021982SDavid Howells /* 13157ca02d0aSMike Kravetz * Use huge page pool size (in hstate) to convert the size 13167ca02d0aSMike Kravetz * options to number of huge pages. If NO_SIZE, -1 is returned. 
13177ca02d0aSMike Kravetz */ 131832021982SDavid Howells ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate, 131932021982SDavid Howells ctx->max_size_opt, 132032021982SDavid Howells ctx->max_val_type); 132132021982SDavid Howells ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate, 132232021982SDavid Howells ctx->min_size_opt, 132332021982SDavid Howells ctx->min_val_type); 13247ca02d0aSMike Kravetz 13257ca02d0aSMike Kravetz /* 13267ca02d0aSMike Kravetz * If max_size was specified, then min_size must be smaller 13277ca02d0aSMike Kravetz */ 132832021982SDavid Howells if (ctx->max_val_type > NO_SIZE && 132932021982SDavid Howells ctx->min_hpages > ctx->max_hpages) { 133032021982SDavid Howells pr_err("Minimum size can not be greater than maximum size\n"); 13317ca02d0aSMike Kravetz return -EINVAL; 1332a137e1ccSAndi Kleen } 1333a137e1ccSAndi Kleen 13341da177e4SLinus Torvalds return 0; 13351da177e4SLinus Torvalds } 13361da177e4SLinus Torvalds 13371da177e4SLinus Torvalds static int 133832021982SDavid Howells hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) 13391da177e4SLinus Torvalds { 134032021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 13411da177e4SLinus Torvalds struct hugetlbfs_sb_info *sbinfo; 13421da177e4SLinus Torvalds 13431da177e4SLinus Torvalds sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); 13441da177e4SLinus Torvalds if (!sbinfo) 13451da177e4SLinus Torvalds return -ENOMEM; 13461da177e4SLinus Torvalds sb->s_fs_info = sbinfo; 13471da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 134832021982SDavid Howells sbinfo->hstate = ctx->hstate; 134932021982SDavid Howells sbinfo->max_inodes = ctx->nr_inodes; 135032021982SDavid Howells sbinfo->free_inodes = ctx->nr_inodes; 135190481622SDavid Gibson sbinfo->spool = NULL; 135232021982SDavid Howells sbinfo->uid = ctx->uid; 135332021982SDavid Howells sbinfo->gid = ctx->gid; 135432021982SDavid Howells sbinfo->mode = ctx->mode; 13554a25220dSDavid Howells 13567ca02d0aSMike Kravetz /* 13577ca02d0aSMike Kravetz * Allocate and initialize subpool if maximum or minimum size is 13581935ebd3SMiaohe Lin * specified. Any needed reservations (for minimum size) are taken 13597ca02d0aSMike Kravetz * when the subpool is created. 13607ca02d0aSMike Kravetz */ 136132021982SDavid Howells if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { 136232021982SDavid Howells sbinfo->spool = hugepage_new_subpool(ctx->hstate, 136332021982SDavid Howells ctx->max_hpages, 136432021982SDavid Howells ctx->min_hpages); 136590481622SDavid Gibson if (!sbinfo->spool) 136690481622SDavid Gibson goto out_free; 136790481622SDavid Gibson } 13681da177e4SLinus Torvalds sb->s_maxbytes = MAX_LFS_FILESIZE; 136932021982SDavid Howells sb->s_blocksize = huge_page_size(ctx->hstate); 137032021982SDavid Howells sb->s_blocksize_bits = huge_page_shift(ctx->hstate); 13711da177e4SLinus Torvalds sb->s_magic = HUGETLBFS_MAGIC; 13721da177e4SLinus Torvalds sb->s_op = &hugetlbfs_ops; 13731da177e4SLinus Torvalds sb->s_time_gran = 1; 137415568299SMike Kravetz 137515568299SMike Kravetz /* 137615568299SMike Kravetz * Due to the special and limited functionality of hugetlbfs, it does 137715568299SMike Kravetz * not work well as a stacking filesystem.
137815568299SMike Kravetz */ 137915568299SMike Kravetz sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; 138032021982SDavid Howells sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); 138148fde701SAl Viro if (!sb->s_root) 13821da177e4SLinus Torvalds goto out_free; 13831da177e4SLinus Torvalds return 0; 13841da177e4SLinus Torvalds out_free: 138590481622SDavid Gibson kfree(sbinfo->spool); 13861da177e4SLinus Torvalds kfree(sbinfo); 13871da177e4SLinus Torvalds return -ENOMEM; 13881da177e4SLinus Torvalds } 13891da177e4SLinus Torvalds 139032021982SDavid Howells static int hugetlbfs_get_tree(struct fs_context *fc) 13911da177e4SLinus Torvalds { 139232021982SDavid Howells int err = hugetlbfs_validate(fc); 139332021982SDavid Howells if (err) 139432021982SDavid Howells return err; 13952ac295d4SAl Viro return get_tree_nodev(fc, hugetlbfs_fill_super); 139632021982SDavid Howells } 139732021982SDavid Howells 139832021982SDavid Howells static void hugetlbfs_fs_context_free(struct fs_context *fc) 139932021982SDavid Howells { 140032021982SDavid Howells kfree(fc->fs_private); 140132021982SDavid Howells } 140232021982SDavid Howells 140332021982SDavid Howells static const struct fs_context_operations hugetlbfs_fs_context_ops = { 140432021982SDavid Howells .free = hugetlbfs_fs_context_free, 140532021982SDavid Howells .parse_param = hugetlbfs_parse_param, 140632021982SDavid Howells .get_tree = hugetlbfs_get_tree, 140732021982SDavid Howells }; 140832021982SDavid Howells 140932021982SDavid Howells static int hugetlbfs_init_fs_context(struct fs_context *fc) 141032021982SDavid Howells { 141132021982SDavid Howells struct hugetlbfs_fs_context *ctx; 141232021982SDavid Howells 141332021982SDavid Howells ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL); 141432021982SDavid Howells if (!ctx) 141532021982SDavid Howells return -ENOMEM; 141632021982SDavid Howells 141732021982SDavid Howells ctx->max_hpages = -1; /* No limit on size by default */ 141832021982SDavid Howells ctx->nr_inodes = -1; /* No limit on number of inodes by default */ 141932021982SDavid Howells ctx->uid = current_fsuid(); 142032021982SDavid Howells ctx->gid = current_fsgid(); 142132021982SDavid Howells ctx->mode = 0755; 142232021982SDavid Howells ctx->hstate = &default_hstate; 142332021982SDavid Howells ctx->min_hpages = -1; /* No default minimum size */ 142432021982SDavid Howells ctx->max_val_type = NO_SIZE; 142532021982SDavid Howells ctx->min_val_type = NO_SIZE; 142632021982SDavid Howells fc->fs_private = ctx; 142732021982SDavid Howells fc->ops = &hugetlbfs_fs_context_ops; 142832021982SDavid Howells return 0; 14291da177e4SLinus Torvalds } 14301da177e4SLinus Torvalds 14311da177e4SLinus Torvalds static struct file_system_type hugetlbfs_fs_type = { 14321da177e4SLinus Torvalds .name = "hugetlbfs", 143332021982SDavid Howells .init_fs_context = hugetlbfs_init_fs_context, 1434d7167b14SAl Viro .parameters = hugetlb_fs_parameters, 14351da177e4SLinus Torvalds .kill_sb = kill_litter_super, 14361da177e4SLinus Torvalds }; 14371da177e4SLinus Torvalds 143842d7395fSAndi Kleen static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; 14391da177e4SLinus Torvalds 1440ef1ff6b8SFrom: Mel Gorman static int can_do_hugetlb_shm(void) 14411da177e4SLinus Torvalds { 1442a0eb3a05SEric W. Biederman kgid_t shm_group; 1443a0eb3a05SEric W. Biederman shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); 1444a0eb3a05SEric W. 
Biederman return capable(CAP_IPC_LOCK) || in_group_p(shm_group); 14451da177e4SLinus Torvalds } 14461da177e4SLinus Torvalds 144742d7395fSAndi Kleen static int get_hstate_idx(int page_size_log) 144842d7395fSAndi Kleen { 1449af73e4d9SNaoya Horiguchi struct hstate *h = hstate_sizelog(page_size_log); 145042d7395fSAndi Kleen 145142d7395fSAndi Kleen if (!h) 145242d7395fSAndi Kleen return -1; 145304adbc3fSMiaohe Lin return hstate_index(h); 145442d7395fSAndi Kleen } 145542d7395fSAndi Kleen 1456af73e4d9SNaoya Horiguchi /* 1457af73e4d9SNaoya Horiguchi * Note that size should be aligned to proper hugepage size in caller side, 1458af73e4d9SNaoya Horiguchi * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. 1459af73e4d9SNaoya Horiguchi */ 1460af73e4d9SNaoya Horiguchi struct file *hugetlb_file_setup(const char *name, size_t size, 146183c1fd76Szhangyiru vm_flags_t acctflag, int creat_flags, 146283c1fd76Szhangyiru int page_size_log) 14631da177e4SLinus Torvalds { 14641da177e4SLinus Torvalds struct inode *inode; 1465e68375c8SAl Viro struct vfsmount *mnt; 146642d7395fSAndi Kleen int hstate_idx; 1467e68375c8SAl Viro struct file *file; 146842d7395fSAndi Kleen 146942d7395fSAndi Kleen hstate_idx = get_hstate_idx(page_size_log); 147042d7395fSAndi Kleen if (hstate_idx < 0) 147142d7395fSAndi Kleen return ERR_PTR(-ENODEV); 14721da177e4SLinus Torvalds 1473e68375c8SAl Viro mnt = hugetlbfs_vfsmount[hstate_idx]; 1474e68375c8SAl Viro if (!mnt) 14755bc98594SAkinobu Mita return ERR_PTR(-ENOENT); 14765bc98594SAkinobu Mita 1477ef1ff6b8SFrom: Mel Gorman if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { 147883c1fd76Szhangyiru struct ucounts *ucounts = current_ucounts(); 147983c1fd76Szhangyiru 148083c1fd76Szhangyiru if (user_shm_lock(size, ucounts)) { 148183c1fd76Szhangyiru pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n", 148221a3c273SDavid Rientjes current->comm, current->pid); 148383c1fd76Szhangyiru user_shm_unlock(size, ucounts); 14842584e517SRavikiran G Thirumalai } 148583c1fd76Szhangyiru return ERR_PTR(-EPERM); 1486353d5c30SHugh Dickins } 14871da177e4SLinus Torvalds 148839b65252SAnatol Pomozov file = ERR_PTR(-ENOSPC); 1489e68375c8SAl Viro inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0); 14901da177e4SLinus Torvalds if (!inode) 1491e68375c8SAl Viro goto out; 1492e1832f29SStephen Smalley if (creat_flags == HUGETLB_SHMFS_INODE) 1493e1832f29SStephen Smalley inode->i_flags |= S_PRIVATE; 14941da177e4SLinus Torvalds 14951da177e4SLinus Torvalds inode->i_size = size; 14966d6b77f1SMiklos Szeredi clear_nlink(inode); 1497ce8d2cdfSDave Hansen 149833b8f84aSMike Kravetz if (!hugetlb_reserve_pages(inode, 0, 1499e68375c8SAl Viro size >> huge_page_shift(hstate_inode(inode)), NULL, 1500e68375c8SAl Viro acctflag)) 1501e68375c8SAl Viro file = ERR_PTR(-ENOMEM); 1502e68375c8SAl Viro else 1503e68375c8SAl Viro file = alloc_file_pseudo(inode, mnt, name, O_RDWR, 1504ce8d2cdfSDave Hansen &hugetlbfs_file_operations); 1505e68375c8SAl Viro if (!IS_ERR(file)) 15061da177e4SLinus Torvalds return file; 15071da177e4SLinus Torvalds 1508b45b5bd6SDavid Gibson iput(inode); 1509e68375c8SAl Viro out: 151039b65252SAnatol Pomozov return file; 15111da177e4SLinus Torvalds } 15121da177e4SLinus Torvalds 151332021982SDavid Howells static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) 151432021982SDavid Howells { 151532021982SDavid Howells struct fs_context *fc; 151632021982SDavid Howells struct vfsmount *mnt; 151732021982SDavid Howells 151832021982SDavid Howells fc = 
fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT); 151932021982SDavid Howells if (IS_ERR(fc)) { 152032021982SDavid Howells mnt = ERR_CAST(fc); 152132021982SDavid Howells } else { 152232021982SDavid Howells struct hugetlbfs_fs_context *ctx = fc->fs_private; 152332021982SDavid Howells ctx->hstate = h; 152432021982SDavid Howells mnt = fc_mount(fc); 152532021982SDavid Howells put_fs_context(fc); 152632021982SDavid Howells } 152732021982SDavid Howells if (IS_ERR(mnt)) 1528a25fddceSMiaohe Lin pr_err("Cannot mount internal hugetlbfs for page size %luK", 1529a25fddceSMiaohe Lin huge_page_size(h) >> 10); 153032021982SDavid Howells return mnt; 153132021982SDavid Howells } 153232021982SDavid Howells 15331da177e4SLinus Torvalds static int __init init_hugetlbfs_fs(void) 15341da177e4SLinus Torvalds { 153532021982SDavid Howells struct vfsmount *mnt; 153642d7395fSAndi Kleen struct hstate *h; 15371da177e4SLinus Torvalds int error; 153842d7395fSAndi Kleen int i; 15391da177e4SLinus Torvalds 1540457c1b27SNishanth Aravamudan if (!hugepages_supported()) { 15419b857d26SAndrew Morton pr_info("disabling because there are no supported hugepage sizes\n"); 1542457c1b27SNishanth Aravamudan return -ENOTSUPP; 1543457c1b27SNishanth Aravamudan } 1544457c1b27SNishanth Aravamudan 1545d1d5e05fSHillf Danton error = -ENOMEM; 15461da177e4SLinus Torvalds hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", 15471da177e4SLinus Torvalds sizeof(struct hugetlbfs_inode_info), 15485d097056SVladimir Davydov 0, SLAB_ACCOUNT, init_once); 15491da177e4SLinus Torvalds if (hugetlbfs_inode_cachep == NULL) 15508fc312b3SMike Kravetz goto out; 15511da177e4SLinus Torvalds 15521da177e4SLinus Torvalds error = register_filesystem(&hugetlbfs_fs_type); 15531da177e4SLinus Torvalds if (error) 15548fc312b3SMike Kravetz goto out_free; 15551da177e4SLinus Torvalds 15568fc312b3SMike Kravetz /* default hstate mount is required */ 15573b2275a8SMiaohe Lin mnt = mount_one_hugetlbfs(&default_hstate); 15588fc312b3SMike Kravetz if (IS_ERR(mnt)) { 15598fc312b3SMike Kravetz error = PTR_ERR(mnt); 15608fc312b3SMike Kravetz goto out_unreg; 15618fc312b3SMike Kravetz } 15628fc312b3SMike Kravetz hugetlbfs_vfsmount[default_hstate_idx] = mnt; 15638fc312b3SMike Kravetz 15648fc312b3SMike Kravetz /* other hstates are optional */ 156542d7395fSAndi Kleen i = 0; 156642d7395fSAndi Kleen for_each_hstate(h) { 156715f0ec94SJan Stancek if (i == default_hstate_idx) { 156815f0ec94SJan Stancek i++; 15698fc312b3SMike Kravetz continue; 157015f0ec94SJan Stancek } 15718fc312b3SMike Kravetz 157232021982SDavid Howells mnt = mount_one_hugetlbfs(h); 15738fc312b3SMike Kravetz if (IS_ERR(mnt)) 15748fc312b3SMike Kravetz hugetlbfs_vfsmount[i] = NULL; 15758fc312b3SMike Kravetz else 157632021982SDavid Howells hugetlbfs_vfsmount[i] = mnt; 157742d7395fSAndi Kleen i++; 157842d7395fSAndi Kleen } 157932021982SDavid Howells 158042d7395fSAndi Kleen return 0; 15811da177e4SLinus Torvalds 15828fc312b3SMike Kravetz out_unreg: 15838fc312b3SMike Kravetz (void)unregister_filesystem(&hugetlbfs_fs_type); 15848fc312b3SMike Kravetz out_free: 15851da177e4SLinus Torvalds kmem_cache_destroy(hugetlbfs_inode_cachep); 15868fc312b3SMike Kravetz out: 15871da177e4SLinus Torvalds return error; 15881da177e4SLinus Torvalds } 15893e89e1c5SPaul Gortmaker fs_initcall(init_hugetlbfs_fs) 1590
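/*
 * Usage sketch (illustrative only, not part of the original source): the
 * mount options handled by hugetlbfs_parse_param() and reported by
 * hugetlbfs_show_options() are normally exercised from userspace.  The
 * mount point, ownership and size values below are arbitrary examples;
 * "pagesize=2M" assumes a system providing 2 MB huge pages with free pages
 * available in the pool:
 *
 *	mount -t hugetlbfs -o pagesize=2M,size=256M,min_size=128M,\
 *		nr_inodes=64,uid=1000,gid=1000,mode=0770 none /mnt/huge
 *
 * A minimal userspace program mapping one huge page from such a mount
 * (error handling trimmed; both the ftruncate() size and the mmap() length
 * must be multiples of the huge page size, as enforced by
 * hugetlbfs_setattr() and hugetlb_get_unmapped_area()):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const size_t len = 2UL << 20;	// one 2 MB huge page
 *		int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
 *
 *		if (fd < 0 || ftruncate(fd, len) < 0)
 *			return 1;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		memset(p, 0, len);		// faults in the huge page
 *		munmap(p, len);
 *		close(fd);
 *		return 0;
 *	}
 */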