/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
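
/*
 * Worked example (illustration, not from the original source): on a
 * 64-bit arch with 4K base pages, PAGE_SHIFT = 12 and BITS_PER_LONG = 64,
 * so PGOFF_LOFFT_MAX is the top 13 bits, 0xfff8000000000000.  A vm_pgoff
 * with any of those bits set would overflow the sign bit of loff_t once
 * shifted left by PAGE_SHIFT.
 */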

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}
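
	/*
	 * Illustration (assuming 2MB huge pages and 4K base pages):
	 * ~huge_page_mask(h) is 0x1fffff, which becomes 0x1ff after the
	 * PAGE_SHIFT right shift, so the alignment check below requires
	 * vm_pgoff to be a multiple of 512 base pages, i.e. the file
	 * offset must fall on a 2MB boundary.
	 */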
	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif
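
/*
 * Hypothetical userspace usage (illustration only; the mount point and
 * file name are assumptions): a mapping of a hugetlbfs file goes through
 * the search routines above, e.g.
 *
 *	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The returned address is huge page aligned because align_mask is set
 * to PAGE_MASK & ~huge_page_mask(h) in both search routines.
 */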

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;
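
	/*
	 * Worked example (illustration, assuming 2MB huge pages): a read
	 * starting at ki_pos = 3MB gives index = 1 and offset = 1MB, so
	 * the first loop iteration copies at most the remaining 1MB of
	 * huge page 1 before advancing to page 2.
	 */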

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			if (PageHWPoison(page)) {
				put_page(page);
				retval = -EIO;
				break;
			}

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = copy_page_to_iter(page, offset, nr, to);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_page(pte) == page)
		return true;

	return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}
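
/*
 * Worked example (illustration, assuming 4K base pages): truncating at
 * pgoff start = 1024 intersects a vma with vm_pgoff = 512 at
 * offset = (1024 - 512) << 12 = 2MB past vm_start, while a vma with
 * vm_pgoff >= 1024 is unmapped from vm_start itself.
 */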

/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	struct page *page = &folio->page;
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_page(vma, v_start, page))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we cannot get the vma lock, we need to drop
			 * i_mmap_rwsem and take locks in order.  First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock.  We know it is still valid as we have
		 * a reference.  We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If lock is no longer attached to vma, then just
			 * unlock, drop our reference and retry looking for
			 * other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma.  Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_page(vma, v_start, page))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that vma lock only exists for shared/non-private
		 * vmas.  Therefore, lock is not held when calling
		 * unmap_hugepage_range for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller.  Unmap (again) while holding
	 * the fault mutex.  The mutex will prevent faults
	 * until we finish removing the folio.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * We must remove the folio from page cache before removing
	 * the region/reserve map (hugetlb_unreserve_pages).  In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail.  Correspondingly, the subpool and global
	 * reserve usage count can need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining page table lock.  It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = start;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * Remove folio that was part of folio_batch.
			 */
			if (remove_inode_single_folio(h, inode, mapping, folio,
							index, truncate_op))
				freed++;

			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (!folio)
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}
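
/*
 * Worked example for the hole punch path below (illustration, assuming
 * 2MB huge pages): punching offset = 1MB, len = 4MB rounds hole_start
 * up to 2MB and hole_end down to 4MB, so [1MB, 2MB) and [4MB, 5MB) are
 * zeroed in place while the full page in [2MB, 4MB) is unmapped and
 * removed from the file.
 */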
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = hugetlb_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page because locked by hugetlb_add_to_page_cache()
		 * put_page() due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
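
/*
 * Hypothetical userspace usage (illustration only): with 2MB huge pages,
 *
 *	fallocate(fd, 0, 0, 4UL << 20);
 *
 * walks indices 0 and 1 in the loop above, allocating and zeroing two
 * huge pages and growing i_size to 4MB, without the pages ever being
 * mapped.
 */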

static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	dir->i_ctime = dir->i_mtime = current_time(dir);
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct file *file,
			     umode_t mode)
{
	struct inode *inode;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	dir->i_ctime = dir->i_mtime = current_time(dir);
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
        struct hugepage_subpool *spool = sbinfo->spool;
        unsigned long hpage_size = huge_page_size(sbinfo->hstate);
        unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
        char mod;

        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
                seq_printf(m, ",uid=%u",
                           from_kuid_munged(&init_user_ns, sbinfo->uid));
        if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
                seq_printf(m, ",gid=%u",
                           from_kgid_munged(&init_user_ns, sbinfo->gid));
        if (sbinfo->mode != 0755)
                seq_printf(m, ",mode=%o", sbinfo->mode);
        if (sbinfo->max_inodes != -1)
                seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

        hpage_size /= 1024;
        mod = 'K';
        if (hpage_size >= 1024) {
                hpage_size /= 1024;
                mod = 'M';
        }
        seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
        if (spool) {
                if (spool->max_hpages != -1)
                        seq_printf(m, ",size=%llu",
                                   (unsigned long long)spool->max_hpages << hpage_shift);
                if (spool->min_hpages != -1)
                        seq_printf(m, ",min_size=%llu",
                                   (unsigned long long)spool->min_hpages << hpage_shift);
        }
        return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 or -1 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock_irq(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock_irq(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

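/*
 * Worked example for the statfs callback above: f_bsize is the huge page
 * size, and the block counts are in huge pages, not filesystem blocks.
 * They stay zero unless the mount used size=/min_size= (no subpool
 * otherwise).  A sketch, assuming a mount at /mnt/huge (illustrative):
 *
 *      #include <sys/vfs.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              struct statfs s;
 *
 *              if (statfs("/mnt/huge", &s))
 *                      return 1;
 *              printf("hpage size %ld, total %ld, free %ld\n",
 *                     (long)s.f_bsize, (long)s.f_blocks, (long)s.f_bfree);
 *              return 0;
 *      }
 */
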
static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }

        /*
         * Any time after allocation, hugetlbfs_destroy_inode can be called
         * for the inode.  mpol_free_shared_policy is unconditionally called
         * as part of hugetlbfs_destroy_inode.  So, initialize policy here
         * in case of a quick call to destroy.
         *
         * Note that the policy is initialized even if we are creating a
         * private inode.  This simplifies hugetlbfs_destroy_inode.
         */
        mpol_shared_policy_init(&p->policy, NULL);

        return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

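/*
 * The free_inodes accounting above backs the nr_inodes= mount option:
 * hugetlbfs_alloc_inode() starts failing once the budget (which also
 * covers the root inode) is used up, and the error surfaces as ENOSPC.
 * A sketch, assuming a mount done with
 * "mount -t hugetlbfs -o nr_inodes=2 none /mnt/huge" (illustrative):
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              char path[64];
 *              int i;
 *
 *              for (i = 0; i < 3; i++) {
 *                      snprintf(path, sizeof(path), "/mnt/huge/f%d", i);
 *                      if (open(path, O_CREAT | O_RDWR, 0600) < 0) {
 *                              perror(path);   // ENOSPC: budget exhausted
 *                              break;
 *                      }
 *              }
 *              return 0;
 *      }
 */
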
static const struct address_space_operations hugetlbfs_aops = {
        .write_begin            = hugetlbfs_write_begin,
        .write_end              = hugetlbfs_write_end,
        .dirty_folio            = noop_dirty_folio,
        .migrate_folio          = hugetlbfs_migrate_folio,
        .error_remove_page      = hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read_iter              = hugetlbfs_read_iter,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
        .fallocate              = hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
        .tmpfile        = hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .free_inode     = hugetlbfs_free_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = hugetlbfs_show_options,
};

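/*
 * Note what is absent from hugetlbfs_file_operations above: there is no
 * ->write_iter, so write(2) on a hugetlbfs file fails with EINVAL; data
 * is written by storing through a mapping instead (read(2) does work,
 * via ->read_iter).  A sketch, assuming /mnt/huge and 2 MB pages
 * (illustrative):
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              size_t len = 2UL << 20;
 *              int fd = open("/mnt/huge/f", O_CREAT | O_RDWR, 0600);
 *              char *p;
 *
 *              if (write(fd, "x", 1) < 0)
 *                      perror("write");        // expected: EINVAL
 *              p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       fd, 0);
 *              if (p != MAP_FAILED)
 *                      memset(p, 0, len);      // this path works
 *              return 0;
 *      }
 */
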
/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
                         enum hugetlbfs_size_type val_type)
{
        if (val_type == NO_SIZE)
                return -1;

        if (val_type == SIZE_PERCENT) {
                size_opt <<= huge_page_shift(h);
                size_opt *= h->max_huge_pages;
                do_div(size_opt, 100);
        }

        size_opt >>= huge_page_shift(h);
        return size_opt;
}

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        char *rest;
        unsigned long ps;
        int opt;

        opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_uid:
                ctx->uid = make_kuid(current_user_ns(), result.uint_32);
                if (!uid_valid(ctx->uid))
                        goto bad_val;
                return 0;

        case Opt_gid:
                ctx->gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(ctx->gid))
                        goto bad_val;
                return 0;

        case Opt_mode:
                ctx->mode = result.uint_32 & 01777U;
                return 0;

        case Opt_size:
                /* memparse() will accept a K/M/G without a digit */
                if (!param->string || !isdigit(param->string[0]))
                        goto bad_val;
                ctx->max_size_opt = memparse(param->string, &rest);
                ctx->max_val_type = SIZE_STD;
                if (*rest == '%')
                        ctx->max_val_type = SIZE_PERCENT;
                return 0;

        case Opt_nr_inodes:
                /* memparse() will accept a K/M/G without a digit */
                if (!param->string || !isdigit(param->string[0]))
                        goto bad_val;
                ctx->nr_inodes = memparse(param->string, &rest);
                return 0;

        case Opt_pagesize:
                ps = memparse(param->string, &rest);
                ctx->hstate = size_to_hstate(ps);
                if (!ctx->hstate) {
                        pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
                        return -EINVAL;
                }
                return 0;

        case Opt_min_size:
                /* memparse() will accept a K/M/G without a digit */
                if (!param->string || !isdigit(param->string[0]))
                        goto bad_val;
                ctx->min_size_opt = memparse(param->string, &rest);
                ctx->min_val_type = SIZE_STD;
                if (*rest == '%')
                        ctx->min_val_type = SIZE_PERCENT;
                return 0;

        default:
                return -EINVAL;
        }

bad_val:
        return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
                       param->string, param->key);
}

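/*
 * Worked example for the conversion above, assuming 2 MB pages
 * (huge_page_shift == 21) and a pool of max_huge_pages == 512:
 *
 *      size=512M -> SIZE_STD:     512M >> 21                   = 256 pages
 *      size=50%  -> SIZE_PERCENT: ((50 << 21) * 512 / 100) >> 21 = 256 pages
 *
 * i.e. both spellings cap this mount at half of a 1 GB pool.
 */
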
/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;

        /*
         * Use huge page pool size (in hstate) to convert the size
         * options to number of huge pages.  If NO_SIZE, -1 is returned.
         */
        ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
                                                   ctx->max_size_opt,
                                                   ctx->max_val_type);
        ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
                                                   ctx->min_size_opt,
                                                   ctx->min_val_type);

        /*
         * If max_size was specified, then min_size must be smaller
         */
        if (ctx->max_val_type > NO_SIZE &&
            ctx->min_hpages > ctx->max_hpages) {
                pr_err("Minimum size can not be greater than maximum size\n");
                return -EINVAL;
        }

        return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;
        struct hugetlbfs_sb_info *sbinfo;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->hstate = ctx->hstate;
        sbinfo->max_inodes = ctx->nr_inodes;
        sbinfo->free_inodes = ctx->nr_inodes;
        sbinfo->spool = NULL;
        sbinfo->uid = ctx->uid;
        sbinfo->gid = ctx->gid;
        sbinfo->mode = ctx->mode;

        /*
         * Allocate and initialize subpool if maximum or minimum size is
         * specified.  Any needed reservations (for minimum size) are taken
         * when the subpool is created.
         */
        if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
                sbinfo->spool = hugepage_new_subpool(ctx->hstate,
                                                     ctx->max_hpages,
                                                     ctx->min_hpages);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(ctx->hstate);
        sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;

        /*
         * Due to the special and limited functionality of hugetlbfs, it does
         * not work well as a stacking filesystem.
         */
        sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
        if (!sb->s_root)
                goto out_free;
        return 0;
out_free:
        kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

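/*
 * Everything from option parsing to fill_super can be driven by a classic
 * mount(2) call; the comma-separated data string is split up and fed to
 * hugetlbfs_parse_param() one option at a time.  A sketch, assuming
 * CAP_SYS_ADMIN and an existing /mnt/huge directory (illustrative):
 *
 *      #include <sys/mount.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              if (mount("none", "/mnt/huge", "hugetlbfs", 0,
 *                        "pagesize=2M,size=512M,min_size=256M,mode=01777"))
 *                      perror("mount");        // e.g. EINVAL if min > max
 *              return 0;
 *      }
 */
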
static int hugetlbfs_get_tree(struct fs_context *fc)
{
        int err = hugetlbfs_validate(fc);
        if (err)
                return err;
        return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
        kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
        .free           = hugetlbfs_fs_context_free,
        .parse_param    = hugetlbfs_parse_param,
        .get_tree       = hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
        struct hugetlbfs_fs_context *ctx;

        ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->max_hpages = -1; /* No limit on size by default */
        ctx->nr_inodes = -1; /* No limit on number of inodes by default */
        ctx->uid = current_fsuid();
        ctx->gid = current_fsgid();
        ctx->mode = 0755;
        ctx->hstate = &default_hstate;
        ctx->min_hpages = -1; /* No default minimum size */
        ctx->max_val_type = NO_SIZE;
        ctx->min_val_type = NO_SIZE;
        fc->fs_private = ctx;
        fc->ops = &hugetlbfs_fs_context_ops;
        return 0;
}

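/*
 * The fs_context plumbing above also serves the new mount API directly.
 * A sketch using raw syscalls (fsopen(2) and friends need Linux 5.2+;
 * the FSCONFIG_* constants come from <linux/mount.h>):
 *
 *      #define _GNU_SOURCE
 *      #include <sys/syscall.h>
 *      #include <linux/mount.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              int fsfd = syscall(SYS_fsopen, "hugetlbfs", 0);
 *
 *              if (fsfd < 0)
 *                      return 1;
 *              // each call below lands in hugetlbfs_parse_param()
 *              syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING,
 *                      "pagesize", "2M", 0);
 *              syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING,
 *                      "size", "50%", 0);
 *              // CMD_CREATE runs hugetlbfs_get_tree() -> fill_super
 *              syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE,
 *                      NULL, NULL, 0);
 *              // move_mount(2) would attach the returned mount fd
 *              return syscall(SYS_fsmount, fsfd, 0, 0) < 0;
 *      }
 */
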
static struct file_system_type hugetlbfs_fs_type = {
        .name                   = "hugetlbfs",
        .init_fs_context        = hugetlbfs_init_fs_context,
        .parameters             = hugetlb_fs_parameters,
        .kill_sb                = kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
        struct hstate *h = hstate_sizelog(page_size_log);

        if (!h)
                return -1;
        return hstate_index(h);
}

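/*
 * can_do_hugetlb_shm() above is the gate for SHM_HUGETLB segments: the
 * caller needs CAP_IPC_LOCK or membership in the group named by the
 * vm.hugetlb_shm_group sysctl, otherwise hugetlb_file_setup() below
 * returns EPERM.  A sketch (one 2 MB page assumed as the default size):
 *
 *      #include <sys/ipc.h>
 *      #include <sys/shm.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              int id = shmget(IPC_PRIVATE, 2UL << 20,
 *                              IPC_CREAT | SHM_HUGETLB | 0600);
 *
 *              if (id < 0)
 *                      perror("shmget(SHM_HUGETLB)");  // EPERM if denied
 *              else
 *                      shmctl(id, IPC_RMID, NULL);
 *              return 0;
 *      }
 */
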
/*
 * Note that the caller must align size to the proper hugepage size;
 * otherwise hugetlb_reserve_pages reserves one fewer huge page than
 * intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, int creat_flags,
                                int page_size_log)
{
        struct inode *inode;
        struct vfsmount *mnt;
        int hstate_idx;
        struct file *file;

        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);

        mnt = hugetlbfs_vfsmount[hstate_idx];
        if (!mnt)
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                struct ucounts *ucounts = current_ucounts();

                if (user_shm_lock(size, ucounts)) {
                        pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
                                current->comm, current->pid);
                        user_shm_unlock(size, ucounts);
                }
                return ERR_PTR(-EPERM);
        }

        file = ERR_PTR(-ENOSPC);
        inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out;
        if (creat_flags == HUGETLB_SHMFS_INODE)
                inode->i_flags |= S_PRIVATE;

        inode->i_size = size;
        clear_nlink(inode);

        if (!hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                file = ERR_PTR(-ENOMEM);
        else
                file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
                                        &hugetlbfs_file_operations);
        if (!IS_ERR(file))
                return file;

        iput(inode);
out:
        return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
        if (IS_ERR(fc)) {
                mnt = ERR_CAST(fc);
        } else {
                struct hugetlbfs_fs_context *ctx = fc->fs_private;
                ctx->hstate = h;
                mnt = fc_mount(fc);
                put_fs_context(fc);
        }
        if (IS_ERR(mnt))
                pr_err("Cannot mount internal hugetlbfs for page size %luK",
                       huge_page_size(h) / SZ_1K);
        return mnt;
}

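/*
 * hugetlb_file_setup() above is also the backend for anonymous
 * MAP_HUGETLB mappings; the page size log2 travels in the high bits of
 * the mmap flags and becomes page_size_log.  A sketch (MAP_HUGE_SHIFT
 * may need <linux/mman.h> on older glibc, hence the fallback define):
 *
 *      #include <sys/mman.h>
 *      #include <stdio.h>
 *
 *      #ifndef MAP_HUGE_SHIFT
 *      #define MAP_HUGE_SHIFT 26
 *      #endif
 *
 *      int main(void)
 *      {
 *              void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
 *                             (21 << MAP_HUGE_SHIFT),  // 2^21 = 2M pages
 *                             -1, 0);
 *
 *              if (p == MAP_FAILED)
 *                      perror("mmap(MAP_HUGETLB)");    // ENODEV, ENOMEM...
 *              return 0;
 *      }
 */
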
static int __init init_hugetlbfs_fs(void)
{
        struct vfsmount *mnt;
        struct hstate *h;
        int error;
        int i;

        if (!hugepages_supported()) {
                pr_info("disabling because there are no supported hugepage sizes\n");
                return -ENOTSUPP;
        }

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, SLAB_ACCOUNT, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out_free;

        /* default hstate mount is required */
        mnt = mount_one_hugetlbfs(&default_hstate);
        if (IS_ERR(mnt)) {
                error = PTR_ERR(mnt);
                goto out_unreg;
        }
        hugetlbfs_vfsmount[default_hstate_idx] = mnt;

        /* other hstates are optional */
        i = 0;
        for_each_hstate(h) {
                if (i == default_hstate_idx) {
                        i++;
                        continue;
                }

                mnt = mount_one_hugetlbfs(h);
                if (IS_ERR(mnt))
                        hugetlbfs_vfsmount[i] = NULL;
                else
                        hugetlbfs_vfsmount[i] = mnt;
                i++;
        }

        return 0;

 out_unreg:
        (void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
        return error;
}
fs_initcall(init_hugetlbfs_fs)

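/*
 * After this initcall there is an internal mount for the default huge
 * page size (mandatory) and, where possible, one per additional hstate,
 * which is what lets MAP_HUGETLB and memfd_create(MFD_HUGETLB) work with
 * no visible hugetlbfs mount.  The configured sizes can be listed from
 * sysfs; a small sketch:
 *
 *      #include <dirent.h>
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              DIR *d = opendir("/sys/kernel/mm/hugepages");
 *              struct dirent *e;
 *
 *              if (!d)
 *                      return 1;       // no hugepage support
 *              while ((e = readdir(d)))
 *                      if (e->d_name[0] != '.')
 *                              printf("%s\n", e->d_name); // hugepages-2048kB
 *              closedir(d);
 *              return 0;
 *      }
 */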