11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * hugetlbpage-backed filesystem. Based on ramfs. 31da177e4SLinus Torvalds * 46d49e352SNadia Yvette Chambers * Nadia Yvette Chambers, 2002 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Copyright (C) 2002 Linus Torvalds. 73e89e1c5SPaul Gortmaker * License: GPL 81da177e4SLinus Torvalds */ 91da177e4SLinus Torvalds 109b857d26SAndrew Morton #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 119b857d26SAndrew Morton 121da177e4SLinus Torvalds #include <linux/thread_info.h> 131da177e4SLinus Torvalds #include <asm/current.h> 14174cd4b1SIngo Molnar #include <linux/sched/signal.h> /* remove ASAP */ 1570c3547eSMike Kravetz #include <linux/falloc.h> 161da177e4SLinus Torvalds #include <linux/fs.h> 171da177e4SLinus Torvalds #include <linux/mount.h> 181da177e4SLinus Torvalds #include <linux/file.h> 19e73a75faSRandy Dunlap #include <linux/kernel.h> 201da177e4SLinus Torvalds #include <linux/writeback.h> 211da177e4SLinus Torvalds #include <linux/pagemap.h> 221da177e4SLinus Torvalds #include <linux/highmem.h> 231da177e4SLinus Torvalds #include <linux/init.h> 241da177e4SLinus Torvalds #include <linux/string.h> 2516f7e0feSRandy Dunlap #include <linux/capability.h> 26e73a75faSRandy Dunlap #include <linux/ctype.h> 271da177e4SLinus Torvalds #include <linux/backing-dev.h> 281da177e4SLinus Torvalds #include <linux/hugetlb.h> 291da177e4SLinus Torvalds #include <linux/pagevec.h> 30e73a75faSRandy Dunlap #include <linux/parser.h> 31036e0856SBenjamin Herrenschmidt #include <linux/mman.h> 321da177e4SLinus Torvalds #include <linux/slab.h> 331da177e4SLinus Torvalds #include <linux/dnotify.h> 341da177e4SLinus Torvalds #include <linux/statfs.h> 351da177e4SLinus Torvalds #include <linux/security.h> 361fd7317dSNick Black #include <linux/magic.h> 37290408d4SNaoya Horiguchi #include <linux/migrate.h> 3834d0640eSAl Viro #include <linux/uio.h> 391da177e4SLinus Torvalds 407c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 411da177e4SLinus Torvalds 42ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops; 43f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops; 444b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations; 4592e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations; 4692e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations; 471da177e4SLinus Torvalds 48a1d776eeSDavid Gibson struct hugetlbfs_config { 49a0eb3a05SEric W. Biederman kuid_t uid; 50a0eb3a05SEric W. Biederman kgid_t gid; 51a1d776eeSDavid Gibson umode_t mode; 527ca02d0aSMike Kravetz long max_hpages; 53a1d776eeSDavid Gibson long nr_inodes; 54a1d776eeSDavid Gibson struct hstate *hstate; 557ca02d0aSMike Kravetz long min_hpages; 56a1d776eeSDavid Gibson }; 57a1d776eeSDavid Gibson 58a1d776eeSDavid Gibson struct hugetlbfs_inode_info { 59a1d776eeSDavid Gibson struct shared_policy policy; 60a1d776eeSDavid Gibson struct inode vfs_inode; 61a1d776eeSDavid Gibson }; 62a1d776eeSDavid Gibson 63a1d776eeSDavid Gibson static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) 64a1d776eeSDavid Gibson { 65a1d776eeSDavid Gibson return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); 66a1d776eeSDavid Gibson } 67a1d776eeSDavid Gibson 681da177e4SLinus Torvalds int sysctl_hugetlb_shm_group; 691da177e4SLinus Torvalds 70e73a75faSRandy Dunlap enum { 71e73a75faSRandy Dunlap Opt_size, Opt_nr_inodes, 72e73a75faSRandy Dunlap Opt_mode, Opt_uid, Opt_gid, 737ca02d0aSMike Kravetz Opt_pagesize, Opt_min_size, 74e73a75faSRandy Dunlap Opt_err, 75e73a75faSRandy Dunlap }; 76e73a75faSRandy Dunlap 77a447c093SSteven Whitehouse static const match_table_t tokens = { 78e73a75faSRandy Dunlap {Opt_size, "size=%s"}, 79e73a75faSRandy Dunlap {Opt_nr_inodes, "nr_inodes=%s"}, 80e73a75faSRandy Dunlap {Opt_mode, "mode=%o"}, 81e73a75faSRandy Dunlap {Opt_uid, "uid=%u"}, 82e73a75faSRandy Dunlap {Opt_gid, "gid=%u"}, 83a137e1ccSAndi Kleen {Opt_pagesize, "pagesize=%s"}, 847ca02d0aSMike Kravetz {Opt_min_size, "min_size=%s"}, 85e73a75faSRandy Dunlap {Opt_err, NULL}, 86e73a75faSRandy Dunlap }; 87e73a75faSRandy Dunlap 8870c3547eSMike Kravetz #ifdef CONFIG_NUMA 8970c3547eSMike Kravetz static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma, 9070c3547eSMike Kravetz struct inode *inode, pgoff_t index) 9170c3547eSMike Kravetz { 9270c3547eSMike Kravetz vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy, 9370c3547eSMike Kravetz index); 9470c3547eSMike Kravetz } 9570c3547eSMike Kravetz 9670c3547eSMike Kravetz static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma) 9770c3547eSMike Kravetz { 9870c3547eSMike Kravetz mpol_cond_put(vma->vm_policy); 9970c3547eSMike Kravetz } 10070c3547eSMike Kravetz #else 10170c3547eSMike Kravetz static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma, 10270c3547eSMike Kravetz struct inode *inode, pgoff_t index) 10370c3547eSMike Kravetz { 10470c3547eSMike Kravetz } 10570c3547eSMike Kravetz 10670c3547eSMike Kravetz static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma) 10770c3547eSMike Kravetz { 10870c3547eSMike Kravetz } 10970c3547eSMike Kravetz #endif 11070c3547eSMike Kravetz 1112e9b367cSAdam Litke static void huge_pagevec_release(struct pagevec *pvec) 1122e9b367cSAdam Litke { 1132e9b367cSAdam Litke int i; 1142e9b367cSAdam Litke 1152e9b367cSAdam Litke for (i = 0; i < pagevec_count(pvec); ++i) 1162e9b367cSAdam Litke put_page(pvec->pages[i]); 1172e9b367cSAdam Litke 1182e9b367cSAdam Litke pagevec_reinit(pvec); 1192e9b367cSAdam Litke } 1202e9b367cSAdam Litke 1211da177e4SLinus Torvalds static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) 1221da177e4SLinus Torvalds { 123496ad9aaSAl Viro struct inode *inode = file_inode(file); 1241da177e4SLinus Torvalds loff_t len, vma_len; 1251da177e4SLinus Torvalds int ret; 126a5516438SAndi Kleen struct hstate *h = hstate_file(file); 1271da177e4SLinus Torvalds 12868589bc3SHugh Dickins /* 129dec4ad86SDavid Gibson * vma address alignment (but not the pgoff alignment) has 130dec4ad86SDavid Gibson * already been checked by prepare_hugepage_range. If you add 131dec4ad86SDavid Gibson * any error returns here, do so after setting VM_HUGETLB, so 132dec4ad86SDavid Gibson * is_vm_hugetlb_page tests below unmap_region go the right 133dec4ad86SDavid Gibson * way when do_mmap_pgoff unwinds (may be important on powerpc 134dec4ad86SDavid Gibson * and ia64). 13568589bc3SHugh Dickins */ 136a2fce914SNaoya Horiguchi vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; 13768589bc3SHugh Dickins vma->vm_ops = &hugetlb_vm_ops; 1381da177e4SLinus Torvalds 1392b37c35eSBecky Bruce if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) 140dec4ad86SDavid Gibson return -EINVAL; 141dec4ad86SDavid Gibson 1421da177e4SLinus Torvalds vma_len = (loff_t)(vma->vm_end - vma->vm_start); 1431da177e4SLinus Torvalds 1445955102cSAl Viro inode_lock(inode); 1451da177e4SLinus Torvalds file_accessed(file); 1461da177e4SLinus Torvalds 1471da177e4SLinus Torvalds ret = -ENOMEM; 1481da177e4SLinus Torvalds len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 1491da177e4SLinus Torvalds 150a1e78772SMel Gorman if (hugetlb_reserve_pages(inode, 151a5516438SAndi Kleen vma->vm_pgoff >> huge_page_order(h), 1525a6fe125SMel Gorman len >> huge_page_shift(h), vma, 1535a6fe125SMel Gorman vma->vm_flags)) 154b45b5bd6SDavid Gibson goto out; 155b45b5bd6SDavid Gibson 1564c887265SAdam Litke ret = 0; 157b6174df5SZhang, Yanmin if (vma->vm_flags & VM_WRITE && inode->i_size < len) 1581da177e4SLinus Torvalds inode->i_size = len; 1591da177e4SLinus Torvalds out: 1605955102cSAl Viro inode_unlock(inode); 1611da177e4SLinus Torvalds 1621da177e4SLinus Torvalds return ret; 1631da177e4SLinus Torvalds } 1641da177e4SLinus Torvalds 1651da177e4SLinus Torvalds /* 166508034a3SHugh Dickins * Called under down_write(mmap_sem). 1671da177e4SLinus Torvalds */ 1681da177e4SLinus Torvalds 169d2ba27e8SAdrian Bunk #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA 1701da177e4SLinus Torvalds static unsigned long 1711da177e4SLinus Torvalds hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 1721da177e4SLinus Torvalds unsigned long len, unsigned long pgoff, unsigned long flags) 1731da177e4SLinus Torvalds { 1741da177e4SLinus Torvalds struct mm_struct *mm = current->mm; 1751da177e4SLinus Torvalds struct vm_area_struct *vma; 176a5516438SAndi Kleen struct hstate *h = hstate_file(file); 17708659355SMichel Lespinasse struct vm_unmapped_area_info info; 1781da177e4SLinus Torvalds 179a5516438SAndi Kleen if (len & ~huge_page_mask(h)) 1801da177e4SLinus Torvalds return -EINVAL; 1811da177e4SLinus Torvalds if (len > TASK_SIZE) 1821da177e4SLinus Torvalds return -ENOMEM; 1831da177e4SLinus Torvalds 184036e0856SBenjamin Herrenschmidt if (flags & MAP_FIXED) { 185a5516438SAndi Kleen if (prepare_hugepage_range(file, addr, len)) 186036e0856SBenjamin Herrenschmidt return -EINVAL; 187036e0856SBenjamin Herrenschmidt return addr; 188036e0856SBenjamin Herrenschmidt } 189036e0856SBenjamin Herrenschmidt 1901da177e4SLinus Torvalds if (addr) { 191a5516438SAndi Kleen addr = ALIGN(addr, huge_page_size(h)); 1921da177e4SLinus Torvalds vma = find_vma(mm, addr); 1931da177e4SLinus Torvalds if (TASK_SIZE - len >= addr && 1941da177e4SLinus Torvalds (!vma || addr + len <= vma->vm_start)) 1951da177e4SLinus Torvalds return addr; 1961da177e4SLinus Torvalds } 1971da177e4SLinus Torvalds 19808659355SMichel Lespinasse info.flags = 0; 19908659355SMichel Lespinasse info.length = len; 20008659355SMichel Lespinasse info.low_limit = TASK_UNMAPPED_BASE; 20108659355SMichel Lespinasse info.high_limit = TASK_SIZE; 20208659355SMichel Lespinasse info.align_mask = PAGE_MASK & ~huge_page_mask(h); 20308659355SMichel Lespinasse info.align_offset = 0; 20408659355SMichel Lespinasse return vm_unmapped_area(&info); 2051da177e4SLinus Torvalds } 2061da177e4SLinus Torvalds #endif 2071da177e4SLinus Torvalds 20834d0640eSAl Viro static size_t 209e63e1e5aSBadari Pulavarty hugetlbfs_read_actor(struct page *page, unsigned long offset, 21034d0640eSAl Viro struct iov_iter *to, unsigned long size) 211e63e1e5aSBadari Pulavarty { 21234d0640eSAl Viro size_t copied = 0; 213e63e1e5aSBadari Pulavarty int i, chunksize; 214e63e1e5aSBadari Pulavarty 215e63e1e5aSBadari Pulavarty /* Find which 4k chunk and offset with in that chunk */ 21609cbfeafSKirill A. Shutemov i = offset >> PAGE_SHIFT; 21709cbfeafSKirill A. Shutemov offset = offset & ~PAGE_MASK; 218e63e1e5aSBadari Pulavarty 219e63e1e5aSBadari Pulavarty while (size) { 22034d0640eSAl Viro size_t n; 22109cbfeafSKirill A. Shutemov chunksize = PAGE_SIZE; 222e63e1e5aSBadari Pulavarty if (offset) 223e63e1e5aSBadari Pulavarty chunksize -= offset; 224e63e1e5aSBadari Pulavarty if (chunksize > size) 225e63e1e5aSBadari Pulavarty chunksize = size; 22634d0640eSAl Viro n = copy_page_to_iter(&page[i], offset, chunksize, to); 22734d0640eSAl Viro copied += n; 22834d0640eSAl Viro if (n != chunksize) 22934d0640eSAl Viro return copied; 230e63e1e5aSBadari Pulavarty offset = 0; 231e63e1e5aSBadari Pulavarty size -= chunksize; 232e63e1e5aSBadari Pulavarty i++; 233e63e1e5aSBadari Pulavarty } 23434d0640eSAl Viro return copied; 235e63e1e5aSBadari Pulavarty } 236e63e1e5aSBadari Pulavarty 237e63e1e5aSBadari Pulavarty /* 238e63e1e5aSBadari Pulavarty * Support for read() - Find the page attached to f_mapping and copy out the 239e63e1e5aSBadari Pulavarty * data. Its *very* similar to do_generic_mapping_read(), we can't use that 240ea1754a0SKirill A. Shutemov * since it has PAGE_SIZE assumptions. 241e63e1e5aSBadari Pulavarty */ 24234d0640eSAl Viro static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) 243e63e1e5aSBadari Pulavarty { 24434d0640eSAl Viro struct file *file = iocb->ki_filp; 24534d0640eSAl Viro struct hstate *h = hstate_file(file); 24634d0640eSAl Viro struct address_space *mapping = file->f_mapping; 247e63e1e5aSBadari Pulavarty struct inode *inode = mapping->host; 24834d0640eSAl Viro unsigned long index = iocb->ki_pos >> huge_page_shift(h); 24934d0640eSAl Viro unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); 250e63e1e5aSBadari Pulavarty unsigned long end_index; 251e63e1e5aSBadari Pulavarty loff_t isize; 252e63e1e5aSBadari Pulavarty ssize_t retval = 0; 253e63e1e5aSBadari Pulavarty 25434d0640eSAl Viro while (iov_iter_count(to)) { 255e63e1e5aSBadari Pulavarty struct page *page; 25634d0640eSAl Viro size_t nr, copied; 257e63e1e5aSBadari Pulavarty 258e63e1e5aSBadari Pulavarty /* nr is the maximum number of bytes to copy from this page */ 259a5516438SAndi Kleen nr = huge_page_size(h); 260a05b0855SAneesh Kumar K.V isize = i_size_read(inode); 261a05b0855SAneesh Kumar K.V if (!isize) 26234d0640eSAl Viro break; 263a05b0855SAneesh Kumar K.V end_index = (isize - 1) >> huge_page_shift(h); 264e63e1e5aSBadari Pulavarty if (index > end_index) 26534d0640eSAl Viro break; 26634d0640eSAl Viro if (index == end_index) { 267a5516438SAndi Kleen nr = ((isize - 1) & ~huge_page_mask(h)) + 1; 268a05b0855SAneesh Kumar K.V if (nr <= offset) 26934d0640eSAl Viro break; 270e63e1e5aSBadari Pulavarty } 271e63e1e5aSBadari Pulavarty nr = nr - offset; 272e63e1e5aSBadari Pulavarty 273e63e1e5aSBadari Pulavarty /* Find the page */ 274a05b0855SAneesh Kumar K.V page = find_lock_page(mapping, index); 275e63e1e5aSBadari Pulavarty if (unlikely(page == NULL)) { 276e63e1e5aSBadari Pulavarty /* 277e63e1e5aSBadari Pulavarty * We have a HOLE, zero out the user-buffer for the 278e63e1e5aSBadari Pulavarty * length of the hole or request. 279e63e1e5aSBadari Pulavarty */ 28034d0640eSAl Viro copied = iov_iter_zero(nr, to); 281e63e1e5aSBadari Pulavarty } else { 282a05b0855SAneesh Kumar K.V unlock_page(page); 283a05b0855SAneesh Kumar K.V 284e63e1e5aSBadari Pulavarty /* 285e63e1e5aSBadari Pulavarty * We have the page, copy it to user space buffer. 286e63e1e5aSBadari Pulavarty */ 28734d0640eSAl Viro copied = hugetlbfs_read_actor(page, offset, to, nr); 28809cbfeafSKirill A. Shutemov put_page(page); 289e63e1e5aSBadari Pulavarty } 29034d0640eSAl Viro offset += copied; 29134d0640eSAl Viro retval += copied; 29234d0640eSAl Viro if (copied != nr && iov_iter_count(to)) { 29334d0640eSAl Viro if (!retval) 29434d0640eSAl Viro retval = -EFAULT; 295e63e1e5aSBadari Pulavarty break; 296e63e1e5aSBadari Pulavarty } 29734d0640eSAl Viro index += offset >> huge_page_shift(h); 29834d0640eSAl Viro offset &= ~huge_page_mask(h); 29934d0640eSAl Viro } 30034d0640eSAl Viro iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; 301e63e1e5aSBadari Pulavarty return retval; 302e63e1e5aSBadari Pulavarty } 303e63e1e5aSBadari Pulavarty 304800d15a5SNick Piggin static int hugetlbfs_write_begin(struct file *file, 305800d15a5SNick Piggin struct address_space *mapping, 306800d15a5SNick Piggin loff_t pos, unsigned len, unsigned flags, 307800d15a5SNick Piggin struct page **pagep, void **fsdata) 3081da177e4SLinus Torvalds { 3091da177e4SLinus Torvalds return -EINVAL; 3101da177e4SLinus Torvalds } 3111da177e4SLinus Torvalds 312800d15a5SNick Piggin static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, 313800d15a5SNick Piggin loff_t pos, unsigned len, unsigned copied, 314800d15a5SNick Piggin struct page *page, void *fsdata) 3151da177e4SLinus Torvalds { 316800d15a5SNick Piggin BUG(); 3171da177e4SLinus Torvalds return -EINVAL; 3181da177e4SLinus Torvalds } 3191da177e4SLinus Torvalds 320b5cec28dSMike Kravetz static void remove_huge_page(struct page *page) 3211da177e4SLinus Torvalds { 322b9ea2515SKonstantin Khlebnikov ClearPageDirty(page); 3231da177e4SLinus Torvalds ClearPageUptodate(page); 324bd65cb86SMinchan Kim delete_from_page_cache(page); 3251da177e4SLinus Torvalds } 3261da177e4SLinus Torvalds 3274aae8d1cSMike Kravetz static void 3284aae8d1cSMike Kravetz hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end) 3294aae8d1cSMike Kravetz { 3304aae8d1cSMike Kravetz struct vm_area_struct *vma; 3314aae8d1cSMike Kravetz 3324aae8d1cSMike Kravetz /* 3334aae8d1cSMike Kravetz * end == 0 indicates that the entire range after 3344aae8d1cSMike Kravetz * start should be unmapped. 3354aae8d1cSMike Kravetz */ 3364aae8d1cSMike Kravetz vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) { 3374aae8d1cSMike Kravetz unsigned long v_offset; 3384aae8d1cSMike Kravetz unsigned long v_end; 3394aae8d1cSMike Kravetz 3404aae8d1cSMike Kravetz /* 3414aae8d1cSMike Kravetz * Can the expression below overflow on 32-bit arches? 3424aae8d1cSMike Kravetz * No, because the interval tree returns us only those vmas 3434aae8d1cSMike Kravetz * which overlap the truncated area starting at pgoff, 3444aae8d1cSMike Kravetz * and no vma on a 32-bit arch can span beyond the 4GB. 3454aae8d1cSMike Kravetz */ 3464aae8d1cSMike Kravetz if (vma->vm_pgoff < start) 3474aae8d1cSMike Kravetz v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT; 3484aae8d1cSMike Kravetz else 3494aae8d1cSMike Kravetz v_offset = 0; 3504aae8d1cSMike Kravetz 3514aae8d1cSMike Kravetz if (!end) 3524aae8d1cSMike Kravetz v_end = vma->vm_end; 3534aae8d1cSMike Kravetz else { 3544aae8d1cSMike Kravetz v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) 3554aae8d1cSMike Kravetz + vma->vm_start; 3564aae8d1cSMike Kravetz if (v_end > vma->vm_end) 3574aae8d1cSMike Kravetz v_end = vma->vm_end; 3584aae8d1cSMike Kravetz } 3594aae8d1cSMike Kravetz 3604aae8d1cSMike Kravetz unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end, 3614aae8d1cSMike Kravetz NULL); 3624aae8d1cSMike Kravetz } 3634aae8d1cSMike Kravetz } 364b5cec28dSMike Kravetz 365b5cec28dSMike Kravetz /* 366b5cec28dSMike Kravetz * remove_inode_hugepages handles two distinct cases: truncation and hole 367b5cec28dSMike Kravetz * punch. There are subtle differences in operation for each case. 3684aae8d1cSMike Kravetz * 369b5cec28dSMike Kravetz * truncation is indicated by end of range being LLONG_MAX 370b5cec28dSMike Kravetz * In this case, we first scan the range and release found pages. 371b5cec28dSMike Kravetz * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv 3721817889eSMike Kravetz * maps and global counts. Page faults can not race with truncation 3731817889eSMike Kravetz * in this routine. hugetlb_no_page() prevents page faults in the 3741817889eSMike Kravetz * truncated range. It checks i_size before allocation, and again after 3751817889eSMike Kravetz * with the page table lock for the page held. The same lock must be 3761817889eSMike Kravetz * acquired to unmap a page. 377b5cec28dSMike Kravetz * hole punch is indicated if end is not LLONG_MAX 378b5cec28dSMike Kravetz * In the hole punch case we scan the range and release found pages. 379b5cec28dSMike Kravetz * Only when releasing a page is the associated region/reserv map 380b5cec28dSMike Kravetz * deleted. The region/reserv map for ranges without associated 3811817889eSMike Kravetz * pages are not modified. Page faults can race with hole punch. 3821817889eSMike Kravetz * This is indicated if we find a mapped page. 383b5cec28dSMike Kravetz * Note: If the passed end of range value is beyond the end of file, but 384b5cec28dSMike Kravetz * not LLONG_MAX this routine still performs a hole punch operation. 385b5cec28dSMike Kravetz */ 386b5cec28dSMike Kravetz static void remove_inode_hugepages(struct inode *inode, loff_t lstart, 387b5cec28dSMike Kravetz loff_t lend) 3881da177e4SLinus Torvalds { 389a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 390b45b5bd6SDavid Gibson struct address_space *mapping = &inode->i_data; 391a5516438SAndi Kleen const pgoff_t start = lstart >> huge_page_shift(h); 392b5cec28dSMike Kravetz const pgoff_t end = lend >> huge_page_shift(h); 393b5cec28dSMike Kravetz struct vm_area_struct pseudo_vma; 3941da177e4SLinus Torvalds struct pagevec pvec; 3951da177e4SLinus Torvalds pgoff_t next; 396a43a8c39SChen, Kenneth W int i, freed = 0; 397b5cec28dSMike Kravetz long lookup_nr = PAGEVEC_SIZE; 398b5cec28dSMike Kravetz bool truncate_op = (lend == LLONG_MAX); 3991da177e4SLinus Torvalds 400b5cec28dSMike Kravetz memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); 401b5cec28dSMike Kravetz pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 4021da177e4SLinus Torvalds pagevec_init(&pvec, 0); 4031da177e4SLinus Torvalds next = start; 404b5cec28dSMike Kravetz while (next < end) { 405b5cec28dSMike Kravetz /* 4061817889eSMike Kravetz * Don't grab more pages than the number left in the range. 407b5cec28dSMike Kravetz */ 408b5cec28dSMike Kravetz if (end - next < lookup_nr) 409b5cec28dSMike Kravetz lookup_nr = end - next; 410b5cec28dSMike Kravetz 411b5cec28dSMike Kravetz /* 4121817889eSMike Kravetz * When no more pages are found, we are done. 413b5cec28dSMike Kravetz */ 4141817889eSMike Kravetz if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) 4151da177e4SLinus Torvalds break; 4161da177e4SLinus Torvalds 4171da177e4SLinus Torvalds for (i = 0; i < pagevec_count(&pvec); ++i) { 4181da177e4SLinus Torvalds struct page *page = pvec.pages[i]; 419b5cec28dSMike Kravetz u32 hash; 420b5cec28dSMike Kravetz 4211817889eSMike Kravetz /* 4221817889eSMike Kravetz * The page (index) could be beyond end. This is 4231817889eSMike Kravetz * only possible in the punch hole case as end is 4241817889eSMike Kravetz * max page offset in the truncate case. 4251817889eSMike Kravetz */ 4261817889eSMike Kravetz next = page->index; 4271817889eSMike Kravetz if (next >= end) 4281817889eSMike Kravetz break; 4291817889eSMike Kravetz 430b5cec28dSMike Kravetz hash = hugetlb_fault_mutex_hash(h, current->mm, 431b5cec28dSMike Kravetz &pseudo_vma, 432b5cec28dSMike Kravetz mapping, next, 0); 433b5cec28dSMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]); 4341da177e4SLinus Torvalds 435b5cec28dSMike Kravetz /* 4364aae8d1cSMike Kravetz * If page is mapped, it was faulted in after being 4374aae8d1cSMike Kravetz * unmapped in caller. Unmap (again) now after taking 4384aae8d1cSMike Kravetz * the fault mutex. The mutex will prevent faults 4394aae8d1cSMike Kravetz * until we finish removing the page. 4404aae8d1cSMike Kravetz * 4414aae8d1cSMike Kravetz * This race can only happen in the hole punch case. 4424aae8d1cSMike Kravetz * Getting here in a truncate operation is a bug. 443b5cec28dSMike Kravetz */ 4444aae8d1cSMike Kravetz if (unlikely(page_mapped(page))) { 4454aae8d1cSMike Kravetz BUG_ON(truncate_op); 4464aae8d1cSMike Kravetz 4474aae8d1cSMike Kravetz i_mmap_lock_write(mapping); 4484aae8d1cSMike Kravetz hugetlb_vmdelete_list(&mapping->i_mmap, 4494aae8d1cSMike Kravetz next * pages_per_huge_page(h), 4504aae8d1cSMike Kravetz (next + 1) * pages_per_huge_page(h)); 4514aae8d1cSMike Kravetz i_mmap_unlock_write(mapping); 4524aae8d1cSMike Kravetz } 4534aae8d1cSMike Kravetz 4544aae8d1cSMike Kravetz lock_page(page); 4554aae8d1cSMike Kravetz /* 4564aae8d1cSMike Kravetz * We must free the huge page and remove from page 4574aae8d1cSMike Kravetz * cache (remove_huge_page) BEFORE removing the 4584aae8d1cSMike Kravetz * region/reserve map (hugetlb_unreserve_pages). In 4594aae8d1cSMike Kravetz * rare out of memory conditions, removal of the 46072e2936cSzhong jiang * region/reserve map could fail. Correspondingly, 46172e2936cSzhong jiang * the subpool and global reserve usage count can need 46272e2936cSzhong jiang * to be adjusted. 4634aae8d1cSMike Kravetz */ 46472e2936cSzhong jiang VM_BUG_ON(PagePrivate(page)); 465b5cec28dSMike Kravetz remove_huge_page(page); 466b5cec28dSMike Kravetz freed++; 467b5cec28dSMike Kravetz if (!truncate_op) { 4684aae8d1cSMike Kravetz if (unlikely(hugetlb_unreserve_pages(inode, 4694aae8d1cSMike Kravetz next, next + 1, 1))) 47072e2936cSzhong jiang hugetlb_fix_reserve_counts(inode); 471b5cec28dSMike Kravetz } 472b5cec28dSMike Kravetz 4731da177e4SLinus Torvalds unlock_page(page); 474b5cec28dSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 4751da177e4SLinus Torvalds } 4761817889eSMike Kravetz ++next; 4771da177e4SLinus Torvalds huge_pagevec_release(&pvec); 4781817889eSMike Kravetz cond_resched(); 4791da177e4SLinus Torvalds } 480b5cec28dSMike Kravetz 481b5cec28dSMike Kravetz if (truncate_op) 482b5cec28dSMike Kravetz (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed); 4831da177e4SLinus Torvalds } 4841da177e4SLinus Torvalds 4852bbbda30SAl Viro static void hugetlbfs_evict_inode(struct inode *inode) 4861da177e4SLinus Torvalds { 4879119a41eSJoonsoo Kim struct resv_map *resv_map; 4889119a41eSJoonsoo Kim 489b5cec28dSMike Kravetz remove_inode_hugepages(inode, 0, LLONG_MAX); 4909119a41eSJoonsoo Kim resv_map = (struct resv_map *)inode->i_mapping->private_data; 4919119a41eSJoonsoo Kim /* root inode doesn't have the resv_map, so we should check it */ 4929119a41eSJoonsoo Kim if (resv_map) 4939119a41eSJoonsoo Kim resv_map_release(&resv_map->refs); 494dbd5768fSJan Kara clear_inode(inode); 495149f4211SChristoph Hellwig } 496149f4211SChristoph Hellwig 4971da177e4SLinus Torvalds static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) 4981da177e4SLinus Torvalds { 499856fc295SHugh Dickins pgoff_t pgoff; 5001da177e4SLinus Torvalds struct address_space *mapping = inode->i_mapping; 501a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 5021da177e4SLinus Torvalds 503a5516438SAndi Kleen BUG_ON(offset & ~huge_page_mask(h)); 504856fc295SHugh Dickins pgoff = offset >> PAGE_SHIFT; 5051da177e4SLinus Torvalds 5067aa91e10SKen Chen i_size_write(inode, offset); 50783cde9e8SDavidlohr Bueso i_mmap_lock_write(mapping); 5086b2dbba8SMichel Lespinasse if (!RB_EMPTY_ROOT(&mapping->i_mmap)) 5091bfad99aSMike Kravetz hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); 51083cde9e8SDavidlohr Bueso i_mmap_unlock_write(mapping); 511b5cec28dSMike Kravetz remove_inode_hugepages(inode, offset, LLONG_MAX); 5121da177e4SLinus Torvalds return 0; 5131da177e4SLinus Torvalds } 5141da177e4SLinus Torvalds 51570c3547eSMike Kravetz static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) 51670c3547eSMike Kravetz { 51770c3547eSMike Kravetz struct hstate *h = hstate_inode(inode); 51870c3547eSMike Kravetz loff_t hpage_size = huge_page_size(h); 51970c3547eSMike Kravetz loff_t hole_start, hole_end; 52070c3547eSMike Kravetz 52170c3547eSMike Kravetz /* 52270c3547eSMike Kravetz * For hole punch round up the beginning offset of the hole and 52370c3547eSMike Kravetz * round down the end. 52470c3547eSMike Kravetz */ 52570c3547eSMike Kravetz hole_start = round_up(offset, hpage_size); 52670c3547eSMike Kravetz hole_end = round_down(offset + len, hpage_size); 52770c3547eSMike Kravetz 52870c3547eSMike Kravetz if (hole_end > hole_start) { 52970c3547eSMike Kravetz struct address_space *mapping = inode->i_mapping; 53070c3547eSMike Kravetz 5315955102cSAl Viro inode_lock(inode); 53270c3547eSMike Kravetz i_mmap_lock_write(mapping); 53370c3547eSMike Kravetz if (!RB_EMPTY_ROOT(&mapping->i_mmap)) 53470c3547eSMike Kravetz hugetlb_vmdelete_list(&mapping->i_mmap, 53570c3547eSMike Kravetz hole_start >> PAGE_SHIFT, 53670c3547eSMike Kravetz hole_end >> PAGE_SHIFT); 53770c3547eSMike Kravetz i_mmap_unlock_write(mapping); 53870c3547eSMike Kravetz remove_inode_hugepages(inode, hole_start, hole_end); 5395955102cSAl Viro inode_unlock(inode); 54070c3547eSMike Kravetz } 54170c3547eSMike Kravetz 54270c3547eSMike Kravetz return 0; 54370c3547eSMike Kravetz } 54470c3547eSMike Kravetz 54570c3547eSMike Kravetz static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, 54670c3547eSMike Kravetz loff_t len) 54770c3547eSMike Kravetz { 54870c3547eSMike Kravetz struct inode *inode = file_inode(file); 54970c3547eSMike Kravetz struct address_space *mapping = inode->i_mapping; 55070c3547eSMike Kravetz struct hstate *h = hstate_inode(inode); 55170c3547eSMike Kravetz struct vm_area_struct pseudo_vma; 55270c3547eSMike Kravetz struct mm_struct *mm = current->mm; 55370c3547eSMike Kravetz loff_t hpage_size = huge_page_size(h); 55470c3547eSMike Kravetz unsigned long hpage_shift = huge_page_shift(h); 55570c3547eSMike Kravetz pgoff_t start, index, end; 55670c3547eSMike Kravetz int error; 55770c3547eSMike Kravetz u32 hash; 55870c3547eSMike Kravetz 55970c3547eSMike Kravetz if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 56070c3547eSMike Kravetz return -EOPNOTSUPP; 56170c3547eSMike Kravetz 56270c3547eSMike Kravetz if (mode & FALLOC_FL_PUNCH_HOLE) 56370c3547eSMike Kravetz return hugetlbfs_punch_hole(inode, offset, len); 56470c3547eSMike Kravetz 56570c3547eSMike Kravetz /* 56670c3547eSMike Kravetz * Default preallocate case. 56770c3547eSMike Kravetz * For this range, start is rounded down and end is rounded up 56870c3547eSMike Kravetz * as well as being converted to page offsets. 56970c3547eSMike Kravetz */ 57070c3547eSMike Kravetz start = offset >> hpage_shift; 57170c3547eSMike Kravetz end = (offset + len + hpage_size - 1) >> hpage_shift; 57270c3547eSMike Kravetz 5735955102cSAl Viro inode_lock(inode); 57470c3547eSMike Kravetz 57570c3547eSMike Kravetz /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 57670c3547eSMike Kravetz error = inode_newsize_ok(inode, offset + len); 57770c3547eSMike Kravetz if (error) 57870c3547eSMike Kravetz goto out; 57970c3547eSMike Kravetz 58070c3547eSMike Kravetz /* 58170c3547eSMike Kravetz * Initialize a pseudo vma as this is required by the huge page 58270c3547eSMike Kravetz * allocation routines. If NUMA is configured, use page index 58370c3547eSMike Kravetz * as input to create an allocation policy. 58470c3547eSMike Kravetz */ 58570c3547eSMike Kravetz memset(&pseudo_vma, 0, sizeof(struct vm_area_struct)); 58670c3547eSMike Kravetz pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED); 58770c3547eSMike Kravetz pseudo_vma.vm_file = file; 58870c3547eSMike Kravetz 58970c3547eSMike Kravetz for (index = start; index < end; index++) { 59070c3547eSMike Kravetz /* 59170c3547eSMike Kravetz * This is supposed to be the vaddr where the page is being 59270c3547eSMike Kravetz * faulted in, but we have no vaddr here. 59370c3547eSMike Kravetz */ 59470c3547eSMike Kravetz struct page *page; 59570c3547eSMike Kravetz unsigned long addr; 59670c3547eSMike Kravetz int avoid_reserve = 0; 59770c3547eSMike Kravetz 59870c3547eSMike Kravetz cond_resched(); 59970c3547eSMike Kravetz 60070c3547eSMike Kravetz /* 60170c3547eSMike Kravetz * fallocate(2) manpage permits EINTR; we may have been 60270c3547eSMike Kravetz * interrupted because we are using up too much memory. 60370c3547eSMike Kravetz */ 60470c3547eSMike Kravetz if (signal_pending(current)) { 60570c3547eSMike Kravetz error = -EINTR; 60670c3547eSMike Kravetz break; 60770c3547eSMike Kravetz } 60870c3547eSMike Kravetz 60970c3547eSMike Kravetz /* Set numa allocation policy based on index */ 61070c3547eSMike Kravetz hugetlb_set_vma_policy(&pseudo_vma, inode, index); 61170c3547eSMike Kravetz 61270c3547eSMike Kravetz /* addr is the offset within the file (zero based) */ 61370c3547eSMike Kravetz addr = index * hpage_size; 61470c3547eSMike Kravetz 61570c3547eSMike Kravetz /* mutex taken here, fault path and hole punch */ 61670c3547eSMike Kravetz hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, 61770c3547eSMike Kravetz index, addr); 61870c3547eSMike Kravetz mutex_lock(&hugetlb_fault_mutex_table[hash]); 61970c3547eSMike Kravetz 62070c3547eSMike Kravetz /* See if already present in mapping to avoid alloc/free */ 62170c3547eSMike Kravetz page = find_get_page(mapping, index); 62270c3547eSMike Kravetz if (page) { 62370c3547eSMike Kravetz put_page(page); 62470c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 62570c3547eSMike Kravetz hugetlb_drop_vma_policy(&pseudo_vma); 62670c3547eSMike Kravetz continue; 62770c3547eSMike Kravetz } 62870c3547eSMike Kravetz 62970c3547eSMike Kravetz /* Allocate page and add to page cache */ 63070c3547eSMike Kravetz page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve); 63170c3547eSMike Kravetz hugetlb_drop_vma_policy(&pseudo_vma); 63270c3547eSMike Kravetz if (IS_ERR(page)) { 63370c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 63470c3547eSMike Kravetz error = PTR_ERR(page); 63570c3547eSMike Kravetz goto out; 63670c3547eSMike Kravetz } 63770c3547eSMike Kravetz clear_huge_page(page, addr, pages_per_huge_page(h)); 63870c3547eSMike Kravetz __SetPageUptodate(page); 63970c3547eSMike Kravetz error = huge_add_to_page_cache(page, mapping, index); 64070c3547eSMike Kravetz if (unlikely(error)) { 64170c3547eSMike Kravetz put_page(page); 64270c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 64370c3547eSMike Kravetz goto out; 64470c3547eSMike Kravetz } 64570c3547eSMike Kravetz 64670c3547eSMike Kravetz mutex_unlock(&hugetlb_fault_mutex_table[hash]); 64770c3547eSMike Kravetz 64870c3547eSMike Kravetz /* 64970c3547eSMike Kravetz * page_put due to reference from alloc_huge_page() 65070c3547eSMike Kravetz * unlock_page because locked by add_to_page_cache() 65170c3547eSMike Kravetz */ 65270c3547eSMike Kravetz put_page(page); 65370c3547eSMike Kravetz unlock_page(page); 65470c3547eSMike Kravetz } 65570c3547eSMike Kravetz 65670c3547eSMike Kravetz if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 65770c3547eSMike Kravetz i_size_write(inode, offset + len); 658078cd827SDeepa Dinamani inode->i_ctime = current_time(inode); 65970c3547eSMike Kravetz out: 6605955102cSAl Viro inode_unlock(inode); 66170c3547eSMike Kravetz return error; 66270c3547eSMike Kravetz } 66370c3547eSMike Kravetz 6641da177e4SLinus Torvalds static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) 6651da177e4SLinus Torvalds { 6662b0143b5SDavid Howells struct inode *inode = d_inode(dentry); 667a5516438SAndi Kleen struct hstate *h = hstate_inode(inode); 6681da177e4SLinus Torvalds int error; 6691da177e4SLinus Torvalds unsigned int ia_valid = attr->ia_valid; 6701da177e4SLinus Torvalds 6711da177e4SLinus Torvalds BUG_ON(!inode); 6721da177e4SLinus Torvalds 67331051c85SJan Kara error = setattr_prepare(dentry, attr); 6741da177e4SLinus Torvalds if (error) 6751025774cSChristoph Hellwig return error; 6761da177e4SLinus Torvalds 6771da177e4SLinus Torvalds if (ia_valid & ATTR_SIZE) { 6781da177e4SLinus Torvalds error = -EINVAL; 6791025774cSChristoph Hellwig if (attr->ia_size & ~huge_page_mask(h)) 6801025774cSChristoph Hellwig return -EINVAL; 6811da177e4SLinus Torvalds error = hugetlb_vmtruncate(inode, attr->ia_size); 6821da177e4SLinus Torvalds if (error) 6831da177e4SLinus Torvalds return error; 6841da177e4SLinus Torvalds } 6851da177e4SLinus Torvalds 6861025774cSChristoph Hellwig setattr_copy(inode, attr); 6871025774cSChristoph Hellwig mark_inode_dirty(inode); 6881025774cSChristoph Hellwig return 0; 6891025774cSChristoph Hellwig } 6901025774cSChristoph Hellwig 6917d54fa64SAl Viro static struct inode *hugetlbfs_get_root(struct super_block *sb, 6927d54fa64SAl Viro struct hugetlbfs_config *config) 6931da177e4SLinus Torvalds { 6941da177e4SLinus Torvalds struct inode *inode; 6951da177e4SLinus Torvalds 6961da177e4SLinus Torvalds inode = new_inode(sb); 6971da177e4SLinus Torvalds if (inode) { 69885fe4025SChristoph Hellwig inode->i_ino = get_next_ino(); 6997d54fa64SAl Viro inode->i_mode = S_IFDIR | config->mode; 7007d54fa64SAl Viro inode->i_uid = config->uid; 7017d54fa64SAl Viro inode->i_gid = config->gid; 702078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 7037d54fa64SAl Viro inode->i_op = &hugetlbfs_dir_inode_operations; 7047d54fa64SAl Viro inode->i_fop = &simple_dir_operations; 7057d54fa64SAl Viro /* directory inodes start off with i_nlink == 2 (for "." entry) */ 7067d54fa64SAl Viro inc_nlink(inode); 70765ed7601SAneesh Kumar K.V lockdep_annotate_inode_mutex_key(inode); 7087d54fa64SAl Viro } 7097d54fa64SAl Viro return inode; 7107d54fa64SAl Viro } 7117d54fa64SAl Viro 712b610ded7SMichal Hocko /* 713c8c06efaSDavidlohr Bueso * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never 714b610ded7SMichal Hocko * be taken from reclaim -- unlike regular filesystems. This needs an 71588f306b6SKirill A. Shutemov * annotation because huge_pmd_share() does an allocation under hugetlb's 716c8c06efaSDavidlohr Bueso * i_mmap_rwsem. 717b610ded7SMichal Hocko */ 718c8c06efaSDavidlohr Bueso static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; 719b610ded7SMichal Hocko 7207d54fa64SAl Viro static struct inode *hugetlbfs_get_inode(struct super_block *sb, 7217d54fa64SAl Viro struct inode *dir, 72218df2252SAl Viro umode_t mode, dev_t dev) 7237d54fa64SAl Viro { 7247d54fa64SAl Viro struct inode *inode; 7259119a41eSJoonsoo Kim struct resv_map *resv_map; 7269119a41eSJoonsoo Kim 7279119a41eSJoonsoo Kim resv_map = resv_map_alloc(); 7289119a41eSJoonsoo Kim if (!resv_map) 7299119a41eSJoonsoo Kim return NULL; 7307d54fa64SAl Viro 7317d54fa64SAl Viro inode = new_inode(sb); 7327d54fa64SAl Viro if (inode) { 7337d54fa64SAl Viro inode->i_ino = get_next_ino(); 7347d54fa64SAl Viro inode_init_owner(inode, dir, mode); 735c8c06efaSDavidlohr Bueso lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, 736c8c06efaSDavidlohr Bueso &hugetlbfs_i_mmap_rwsem_key); 7371da177e4SLinus Torvalds inode->i_mapping->a_ops = &hugetlbfs_aops; 738078cd827SDeepa Dinamani inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 7399119a41eSJoonsoo Kim inode->i_mapping->private_data = resv_map; 7401da177e4SLinus Torvalds switch (mode & S_IFMT) { 7411da177e4SLinus Torvalds default: 7421da177e4SLinus Torvalds init_special_inode(inode, mode, dev); 7431da177e4SLinus Torvalds break; 7441da177e4SLinus Torvalds case S_IFREG: 7451da177e4SLinus Torvalds inode->i_op = &hugetlbfs_inode_operations; 7461da177e4SLinus Torvalds inode->i_fop = &hugetlbfs_file_operations; 7471da177e4SLinus Torvalds break; 7481da177e4SLinus Torvalds case S_IFDIR: 7491da177e4SLinus Torvalds inode->i_op = &hugetlbfs_dir_inode_operations; 7501da177e4SLinus Torvalds inode->i_fop = &simple_dir_operations; 7511da177e4SLinus Torvalds 7521da177e4SLinus Torvalds /* directory inodes start off with i_nlink == 2 (for "." entry) */ 753d8c76e6fSDave Hansen inc_nlink(inode); 7541da177e4SLinus Torvalds break; 7551da177e4SLinus Torvalds case S_IFLNK: 7561da177e4SLinus Torvalds inode->i_op = &page_symlink_inode_operations; 75721fc61c7SAl Viro inode_nohighmem(inode); 7581da177e4SLinus Torvalds break; 7591da177e4SLinus Torvalds } 760e096d0c7SJosh Boyer lockdep_annotate_inode_mutex_key(inode); 7619119a41eSJoonsoo Kim } else 7629119a41eSJoonsoo Kim kref_put(&resv_map->refs, resv_map_release); 7639119a41eSJoonsoo Kim 7641da177e4SLinus Torvalds return inode; 7651da177e4SLinus Torvalds } 7661da177e4SLinus Torvalds 7671da177e4SLinus Torvalds /* 7681da177e4SLinus Torvalds * File creation. Allocate an inode, and we're done.. 7691da177e4SLinus Torvalds */ 7701da177e4SLinus Torvalds static int hugetlbfs_mknod(struct inode *dir, 7711a67aafbSAl Viro struct dentry *dentry, umode_t mode, dev_t dev) 7721da177e4SLinus Torvalds { 7731da177e4SLinus Torvalds struct inode *inode; 7741da177e4SLinus Torvalds int error = -ENOSPC; 7751da177e4SLinus Torvalds 7767d54fa64SAl Viro inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev); 7771da177e4SLinus Torvalds if (inode) { 778078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 7791da177e4SLinus Torvalds d_instantiate(dentry, inode); 7801da177e4SLinus Torvalds dget(dentry); /* Extra count - pin the dentry in core */ 7811da177e4SLinus Torvalds error = 0; 7821da177e4SLinus Torvalds } 7831da177e4SLinus Torvalds return error; 7841da177e4SLinus Torvalds } 7851da177e4SLinus Torvalds 78618bb1db3SAl Viro static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 7871da177e4SLinus Torvalds { 7881da177e4SLinus Torvalds int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0); 7891da177e4SLinus Torvalds if (!retval) 790d8c76e6fSDave Hansen inc_nlink(dir); 7911da177e4SLinus Torvalds return retval; 7921da177e4SLinus Torvalds } 7931da177e4SLinus Torvalds 794ebfc3b49SAl Viro static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) 7951da177e4SLinus Torvalds { 7961da177e4SLinus Torvalds return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0); 7971da177e4SLinus Torvalds } 7981da177e4SLinus Torvalds 7991da177e4SLinus Torvalds static int hugetlbfs_symlink(struct inode *dir, 8001da177e4SLinus Torvalds struct dentry *dentry, const char *symname) 8011da177e4SLinus Torvalds { 8021da177e4SLinus Torvalds struct inode *inode; 8031da177e4SLinus Torvalds int error = -ENOSPC; 8041da177e4SLinus Torvalds 8057d54fa64SAl Viro inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); 8061da177e4SLinus Torvalds if (inode) { 8071da177e4SLinus Torvalds int l = strlen(symname)+1; 8081da177e4SLinus Torvalds error = page_symlink(inode, symname, l); 8091da177e4SLinus Torvalds if (!error) { 8101da177e4SLinus Torvalds d_instantiate(dentry, inode); 8111da177e4SLinus Torvalds dget(dentry); 8121da177e4SLinus Torvalds } else 8131da177e4SLinus Torvalds iput(inode); 8141da177e4SLinus Torvalds } 815078cd827SDeepa Dinamani dir->i_ctime = dir->i_mtime = current_time(dir); 8161da177e4SLinus Torvalds 8171da177e4SLinus Torvalds return error; 8181da177e4SLinus Torvalds } 8191da177e4SLinus Torvalds 8201da177e4SLinus Torvalds /* 8216649a386SKen Chen * mark the head page dirty 8221da177e4SLinus Torvalds */ 8231da177e4SLinus Torvalds static int hugetlbfs_set_page_dirty(struct page *page) 8241da177e4SLinus Torvalds { 825d85f3385SChristoph Lameter struct page *head = compound_head(page); 8266649a386SKen Chen 8276649a386SKen Chen SetPageDirty(head); 8281da177e4SLinus Torvalds return 0; 8291da177e4SLinus Torvalds } 8301da177e4SLinus Torvalds 831290408d4SNaoya Horiguchi static int hugetlbfs_migrate_page(struct address_space *mapping, 832b969c4abSMel Gorman struct page *newpage, struct page *page, 833a6bc32b8SMel Gorman enum migrate_mode mode) 834290408d4SNaoya Horiguchi { 835290408d4SNaoya Horiguchi int rc; 836290408d4SNaoya Horiguchi 837290408d4SNaoya Horiguchi rc = migrate_huge_page_move_mapping(mapping, newpage, page); 83878bd5209SRafael Aquini if (rc != MIGRATEPAGE_SUCCESS) 839290408d4SNaoya Horiguchi return rc; 840290408d4SNaoya Horiguchi migrate_page_copy(newpage, page); 841290408d4SNaoya Horiguchi 84278bd5209SRafael Aquini return MIGRATEPAGE_SUCCESS; 843290408d4SNaoya Horiguchi } 844290408d4SNaoya Horiguchi 845726c3342SDavid Howells static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) 8461da177e4SLinus Torvalds { 847726c3342SDavid Howells struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); 8482b0143b5SDavid Howells struct hstate *h = hstate_inode(d_inode(dentry)); 8491da177e4SLinus Torvalds 8501da177e4SLinus Torvalds buf->f_type = HUGETLBFS_MAGIC; 851a5516438SAndi Kleen buf->f_bsize = huge_page_size(h); 8521da177e4SLinus Torvalds if (sbinfo) { 8531da177e4SLinus Torvalds spin_lock(&sbinfo->stat_lock); 85474a8a65cSDavid Gibson /* If no limits set, just report 0 for max/free/used 85574a8a65cSDavid Gibson * blocks, like simple_statfs() */ 85690481622SDavid Gibson if (sbinfo->spool) { 85790481622SDavid Gibson long free_pages; 85890481622SDavid Gibson 85990481622SDavid Gibson spin_lock(&sbinfo->spool->lock); 86090481622SDavid Gibson buf->f_blocks = sbinfo->spool->max_hpages; 86190481622SDavid Gibson free_pages = sbinfo->spool->max_hpages 86290481622SDavid Gibson - sbinfo->spool->used_hpages; 86390481622SDavid Gibson buf->f_bavail = buf->f_bfree = free_pages; 86490481622SDavid Gibson spin_unlock(&sbinfo->spool->lock); 8651da177e4SLinus Torvalds buf->f_files = sbinfo->max_inodes; 8661da177e4SLinus Torvalds buf->f_ffree = sbinfo->free_inodes; 86774a8a65cSDavid Gibson } 8681da177e4SLinus Torvalds spin_unlock(&sbinfo->stat_lock); 8691da177e4SLinus Torvalds } 8701da177e4SLinus Torvalds buf->f_namelen = NAME_MAX; 8711da177e4SLinus Torvalds return 0; 8721da177e4SLinus Torvalds } 8731da177e4SLinus Torvalds 8741da177e4SLinus Torvalds static void hugetlbfs_put_super(struct super_block *sb) 8751da177e4SLinus Torvalds { 8761da177e4SLinus Torvalds struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); 8771da177e4SLinus Torvalds 8781da177e4SLinus Torvalds if (sbi) { 8791da177e4SLinus Torvalds sb->s_fs_info = NULL; 88090481622SDavid Gibson 88190481622SDavid Gibson if (sbi->spool) 88290481622SDavid Gibson hugepage_put_subpool(sbi->spool); 88390481622SDavid Gibson 8841da177e4SLinus Torvalds kfree(sbi); 8851da177e4SLinus Torvalds } 8861da177e4SLinus Torvalds } 8871da177e4SLinus Torvalds 88896527980SChristoph Hellwig static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) 88996527980SChristoph Hellwig { 89096527980SChristoph Hellwig if (sbinfo->free_inodes >= 0) { 89196527980SChristoph Hellwig spin_lock(&sbinfo->stat_lock); 89296527980SChristoph Hellwig if (unlikely(!sbinfo->free_inodes)) { 89396527980SChristoph Hellwig spin_unlock(&sbinfo->stat_lock); 89496527980SChristoph Hellwig return 0; 89596527980SChristoph Hellwig } 89696527980SChristoph Hellwig sbinfo->free_inodes--; 89796527980SChristoph Hellwig spin_unlock(&sbinfo->stat_lock); 89896527980SChristoph Hellwig } 89996527980SChristoph Hellwig 90096527980SChristoph Hellwig return 1; 90196527980SChristoph Hellwig } 90296527980SChristoph Hellwig 90396527980SChristoph Hellwig static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) 90496527980SChristoph Hellwig { 90596527980SChristoph Hellwig if (sbinfo->free_inodes >= 0) { 90696527980SChristoph Hellwig spin_lock(&sbinfo->stat_lock); 90796527980SChristoph Hellwig sbinfo->free_inodes++; 90896527980SChristoph Hellwig spin_unlock(&sbinfo->stat_lock); 90996527980SChristoph Hellwig } 91096527980SChristoph Hellwig } 91196527980SChristoph Hellwig 91296527980SChristoph Hellwig 913e18b890bSChristoph Lameter static struct kmem_cache *hugetlbfs_inode_cachep; 9141da177e4SLinus Torvalds 9151da177e4SLinus Torvalds static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) 9161da177e4SLinus Torvalds { 91796527980SChristoph Hellwig struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); 9181da177e4SLinus Torvalds struct hugetlbfs_inode_info *p; 9191da177e4SLinus Torvalds 92096527980SChristoph Hellwig if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) 9211da177e4SLinus Torvalds return NULL; 922e94b1766SChristoph Lameter p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); 92396527980SChristoph Hellwig if (unlikely(!p)) { 92496527980SChristoph Hellwig hugetlbfs_inc_free_inodes(sbinfo); 92596527980SChristoph Hellwig return NULL; 9261da177e4SLinus Torvalds } 927*4742a35dSMike Kravetz 928*4742a35dSMike Kravetz /* 929*4742a35dSMike Kravetz * Any time after allocation, hugetlbfs_destroy_inode can be called 930*4742a35dSMike Kravetz * for the inode. mpol_free_shared_policy is unconditionally called 931*4742a35dSMike Kravetz * as part of hugetlbfs_destroy_inode. So, initialize policy here 932*4742a35dSMike Kravetz * in case of a quick call to destroy. 933*4742a35dSMike Kravetz * 934*4742a35dSMike Kravetz * Note that the policy is initialized even if we are creating a 935*4742a35dSMike Kravetz * private inode. This simplifies hugetlbfs_destroy_inode. 936*4742a35dSMike Kravetz */ 937*4742a35dSMike Kravetz mpol_shared_policy_init(&p->policy, NULL); 938*4742a35dSMike Kravetz 93996527980SChristoph Hellwig return &p->vfs_inode; 9401da177e4SLinus Torvalds } 9411da177e4SLinus Torvalds 942fa0d7e3dSNick Piggin static void hugetlbfs_i_callback(struct rcu_head *head) 943fa0d7e3dSNick Piggin { 944fa0d7e3dSNick Piggin struct inode *inode = container_of(head, struct inode, i_rcu); 945fa0d7e3dSNick Piggin kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); 946fa0d7e3dSNick Piggin } 947fa0d7e3dSNick Piggin 9481da177e4SLinus Torvalds static void hugetlbfs_destroy_inode(struct inode *inode) 9491da177e4SLinus Torvalds { 95096527980SChristoph Hellwig hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); 9511da177e4SLinus Torvalds mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); 952fa0d7e3dSNick Piggin call_rcu(&inode->i_rcu, hugetlbfs_i_callback); 9531da177e4SLinus Torvalds } 9541da177e4SLinus Torvalds 955f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops = { 956800d15a5SNick Piggin .write_begin = hugetlbfs_write_begin, 957800d15a5SNick Piggin .write_end = hugetlbfs_write_end, 9581da177e4SLinus Torvalds .set_page_dirty = hugetlbfs_set_page_dirty, 959290408d4SNaoya Horiguchi .migratepage = hugetlbfs_migrate_page, 9601da177e4SLinus Torvalds }; 9611da177e4SLinus Torvalds 96296527980SChristoph Hellwig 96351cc5068SAlexey Dobriyan static void init_once(void *foo) 96496527980SChristoph Hellwig { 96596527980SChristoph Hellwig struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 96696527980SChristoph Hellwig 96796527980SChristoph Hellwig inode_init_once(&ei->vfs_inode); 96896527980SChristoph Hellwig } 96996527980SChristoph Hellwig 9704b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations = { 97134d0640eSAl Viro .read_iter = hugetlbfs_read_iter, 9721da177e4SLinus Torvalds .mmap = hugetlbfs_file_mmap, 9731b061d92SChristoph Hellwig .fsync = noop_fsync, 9741da177e4SLinus Torvalds .get_unmapped_area = hugetlb_get_unmapped_area, 9756038f373SArnd Bergmann .llseek = default_llseek, 97670c3547eSMike Kravetz .fallocate = hugetlbfs_fallocate, 9771da177e4SLinus Torvalds }; 9781da177e4SLinus Torvalds 97992e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations = { 9801da177e4SLinus Torvalds .create = hugetlbfs_create, 9811da177e4SLinus Torvalds .lookup = simple_lookup, 9821da177e4SLinus Torvalds .link = simple_link, 9831da177e4SLinus Torvalds .unlink = simple_unlink, 9841da177e4SLinus Torvalds .symlink = hugetlbfs_symlink, 9851da177e4SLinus Torvalds .mkdir = hugetlbfs_mkdir, 9861da177e4SLinus Torvalds .rmdir = simple_rmdir, 9871da177e4SLinus Torvalds .mknod = hugetlbfs_mknod, 9881da177e4SLinus Torvalds .rename = simple_rename, 9891da177e4SLinus Torvalds .setattr = hugetlbfs_setattr, 9901da177e4SLinus Torvalds }; 9911da177e4SLinus Torvalds 99292e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations = { 9931da177e4SLinus Torvalds .setattr = hugetlbfs_setattr, 9941da177e4SLinus Torvalds }; 9951da177e4SLinus Torvalds 996ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops = { 9971da177e4SLinus Torvalds .alloc_inode = hugetlbfs_alloc_inode, 9981da177e4SLinus Torvalds .destroy_inode = hugetlbfs_destroy_inode, 9992bbbda30SAl Viro .evict_inode = hugetlbfs_evict_inode, 10001da177e4SLinus Torvalds .statfs = hugetlbfs_statfs, 10011da177e4SLinus Torvalds .put_super = hugetlbfs_put_super, 100210f19a86SMiklos Szeredi .show_options = generic_show_options, 10031da177e4SLinus Torvalds }; 10041da177e4SLinus Torvalds 10057ca02d0aSMike Kravetz enum { NO_SIZE, SIZE_STD, SIZE_PERCENT }; 10067ca02d0aSMike Kravetz 10077ca02d0aSMike Kravetz /* 10087ca02d0aSMike Kravetz * Convert size option passed from command line to number of huge pages 10097ca02d0aSMike Kravetz * in the pool specified by hstate. Size option could be in bytes 10107ca02d0aSMike Kravetz * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). 10117ca02d0aSMike Kravetz */ 10127ca02d0aSMike Kravetz static long long 10137ca02d0aSMike Kravetz hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, 10147ca02d0aSMike Kravetz int val_type) 10157ca02d0aSMike Kravetz { 10167ca02d0aSMike Kravetz if (val_type == NO_SIZE) 10177ca02d0aSMike Kravetz return -1; 10187ca02d0aSMike Kravetz 10197ca02d0aSMike Kravetz if (val_type == SIZE_PERCENT) { 10207ca02d0aSMike Kravetz size_opt <<= huge_page_shift(h); 10217ca02d0aSMike Kravetz size_opt *= h->max_huge_pages; 10227ca02d0aSMike Kravetz do_div(size_opt, 100); 10237ca02d0aSMike Kravetz } 10247ca02d0aSMike Kravetz 10257ca02d0aSMike Kravetz size_opt >>= huge_page_shift(h); 10267ca02d0aSMike Kravetz return size_opt; 10277ca02d0aSMike Kravetz } 10287ca02d0aSMike Kravetz 10291da177e4SLinus Torvalds static int 10301da177e4SLinus Torvalds hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) 10311da177e4SLinus Torvalds { 1032e73a75faSRandy Dunlap char *p, *rest; 1033e73a75faSRandy Dunlap substring_t args[MAX_OPT_ARGS]; 1034e73a75faSRandy Dunlap int option; 10357ca02d0aSMike Kravetz unsigned long long max_size_opt = 0, min_size_opt = 0; 10367ca02d0aSMike Kravetz int max_val_type = NO_SIZE, min_val_type = NO_SIZE; 10371da177e4SLinus Torvalds 10381da177e4SLinus Torvalds if (!options) 10391da177e4SLinus Torvalds return 0; 10401da177e4SLinus Torvalds 1041e73a75faSRandy Dunlap while ((p = strsep(&options, ",")) != NULL) { 1042e73a75faSRandy Dunlap int token; 1043b4c07bceSLee Schermerhorn if (!*p) 1044b4c07bceSLee Schermerhorn continue; 10451da177e4SLinus Torvalds 1046e73a75faSRandy Dunlap token = match_token(p, tokens, args); 1047e73a75faSRandy Dunlap switch (token) { 1048e73a75faSRandy Dunlap case Opt_uid: 1049e73a75faSRandy Dunlap if (match_int(&args[0], &option)) 1050e73a75faSRandy Dunlap goto bad_val; 1051a0eb3a05SEric W. Biederman pconfig->uid = make_kuid(current_user_ns(), option); 1052a0eb3a05SEric W. Biederman if (!uid_valid(pconfig->uid)) 1053a0eb3a05SEric W. Biederman goto bad_val; 1054e73a75faSRandy Dunlap break; 1055e73a75faSRandy Dunlap 1056e73a75faSRandy Dunlap case Opt_gid: 1057e73a75faSRandy Dunlap if (match_int(&args[0], &option)) 1058e73a75faSRandy Dunlap goto bad_val; 1059a0eb3a05SEric W. Biederman pconfig->gid = make_kgid(current_user_ns(), option); 1060a0eb3a05SEric W. Biederman if (!gid_valid(pconfig->gid)) 1061a0eb3a05SEric W. Biederman goto bad_val; 1062e73a75faSRandy Dunlap break; 1063e73a75faSRandy Dunlap 1064e73a75faSRandy Dunlap case Opt_mode: 1065e73a75faSRandy Dunlap if (match_octal(&args[0], &option)) 1066e73a75faSRandy Dunlap goto bad_val; 106775897d60SKen Chen pconfig->mode = option & 01777U; 1068e73a75faSRandy Dunlap break; 1069e73a75faSRandy Dunlap 1070e73a75faSRandy Dunlap case Opt_size: { 1071e73a75faSRandy Dunlap /* memparse() will accept a K/M/G without a digit */ 1072e73a75faSRandy Dunlap if (!isdigit(*args[0].from)) 1073e73a75faSRandy Dunlap goto bad_val; 10747ca02d0aSMike Kravetz max_size_opt = memparse(args[0].from, &rest); 10757ca02d0aSMike Kravetz max_val_type = SIZE_STD; 1076a137e1ccSAndi Kleen if (*rest == '%') 10777ca02d0aSMike Kravetz max_val_type = SIZE_PERCENT; 1078e73a75faSRandy Dunlap break; 1079e73a75faSRandy Dunlap } 10801da177e4SLinus Torvalds 1081e73a75faSRandy Dunlap case Opt_nr_inodes: 1082e73a75faSRandy Dunlap /* memparse() will accept a K/M/G without a digit */ 1083e73a75faSRandy Dunlap if (!isdigit(*args[0].from)) 1084e73a75faSRandy Dunlap goto bad_val; 1085e73a75faSRandy Dunlap pconfig->nr_inodes = memparse(args[0].from, &rest); 1086e73a75faSRandy Dunlap break; 1087e73a75faSRandy Dunlap 1088a137e1ccSAndi Kleen case Opt_pagesize: { 1089a137e1ccSAndi Kleen unsigned long ps; 1090a137e1ccSAndi Kleen ps = memparse(args[0].from, &rest); 1091a137e1ccSAndi Kleen pconfig->hstate = size_to_hstate(ps); 1092a137e1ccSAndi Kleen if (!pconfig->hstate) { 10939b857d26SAndrew Morton pr_err("Unsupported page size %lu MB\n", 1094a137e1ccSAndi Kleen ps >> 20); 1095a137e1ccSAndi Kleen return -EINVAL; 1096a137e1ccSAndi Kleen } 1097a137e1ccSAndi Kleen break; 1098a137e1ccSAndi Kleen } 1099a137e1ccSAndi Kleen 11007ca02d0aSMike Kravetz case Opt_min_size: { 11017ca02d0aSMike Kravetz /* memparse() will accept a K/M/G without a digit */ 11027ca02d0aSMike Kravetz if (!isdigit(*args[0].from)) 11037ca02d0aSMike Kravetz goto bad_val; 11047ca02d0aSMike Kravetz min_size_opt = memparse(args[0].from, &rest); 11057ca02d0aSMike Kravetz min_val_type = SIZE_STD; 11067ca02d0aSMike Kravetz if (*rest == '%') 11077ca02d0aSMike Kravetz min_val_type = SIZE_PERCENT; 11087ca02d0aSMike Kravetz break; 11097ca02d0aSMike Kravetz } 11107ca02d0aSMike Kravetz 1111e73a75faSRandy Dunlap default: 11129b857d26SAndrew Morton pr_err("Bad mount option: \"%s\"\n", p); 1113b4c07bceSLee Schermerhorn return -EINVAL; 1114e73a75faSRandy Dunlap break; 1115e73a75faSRandy Dunlap } 11161da177e4SLinus Torvalds } 1117a137e1ccSAndi Kleen 11187ca02d0aSMike Kravetz /* 11197ca02d0aSMike Kravetz * Use huge page pool size (in hstate) to convert the size 11207ca02d0aSMike Kravetz * options to number of huge pages. If NO_SIZE, -1 is returned. 11217ca02d0aSMike Kravetz */ 11227ca02d0aSMike Kravetz pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate, 11237ca02d0aSMike Kravetz max_size_opt, max_val_type); 11247ca02d0aSMike Kravetz pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate, 11257ca02d0aSMike Kravetz min_size_opt, min_val_type); 11267ca02d0aSMike Kravetz 11277ca02d0aSMike Kravetz /* 11287ca02d0aSMike Kravetz * If max_size was specified, then min_size must be smaller 11297ca02d0aSMike Kravetz */ 11307ca02d0aSMike Kravetz if (max_val_type > NO_SIZE && 11317ca02d0aSMike Kravetz pconfig->min_hpages > pconfig->max_hpages) { 11327ca02d0aSMike Kravetz pr_err("minimum size can not be greater than maximum size\n"); 11337ca02d0aSMike Kravetz return -EINVAL; 1134a137e1ccSAndi Kleen } 1135a137e1ccSAndi Kleen 11361da177e4SLinus Torvalds return 0; 1137e73a75faSRandy Dunlap 1138e73a75faSRandy Dunlap bad_val: 11399b857d26SAndrew Morton pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p); 1140c12ddba0SAkinobu Mita return -EINVAL; 11411da177e4SLinus Torvalds } 11421da177e4SLinus Torvalds 11431da177e4SLinus Torvalds static int 11441da177e4SLinus Torvalds hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) 11451da177e4SLinus Torvalds { 11461da177e4SLinus Torvalds int ret; 11471da177e4SLinus Torvalds struct hugetlbfs_config config; 11481da177e4SLinus Torvalds struct hugetlbfs_sb_info *sbinfo; 11491da177e4SLinus Torvalds 115010f19a86SMiklos Szeredi save_mount_options(sb, data); 115110f19a86SMiklos Szeredi 11527ca02d0aSMike Kravetz config.max_hpages = -1; /* No limit on size by default */ 11531da177e4SLinus Torvalds config.nr_inodes = -1; /* No limit on number of inodes by default */ 115477c70de1SDavid Howells config.uid = current_fsuid(); 115577c70de1SDavid Howells config.gid = current_fsgid(); 11561da177e4SLinus Torvalds config.mode = 0755; 1157a137e1ccSAndi Kleen config.hstate = &default_hstate; 11587ca02d0aSMike Kravetz config.min_hpages = -1; /* No default minimum size */ 11591da177e4SLinus Torvalds ret = hugetlbfs_parse_options(data, &config); 11601da177e4SLinus Torvalds if (ret) 11611da177e4SLinus Torvalds return ret; 11621da177e4SLinus Torvalds 11631da177e4SLinus Torvalds sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); 11641da177e4SLinus Torvalds if (!sbinfo) 11651da177e4SLinus Torvalds return -ENOMEM; 11661da177e4SLinus Torvalds sb->s_fs_info = sbinfo; 1167a137e1ccSAndi Kleen sbinfo->hstate = config.hstate; 11681da177e4SLinus Torvalds spin_lock_init(&sbinfo->stat_lock); 11691da177e4SLinus Torvalds sbinfo->max_inodes = config.nr_inodes; 11701da177e4SLinus Torvalds sbinfo->free_inodes = config.nr_inodes; 117190481622SDavid Gibson sbinfo->spool = NULL; 11727ca02d0aSMike Kravetz /* 11737ca02d0aSMike Kravetz * Allocate and initialize subpool if maximum or minimum size is 11747ca02d0aSMike Kravetz * specified. Any needed reservations (for minimim size) are taken 11757ca02d0aSMike Kravetz * taken when the subpool is created. 11767ca02d0aSMike Kravetz */ 11777ca02d0aSMike Kravetz if (config.max_hpages != -1 || config.min_hpages != -1) { 11787ca02d0aSMike Kravetz sbinfo->spool = hugepage_new_subpool(config.hstate, 11797ca02d0aSMike Kravetz config.max_hpages, 11807ca02d0aSMike Kravetz config.min_hpages); 118190481622SDavid Gibson if (!sbinfo->spool) 118290481622SDavid Gibson goto out_free; 118390481622SDavid Gibson } 11841da177e4SLinus Torvalds sb->s_maxbytes = MAX_LFS_FILESIZE; 1185a137e1ccSAndi Kleen sb->s_blocksize = huge_page_size(config.hstate); 1186a137e1ccSAndi Kleen sb->s_blocksize_bits = huge_page_shift(config.hstate); 11871da177e4SLinus Torvalds sb->s_magic = HUGETLBFS_MAGIC; 11881da177e4SLinus Torvalds sb->s_op = &hugetlbfs_ops; 11891da177e4SLinus Torvalds sb->s_time_gran = 1; 119048fde701SAl Viro sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config)); 119148fde701SAl Viro if (!sb->s_root) 11921da177e4SLinus Torvalds goto out_free; 11931da177e4SLinus Torvalds return 0; 11941da177e4SLinus Torvalds out_free: 119590481622SDavid Gibson kfree(sbinfo->spool); 11961da177e4SLinus Torvalds kfree(sbinfo); 11971da177e4SLinus Torvalds return -ENOMEM; 11981da177e4SLinus Torvalds } 11991da177e4SLinus Torvalds 12003c26ff6eSAl Viro static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type, 12013c26ff6eSAl Viro int flags, const char *dev_name, void *data) 12021da177e4SLinus Torvalds { 12033c26ff6eSAl Viro return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super); 12041da177e4SLinus Torvalds } 12051da177e4SLinus Torvalds 12061da177e4SLinus Torvalds static struct file_system_type hugetlbfs_fs_type = { 12071da177e4SLinus Torvalds .name = "hugetlbfs", 12083c26ff6eSAl Viro .mount = hugetlbfs_mount, 12091da177e4SLinus Torvalds .kill_sb = kill_litter_super, 12101da177e4SLinus Torvalds }; 12111da177e4SLinus Torvalds 121242d7395fSAndi Kleen static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; 12131da177e4SLinus Torvalds 1214ef1ff6b8SFrom: Mel Gorman static int can_do_hugetlb_shm(void) 12151da177e4SLinus Torvalds { 1216a0eb3a05SEric W. Biederman kgid_t shm_group; 1217a0eb3a05SEric W. Biederman shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); 1218a0eb3a05SEric W. Biederman return capable(CAP_IPC_LOCK) || in_group_p(shm_group); 12191da177e4SLinus Torvalds } 12201da177e4SLinus Torvalds 122142d7395fSAndi Kleen static int get_hstate_idx(int page_size_log) 122242d7395fSAndi Kleen { 1223af73e4d9SNaoya Horiguchi struct hstate *h = hstate_sizelog(page_size_log); 122442d7395fSAndi Kleen 122542d7395fSAndi Kleen if (!h) 122642d7395fSAndi Kleen return -1; 122742d7395fSAndi Kleen return h - hstates; 122842d7395fSAndi Kleen } 122942d7395fSAndi Kleen 1230be1d2cf5SFabian Frederick static const struct dentry_operations anon_ops = { 1231118b2302SAl Viro .d_dname = simple_dname 12320df4d6e5SAl Viro }; 12330df4d6e5SAl Viro 1234af73e4d9SNaoya Horiguchi /* 1235af73e4d9SNaoya Horiguchi * Note that size should be aligned to proper hugepage size in caller side, 1236af73e4d9SNaoya Horiguchi * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. 1237af73e4d9SNaoya Horiguchi */ 1238af73e4d9SNaoya Horiguchi struct file *hugetlb_file_setup(const char *name, size_t size, 1239af73e4d9SNaoya Horiguchi vm_flags_t acctflag, struct user_struct **user, 124042d7395fSAndi Kleen int creat_flags, int page_size_log) 12411da177e4SLinus Torvalds { 124239b65252SAnatol Pomozov struct file *file = ERR_PTR(-ENOMEM); 12431da177e4SLinus Torvalds struct inode *inode; 12442c48b9c4SAl Viro struct path path; 12450df4d6e5SAl Viro struct super_block *sb; 12461da177e4SLinus Torvalds struct qstr quick_string; 124742d7395fSAndi Kleen int hstate_idx; 124842d7395fSAndi Kleen 124942d7395fSAndi Kleen hstate_idx = get_hstate_idx(page_size_log); 125042d7395fSAndi Kleen if (hstate_idx < 0) 125142d7395fSAndi Kleen return ERR_PTR(-ENODEV); 12521da177e4SLinus Torvalds 1253353d5c30SHugh Dickins *user = NULL; 125442d7395fSAndi Kleen if (!hugetlbfs_vfsmount[hstate_idx]) 12555bc98594SAkinobu Mita return ERR_PTR(-ENOENT); 12565bc98594SAkinobu Mita 1257ef1ff6b8SFrom: Mel Gorman if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { 1258353d5c30SHugh Dickins *user = current_user(); 1259353d5c30SHugh Dickins if (user_shm_lock(size, *user)) { 126021a3c273SDavid Rientjes task_lock(current); 12619b857d26SAndrew Morton pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n", 126221a3c273SDavid Rientjes current->comm, current->pid); 126321a3c273SDavid Rientjes task_unlock(current); 1264353d5c30SHugh Dickins } else { 1265353d5c30SHugh Dickins *user = NULL; 12661da177e4SLinus Torvalds return ERR_PTR(-EPERM); 12672584e517SRavikiran G Thirumalai } 1268353d5c30SHugh Dickins } 12691da177e4SLinus Torvalds 12700df4d6e5SAl Viro sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb; 12719d66586fSEric W. Biederman quick_string.name = name; 12721da177e4SLinus Torvalds quick_string.len = strlen(quick_string.name); 12731da177e4SLinus Torvalds quick_string.hash = 0; 12740df4d6e5SAl Viro path.dentry = d_alloc_pseudo(sb, &quick_string); 12752c48b9c4SAl Viro if (!path.dentry) 12761da177e4SLinus Torvalds goto out_shm_unlock; 12771da177e4SLinus Torvalds 12780df4d6e5SAl Viro d_set_d_op(path.dentry, &anon_ops); 127942d7395fSAndi Kleen path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]); 128039b65252SAnatol Pomozov file = ERR_PTR(-ENOSPC); 12810df4d6e5SAl Viro inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); 12821da177e4SLinus Torvalds if (!inode) 1283ce8d2cdfSDave Hansen goto out_dentry; 1284e1832f29SStephen Smalley if (creat_flags == HUGETLB_SHMFS_INODE) 1285e1832f29SStephen Smalley inode->i_flags |= S_PRIVATE; 12861da177e4SLinus Torvalds 128739b65252SAnatol Pomozov file = ERR_PTR(-ENOMEM); 1288af73e4d9SNaoya Horiguchi if (hugetlb_reserve_pages(inode, 0, 1289af73e4d9SNaoya Horiguchi size >> huge_page_shift(hstate_inode(inode)), NULL, 1290af73e4d9SNaoya Horiguchi acctflag)) 1291b45b5bd6SDavid Gibson goto out_inode; 1292b45b5bd6SDavid Gibson 12932c48b9c4SAl Viro d_instantiate(path.dentry, inode); 12941da177e4SLinus Torvalds inode->i_size = size; 12956d6b77f1SMiklos Szeredi clear_nlink(inode); 1296ce8d2cdfSDave Hansen 12972c48b9c4SAl Viro file = alloc_file(&path, FMODE_WRITE | FMODE_READ, 1298ce8d2cdfSDave Hansen &hugetlbfs_file_operations); 129939b65252SAnatol Pomozov if (IS_ERR(file)) 1300b4d232e6SAl Viro goto out_dentry; /* inode is already attached */ 1301ce8d2cdfSDave Hansen 13021da177e4SLinus Torvalds return file; 13031da177e4SLinus Torvalds 1304b45b5bd6SDavid Gibson out_inode: 1305b45b5bd6SDavid Gibson iput(inode); 13061da177e4SLinus Torvalds out_dentry: 13072c48b9c4SAl Viro path_put(&path); 13081da177e4SLinus Torvalds out_shm_unlock: 1309353d5c30SHugh Dickins if (*user) { 1310353d5c30SHugh Dickins user_shm_unlock(size, *user); 1311353d5c30SHugh Dickins *user = NULL; 1312353d5c30SHugh Dickins } 131339b65252SAnatol Pomozov return file; 13141da177e4SLinus Torvalds } 13151da177e4SLinus Torvalds 13161da177e4SLinus Torvalds static int __init init_hugetlbfs_fs(void) 13171da177e4SLinus Torvalds { 131842d7395fSAndi Kleen struct hstate *h; 13191da177e4SLinus Torvalds int error; 132042d7395fSAndi Kleen int i; 13211da177e4SLinus Torvalds 1322457c1b27SNishanth Aravamudan if (!hugepages_supported()) { 13239b857d26SAndrew Morton pr_info("disabling because there are no supported hugepage sizes\n"); 1324457c1b27SNishanth Aravamudan return -ENOTSUPP; 1325457c1b27SNishanth Aravamudan } 1326457c1b27SNishanth Aravamudan 1327d1d5e05fSHillf Danton error = -ENOMEM; 13281da177e4SLinus Torvalds hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", 13291da177e4SLinus Torvalds sizeof(struct hugetlbfs_inode_info), 13305d097056SVladimir Davydov 0, SLAB_ACCOUNT, init_once); 13311da177e4SLinus Torvalds if (hugetlbfs_inode_cachep == NULL) 1332e0bf68ddSPeter Zijlstra goto out2; 13331da177e4SLinus Torvalds 13341da177e4SLinus Torvalds error = register_filesystem(&hugetlbfs_fs_type); 13351da177e4SLinus Torvalds if (error) 13361da177e4SLinus Torvalds goto out; 13371da177e4SLinus Torvalds 133842d7395fSAndi Kleen i = 0; 133942d7395fSAndi Kleen for_each_hstate(h) { 134042d7395fSAndi Kleen char buf[50]; 134142d7395fSAndi Kleen unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10); 13421da177e4SLinus Torvalds 134342d7395fSAndi Kleen snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb); 134442d7395fSAndi Kleen hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type, 134542d7395fSAndi Kleen buf); 134642d7395fSAndi Kleen 134742d7395fSAndi Kleen if (IS_ERR(hugetlbfs_vfsmount[i])) { 13489b857d26SAndrew Morton pr_err("Cannot mount internal hugetlbfs for " 134942d7395fSAndi Kleen "page size %uK", ps_kb); 135042d7395fSAndi Kleen error = PTR_ERR(hugetlbfs_vfsmount[i]); 135142d7395fSAndi Kleen hugetlbfs_vfsmount[i] = NULL; 13521da177e4SLinus Torvalds } 135342d7395fSAndi Kleen i++; 135442d7395fSAndi Kleen } 135542d7395fSAndi Kleen /* Non default hstates are optional */ 135642d7395fSAndi Kleen if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx])) 135742d7395fSAndi Kleen return 0; 13581da177e4SLinus Torvalds 13591da177e4SLinus Torvalds out: 13601da177e4SLinus Torvalds kmem_cache_destroy(hugetlbfs_inode_cachep); 1361e0bf68ddSPeter Zijlstra out2: 13621da177e4SLinus Torvalds return error; 13631da177e4SLinus Torvalds } 13643e89e1c5SPaul Gortmaker fs_initcall(init_hugetlbfs_fs) 1365