/* xref: /openbmc/linux/fs/hugetlbfs/inode.c (revision 7e1813d4) */
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

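/*
 * Illustrative usage (editor's note, not part of this file): the table
 * above corresponds to mount invocations such as
 *
 *   mount -t hugetlbfs -o uid=1000,gid=1000,mode=0770,pagesize=2M,size=1G,min_size=512M,nr_inodes=64 none /mnt/huge
 *
 * "size" and "min_size" accept byte values (with K/M/G suffixes) or a
 * percentage of the huge page pool (SIZE_PERCENT); "pagesize" selects
 * the hstate backing the mount.
 */
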
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

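/*
 * Worked example (editor's illustration): on a 64-bit arch with 4K base
 * pages, PAGE_SHIFT is 12, so PGOFF_LOFFT_MAX selects the upper 13 bits
 * of vm_pgoff.  If any of those bits is set, the << PAGE_SHIFT
 * conversion to a byte offset would overflow the 63 value bits of a
 * signed loff_t.
 */
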
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * that the is_vm_hugetlb_page() tests beneath unmap_region()
	 * go the right way when do_mmap() unwinds (may be important
	 * on powerpc and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * The page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to a byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use the mm->get_unmapped_area value as a hint to pick the topdown
	 * routine.  If architectures have special needs, they should define
	 * their own version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = copy_page_to_iter(page, offset, nr, to);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

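/*
 * Remove a page from the inode's page cache.  The page is expected to be
 * locked by the caller; dirty and uptodate state is cleared first so the
 * page can neither be written back nor reused with stale contents.
 */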
static void hugetlb_delete_from_page_cache(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

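/*
 * Unmap the base-page range [start, end) (vm_pgoff units; end == 0 means
 * "to the end of the file") from every vma in the file's shared mapping
 * interval tree.  The caller must hold i_mmap_rwsem for write.
 */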
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
				     NULL, zap_flags);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = start;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If the folio is mapped, it was faulted in after
			 * being unmapped in the caller.  Unmap (again) now
			 * after taking the fault mutex.  The mutex will
			 * prevent faults until we finish removing the folio.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(folio_mapped(folio))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h),
					ZAP_FLAG_DROP_MARKER);
				i_mmap_unlock_write(mapping);
			}

			folio_lock(folio);
			/*
			 * We must free the huge page and remove it from the
			 * page cache BEFORE removing the region/reserve map
			 * (hugetlb_unreserve_pages).  In rare out of memory
			 * conditions, removal of the region/reserve map could
			 * fail.  Correspondingly, the subpool and global
			 * reserve usage count may need to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(&folio->page));
			hugetlb_delete_from_page_cache(&folio->page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			folio_unlock(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

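/*
 * Truncate the file to 'offset', which must be huge-page aligned: publish
 * the new i_size first (so concurrent faults beyond it fail the i_size
 * check), unmap every mapping of the truncated range, then remove and
 * free the pages.
 */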
static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

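/*
 * Zero the sub-huge-page byte range [start, end) within the huge page
 * covering 'start', if that page is present in the page cache.  Used by
 * hole punch for the partial pages at either edge of the hole.
 */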
static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (!folio)
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
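	/*
	 * Worked example (editor's illustration): with 2MB huge pages,
	 * punching offset = 1MB, len = 4MB gives hole_start = 2MB and
	 * hole_end = 4MB; [1MB, 2MB) and [4MB, 5MB) are zeroed in place
	 * below, and only [2MB, 4MB) is actually removed from the file.
	 */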
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use the page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * The fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, in the fault path, and in hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate the page without setting the avoid_reserve
		 * argument.  There certainly are no reserves associated
		 * with the pseudo_vma.  However, there could be shared
		 * mappings with reserves for the file at the inode level.
		 * If we fallocate pages in these areas, we need to consume
		 * the reserves to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = hugetlb_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page() because the page was locked by
		 * hugetlb_add_to_page_cache(); put_page() drops the
		 * reference taken in alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

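/*
 * Userspace view (editor's illustration): on a 2MB hugetlbfs mount,
 *
 *   int fd = open("/mnt/huge/f", O_CREAT | O_RDWR, 0600);
 *   fallocate(fd, 0, 0, 4 << 20);                      // preallocate 2 pages
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *             0, 2 << 20);                             // free the first page
 *
 * Note that the generic VFS fallocate path requires FALLOC_FL_KEEP_SIZE
 * whenever FALLOC_FL_PUNCH_HOLE is used, before this handler is reached.
 */
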
static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done..
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

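/*
 * Folio migration callback: move the page-cache entry from 'src' to 'dst',
 * transfer the hugetlb subpool pointer, then copy the contents (or only
 * the flags, for MIGRATE_SYNC_NO_COPY) to the destination folio.
 */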
#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_page_subpool(&src->page)) {
		hugetlb_set_page_subpool(&dst->page,
					hugetlb_page_subpool(&src->page));
		hugetlb_set_page_subpool(&src->page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

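/*
 * Memory-failure callback: remove a hardware-poisoned page from the page
 * cache and release the reservation that backed it.
 */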
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	hugetlb_delete_from_page_cache(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/*
		 * If no limits are set, just report 0 or -1 for max/free/used
		 * blocks, like simple_statfs().
		 */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock_irq(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock_irq(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

107296527980SChristoph Hellwig static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
107396527980SChristoph Hellwig {
107496527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
107596527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
107696527980SChristoph Hellwig 		if (unlikely(!sbinfo->free_inodes)) {
107796527980SChristoph Hellwig 			spin_unlock(&sbinfo->stat_lock);
107896527980SChristoph Hellwig 			return 0;
107996527980SChristoph Hellwig 		}
108096527980SChristoph Hellwig 		sbinfo->free_inodes--;
108196527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
108296527980SChristoph Hellwig 	}
108396527980SChristoph Hellwig 
108496527980SChristoph Hellwig 	return 1;
108596527980SChristoph Hellwig }
108696527980SChristoph Hellwig 
108796527980SChristoph Hellwig static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
108896527980SChristoph Hellwig {
108996527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
109096527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
109196527980SChristoph Hellwig 		sbinfo->free_inodes++;
109296527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
109396527980SChristoph Hellwig 	}
109496527980SChristoph Hellwig }
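/*
 * When the mount had no "nr_inodes=" limit, max_inodes and free_inodes stay
 * at -1, so both helpers above skip the stat_lock entirely; the counted path
 * is only taken on mounts with an inode limit.
 */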
109596527980SChristoph Hellwig 
109696527980SChristoph Hellwig 
1097e18b890bSChristoph Lameter static struct kmem_cache *hugetlbfs_inode_cachep;
10981da177e4SLinus Torvalds 
10991da177e4SLinus Torvalds static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
11001da177e4SLinus Torvalds {
110196527980SChristoph Hellwig 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
11021da177e4SLinus Torvalds 	struct hugetlbfs_inode_info *p;
11031da177e4SLinus Torvalds 
110496527980SChristoph Hellwig 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
11051da177e4SLinus Torvalds 		return NULL;
1106fd60b288SMuchun Song 	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
110796527980SChristoph Hellwig 	if (unlikely(!p)) {
110896527980SChristoph Hellwig 		hugetlbfs_inc_free_inodes(sbinfo);
110996527980SChristoph Hellwig 		return NULL;
11101da177e4SLinus Torvalds 	}
11114742a35dSMike Kravetz 
11124742a35dSMike Kravetz 	/*
11134742a35dSMike Kravetz 	 * Any time after allocation, hugetlbfs_destroy_inode can be called
11144742a35dSMike Kravetz 	 * for the inode.  mpol_free_shared_policy is unconditionally called
11154742a35dSMike Kravetz 	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
11164742a35dSMike Kravetz 	 * in case of a quick call to destroy.
11174742a35dSMike Kravetz 	 *
11184742a35dSMike Kravetz 	 * Note that the policy is initialized even if we are creating a
11194742a35dSMike Kravetz 	 * private inode.  This simplifies hugetlbfs_destroy_inode.
11204742a35dSMike Kravetz 	 */
11214742a35dSMike Kravetz 	mpol_shared_policy_init(&p->policy, NULL);
11224742a35dSMike Kravetz 
112396527980SChristoph Hellwig 	return &p->vfs_inode;
11241da177e4SLinus Torvalds }
11251da177e4SLinus Torvalds 
1126b62de322SAl Viro static void hugetlbfs_free_inode(struct inode *inode)
1127fa0d7e3dSNick Piggin {
1128fa0d7e3dSNick Piggin 	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1129fa0d7e3dSNick Piggin }
1130fa0d7e3dSNick Piggin 
11311da177e4SLinus Torvalds static void hugetlbfs_destroy_inode(struct inode *inode)
11321da177e4SLinus Torvalds {
113396527980SChristoph Hellwig 	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
11341da177e4SLinus Torvalds 	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
11351da177e4SLinus Torvalds }
11361da177e4SLinus Torvalds 
1137f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops = {
1138800d15a5SNick Piggin 	.write_begin	= hugetlbfs_write_begin,
1139800d15a5SNick Piggin 	.write_end	= hugetlbfs_write_end,
114046de8b97SMatthew Wilcox (Oracle) 	.dirty_folio	= noop_dirty_folio,
1141b890ec2aSMatthew Wilcox (Oracle) 	.migrate_folio  = hugetlbfs_migrate_folio,
114278bb9203SNaoya Horiguchi 	.error_remove_page	= hugetlbfs_error_remove_page,
11431da177e4SLinus Torvalds };
11441da177e4SLinus Torvalds 
114596527980SChristoph Hellwig 
114651cc5068SAlexey Dobriyan static void init_once(void *foo)
114796527980SChristoph Hellwig {
114896527980SChristoph Hellwig 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
114996527980SChristoph Hellwig 
115096527980SChristoph Hellwig 	inode_init_once(&ei->vfs_inode);
115196527980SChristoph Hellwig }
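/*
 * init_once() is a slab constructor: it runs when a slab object is first
 * created, not on every allocation from the cache.  State that must be fresh
 * for each inode, such as the shared NUMA policy, is therefore set up in
 * hugetlbfs_alloc_inode() rather than here.
 */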
115296527980SChristoph Hellwig 
11534b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations = {
115434d0640eSAl Viro 	.read_iter		= hugetlbfs_read_iter,
11551da177e4SLinus Torvalds 	.mmap			= hugetlbfs_file_mmap,
11561b061d92SChristoph Hellwig 	.fsync			= noop_fsync,
11571da177e4SLinus Torvalds 	.get_unmapped_area	= hugetlb_get_unmapped_area,
11586038f373SArnd Bergmann 	.llseek			= default_llseek,
115970c3547eSMike Kravetz 	.fallocate		= hugetlbfs_fallocate,
11601da177e4SLinus Torvalds };
11611da177e4SLinus Torvalds 
116292e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations = {
11631da177e4SLinus Torvalds 	.create		= hugetlbfs_create,
11641da177e4SLinus Torvalds 	.lookup		= simple_lookup,
11651da177e4SLinus Torvalds 	.link		= simple_link,
11661da177e4SLinus Torvalds 	.unlink		= simple_unlink,
11671da177e4SLinus Torvalds 	.symlink	= hugetlbfs_symlink,
11681da177e4SLinus Torvalds 	.mkdir		= hugetlbfs_mkdir,
11691da177e4SLinus Torvalds 	.rmdir		= simple_rmdir,
11701da177e4SLinus Torvalds 	.mknod		= hugetlbfs_mknod,
11711da177e4SLinus Torvalds 	.rename		= simple_rename,
11721da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
11731ab5b82fSPiotr Sarna 	.tmpfile	= hugetlbfs_tmpfile,
11741da177e4SLinus Torvalds };
11751da177e4SLinus Torvalds 
117692e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations = {
11771da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
11781da177e4SLinus Torvalds };
11791da177e4SLinus Torvalds 
1180ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops = {
11811da177e4SLinus Torvalds 	.alloc_inode    = hugetlbfs_alloc_inode,
1182b62de322SAl Viro 	.free_inode     = hugetlbfs_free_inode,
11831da177e4SLinus Torvalds 	.destroy_inode  = hugetlbfs_destroy_inode,
11842bbbda30SAl Viro 	.evict_inode	= hugetlbfs_evict_inode,
11851da177e4SLinus Torvalds 	.statfs		= hugetlbfs_statfs,
11861da177e4SLinus Torvalds 	.put_super	= hugetlbfs_put_super,
11874a25220dSDavid Howells 	.show_options	= hugetlbfs_show_options,
11881da177e4SLinus Torvalds };
11891da177e4SLinus Torvalds 
11907ca02d0aSMike Kravetz /*
11917ca02d0aSMike Kravetz  * Convert the size option passed on the command line to a number of huge
11927ca02d0aSMike Kravetz  * pages in the pool specified by hstate.  The size option may be in bytes
11937ca02d0aSMike Kravetz  * (val_type == SIZE_STD) or a percentage of the pool (val_type == SIZE_PERCENT).
11947ca02d0aSMike Kravetz  */
11954a25220dSDavid Howells static long
11967ca02d0aSMike Kravetz hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
11974a25220dSDavid Howells 			 enum hugetlbfs_size_type val_type)
11987ca02d0aSMike Kravetz {
11997ca02d0aSMike Kravetz 	if (val_type == NO_SIZE)
12007ca02d0aSMike Kravetz 		return -1;
12017ca02d0aSMike Kravetz 
12027ca02d0aSMike Kravetz 	if (val_type == SIZE_PERCENT) {
12037ca02d0aSMike Kravetz 		size_opt <<= huge_page_shift(h);
12047ca02d0aSMike Kravetz 		size_opt *= h->max_huge_pages;
12057ca02d0aSMike Kravetz 		do_div(size_opt, 100);
12067ca02d0aSMike Kravetz 	}
12077ca02d0aSMike Kravetz 
12087ca02d0aSMike Kravetz 	size_opt >>= huge_page_shift(h);
12097ca02d0aSMike Kravetz 	return size_opt;
12107ca02d0aSMike Kravetz }
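/*
 * Worked example, assuming a 2 MB hstate (huge_page_shift(h) == 21) and
 * h->max_huge_pages == 512:
 *
 *	"size=1G"  -> SIZE_STD:     1073741824 >> 21                 = 512 pages
 *	"size=50%" -> SIZE_PERCENT: (((50 << 21) * 512) / 100) >> 21 = 256 pages
 */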
12117ca02d0aSMike Kravetz 
121232021982SDavid Howells /*
121332021982SDavid Howells  * Parse one mount parameter.
121432021982SDavid Howells  */
121532021982SDavid Howells static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
12161da177e4SLinus Torvalds {
121732021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
121832021982SDavid Howells 	struct fs_parse_result result;
121932021982SDavid Howells 	char *rest;
122032021982SDavid Howells 	unsigned long ps;
122132021982SDavid Howells 	int opt;
12221da177e4SLinus Torvalds 
1223d7167b14SAl Viro 	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
122432021982SDavid Howells 	if (opt < 0)
122532021982SDavid Howells 		return opt;
122632021982SDavid Howells 
122732021982SDavid Howells 	switch (opt) {
122832021982SDavid Howells 	case Opt_uid:
122932021982SDavid Howells 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
123032021982SDavid Howells 		if (!uid_valid(ctx->uid))
123132021982SDavid Howells 			goto bad_val;
12321da177e4SLinus Torvalds 		return 0;
12331da177e4SLinus Torvalds 
1234e73a75faSRandy Dunlap 	case Opt_gid:
123532021982SDavid Howells 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
123632021982SDavid Howells 		if (!gid_valid(ctx->gid))
1237e73a75faSRandy Dunlap 			goto bad_val;
123832021982SDavid Howells 		return 0;
1239e73a75faSRandy Dunlap 
1240e73a75faSRandy Dunlap 	case Opt_mode:
124132021982SDavid Howells 		ctx->mode = result.uint_32 & 01777U;
124232021982SDavid Howells 		return 0;
1243e73a75faSRandy Dunlap 
124432021982SDavid Howells 	case Opt_size:
1245e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
124632021982SDavid Howells 		if (!isdigit(param->string[0]))
1247e73a75faSRandy Dunlap 			goto bad_val;
124832021982SDavid Howells 		ctx->max_size_opt = memparse(param->string, &rest);
124932021982SDavid Howells 		ctx->max_val_type = SIZE_STD;
1250a137e1ccSAndi Kleen 		if (*rest == '%')
125132021982SDavid Howells 			ctx->max_val_type = SIZE_PERCENT;
125232021982SDavid Howells 		return 0;
12531da177e4SLinus Torvalds 
1254e73a75faSRandy Dunlap 	case Opt_nr_inodes:
1255e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
125632021982SDavid Howells 		if (!isdigit(param->string[0]))
1257e73a75faSRandy Dunlap 			goto bad_val;
125832021982SDavid Howells 		ctx->nr_inodes = memparse(param->string, &rest);
125932021982SDavid Howells 		return 0;
1260e73a75faSRandy Dunlap 
126132021982SDavid Howells 	case Opt_pagesize:
126232021982SDavid Howells 		ps = memparse(param->string, &rest);
126332021982SDavid Howells 		ctx->hstate = size_to_hstate(ps);
126432021982SDavid Howells 		if (!ctx->hstate) {
1265d0036517SMiaohe Lin 			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
1266a137e1ccSAndi Kleen 			return -EINVAL;
1267a137e1ccSAndi Kleen 		}
126832021982SDavid Howells 		return 0;
1269a137e1ccSAndi Kleen 
127032021982SDavid Howells 	case Opt_min_size:
12717ca02d0aSMike Kravetz 		/* memparse() will accept a K/M/G without a digit */
127232021982SDavid Howells 		if (!isdigit(param->string[0]))
12737ca02d0aSMike Kravetz 			goto bad_val;
127432021982SDavid Howells 		ctx->min_size_opt = memparse(param->string, &rest);
127532021982SDavid Howells 		ctx->min_val_type = SIZE_STD;
12767ca02d0aSMike Kravetz 		if (*rest == '%')
127732021982SDavid Howells 			ctx->min_val_type = SIZE_PERCENT;
127832021982SDavid Howells 		return 0;
12797ca02d0aSMike Kravetz 
1280e73a75faSRandy Dunlap 	default:
1281b4c07bceSLee Schermerhorn 		return -EINVAL;
1282e73a75faSRandy Dunlap 	}
128332021982SDavid Howells 
128432021982SDavid Howells bad_val:
1285b5db30cfSAl Viro 	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
128632021982SDavid Howells 		      param->string, param->key);
12871da177e4SLinus Torvalds }
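/*
 * Taken together, the parameters above support mounts such as the following
 * (illustrative path and values):
 *
 *	mount -t hugetlbfs \
 *		-o uid=1000,gid=1000,mode=0700,pagesize=2M \
 *		-o size=1G,min_size=512M,nr_inodes=64 \
 *		none /mnt/huge
 *
 * "size" and "min_size" also accept a percentage of the pool, e.g. size=50%,
 * and memparse() handles the K/M/G suffixes.
 */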
1288a137e1ccSAndi Kleen 
12897ca02d0aSMike Kravetz /*
129032021982SDavid Howells  * Validate the parsed options.
129132021982SDavid Howells  */
129232021982SDavid Howells static int hugetlbfs_validate(struct fs_context *fc)
129332021982SDavid Howells {
129432021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
129532021982SDavid Howells 
129632021982SDavid Howells 	/*
12977ca02d0aSMike Kravetz 	 * Use huge page pool size (in hstate) to convert the size
12987ca02d0aSMike Kravetz 	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
12997ca02d0aSMike Kravetz 	 */
130032021982SDavid Howells 	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
130132021982SDavid Howells 						   ctx->max_size_opt,
130232021982SDavid Howells 						   ctx->max_val_type);
130332021982SDavid Howells 	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
130432021982SDavid Howells 						   ctx->min_size_opt,
130532021982SDavid Howells 						   ctx->min_val_type);
13067ca02d0aSMike Kravetz 
13077ca02d0aSMike Kravetz 	/*
13087ca02d0aSMike Kravetz 	 * If max_size was specified, then min_size must not be larger
13097ca02d0aSMike Kravetz 	 */
131032021982SDavid Howells 	if (ctx->max_val_type > NO_SIZE &&
131132021982SDavid Howells 	    ctx->min_hpages > ctx->max_hpages) {
131232021982SDavid Howells 		pr_err("Minimum size cannot be greater than maximum size\n");
13137ca02d0aSMike Kravetz 		return -EINVAL;
1314a137e1ccSAndi Kleen 	}
1315a137e1ccSAndi Kleen 
13161da177e4SLinus Torvalds 	return 0;
13171da177e4SLinus Torvalds }
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds static int
132032021982SDavid Howells hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
13211da177e4SLinus Torvalds {
132232021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
13231da177e4SLinus Torvalds 	struct hugetlbfs_sb_info *sbinfo;
13241da177e4SLinus Torvalds 
13251da177e4SLinus Torvalds 	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
13261da177e4SLinus Torvalds 	if (!sbinfo)
13271da177e4SLinus Torvalds 		return -ENOMEM;
13281da177e4SLinus Torvalds 	sb->s_fs_info = sbinfo;
13291da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
133032021982SDavid Howells 	sbinfo->hstate		= ctx->hstate;
133132021982SDavid Howells 	sbinfo->max_inodes	= ctx->nr_inodes;
133232021982SDavid Howells 	sbinfo->free_inodes	= ctx->nr_inodes;
133390481622SDavid Gibson 	sbinfo->spool		= NULL;
133432021982SDavid Howells 	sbinfo->uid		= ctx->uid;
133532021982SDavid Howells 	sbinfo->gid		= ctx->gid;
133632021982SDavid Howells 	sbinfo->mode		= ctx->mode;
13374a25220dSDavid Howells 
13387ca02d0aSMike Kravetz 	/*
13397ca02d0aSMike Kravetz 	 * Allocate and initialize subpool if maximum or minimum size is
13401935ebd3SMiaohe Lin 	 * specified.  Any needed reservations (for minimum size) are taken
1341445c8098SMiaohe Lin 	 * when the subpool is created.
13427ca02d0aSMike Kravetz 	 */
134332021982SDavid Howells 	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
134432021982SDavid Howells 		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
134532021982SDavid Howells 						     ctx->max_hpages,
134632021982SDavid Howells 						     ctx->min_hpages);
134790481622SDavid Gibson 		if (!sbinfo->spool)
134890481622SDavid Gibson 			goto out_free;
134990481622SDavid Gibson 	}
13501da177e4SLinus Torvalds 	sb->s_maxbytes = MAX_LFS_FILESIZE;
135132021982SDavid Howells 	sb->s_blocksize = huge_page_size(ctx->hstate);
135232021982SDavid Howells 	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
13531da177e4SLinus Torvalds 	sb->s_magic = HUGETLBFS_MAGIC;
13541da177e4SLinus Torvalds 	sb->s_op = &hugetlbfs_ops;
13551da177e4SLinus Torvalds 	sb->s_time_gran = 1;
135615568299SMike Kravetz 
135715568299SMike Kravetz 	/*
135815568299SMike Kravetz 	 * Due to the special and limited functionality of hugetlbfs, it does
135915568299SMike Kravetz 	 * not work well as a stacking filesystem.
136015568299SMike Kravetz 	 */
136115568299SMike Kravetz 	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
136232021982SDavid Howells 	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
136348fde701SAl Viro 	if (!sb->s_root)
13641da177e4SLinus Torvalds 		goto out_free;
13651da177e4SLinus Torvalds 	return 0;
13661da177e4SLinus Torvalds out_free:
136790481622SDavid Gibson 	kfree(sbinfo->spool);
13681da177e4SLinus Torvalds 	kfree(sbinfo);
13691da177e4SLinus Torvalds 	return -ENOMEM;
13701da177e4SLinus Torvalds }
13711da177e4SLinus Torvalds 
137232021982SDavid Howells static int hugetlbfs_get_tree(struct fs_context *fc)
13731da177e4SLinus Torvalds {
137432021982SDavid Howells 	int err = hugetlbfs_validate(fc);
137532021982SDavid Howells 	if (err)
137632021982SDavid Howells 		return err;
13772ac295d4SAl Viro 	return get_tree_nodev(fc, hugetlbfs_fill_super);
137832021982SDavid Howells }
137932021982SDavid Howells 
138032021982SDavid Howells static void hugetlbfs_fs_context_free(struct fs_context *fc)
138132021982SDavid Howells {
138232021982SDavid Howells 	kfree(fc->fs_private);
138332021982SDavid Howells }
138432021982SDavid Howells 
138532021982SDavid Howells static const struct fs_context_operations hugetlbfs_fs_context_ops = {
138632021982SDavid Howells 	.free		= hugetlbfs_fs_context_free,
138732021982SDavid Howells 	.parse_param	= hugetlbfs_parse_param,
138832021982SDavid Howells 	.get_tree	= hugetlbfs_get_tree,
138932021982SDavid Howells };
139032021982SDavid Howells 
139132021982SDavid Howells static int hugetlbfs_init_fs_context(struct fs_context *fc)
139232021982SDavid Howells {
139332021982SDavid Howells 	struct hugetlbfs_fs_context *ctx;
139432021982SDavid Howells 
139532021982SDavid Howells 	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
139632021982SDavid Howells 	if (!ctx)
139732021982SDavid Howells 		return -ENOMEM;
139832021982SDavid Howells 
139932021982SDavid Howells 	ctx->max_hpages	= -1; /* No limit on size by default */
140032021982SDavid Howells 	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
140132021982SDavid Howells 	ctx->uid	= current_fsuid();
140232021982SDavid Howells 	ctx->gid	= current_fsgid();
140332021982SDavid Howells 	ctx->mode	= 0755;
140432021982SDavid Howells 	ctx->hstate	= &default_hstate;
140532021982SDavid Howells 	ctx->min_hpages	= -1; /* No default minimum size */
140632021982SDavid Howells 	ctx->max_val_type = NO_SIZE;
140732021982SDavid Howells 	ctx->min_val_type = NO_SIZE;
140832021982SDavid Howells 	fc->fs_private = ctx;
140932021982SDavid Howells 	fc->ops	= &hugetlbfs_fs_context_ops;
141032021982SDavid Howells 	return 0;
14111da177e4SLinus Torvalds }
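/*
 * These context operations are driven by the new mount API.  A minimal
 * userspace sketch (raw syscalls, typically issued via syscall(2); the path
 * and option values are illustrative):
 *
 *	fsfd = fsopen("hugetlbfs", 0);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "pagesize", "2M", 0);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "size", "1G", 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	mntfd = fsmount(fsfd, 0, 0);
 *	move_mount(mntfd, "", AT_FDCWD, "/mnt/huge", MOVE_MOUNT_F_EMPTY_PATH);
 *
 * Each FSCONFIG_SET_STRING call lands in hugetlbfs_parse_param() above, and
 * FSCONFIG_CMD_CREATE ends up in hugetlbfs_get_tree().
 */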
14121da177e4SLinus Torvalds 
14131da177e4SLinus Torvalds static struct file_system_type hugetlbfs_fs_type = {
14141da177e4SLinus Torvalds 	.name			= "hugetlbfs",
141532021982SDavid Howells 	.init_fs_context	= hugetlbfs_init_fs_context,
1416d7167b14SAl Viro 	.parameters		= hugetlb_fs_parameters,
14171da177e4SLinus Torvalds 	.kill_sb		= kill_litter_super,
14181da177e4SLinus Torvalds };
14191da177e4SLinus Torvalds 
142042d7395fSAndi Kleen static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
14211da177e4SLinus Torvalds 
1422ef1ff6b8SMel Gorman static int can_do_hugetlb_shm(void)
14231da177e4SLinus Torvalds {
1424a0eb3a05SEric W. Biederman 	kgid_t shm_group;
1425a0eb3a05SEric W. Biederman 	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1426a0eb3a05SEric W. Biederman 	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
14271da177e4SLinus Torvalds }
14281da177e4SLinus Torvalds 
142942d7395fSAndi Kleen static int get_hstate_idx(int page_size_log)
143042d7395fSAndi Kleen {
1431af73e4d9SNaoya Horiguchi 	struct hstate *h = hstate_sizelog(page_size_log);
143242d7395fSAndi Kleen 
143342d7395fSAndi Kleen 	if (!h)
143442d7395fSAndi Kleen 		return -1;
143504adbc3fSMiaohe Lin 	return hstate_index(h);
143642d7395fSAndi Kleen }
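/*
 * page_size_log is the log2 page size encoded in userspace flag bits.  For
 * example, with the SHM_HUGE_SHIFT (== 26) encoding from <uapi/linux/shm.h>,
 * shmget(key, size, SHM_HUGETLB | (21 << SHM_HUGE_SHIFT) | IPC_CREAT | 0600)
 * requests 2 MB pages; a page_size_log of 0 selects the default hstate.
 */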
143742d7395fSAndi Kleen 
1438af73e4d9SNaoya Horiguchi /*
1439af73e4d9SNaoya Horiguchi  * Note that size should be aligned to the proper hugepage size on the caller
1440af73e4d9SNaoya Horiguchi  * side; otherwise hugetlb_reserve_pages reserves one less hugepage than intended.
1441af73e4d9SNaoya Horiguchi  */
1442af73e4d9SNaoya Horiguchi struct file *hugetlb_file_setup(const char *name, size_t size,
144383c1fd76Szhangyiru 				vm_flags_t acctflag, int creat_flags,
144483c1fd76Szhangyiru 				int page_size_log)
14451da177e4SLinus Torvalds {
14461da177e4SLinus Torvalds 	struct inode *inode;
1447e68375c8SAl Viro 	struct vfsmount *mnt;
144842d7395fSAndi Kleen 	int hstate_idx;
1449e68375c8SAl Viro 	struct file *file;
145042d7395fSAndi Kleen 
145142d7395fSAndi Kleen 	hstate_idx = get_hstate_idx(page_size_log);
145242d7395fSAndi Kleen 	if (hstate_idx < 0)
145342d7395fSAndi Kleen 		return ERR_PTR(-ENODEV);
14541da177e4SLinus Torvalds 
1455e68375c8SAl Viro 	mnt = hugetlbfs_vfsmount[hstate_idx];
1456e68375c8SAl Viro 	if (!mnt)
14575bc98594SAkinobu Mita 		return ERR_PTR(-ENOENT);
14585bc98594SAkinobu Mita 
1459ef1ff6b8SFrom: Mel Gorman 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
146083c1fd76Szhangyiru 		struct ucounts *ucounts = current_ucounts();
146183c1fd76Szhangyiru 
146283c1fd76Szhangyiru 		if (user_shm_lock(size, ucounts)) {
146383c1fd76Szhangyiru 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
146421a3c273SDavid Rientjes 				current->comm, current->pid);
146583c1fd76Szhangyiru 			user_shm_unlock(size, ucounts);
14662584e517SRavikiran G Thirumalai 		}
146783c1fd76Szhangyiru 		return ERR_PTR(-EPERM);
1468353d5c30SHugh Dickins 	}
14691da177e4SLinus Torvalds 
147039b65252SAnatol Pomozov 	file = ERR_PTR(-ENOSPC);
1471e68375c8SAl Viro 	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
14721da177e4SLinus Torvalds 	if (!inode)
1473e68375c8SAl Viro 		goto out;
1474e1832f29SStephen Smalley 	if (creat_flags == HUGETLB_SHMFS_INODE)
1475e1832f29SStephen Smalley 		inode->i_flags |= S_PRIVATE;
14761da177e4SLinus Torvalds 
14771da177e4SLinus Torvalds 	inode->i_size = size;
14786d6b77f1SMiklos Szeredi 	clear_nlink(inode);
1479ce8d2cdfSDave Hansen 
148033b8f84aSMike Kravetz 	if (!hugetlb_reserve_pages(inode, 0,
1481e68375c8SAl Viro 			size >> huge_page_shift(hstate_inode(inode)), NULL,
1482e68375c8SAl Viro 			acctflag))
1483e68375c8SAl Viro 		file = ERR_PTR(-ENOMEM);
1484e68375c8SAl Viro 	else
1485e68375c8SAl Viro 		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1486ce8d2cdfSDave Hansen 					&hugetlbfs_file_operations);
1487e68375c8SAl Viro 	if (!IS_ERR(file))
14881da177e4SLinus Torvalds 		return file;
14891da177e4SLinus Torvalds 
1490b45b5bd6SDavid Gibson 	iput(inode);
1491e68375c8SAl Viro out:
149239b65252SAnatol Pomozov 	return file;
14931da177e4SLinus Torvalds }
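/*
 * Per the alignment note above, callers round size up first; ipc/shm.c, for
 * instance, does roughly
 *
 *	hugesize = ALIGN(size, huge_page_size(hs));
 *
 * before calling hugetlb_file_setup() (a sketch; see newseg() for the exact
 * code, where hs is the hstate chosen from the shmget() flags).
 */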
14941da177e4SLinus Torvalds 
149532021982SDavid Howells static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
149632021982SDavid Howells {
149732021982SDavid Howells 	struct fs_context *fc;
149832021982SDavid Howells 	struct vfsmount *mnt;
149932021982SDavid Howells 
150032021982SDavid Howells 	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
150132021982SDavid Howells 	if (IS_ERR(fc)) {
150232021982SDavid Howells 		mnt = ERR_CAST(fc);
150332021982SDavid Howells 	} else {
150432021982SDavid Howells 		struct hugetlbfs_fs_context *ctx = fc->fs_private;
150532021982SDavid Howells 		ctx->hstate = h;
150632021982SDavid Howells 		mnt = fc_mount(fc);
150732021982SDavid Howells 		put_fs_context(fc);
150832021982SDavid Howells 	}
150932021982SDavid Howells 	if (IS_ERR(mnt))
1510a25fddceSMiaohe Lin 		pr_err("Cannot mount internal hugetlbfs for page size %luK",
1511d0036517SMiaohe Lin 		       huge_page_size(h) / SZ_1K);
151232021982SDavid Howells 	return mnt;
151332021982SDavid Howells }
151432021982SDavid Howells 
15151da177e4SLinus Torvalds static int __init init_hugetlbfs_fs(void)
15161da177e4SLinus Torvalds {
151732021982SDavid Howells 	struct vfsmount *mnt;
151842d7395fSAndi Kleen 	struct hstate *h;
15191da177e4SLinus Torvalds 	int error;
152042d7395fSAndi Kleen 	int i;
15211da177e4SLinus Torvalds 
1522457c1b27SNishanth Aravamudan 	if (!hugepages_supported()) {
15239b857d26SAndrew Morton 		pr_info("disabling because there are no supported hugepage sizes\n");
1524457c1b27SNishanth Aravamudan 		return -ENOTSUPP;
1525457c1b27SNishanth Aravamudan 	}
1526457c1b27SNishanth Aravamudan 
1527d1d5e05fSHillf Danton 	error = -ENOMEM;
15281da177e4SLinus Torvalds 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
15291da177e4SLinus Torvalds 					sizeof(struct hugetlbfs_inode_info),
15305d097056SVladimir Davydov 					0, SLAB_ACCOUNT, init_once);
15311da177e4SLinus Torvalds 	if (hugetlbfs_inode_cachep == NULL)
15328fc312b3SMike Kravetz 		goto out;
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds 	error = register_filesystem(&hugetlbfs_fs_type);
15351da177e4SLinus Torvalds 	if (error)
15368fc312b3SMike Kravetz 		goto out_free;
15371da177e4SLinus Torvalds 
15388fc312b3SMike Kravetz 	/* default hstate mount is required */
15393b2275a8SMiaohe Lin 	mnt = mount_one_hugetlbfs(&default_hstate);
15408fc312b3SMike Kravetz 	if (IS_ERR(mnt)) {
15418fc312b3SMike Kravetz 		error = PTR_ERR(mnt);
15428fc312b3SMike Kravetz 		goto out_unreg;
15438fc312b3SMike Kravetz 	}
15448fc312b3SMike Kravetz 	hugetlbfs_vfsmount[default_hstate_idx] = mnt;
15458fc312b3SMike Kravetz 
15468fc312b3SMike Kravetz 	/* other hstates are optional */
154742d7395fSAndi Kleen 	i = 0;
154842d7395fSAndi Kleen 	for_each_hstate(h) {
154915f0ec94SJan Stancek 		if (i == default_hstate_idx) {
155015f0ec94SJan Stancek 			i++;
15518fc312b3SMike Kravetz 			continue;
155215f0ec94SJan Stancek 		}
15538fc312b3SMike Kravetz 
155432021982SDavid Howells 		mnt = mount_one_hugetlbfs(h);
15558fc312b3SMike Kravetz 		if (IS_ERR(mnt))
15568fc312b3SMike Kravetz 			hugetlbfs_vfsmount[i] = NULL;
15578fc312b3SMike Kravetz 		else
155832021982SDavid Howells 			hugetlbfs_vfsmount[i] = mnt;
155942d7395fSAndi Kleen 		i++;
156042d7395fSAndi Kleen 	}
156132021982SDavid Howells 
156242d7395fSAndi Kleen 	return 0;
15631da177e4SLinus Torvalds 
15648fc312b3SMike Kravetz  out_unreg:
15658fc312b3SMike Kravetz 	(void)unregister_filesystem(&hugetlbfs_fs_type);
15668fc312b3SMike Kravetz  out_free:
15671da177e4SLinus Torvalds 	kmem_cache_destroy(hugetlbfs_inode_cachep);
15688fc312b3SMike Kravetz  out:
15691da177e4SLinus Torvalds 	return error;
15701da177e4SLinus Torvalds }
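/*
 * The internal per-hstate mounts set up above back the pseudo-files handed
 * out by hugetlb_file_setup(), i.e. mmap(MAP_HUGETLB), shmget(SHM_HUGETLB)
 * and memfd_create(MFD_HUGETLB); only the default hstate mount is required
 * for initialization to succeed.
 */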
15713e89e1c5SPaul Gortmaker fs_initcall(init_hugetlbfs_fs)
1572