xref: /openbmc/linux/fs/hugetlbfs/inode.c (revision b890ec2a)
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

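/*
 * These are the mount options documented in
 * Documentation/admin-guide/mm/hugetlbpage.rst, e.g.:
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,nr_inodes=64 none /mnt/huge
 *
 * "size" and "min_size" accept either a byte count or a percentage of
 * the huge page pool (hence SIZE_STD vs SIZE_PERCENT above).
 */
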
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
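
/*
 * For example, with 4K pages on a 64-bit arch (PAGE_SHIFT == 12,
 * BITS_PER_LONG == 64) the mask is 0x1fff << 51, i.e. the top 13 bits
 * of the value: if any of them were set in vm_pgoff, the byte offset
 * (vm_pgoff << PAGE_SHIFT) would reach the sign bit of a loff_t.
 */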

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

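	/*
	 * Respect F_SEAL_FUTURE_WRITE: refuse new shared writable mappings
	 * of a sealed file, and drop VM_MAYWRITE from read-only ones so
	 * mprotect() cannot upgrade them later.
	 */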
	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to generic_file_buffered_read(), but we can't
 * use that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

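/*
 * hugetlbfs does not support the generic buffered write path: write(2)
 * fails with -EINVAL via ->write_begin below, so ->write_end can never
 * legitimately be reached.
 */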
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

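/*
 * hugetlbfs pages are never written back; a truncated or hole-punched
 * page is simply discarded, so clear its dirty/uptodate state before
 * dropping it from the page cache.
 */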
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
				     NULL, zap_flags);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  The region/reserve map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct folio_batch fbatch;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	folio_batch_init(&fbatch);
	next = start;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index;
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				hash = hugetlb_fault_mutex_hash(mapping, index);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If folio is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the folio.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(folio_mapped(folio))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h),
					ZAP_FLAG_DROP_MARKER);
				i_mmap_unlock_write(mapping);
			}

			folio_lock(folio);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage counts may
			 * need to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(&folio->page));
			remove_huge_page(&folio->page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			folio_unlock(folio);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

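	/*
	 * Example with 2MB huge pages: offset=1MB, len=4MB covers
	 * [1MB, 5MB) but yields hole_start=2MB, hole_end=4MB, so only
	 * the single fully covered huge page is punched and the
	 * partially covered pages at either edge are left intact.
	 */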
	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_rwsem */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page() because the page was locked by
		 * huge_add_to_page_cache(); put_page() drops the reference
		 * taken by alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
				struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, dst, src);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

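	/*
	 * Hand the subpool pointer over from the source page to the
	 * destination so per-mount reservation accounting keeps following
	 * the data across the migration.
	 */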
	if (hugetlb_page_subpool(&src->page)) {
		hugetlb_set_page_subpool(&dst->page,
					hugetlb_page_subpool(&src->page));
		hugetlb_set_page_subpool(&src->page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

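/*
 * Called on the memory-failure (hwpoison) path: drop the poisoned page
 * from the page cache and give back its reservation so the reserve
 * counts stay consistent.
 */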
98578bb9203SNaoya Horiguchi static int hugetlbfs_error_remove_page(struct address_space *mapping,
98678bb9203SNaoya Horiguchi 				struct page *page)
98778bb9203SNaoya Horiguchi {
98878bb9203SNaoya Horiguchi 	struct inode *inode = mapping->host;
989ab615a5bSMike Kravetz 	pgoff_t index = page->index;
99078bb9203SNaoya Horiguchi 
99178bb9203SNaoya Horiguchi 	remove_huge_page(page);
992ab615a5bSMike Kravetz 	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
99378bb9203SNaoya Horiguchi 		hugetlb_fix_reserve_counts(inode);
994ab615a5bSMike Kravetz 
99578bb9203SNaoya Horiguchi 	return 0;
99678bb9203SNaoya Horiguchi }
99778bb9203SNaoya Horiguchi 
9984a25220dSDavid Howells /*
9994a25220dSDavid Howells  * Display the mount options in /proc/mounts.
10004a25220dSDavid Howells  */
10014a25220dSDavid Howells static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
10024a25220dSDavid Howells {
10034a25220dSDavid Howells 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
10044a25220dSDavid Howells 	struct hugepage_subpool *spool = sbinfo->spool;
10054a25220dSDavid Howells 	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
10064a25220dSDavid Howells 	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
10074a25220dSDavid Howells 	char mod;
10084a25220dSDavid Howells 
10094a25220dSDavid Howells 	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
10104a25220dSDavid Howells 		seq_printf(m, ",uid=%u",
10114a25220dSDavid Howells 			   from_kuid_munged(&init_user_ns, sbinfo->uid));
10124a25220dSDavid Howells 	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
10134a25220dSDavid Howells 		seq_printf(m, ",gid=%u",
10144a25220dSDavid Howells 			   from_kgid_munged(&init_user_ns, sbinfo->gid));
10154a25220dSDavid Howells 	if (sbinfo->mode != 0755)
10164a25220dSDavid Howells 		seq_printf(m, ",mode=%o", sbinfo->mode);
10174a25220dSDavid Howells 	if (sbinfo->max_inodes != -1)
10184a25220dSDavid Howells 		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
10194a25220dSDavid Howells 
10204a25220dSDavid Howells 	hpage_size /= 1024;
10214a25220dSDavid Howells 	mod = 'K';
10224a25220dSDavid Howells 	if (hpage_size >= 1024) {
10234a25220dSDavid Howells 		hpage_size /= 1024;
10244a25220dSDavid Howells 		mod = 'M';
10254a25220dSDavid Howells 	}
10264a25220dSDavid Howells 	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
10274a25220dSDavid Howells 	if (spool) {
10284a25220dSDavid Howells 		if (spool->max_hpages != -1)
10294a25220dSDavid Howells 			seq_printf(m, ",size=%llu",
10304a25220dSDavid Howells 				   (unsigned long long)spool->max_hpages << hpage_shift);
10314a25220dSDavid Howells 		if (spool->min_hpages != -1)
10324a25220dSDavid Howells 			seq_printf(m, ",min_size=%llu",
10334a25220dSDavid Howells 				   (unsigned long long)spool->min_hpages << hpage_shift);
10344a25220dSDavid Howells 	}
10354a25220dSDavid Howells 	return 0;
10364a25220dSDavid Howells }
10374a25220dSDavid Howells 
1038726c3342SDavid Howells static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
10391da177e4SLinus Torvalds {
1040726c3342SDavid Howells 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
10412b0143b5SDavid Howells 	struct hstate *h = hstate_inode(d_inode(dentry));
10421da177e4SLinus Torvalds 
10431da177e4SLinus Torvalds 	buf->f_type = HUGETLBFS_MAGIC;
1044a5516438SAndi Kleen 	buf->f_bsize = huge_page_size(h);
10451da177e4SLinus Torvalds 	if (sbinfo) {
10461da177e4SLinus Torvalds 		spin_lock(&sbinfo->stat_lock);
104774a8a65cSDavid Gibson 		/* If no limits set, just report 0 for max/free/used
104874a8a65cSDavid Gibson 		 * blocks, like simple_statfs() */
104990481622SDavid Gibson 		if (sbinfo->spool) {
105090481622SDavid Gibson 			long free_pages;
105190481622SDavid Gibson 
10524b25f030SMina Almasry 			spin_lock_irq(&sbinfo->spool->lock);
105390481622SDavid Gibson 			buf->f_blocks = sbinfo->spool->max_hpages;
105490481622SDavid Gibson 			free_pages = sbinfo->spool->max_hpages
105590481622SDavid Gibson 				- sbinfo->spool->used_hpages;
105690481622SDavid Gibson 			buf->f_bavail = buf->f_bfree = free_pages;
10574b25f030SMina Almasry 			spin_unlock_irq(&sbinfo->spool->lock);
10581da177e4SLinus Torvalds 			buf->f_files = sbinfo->max_inodes;
10591da177e4SLinus Torvalds 			buf->f_ffree = sbinfo->free_inodes;
106074a8a65cSDavid Gibson 		}
10611da177e4SLinus Torvalds 		spin_unlock(&sbinfo->stat_lock);
10621da177e4SLinus Torvalds 	}
10631da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
10641da177e4SLinus Torvalds 	return 0;
10651da177e4SLinus Torvalds }
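/*
 * Worked example with hypothetical values: for a 2 MB hstate, f_bsize
 * is 2097152; a subpool created with size=1G has max_hpages = 512, so
 * f_blocks = 512 and f_bfree = 512 - used_hpages.
 */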
10661da177e4SLinus Torvalds 
10671da177e4SLinus Torvalds static void hugetlbfs_put_super(struct super_block *sb)
10681da177e4SLinus Torvalds {
10691da177e4SLinus Torvalds 	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
10701da177e4SLinus Torvalds 
10711da177e4SLinus Torvalds 	if (sbi) {
10721da177e4SLinus Torvalds 		sb->s_fs_info = NULL;
107390481622SDavid Gibson 
107490481622SDavid Gibson 		if (sbi->spool)
107590481622SDavid Gibson 			hugepage_put_subpool(sbi->spool);
107690481622SDavid Gibson 
10771da177e4SLinus Torvalds 		kfree(sbi);
10781da177e4SLinus Torvalds 	}
10791da177e4SLinus Torvalds }
10801da177e4SLinus Torvalds 
108196527980SChristoph Hellwig static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
108296527980SChristoph Hellwig {
108396527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
108496527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
108596527980SChristoph Hellwig 		if (unlikely(!sbinfo->free_inodes)) {
108696527980SChristoph Hellwig 			spin_unlock(&sbinfo->stat_lock);
108796527980SChristoph Hellwig 			return 0;
108896527980SChristoph Hellwig 		}
108996527980SChristoph Hellwig 		sbinfo->free_inodes--;
109096527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
109196527980SChristoph Hellwig 	}
109296527980SChristoph Hellwig 
109396527980SChristoph Hellwig 	return 1;
109496527980SChristoph Hellwig }
109596527980SChristoph Hellwig 
109696527980SChristoph Hellwig static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
109796527980SChristoph Hellwig {
109896527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
109996527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
110096527980SChristoph Hellwig 		sbinfo->free_inodes++;
110196527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
110296527980SChristoph Hellwig 	}
110396527980SChristoph Hellwig }
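/*
 * Note on the two helpers above: free_inodes is seeded from nr_inodes,
 * which defaults to -1 ("no limit").  The free_inodes >= 0 guard makes
 * both helpers no-ops in that case, so an unlimited filesystem never
 * takes stat_lock on inode allocation or destruction.
 */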
110496527980SChristoph Hellwig 
110596527980SChristoph Hellwig 
1106e18b890bSChristoph Lameter static struct kmem_cache *hugetlbfs_inode_cachep;
11071da177e4SLinus Torvalds 
11081da177e4SLinus Torvalds static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
11091da177e4SLinus Torvalds {
111096527980SChristoph Hellwig 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
11111da177e4SLinus Torvalds 	struct hugetlbfs_inode_info *p;
11121da177e4SLinus Torvalds 
111396527980SChristoph Hellwig 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
11141da177e4SLinus Torvalds 		return NULL;
1115fd60b288SMuchun Song 	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
111696527980SChristoph Hellwig 	if (unlikely(!p)) {
111796527980SChristoph Hellwig 		hugetlbfs_inc_free_inodes(sbinfo);
111896527980SChristoph Hellwig 		return NULL;
11191da177e4SLinus Torvalds 	}
11204742a35dSMike Kravetz 
11214742a35dSMike Kravetz 	/*
11224742a35dSMike Kravetz 	 * Any time after allocation, hugetlbfs_destroy_inode can be called
11234742a35dSMike Kravetz 	 * for the inode.  mpol_free_shared_policy is unconditionally called
11244742a35dSMike Kravetz 	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
11254742a35dSMike Kravetz 	 * in case of a quick call to destroy.
11264742a35dSMike Kravetz 	 *
11274742a35dSMike Kravetz 	 * Note that the policy is initialized even if we are creating a
11284742a35dSMike Kravetz 	 * private inode.  This simplifies hugetlbfs_destroy_inode.
11294742a35dSMike Kravetz 	 */
11304742a35dSMike Kravetz 	mpol_shared_policy_init(&p->policy, NULL);
11314742a35dSMike Kravetz 
113296527980SChristoph Hellwig 	return &p->vfs_inode;
11331da177e4SLinus Torvalds }
11341da177e4SLinus Torvalds 
1135b62de322SAl Viro static void hugetlbfs_free_inode(struct inode *inode)
1136fa0d7e3dSNick Piggin {
1137fa0d7e3dSNick Piggin 	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1138fa0d7e3dSNick Piggin }
1139fa0d7e3dSNick Piggin 
11401da177e4SLinus Torvalds static void hugetlbfs_destroy_inode(struct inode *inode)
11411da177e4SLinus Torvalds {
114296527980SChristoph Hellwig 	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
11431da177e4SLinus Torvalds 	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
11441da177e4SLinus Torvalds }
11451da177e4SLinus Torvalds 
1146f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops = {
1147800d15a5SNick Piggin 	.write_begin	= hugetlbfs_write_begin,
1148800d15a5SNick Piggin 	.write_end	= hugetlbfs_write_end,
114946de8b97SMatthew Wilcox (Oracle) 	.dirty_folio	= noop_dirty_folio,
1150*b890ec2aSMatthew Wilcox (Oracle) 	.migrate_folio  = hugetlbfs_migrate_folio,
115178bb9203SNaoya Horiguchi 	.error_remove_page	= hugetlbfs_error_remove_page,
11521da177e4SLinus Torvalds };
11531da177e4SLinus Torvalds 
115496527980SChristoph Hellwig 
115551cc5068SAlexey Dobriyan static void init_once(void *foo)
115696527980SChristoph Hellwig {
115796527980SChristoph Hellwig 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
115896527980SChristoph Hellwig 
115996527980SChristoph Hellwig 	inode_init_once(&ei->vfs_inode);
116096527980SChristoph Hellwig }
116196527980SChristoph Hellwig 
11624b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations = {
116334d0640eSAl Viro 	.read_iter		= hugetlbfs_read_iter,
11641da177e4SLinus Torvalds 	.mmap			= hugetlbfs_file_mmap,
11651b061d92SChristoph Hellwig 	.fsync			= noop_fsync,
11661da177e4SLinus Torvalds 	.get_unmapped_area	= hugetlb_get_unmapped_area,
11676038f373SArnd Bergmann 	.llseek			= default_llseek,
116870c3547eSMike Kravetz 	.fallocate		= hugetlbfs_fallocate,
11691da177e4SLinus Torvalds };
11701da177e4SLinus Torvalds 
117192e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations = {
11721da177e4SLinus Torvalds 	.create		= hugetlbfs_create,
11731da177e4SLinus Torvalds 	.lookup		= simple_lookup,
11741da177e4SLinus Torvalds 	.link		= simple_link,
11751da177e4SLinus Torvalds 	.unlink		= simple_unlink,
11761da177e4SLinus Torvalds 	.symlink	= hugetlbfs_symlink,
11771da177e4SLinus Torvalds 	.mkdir		= hugetlbfs_mkdir,
11781da177e4SLinus Torvalds 	.rmdir		= simple_rmdir,
11791da177e4SLinus Torvalds 	.mknod		= hugetlbfs_mknod,
11801da177e4SLinus Torvalds 	.rename		= simple_rename,
11811da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
11821ab5b82fSPiotr Sarna 	.tmpfile	= hugetlbfs_tmpfile,
11831da177e4SLinus Torvalds };
11841da177e4SLinus Torvalds 
118592e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations = {
11861da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
11871da177e4SLinus Torvalds };
11881da177e4SLinus Torvalds 
1189ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops = {
11901da177e4SLinus Torvalds 	.alloc_inode    = hugetlbfs_alloc_inode,
1191b62de322SAl Viro 	.free_inode     = hugetlbfs_free_inode,
11921da177e4SLinus Torvalds 	.destroy_inode  = hugetlbfs_destroy_inode,
11932bbbda30SAl Viro 	.evict_inode	= hugetlbfs_evict_inode,
11941da177e4SLinus Torvalds 	.statfs		= hugetlbfs_statfs,
11951da177e4SLinus Torvalds 	.put_super	= hugetlbfs_put_super,
11964a25220dSDavid Howells 	.show_options	= hugetlbfs_show_options,
11971da177e4SLinus Torvalds };
11981da177e4SLinus Torvalds 
11997ca02d0aSMike Kravetz /*
12007ca02d0aSMike Kravetz  * Convert size option passed from command line to number of huge pages
12017ca02d0aSMike Kravetz  * in the pool specified by hstate.  Size option could be in bytes
12027ca02d0aSMike Kravetz  * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
12037ca02d0aSMike Kravetz  */
12044a25220dSDavid Howells static long
12057ca02d0aSMike Kravetz hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
12064a25220dSDavid Howells 			 enum hugetlbfs_size_type val_type)
12077ca02d0aSMike Kravetz {
12087ca02d0aSMike Kravetz 	if (val_type == NO_SIZE)
12097ca02d0aSMike Kravetz 		return -1;
12107ca02d0aSMike Kravetz 
12117ca02d0aSMike Kravetz 	if (val_type == SIZE_PERCENT) {
12127ca02d0aSMike Kravetz 		size_opt <<= huge_page_shift(h);
12137ca02d0aSMike Kravetz 		size_opt *= h->max_huge_pages;
12147ca02d0aSMike Kravetz 		do_div(size_opt, 100);
12157ca02d0aSMike Kravetz 	}
12167ca02d0aSMike Kravetz 
12177ca02d0aSMike Kravetz 	size_opt >>= huge_page_shift(h);
12187ca02d0aSMike Kravetz 	return size_opt;
12197ca02d0aSMike Kravetz }
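/*
 * Worked example with hypothetical numbers: for a 2 MB hstate whose
 * pool has max_huge_pages = 512, "size=50%" arrives as size_opt = 50
 * with SIZE_PERCENT.  That becomes ((50 << 21) * 512 / 100) >> 21 = 256
 * huge pages, i.e. half of the pool.
 */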
12207ca02d0aSMike Kravetz 
122132021982SDavid Howells /*
122232021982SDavid Howells  * Parse one mount parameter.
122332021982SDavid Howells  */
122432021982SDavid Howells static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
12251da177e4SLinus Torvalds {
122632021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
122732021982SDavid Howells 	struct fs_parse_result result;
122832021982SDavid Howells 	char *rest;
122932021982SDavid Howells 	unsigned long ps;
123032021982SDavid Howells 	int opt;
12311da177e4SLinus Torvalds 
1232d7167b14SAl Viro 	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
123332021982SDavid Howells 	if (opt < 0)
123432021982SDavid Howells 		return opt;
123532021982SDavid Howells 
123632021982SDavid Howells 	switch (opt) {
123732021982SDavid Howells 	case Opt_uid:
123832021982SDavid Howells 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
123932021982SDavid Howells 		if (!uid_valid(ctx->uid))
124032021982SDavid Howells 			goto bad_val;
12411da177e4SLinus Torvalds 		return 0;
12421da177e4SLinus Torvalds 
1243e73a75faSRandy Dunlap 	case Opt_gid:
124432021982SDavid Howells 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
124532021982SDavid Howells 		if (!gid_valid(ctx->gid))
1246e73a75faSRandy Dunlap 			goto bad_val;
124732021982SDavid Howells 		return 0;
1248e73a75faSRandy Dunlap 
1249e73a75faSRandy Dunlap 	case Opt_mode:
125032021982SDavid Howells 		ctx->mode = result.uint_32 & 01777U;
125132021982SDavid Howells 		return 0;
1252e73a75faSRandy Dunlap 
125332021982SDavid Howells 	case Opt_size:
1254e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
125532021982SDavid Howells 		if (!isdigit(param->string[0]))
1256e73a75faSRandy Dunlap 			goto bad_val;
125732021982SDavid Howells 		ctx->max_size_opt = memparse(param->string, &rest);
125832021982SDavid Howells 		ctx->max_val_type = SIZE_STD;
1259a137e1ccSAndi Kleen 		if (*rest == '%')
126032021982SDavid Howells 			ctx->max_val_type = SIZE_PERCENT;
126132021982SDavid Howells 		return 0;
12621da177e4SLinus Torvalds 
1263e73a75faSRandy Dunlap 	case Opt_nr_inodes:
1264e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
126532021982SDavid Howells 		if (!isdigit(param->string[0]))
1266e73a75faSRandy Dunlap 			goto bad_val;
126732021982SDavid Howells 		ctx->nr_inodes = memparse(param->string, &rest);
126832021982SDavid Howells 		return 0;
1269e73a75faSRandy Dunlap 
127032021982SDavid Howells 	case Opt_pagesize:
127132021982SDavid Howells 		ps = memparse(param->string, &rest);
127232021982SDavid Howells 		ctx->hstate = size_to_hstate(ps);
127332021982SDavid Howells 		if (!ctx->hstate) {
127432021982SDavid Howells 			pr_err("Unsupported page size %lu MB\n", ps >> 20);
1275a137e1ccSAndi Kleen 			return -EINVAL;
1276a137e1ccSAndi Kleen 		}
127732021982SDavid Howells 		return 0;
1278a137e1ccSAndi Kleen 
127932021982SDavid Howells 	case Opt_min_size:
12807ca02d0aSMike Kravetz 		/* memparse() will accept a K/M/G without a digit */
128132021982SDavid Howells 		if (!isdigit(param->string[0]))
12827ca02d0aSMike Kravetz 			goto bad_val;
128332021982SDavid Howells 		ctx->min_size_opt = memparse(param->string, &rest);
128432021982SDavid Howells 		ctx->min_val_type = SIZE_STD;
12857ca02d0aSMike Kravetz 		if (*rest == '%')
128632021982SDavid Howells 			ctx->min_val_type = SIZE_PERCENT;
128732021982SDavid Howells 		return 0;
12887ca02d0aSMike Kravetz 
1289e73a75faSRandy Dunlap 	default:
1290b4c07bceSLee Schermerhorn 		return -EINVAL;
1291e73a75faSRandy Dunlap 	}
129232021982SDavid Howells 
129332021982SDavid Howells bad_val:
1294b5db30cfSAl Viro 	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
129532021982SDavid Howells 		      param->string, param->key);
12961da177e4SLinus Torvalds }
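/*
 * Usage sketch (paths and values are examples only): the parameters
 * handled above correspond to a mount invocation such as
 *
 *   mount -t hugetlbfs -o uid=1000,gid=1000,mode=1770,pagesize=2M,size=50%,min_size=128M,nr_inodes=64 none /mnt/huge
 */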
1297a137e1ccSAndi Kleen 
12987ca02d0aSMike Kravetz /*
129932021982SDavid Howells  * Validate the parsed options.
130032021982SDavid Howells  */
130132021982SDavid Howells static int hugetlbfs_validate(struct fs_context *fc)
130232021982SDavid Howells {
130332021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
130432021982SDavid Howells 
130532021982SDavid Howells 	/*
13067ca02d0aSMike Kravetz 	 * Use huge page pool size (in hstate) to convert the size
13077ca02d0aSMike Kravetz 	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
13087ca02d0aSMike Kravetz 	 */
130932021982SDavid Howells 	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
131032021982SDavid Howells 						   ctx->max_size_opt,
131132021982SDavid Howells 						   ctx->max_val_type);
131232021982SDavid Howells 	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
131332021982SDavid Howells 						   ctx->min_size_opt,
131432021982SDavid Howells 						   ctx->min_val_type);
13157ca02d0aSMike Kravetz 
13167ca02d0aSMike Kravetz 	/*
13177ca02d0aSMike Kravetz 	 * If max_size was specified, then min_size must not be larger.
13187ca02d0aSMike Kravetz 	 */
131932021982SDavid Howells 	if (ctx->max_val_type > NO_SIZE &&
132032021982SDavid Howells 	    ctx->min_hpages > ctx->max_hpages) {
132132021982SDavid Howells 		pr_err("Minimum size cannot be greater than maximum size\n");
13227ca02d0aSMike Kravetz 		return -EINVAL;
1323a137e1ccSAndi Kleen 	}
1324a137e1ccSAndi Kleen 
13251da177e4SLinus Torvalds 	return 0;
13261da177e4SLinus Torvalds }
13271da177e4SLinus Torvalds 
13281da177e4SLinus Torvalds static int
132932021982SDavid Howells hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
13301da177e4SLinus Torvalds {
133132021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
13321da177e4SLinus Torvalds 	struct hugetlbfs_sb_info *sbinfo;
13331da177e4SLinus Torvalds 
13341da177e4SLinus Torvalds 	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
13351da177e4SLinus Torvalds 	if (!sbinfo)
13361da177e4SLinus Torvalds 		return -ENOMEM;
13371da177e4SLinus Torvalds 	sb->s_fs_info = sbinfo;
13381da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
133932021982SDavid Howells 	sbinfo->hstate		= ctx->hstate;
134032021982SDavid Howells 	sbinfo->max_inodes	= ctx->nr_inodes;
134132021982SDavid Howells 	sbinfo->free_inodes	= ctx->nr_inodes;
134290481622SDavid Gibson 	sbinfo->spool		= NULL;
134332021982SDavid Howells 	sbinfo->uid		= ctx->uid;
134432021982SDavid Howells 	sbinfo->gid		= ctx->gid;
134532021982SDavid Howells 	sbinfo->mode		= ctx->mode;
13464a25220dSDavid Howells 
13477ca02d0aSMike Kravetz 	/*
13487ca02d0aSMike Kravetz 	 * Allocate and initialize subpool if maximum or minimum size is
13491935ebd3SMiaohe Lin 	 * specified.  Any needed reservations (for minimum size) are taken
13507ca02d0aSMike Kravetz 	 * when the subpool is created.
13517ca02d0aSMike Kravetz 	 */
135232021982SDavid Howells 	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
135332021982SDavid Howells 		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
135432021982SDavid Howells 						     ctx->max_hpages,
135532021982SDavid Howells 						     ctx->min_hpages);
135690481622SDavid Gibson 		if (!sbinfo->spool)
135790481622SDavid Gibson 			goto out_free;
135890481622SDavid Gibson 	}
13591da177e4SLinus Torvalds 	sb->s_maxbytes = MAX_LFS_FILESIZE;
136032021982SDavid Howells 	sb->s_blocksize = huge_page_size(ctx->hstate);
136132021982SDavid Howells 	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
13621da177e4SLinus Torvalds 	sb->s_magic = HUGETLBFS_MAGIC;
13631da177e4SLinus Torvalds 	sb->s_op = &hugetlbfs_ops;
13641da177e4SLinus Torvalds 	sb->s_time_gran = 1;
136515568299SMike Kravetz 
136615568299SMike Kravetz 	/*
136715568299SMike Kravetz 	 * Due to the special and limited functionality of hugetlbfs, it does
136815568299SMike Kravetz 	 * not work well as a stacking filesystem.
136915568299SMike Kravetz 	 */
137015568299SMike Kravetz 	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
137132021982SDavid Howells 	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
137248fde701SAl Viro 	if (!sb->s_root)
13731da177e4SLinus Torvalds 		goto out_free;
13741da177e4SLinus Torvalds 	return 0;
13751da177e4SLinus Torvalds out_free:
137690481622SDavid Gibson 	kfree(sbinfo->spool);
13771da177e4SLinus Torvalds 	kfree(sbinfo);
13781da177e4SLinus Torvalds 	return -ENOMEM;
13791da177e4SLinus Torvalds }
13801da177e4SLinus Torvalds 
138132021982SDavid Howells static int hugetlbfs_get_tree(struct fs_context *fc)
13821da177e4SLinus Torvalds {
138332021982SDavid Howells 	int err = hugetlbfs_validate(fc);
138432021982SDavid Howells 	if (err)
138532021982SDavid Howells 		return err;
13862ac295d4SAl Viro 	return get_tree_nodev(fc, hugetlbfs_fill_super);
138732021982SDavid Howells }
138832021982SDavid Howells 
138932021982SDavid Howells static void hugetlbfs_fs_context_free(struct fs_context *fc)
139032021982SDavid Howells {
139132021982SDavid Howells 	kfree(fc->fs_private);
139232021982SDavid Howells }
139332021982SDavid Howells 
139432021982SDavid Howells static const struct fs_context_operations hugetlbfs_fs_context_ops = {
139532021982SDavid Howells 	.free		= hugetlbfs_fs_context_free,
139632021982SDavid Howells 	.parse_param	= hugetlbfs_parse_param,
139732021982SDavid Howells 	.get_tree	= hugetlbfs_get_tree,
139832021982SDavid Howells };
139932021982SDavid Howells 
140032021982SDavid Howells static int hugetlbfs_init_fs_context(struct fs_context *fc)
140132021982SDavid Howells {
140232021982SDavid Howells 	struct hugetlbfs_fs_context *ctx;
140332021982SDavid Howells 
140432021982SDavid Howells 	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
140532021982SDavid Howells 	if (!ctx)
140632021982SDavid Howells 		return -ENOMEM;
140732021982SDavid Howells 
140832021982SDavid Howells 	ctx->max_hpages	= -1; /* No limit on size by default */
140932021982SDavid Howells 	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
141032021982SDavid Howells 	ctx->uid	= current_fsuid();
141132021982SDavid Howells 	ctx->gid	= current_fsgid();
141232021982SDavid Howells 	ctx->mode	= 0755;
141332021982SDavid Howells 	ctx->hstate	= &default_hstate;
141432021982SDavid Howells 	ctx->min_hpages	= -1; /* No default minimum size */
141532021982SDavid Howells 	ctx->max_val_type = NO_SIZE;
141632021982SDavid Howells 	ctx->min_val_type = NO_SIZE;
141732021982SDavid Howells 	fc->fs_private = ctx;
141832021982SDavid Howells 	fc->ops	= &hugetlbfs_fs_context_ops;
141932021982SDavid Howells 	return 0;
14201da177e4SLinus Torvalds }
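/*
 * With these defaults, a bare "mount -t hugetlbfs none /mnt/huge"
 * (path illustrative) uses the default huge page size, mode 0755, and
 * no limit on either size or inode count.
 */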
14211da177e4SLinus Torvalds 
14221da177e4SLinus Torvalds static struct file_system_type hugetlbfs_fs_type = {
14231da177e4SLinus Torvalds 	.name			= "hugetlbfs",
142432021982SDavid Howells 	.init_fs_context	= hugetlbfs_init_fs_context,
1425d7167b14SAl Viro 	.parameters		= hugetlb_fs_parameters,
14261da177e4SLinus Torvalds 	.kill_sb		= kill_litter_super,
14271da177e4SLinus Torvalds };
14281da177e4SLinus Torvalds 
142942d7395fSAndi Kleen static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
14301da177e4SLinus Torvalds 
1431ef1ff6b8SMel Gorman static int can_do_hugetlb_shm(void)
14321da177e4SLinus Torvalds {
1433a0eb3a05SEric W. Biederman 	kgid_t shm_group;
1434a0eb3a05SEric W. Biederman 	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1435a0eb3a05SEric W. Biederman 	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
14361da177e4SLinus Torvalds }
14371da177e4SLinus Torvalds 
143842d7395fSAndi Kleen static int get_hstate_idx(int page_size_log)
143942d7395fSAndi Kleen {
1440af73e4d9SNaoya Horiguchi 	struct hstate *h = hstate_sizelog(page_size_log);
144142d7395fSAndi Kleen 
144242d7395fSAndi Kleen 	if (!h)
144342d7395fSAndi Kleen 		return -1;
144404adbc3fSMiaohe Lin 	return hstate_index(h);
144542d7395fSAndi Kleen }
144642d7395fSAndi Kleen 
1447af73e4d9SNaoya Horiguchi /*
1448af73e4d9SNaoya Horiguchi  * Note that size should be aligned to the proper hugepage size on the caller
1449af73e4d9SNaoya Horiguchi  * side; otherwise hugetlb_reserve_pages() reserves one less hugepage than intended.
1450af73e4d9SNaoya Horiguchi  */
1451af73e4d9SNaoya Horiguchi struct file *hugetlb_file_setup(const char *name, size_t size,
145283c1fd76Szhangyiru 				vm_flags_t acctflag, int creat_flags,
145383c1fd76Szhangyiru 				int page_size_log)
14541da177e4SLinus Torvalds {
14551da177e4SLinus Torvalds 	struct inode *inode;
1456e68375c8SAl Viro 	struct vfsmount *mnt;
145742d7395fSAndi Kleen 	int hstate_idx;
1458e68375c8SAl Viro 	struct file *file;
145942d7395fSAndi Kleen 
146042d7395fSAndi Kleen 	hstate_idx = get_hstate_idx(page_size_log);
146142d7395fSAndi Kleen 	if (hstate_idx < 0)
146242d7395fSAndi Kleen 		return ERR_PTR(-ENODEV);
14631da177e4SLinus Torvalds 
1464e68375c8SAl Viro 	mnt = hugetlbfs_vfsmount[hstate_idx];
1465e68375c8SAl Viro 	if (!mnt)
14665bc98594SAkinobu Mita 		return ERR_PTR(-ENOENT);
14675bc98594SAkinobu Mita 
1468ef1ff6b8SMel Gorman 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
146983c1fd76Szhangyiru 		struct ucounts *ucounts = current_ucounts();
147083c1fd76Szhangyiru 
147183c1fd76Szhangyiru 		if (user_shm_lock(size, ucounts)) {
147283c1fd76Szhangyiru 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
147321a3c273SDavid Rientjes 				current->comm, current->pid);
147483c1fd76Szhangyiru 			user_shm_unlock(size, ucounts);
14752584e517SRavikiran G Thirumalai 		}
147683c1fd76Szhangyiru 		return ERR_PTR(-EPERM);
1477353d5c30SHugh Dickins 	}
14781da177e4SLinus Torvalds 
147939b65252SAnatol Pomozov 	file = ERR_PTR(-ENOSPC);
1480e68375c8SAl Viro 	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
14811da177e4SLinus Torvalds 	if (!inode)
1482e68375c8SAl Viro 		goto out;
1483e1832f29SStephen Smalley 	if (creat_flags == HUGETLB_SHMFS_INODE)
1484e1832f29SStephen Smalley 		inode->i_flags |= S_PRIVATE;
14851da177e4SLinus Torvalds 
14861da177e4SLinus Torvalds 	inode->i_size = size;
14876d6b77f1SMiklos Szeredi 	clear_nlink(inode);
1488ce8d2cdfSDave Hansen 
148933b8f84aSMike Kravetz 	if (!hugetlb_reserve_pages(inode, 0,
1490e68375c8SAl Viro 			size >> huge_page_shift(hstate_inode(inode)), NULL,
1491e68375c8SAl Viro 			acctflag))
1492e68375c8SAl Viro 		file = ERR_PTR(-ENOMEM);
1493e68375c8SAl Viro 	else
1494e68375c8SAl Viro 		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1495ce8d2cdfSDave Hansen 					&hugetlbfs_file_operations);
1496e68375c8SAl Viro 	if (!IS_ERR(file))
14971da177e4SLinus Torvalds 		return file;
14981da177e4SLinus Torvalds 
1499b45b5bd6SDavid Gibson 	iput(inode);
1500e68375c8SAl Viro out:
150139b65252SAnatol Pomozov 	return file;
15021da177e4SLinus Torvalds }
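/*
 * Caller-side sketch (not from this file) of the alignment rule noted
 * above; assumes page_size_log names a supported huge page size:
 *
 *	struct hstate *h = hstate_sizelog(page_size_log);
 *	size_t hugesize = ALIGN(size, huge_page_size(h));
 *	struct file *file = hugetlb_file_setup(name, hugesize, acctflag,
 *					       HUGETLB_ANONHUGE_INODE,
 *					       page_size_log);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */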
15031da177e4SLinus Torvalds 
150432021982SDavid Howells static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
150532021982SDavid Howells {
150632021982SDavid Howells 	struct fs_context *fc;
150732021982SDavid Howells 	struct vfsmount *mnt;
150832021982SDavid Howells 
150932021982SDavid Howells 	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
151032021982SDavid Howells 	if (IS_ERR(fc)) {
151132021982SDavid Howells 		mnt = ERR_CAST(fc);
151232021982SDavid Howells 	} else {
151332021982SDavid Howells 		struct hugetlbfs_fs_context *ctx = fc->fs_private;
151432021982SDavid Howells 		ctx->hstate = h;
151532021982SDavid Howells 		mnt = fc_mount(fc);
151632021982SDavid Howells 		put_fs_context(fc);
151732021982SDavid Howells 	}
151832021982SDavid Howells 	if (IS_ERR(mnt))
1519a25fddceSMiaohe Lin 		pr_err("Cannot mount internal hugetlbfs for page size %luK\n",
1520a25fddceSMiaohe Lin 		       huge_page_size(h) >> 10);
152132021982SDavid Howells 	return mnt;
152232021982SDavid Howells }
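/*
 * The mounts created here populate hugetlbfs_vfsmount[]; they are
 * internal SB_KERNMOUNT mounts that back hugetlb_file_setup() above
 * rather than anything visible in userspace mount tables.
 */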
152332021982SDavid Howells 
15241da177e4SLinus Torvalds static int __init init_hugetlbfs_fs(void)
15251da177e4SLinus Torvalds {
152632021982SDavid Howells 	struct vfsmount *mnt;
152742d7395fSAndi Kleen 	struct hstate *h;
15281da177e4SLinus Torvalds 	int error;
152942d7395fSAndi Kleen 	int i;
15301da177e4SLinus Torvalds 
1531457c1b27SNishanth Aravamudan 	if (!hugepages_supported()) {
15329b857d26SAndrew Morton 		pr_info("disabling because there are no supported hugepage sizes\n");
1533457c1b27SNishanth Aravamudan 		return -ENOTSUPP;
1534457c1b27SNishanth Aravamudan 	}
1535457c1b27SNishanth Aravamudan 
1536d1d5e05fSHillf Danton 	error = -ENOMEM;
15371da177e4SLinus Torvalds 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
15381da177e4SLinus Torvalds 					sizeof(struct hugetlbfs_inode_info),
15395d097056SVladimir Davydov 					0, SLAB_ACCOUNT, init_once);
15401da177e4SLinus Torvalds 	if (hugetlbfs_inode_cachep == NULL)
15418fc312b3SMike Kravetz 		goto out;
15421da177e4SLinus Torvalds 
15431da177e4SLinus Torvalds 	error = register_filesystem(&hugetlbfs_fs_type);
15441da177e4SLinus Torvalds 	if (error)
15458fc312b3SMike Kravetz 		goto out_free;
15461da177e4SLinus Torvalds 
15478fc312b3SMike Kravetz 	/* default hstate mount is required */
15483b2275a8SMiaohe Lin 	mnt = mount_one_hugetlbfs(&default_hstate);
15498fc312b3SMike Kravetz 	if (IS_ERR(mnt)) {
15508fc312b3SMike Kravetz 		error = PTR_ERR(mnt);
15518fc312b3SMike Kravetz 		goto out_unreg;
15528fc312b3SMike Kravetz 	}
15538fc312b3SMike Kravetz 	hugetlbfs_vfsmount[default_hstate_idx] = mnt;
15548fc312b3SMike Kravetz 
15558fc312b3SMike Kravetz 	/* other hstates are optional */
155642d7395fSAndi Kleen 	i = 0;
155742d7395fSAndi Kleen 	for_each_hstate(h) {
155815f0ec94SJan Stancek 		if (i == default_hstate_idx) {
155915f0ec94SJan Stancek 			i++;
15608fc312b3SMike Kravetz 			continue;
156115f0ec94SJan Stancek 		}
15628fc312b3SMike Kravetz 
156332021982SDavid Howells 		mnt = mount_one_hugetlbfs(h);
15648fc312b3SMike Kravetz 		if (IS_ERR(mnt))
15658fc312b3SMike Kravetz 			hugetlbfs_vfsmount[i] = NULL;
15668fc312b3SMike Kravetz 		else
156732021982SDavid Howells 			hugetlbfs_vfsmount[i] = mnt;
156842d7395fSAndi Kleen 		i++;
156942d7395fSAndi Kleen 	}
157032021982SDavid Howells 
157142d7395fSAndi Kleen 	return 0;
15721da177e4SLinus Torvalds 
15738fc312b3SMike Kravetz  out_unreg:
15748fc312b3SMike Kravetz 	(void)unregister_filesystem(&hugetlbfs_fs_type);
15758fc312b3SMike Kravetz  out_free:
15761da177e4SLinus Torvalds 	kmem_cache_destroy(hugetlbfs_inode_cachep);
15778fc312b3SMike Kravetz  out:
15781da177e4SLinus Torvalds 	return error;
15791da177e4SLinus Torvalds }
15803e89e1c5SPaul Gortmaker fs_initcall(init_hugetlbfs_fs)
1581