/* xref: /openbmc/linux/fs/hugetlbfs/inode.c (revision 11680763) */
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
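
/*
 * Illustrative only (not part of the original source): these options
 * surface on the mount command line as, e.g.,
 *
 *	mount -t hugetlbfs -o size=1G,min_size=512M,pagesize=2M none /mnt/huge
 */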

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
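/*
 * Worked example (illustrative, not from the original source): on a
 * 64-bit arch with 4K pages, PAGE_SHIFT == 12 and BITS_PER_LONG == 64,
 * so the mask covers the top 13 bits of the unsigned long pgoff.  If
 * any of those bits are set, pgoff << PAGE_SHIFT would spill into (or
 * past) the sign bit of the resulting loff_t.
 */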

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * the is_vm_hugetlb_page() tests below unmap_region() go the
	 * right way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * The page based offset in vm_pgoff could be sufficiently large
	 * to overflow a loff_t when converted to a byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
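
/*
 * Illustrative userspace usage (a sketch, not part of this file): a
 * hugetlbfs file is typically mapped as
 *
 *	int fd = open("/dev/hugepages/foo", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * where both the length and the file offset must be multiples of the
 * huge page size, as enforced above and in hugetlb_get_unmapped_area().
 */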

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and the offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
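	/* Split ki_pos into a huge page index and a byte offset within it */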
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped.  Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
				     NULL, zap_flags);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  Region/reserve map entries for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash = 0;

			index = page->index;
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				hash = hugetlb_fault_mutex_hash(mapping, index);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h),
					ZAP_FLAG_DROP_MARKER);
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail. Correspondingly,
			 * the subpool and global reserve usage count may need
			 * to be adjusted.
			 */
			VM_BUG_ON(HPageRestoreReserve(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

5621da177e4SLinus Torvalds 
5632bbbda30SAl Viro static void hugetlbfs_evict_inode(struct inode *inode)
5641da177e4SLinus Torvalds {
5659119a41eSJoonsoo Kim 	struct resv_map *resv_map;
5669119a41eSJoonsoo Kim 
567b5cec28dSMike Kravetz 	remove_inode_hugepages(inode, 0, LLONG_MAX);
568f27a5136SMike Kravetz 
569f27a5136SMike Kravetz 	/*
570f27a5136SMike Kravetz 	 * Get the resv_map from the address space embedded in the inode.
571f27a5136SMike Kravetz 	 * This is the address space which points to any resv_map allocated
572f27a5136SMike Kravetz 	 * at inode creation time.  If this is a device special inode,
573f27a5136SMike Kravetz 	 * i_mapping may not point to the original address space.
574f27a5136SMike Kravetz 	 */
575f27a5136SMike Kravetz 	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
576f27a5136SMike Kravetz 	/* Only regular and link inodes have associated reserve maps */
5779119a41eSJoonsoo Kim 	if (resv_map)
5789119a41eSJoonsoo Kim 		resv_map_release(&resv_map->refs);
579dbd5768fSJan Kara 	clear_inode(inode);
580149f4211SChristoph Hellwig }
581149f4211SChristoph Hellwig 
static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
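	/*
	 * i_size is updated while i_mmap_rwsem is held so that concurrent
	 * faults, which check i_size under the same lock, cannot instantiate
	 * pages beyond the new EOF; see the comment above
	 * remove_inode_hugepages().
	 */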
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (!folio)
		return;

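	/*
	 * Reduce start/end to byte offsets within this huge page; an end of
	 * zero means the range ran right up to the huge page boundary.
	 */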
	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);
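	/*
	 * Illustrative example (not from the original source): with 2MB huge
	 * pages, offset = 1MB and len = 4MB give hole_start = 2MB and
	 * hole_end = 4MB; the full page [2MB, 4MB) is removed, while the
	 * partial ranges [1MB, 2MB) and [4MB, 5MB) are zeroed in place.
	 */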

	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;
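	/*
	 * Illustrative example (not from the original source): with 2MB huge
	 * pages, offset = 3MB and len = 2MB yield start = 1 and end = 3, so
	 * indices 1 and 2 are preallocated, covering bytes [2MB, 6MB).
	 */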

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, page);
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page() because the page was locked by
		 * add_to_page_cache(); put_page() drops the extra reference
		 * from alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

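	/*
	 * Transfer the subpool reference from the old page to the new one so
	 * that accounting follows the page that remains in the page cache.
	 */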
	if (hugetlb_page_subpool(page)) {
		hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
		hugetlb_set_page_subpool(page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

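	/* Report the huge page size with a K or M suffix, as in the
	 * pagesize= mount option (e.g. "pagesize=2M"). */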
10664a25220dSDavid Howells 	hpage_size /= 1024;
10674a25220dSDavid Howells 	mod = 'K';
10684a25220dSDavid Howells 	if (hpage_size >= 1024) {
10694a25220dSDavid Howells 		hpage_size /= 1024;
10704a25220dSDavid Howells 		mod = 'M';
10714a25220dSDavid Howells 	}
10724a25220dSDavid Howells 	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
10734a25220dSDavid Howells 	if (spool) {
10744a25220dSDavid Howells 		if (spool->max_hpages != -1)
10754a25220dSDavid Howells 			seq_printf(m, ",size=%llu",
10764a25220dSDavid Howells 				   (unsigned long long)spool->max_hpages << hpage_shift);
10774a25220dSDavid Howells 		if (spool->min_hpages != -1)
10784a25220dSDavid Howells 			seq_printf(m, ",min_size=%llu",
10794a25220dSDavid Howells 				   (unsigned long long)spool->min_hpages << hpage_shift);
10804a25220dSDavid Howells 	}
10814a25220dSDavid Howells 	return 0;
10824a25220dSDavid Howells }
10834a25220dSDavid Howells 
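/*
 * For illustration only (values are hypothetical): a mount with
 * uid=1000, gid=1000, mode=0700, a 2 MB hstate and size=1G would show
 * up in /proc/mounts roughly as:
 *
 *   none /mnt/huge hugetlbfs rw,relatime,uid=1000,gid=1000,mode=700,pagesize=2M,size=1073741824 0 0
 */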
1084726c3342SDavid Howells static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
10851da177e4SLinus Torvalds {
1086726c3342SDavid Howells 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
10872b0143b5SDavid Howells 	struct hstate *h = hstate_inode(d_inode(dentry));
10881da177e4SLinus Torvalds 
10891da177e4SLinus Torvalds 	buf->f_type = HUGETLBFS_MAGIC;
1090a5516438SAndi Kleen 	buf->f_bsize = huge_page_size(h);
10911da177e4SLinus Torvalds 	if (sbinfo) {
10921da177e4SLinus Torvalds 		spin_lock(&sbinfo->stat_lock);
1093*11680763SMiaohe Lin 		/* If no limits set, just report 0 or -1 for max/free/used
109474a8a65cSDavid Gibson 		 * blocks, like simple_statfs() */
109590481622SDavid Gibson 		if (sbinfo->spool) {
109690481622SDavid Gibson 			long free_pages;
109790481622SDavid Gibson 
10984b25f030SMina Almasry 			spin_lock_irq(&sbinfo->spool->lock);
109990481622SDavid Gibson 			buf->f_blocks = sbinfo->spool->max_hpages;
110090481622SDavid Gibson 			free_pages = sbinfo->spool->max_hpages
110190481622SDavid Gibson 				- sbinfo->spool->used_hpages;
110290481622SDavid Gibson 			buf->f_bavail = buf->f_bfree = free_pages;
11034b25f030SMina Almasry 			spin_unlock_irq(&sbinfo->spool->lock);
11041da177e4SLinus Torvalds 			buf->f_files = sbinfo->max_inodes;
11051da177e4SLinus Torvalds 			buf->f_ffree = sbinfo->free_inodes;
110674a8a65cSDavid Gibson 		}
11071da177e4SLinus Torvalds 		spin_unlock(&sbinfo->stat_lock);
11081da177e4SLinus Torvalds 	}
11091da177e4SLinus Torvalds 	buf->f_namelen = NAME_MAX;
11101da177e4SLinus Torvalds 	return 0;
11111da177e4SLinus Torvalds }
11121da177e4SLinus Torvalds 
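/*
 * A minimal userspace sketch (not part of this file; the mount point
 * path is an assumption) showing how the fields filled in above look
 * to callers: f_bsize is the huge page size, and f_blocks/f_bfree are
 * counted in huge pages (left at 0 when the mount has no size= limit).
 */
#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs buf;

	if (statfs("/dev/hugepages", &buf) != 0) {
		perror("statfs");
		return 1;
	}
	printf("huge page size: %ld\n", (long)buf.f_bsize);
	printf("total/free huge pages: %ld/%ld\n",
	       (long)buf.f_blocks, (long)buf.f_bfree);
	return 0;
}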
11131da177e4SLinus Torvalds static void hugetlbfs_put_super(struct super_block *sb)
11141da177e4SLinus Torvalds {
11151da177e4SLinus Torvalds 	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
11161da177e4SLinus Torvalds 
11171da177e4SLinus Torvalds 	if (sbi) {
11181da177e4SLinus Torvalds 		sb->s_fs_info = NULL;
111990481622SDavid Gibson 
112090481622SDavid Gibson 		if (sbi->spool)
112190481622SDavid Gibson 			hugepage_put_subpool(sbi->spool);
112290481622SDavid Gibson 
11231da177e4SLinus Torvalds 		kfree(sbi);
11241da177e4SLinus Torvalds 	}
11251da177e4SLinus Torvalds }
11261da177e4SLinus Torvalds 
112796527980SChristoph Hellwig static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
112896527980SChristoph Hellwig {
112996527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
113096527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
113196527980SChristoph Hellwig 		if (unlikely(!sbinfo->free_inodes)) {
113296527980SChristoph Hellwig 			spin_unlock(&sbinfo->stat_lock);
113396527980SChristoph Hellwig 			return 0;
113496527980SChristoph Hellwig 		}
113596527980SChristoph Hellwig 		sbinfo->free_inodes--;
113696527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
113796527980SChristoph Hellwig 	}
113896527980SChristoph Hellwig 
113996527980SChristoph Hellwig 	return 1;
114096527980SChristoph Hellwig }
114196527980SChristoph Hellwig 
114296527980SChristoph Hellwig static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
114396527980SChristoph Hellwig {
114496527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
114596527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
114696527980SChristoph Hellwig 		sbinfo->free_inodes++;
114796527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
114896527980SChristoph Hellwig 	}
114996527980SChristoph Hellwig }
115096527980SChristoph Hellwig 
115196527980SChristoph Hellwig 
1152e18b890bSChristoph Lameter static struct kmem_cache *hugetlbfs_inode_cachep;
11531da177e4SLinus Torvalds 
11541da177e4SLinus Torvalds static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
11551da177e4SLinus Torvalds {
115696527980SChristoph Hellwig 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
11571da177e4SLinus Torvalds 	struct hugetlbfs_inode_info *p;
11581da177e4SLinus Torvalds 
115996527980SChristoph Hellwig 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
11601da177e4SLinus Torvalds 		return NULL;
1161fd60b288SMuchun Song 	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
116296527980SChristoph Hellwig 	if (unlikely(!p)) {
116396527980SChristoph Hellwig 		hugetlbfs_inc_free_inodes(sbinfo);
116496527980SChristoph Hellwig 		return NULL;
11651da177e4SLinus Torvalds 	}
11664742a35dSMike Kravetz 
11674742a35dSMike Kravetz 	/*
11684742a35dSMike Kravetz 	 * Any time after allocation, hugetlbfs_destroy_inode can be called
11694742a35dSMike Kravetz 	 * for the inode.  mpol_free_shared_policy is unconditionally called
11704742a35dSMike Kravetz 	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
11714742a35dSMike Kravetz 	 * in case of a quick call to destroy.
11724742a35dSMike Kravetz 	 *
11734742a35dSMike Kravetz 	 * Note that the policy is initialized even if we are creating a
11744742a35dSMike Kravetz 	 * private inode.  This simplifies hugetlbfs_destroy_inode.
11754742a35dSMike Kravetz 	 */
11764742a35dSMike Kravetz 	mpol_shared_policy_init(&p->policy, NULL);
11774742a35dSMike Kravetz 
117896527980SChristoph Hellwig 	return &p->vfs_inode;
11791da177e4SLinus Torvalds }
11801da177e4SLinus Torvalds 
1181b62de322SAl Viro static void hugetlbfs_free_inode(struct inode *inode)
1182fa0d7e3dSNick Piggin {
1183fa0d7e3dSNick Piggin 	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1184fa0d7e3dSNick Piggin }
1185fa0d7e3dSNick Piggin 
11861da177e4SLinus Torvalds static void hugetlbfs_destroy_inode(struct inode *inode)
11871da177e4SLinus Torvalds {
118896527980SChristoph Hellwig 	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
11891da177e4SLinus Torvalds 	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
11901da177e4SLinus Torvalds }
11911da177e4SLinus Torvalds 
1192f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops = {
1193800d15a5SNick Piggin 	.write_begin	= hugetlbfs_write_begin,
1194800d15a5SNick Piggin 	.write_end	= hugetlbfs_write_end,
119546de8b97SMatthew Wilcox (Oracle) 	.dirty_folio	= noop_dirty_folio,
1196290408d4SNaoya Horiguchi 	.migratepage    = hugetlbfs_migrate_page,
119778bb9203SNaoya Horiguchi 	.error_remove_page	= hugetlbfs_error_remove_page,
11981da177e4SLinus Torvalds };
11991da177e4SLinus Torvalds 
120096527980SChristoph Hellwig 
120151cc5068SAlexey Dobriyan static void init_once(void *foo)
120296527980SChristoph Hellwig {
120396527980SChristoph Hellwig 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
120496527980SChristoph Hellwig 
120596527980SChristoph Hellwig 	inode_init_once(&ei->vfs_inode);
120696527980SChristoph Hellwig }
120796527980SChristoph Hellwig 
12084b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations = {
120934d0640eSAl Viro 	.read_iter		= hugetlbfs_read_iter,
12101da177e4SLinus Torvalds 	.mmap			= hugetlbfs_file_mmap,
12111b061d92SChristoph Hellwig 	.fsync			= noop_fsync,
12121da177e4SLinus Torvalds 	.get_unmapped_area	= hugetlb_get_unmapped_area,
12136038f373SArnd Bergmann 	.llseek			= default_llseek,
121470c3547eSMike Kravetz 	.fallocate		= hugetlbfs_fallocate,
12151da177e4SLinus Torvalds };
12161da177e4SLinus Torvalds 
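/*
 * Userspace sketch of the hooks above (file name and mount point are
 * assumptions; assumes a 2 MB default huge page size and an available
 * pool): fallocate() preallocates huge pages via hugetlbfs_fallocate()
 * and mmap() goes through hugetlbfs_file_mmap().
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;		/* one huge page */
	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0 || fallocate(fd, 0, 0, len) != 0) {
		perror("open/fallocate");
		return 1;
	}
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	((char *)p)[0] = 1;			/* fault in the huge page */
	/* Punching a hole hands the page back to the pool. */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, len);
	munmap(p, len);
	close(fd);
	return 0;
}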
121792e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations = {
12181da177e4SLinus Torvalds 	.create		= hugetlbfs_create,
12191da177e4SLinus Torvalds 	.lookup		= simple_lookup,
12201da177e4SLinus Torvalds 	.link		= simple_link,
12211da177e4SLinus Torvalds 	.unlink		= simple_unlink,
12221da177e4SLinus Torvalds 	.symlink	= hugetlbfs_symlink,
12231da177e4SLinus Torvalds 	.mkdir		= hugetlbfs_mkdir,
12241da177e4SLinus Torvalds 	.rmdir		= simple_rmdir,
12251da177e4SLinus Torvalds 	.mknod		= hugetlbfs_mknod,
12261da177e4SLinus Torvalds 	.rename		= simple_rename,
12271da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
12281ab5b82fSPiotr Sarna 	.tmpfile	= hugetlbfs_tmpfile,
12291da177e4SLinus Torvalds };
12301da177e4SLinus Torvalds 
123192e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations = {
12321da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
12331da177e4SLinus Torvalds };
12341da177e4SLinus Torvalds 
1235ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops = {
12361da177e4SLinus Torvalds 	.alloc_inode    = hugetlbfs_alloc_inode,
1237b62de322SAl Viro 	.free_inode     = hugetlbfs_free_inode,
12381da177e4SLinus Torvalds 	.destroy_inode  = hugetlbfs_destroy_inode,
12392bbbda30SAl Viro 	.evict_inode	= hugetlbfs_evict_inode,
12401da177e4SLinus Torvalds 	.statfs		= hugetlbfs_statfs,
12411da177e4SLinus Torvalds 	.put_super	= hugetlbfs_put_super,
12424a25220dSDavid Howells 	.show_options	= hugetlbfs_show_options,
12431da177e4SLinus Torvalds };
12441da177e4SLinus Torvalds 
12457ca02d0aSMike Kravetz /*
12467ca02d0aSMike Kravetz  * Convert size option passed from command line to number of huge pages
12477ca02d0aSMike Kravetz  * in the pool specified by hstate.  Size option could be in bytes
12487ca02d0aSMike Kravetz  * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
12497ca02d0aSMike Kravetz  */
12504a25220dSDavid Howells static long
12517ca02d0aSMike Kravetz hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
12524a25220dSDavid Howells 			 enum hugetlbfs_size_type val_type)
12537ca02d0aSMike Kravetz {
12547ca02d0aSMike Kravetz 	if (val_type == NO_SIZE)
12557ca02d0aSMike Kravetz 		return -1;
12567ca02d0aSMike Kravetz 
12577ca02d0aSMike Kravetz 	if (val_type == SIZE_PERCENT) {
12587ca02d0aSMike Kravetz 		size_opt <<= huge_page_shift(h);
12597ca02d0aSMike Kravetz 		size_opt *= h->max_huge_pages;
12607ca02d0aSMike Kravetz 		do_div(size_opt, 100);
12617ca02d0aSMike Kravetz 	}
12627ca02d0aSMike Kravetz 
12637ca02d0aSMike Kravetz 	size_opt >>= huge_page_shift(h);
12647ca02d0aSMike Kravetz 	return size_opt;
12657ca02d0aSMike Kravetz }
12667ca02d0aSMike Kravetz 
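/*
 * Worked example (hypothetical numbers): for a 2 MB hstate
 * (huge_page_shift == 21) with max_huge_pages == 512, "size=50%"
 * arrives here as size_opt == 50 and val_type == SIZE_PERCENT:
 *
 *   size_opt = (50 << 21) * 512 / 100 = 536870912 bytes
 *   size_opt >> 21                    = 256 huge pages
 *
 * whereas "size=1G" (SIZE_STD) is simply 1073741824 >> 21 == 512.
 */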
126732021982SDavid Howells /*
126832021982SDavid Howells  * Parse one mount parameter.
126932021982SDavid Howells  */
127032021982SDavid Howells static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
12711da177e4SLinus Torvalds {
127232021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
127332021982SDavid Howells 	struct fs_parse_result result;
127432021982SDavid Howells 	char *rest;
127532021982SDavid Howells 	unsigned long ps;
127632021982SDavid Howells 	int opt;
12771da177e4SLinus Torvalds 
1278d7167b14SAl Viro 	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
127932021982SDavid Howells 	if (opt < 0)
128032021982SDavid Howells 		return opt;
128132021982SDavid Howells 
128232021982SDavid Howells 	switch (opt) {
128332021982SDavid Howells 	case Opt_uid:
128432021982SDavid Howells 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
128532021982SDavid Howells 		if (!uid_valid(ctx->uid))
128632021982SDavid Howells 			goto bad_val;
12871da177e4SLinus Torvalds 		return 0;
12881da177e4SLinus Torvalds 
1289e73a75faSRandy Dunlap 	case Opt_gid:
129032021982SDavid Howells 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
129132021982SDavid Howells 		if (!gid_valid(ctx->gid))
1292e73a75faSRandy Dunlap 			goto bad_val;
129332021982SDavid Howells 		return 0;
1294e73a75faSRandy Dunlap 
1295e73a75faSRandy Dunlap 	case Opt_mode:
129632021982SDavid Howells 		ctx->mode = result.uint_32 & 01777U;
129732021982SDavid Howells 		return 0;
1298e73a75faSRandy Dunlap 
129932021982SDavid Howells 	case Opt_size:
1300e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
130132021982SDavid Howells 		if (!isdigit(param->string[0]))
1302e73a75faSRandy Dunlap 			goto bad_val;
130332021982SDavid Howells 		ctx->max_size_opt = memparse(param->string, &rest);
130432021982SDavid Howells 		ctx->max_val_type = SIZE_STD;
1305a137e1ccSAndi Kleen 		if (*rest == '%')
130632021982SDavid Howells 			ctx->max_val_type = SIZE_PERCENT;
130732021982SDavid Howells 		return 0;
13081da177e4SLinus Torvalds 
1309e73a75faSRandy Dunlap 	case Opt_nr_inodes:
1310e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
131132021982SDavid Howells 		if (!isdigit(param->string[0]))
1312e73a75faSRandy Dunlap 			goto bad_val;
131332021982SDavid Howells 		ctx->nr_inodes = memparse(param->string, &rest);
131432021982SDavid Howells 		return 0;
1315e73a75faSRandy Dunlap 
131632021982SDavid Howells 	case Opt_pagesize:
131732021982SDavid Howells 		ps = memparse(param->string, &rest);
131832021982SDavid Howells 		ctx->hstate = size_to_hstate(ps);
131932021982SDavid Howells 		if (!ctx->hstate) {
1320d0036517SMiaohe Lin 			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
1321a137e1ccSAndi Kleen 			return -EINVAL;
1322a137e1ccSAndi Kleen 		}
132332021982SDavid Howells 		return 0;
1324a137e1ccSAndi Kleen 
132532021982SDavid Howells 	case Opt_min_size:
13267ca02d0aSMike Kravetz 		/* memparse() will accept a K/M/G without a digit */
132732021982SDavid Howells 		if (!isdigit(param->string[0]))
13287ca02d0aSMike Kravetz 			goto bad_val;
132932021982SDavid Howells 		ctx->min_size_opt = memparse(param->string, &rest);
133032021982SDavid Howells 		ctx->min_val_type = SIZE_STD;
13317ca02d0aSMike Kravetz 		if (*rest == '%')
133232021982SDavid Howells 			ctx->min_val_type = SIZE_PERCENT;
133332021982SDavid Howells 		return 0;
13347ca02d0aSMike Kravetz 
1335e73a75faSRandy Dunlap 	default:
1336b4c07bceSLee Schermerhorn 		return -EINVAL;
1337e73a75faSRandy Dunlap 	}
133832021982SDavid Howells 
133932021982SDavid Howells bad_val:
1340b5db30cfSAl Viro 	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
134132021982SDavid Howells 		      param->string, param->key);
13421da177e4SLinus Torvalds }
1343a137e1ccSAndi Kleen 
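/*
 * The parameters handled above map directly onto mount options; e.g.
 * (paths and values are illustrative):
 *
 *   mount -t hugetlbfs -o uid=1000,gid=1000,mode=1777,pagesize=2M,size=50%,min_size=128M,nr_inodes=64 none /mnt/huge
 */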
13447ca02d0aSMike Kravetz /*
134532021982SDavid Howells  * Validate the parsed options.
134632021982SDavid Howells  */
134732021982SDavid Howells static int hugetlbfs_validate(struct fs_context *fc)
134832021982SDavid Howells {
134932021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
135032021982SDavid Howells 
135132021982SDavid Howells 	/*
13527ca02d0aSMike Kravetz 	 * Use huge page pool size (in hstate) to convert the size
13537ca02d0aSMike Kravetz 	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
13547ca02d0aSMike Kravetz 	 */
135532021982SDavid Howells 	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
135632021982SDavid Howells 						   ctx->max_size_opt,
135732021982SDavid Howells 						   ctx->max_val_type);
135832021982SDavid Howells 	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
135932021982SDavid Howells 						   ctx->min_size_opt,
136032021982SDavid Howells 						   ctx->min_val_type);
13617ca02d0aSMike Kravetz 
13627ca02d0aSMike Kravetz 	/*
13637ca02d0aSMike Kravetz 	 * If max_size was specified, then min_size must be smaller
13647ca02d0aSMike Kravetz 	 */
136532021982SDavid Howells 	if (ctx->max_val_type > NO_SIZE &&
136632021982SDavid Howells 	    ctx->min_hpages > ctx->max_hpages) {
136732021982SDavid Howells 		pr_err("Minimum size cannot be greater than maximum size\n");
13687ca02d0aSMike Kravetz 		return -EINVAL;
1369a137e1ccSAndi Kleen 	}
1370a137e1ccSAndi Kleen 
13711da177e4SLinus Torvalds 	return 0;
13721da177e4SLinus Torvalds }
13731da177e4SLinus Torvalds 
13741da177e4SLinus Torvalds static int
137532021982SDavid Howells hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
13761da177e4SLinus Torvalds {
137732021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
13781da177e4SLinus Torvalds 	struct hugetlbfs_sb_info *sbinfo;
13791da177e4SLinus Torvalds 
13801da177e4SLinus Torvalds 	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
13811da177e4SLinus Torvalds 	if (!sbinfo)
13821da177e4SLinus Torvalds 		return -ENOMEM;
13831da177e4SLinus Torvalds 	sb->s_fs_info = sbinfo;
13841da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
138532021982SDavid Howells 	sbinfo->hstate		= ctx->hstate;
138632021982SDavid Howells 	sbinfo->max_inodes	= ctx->nr_inodes;
138732021982SDavid Howells 	sbinfo->free_inodes	= ctx->nr_inodes;
138890481622SDavid Gibson 	sbinfo->spool		= NULL;
138932021982SDavid Howells 	sbinfo->uid		= ctx->uid;
139032021982SDavid Howells 	sbinfo->gid		= ctx->gid;
139132021982SDavid Howells 	sbinfo->mode		= ctx->mode;
13924a25220dSDavid Howells 
13937ca02d0aSMike Kravetz 	/*
13947ca02d0aSMike Kravetz 	 * Allocate and initialize subpool if maximum or minimum size is
13951935ebd3SMiaohe Lin 	 * specified.  Any needed reservations (for minimum size) are taken
1396445c8098SMiaohe Lin 	 * when the subpool is created.
13977ca02d0aSMike Kravetz 	 */
139832021982SDavid Howells 	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
139932021982SDavid Howells 		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
140032021982SDavid Howells 						     ctx->max_hpages,
140132021982SDavid Howells 						     ctx->min_hpages);
140290481622SDavid Gibson 		if (!sbinfo->spool)
140390481622SDavid Gibson 			goto out_free;
140490481622SDavid Gibson 	}
14051da177e4SLinus Torvalds 	sb->s_maxbytes = MAX_LFS_FILESIZE;
140632021982SDavid Howells 	sb->s_blocksize = huge_page_size(ctx->hstate);
140732021982SDavid Howells 	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
14081da177e4SLinus Torvalds 	sb->s_magic = HUGETLBFS_MAGIC;
14091da177e4SLinus Torvalds 	sb->s_op = &hugetlbfs_ops;
14101da177e4SLinus Torvalds 	sb->s_time_gran = 1;
141115568299SMike Kravetz 
141215568299SMike Kravetz 	/*
141315568299SMike Kravetz 	 * Due to the special and limited functionality of hugetlbfs, it does
141415568299SMike Kravetz 	 * not work well as a stacking filesystem.
141515568299SMike Kravetz 	 */
141615568299SMike Kravetz 	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
141732021982SDavid Howells 	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
141848fde701SAl Viro 	if (!sb->s_root)
14191da177e4SLinus Torvalds 		goto out_free;
14201da177e4SLinus Torvalds 	return 0;
14211da177e4SLinus Torvalds out_free:
142290481622SDavid Gibson 	kfree(sbinfo->spool);
14231da177e4SLinus Torvalds 	kfree(sbinfo);
14241da177e4SLinus Torvalds 	return -ENOMEM;
14251da177e4SLinus Torvalds }
14261da177e4SLinus Torvalds 
142732021982SDavid Howells static int hugetlbfs_get_tree(struct fs_context *fc)
14281da177e4SLinus Torvalds {
142932021982SDavid Howells 	int err = hugetlbfs_validate(fc);
143032021982SDavid Howells 	if (err)
143132021982SDavid Howells 		return err;
14322ac295d4SAl Viro 	return get_tree_nodev(fc, hugetlbfs_fill_super);
143332021982SDavid Howells }
143432021982SDavid Howells 
143532021982SDavid Howells static void hugetlbfs_fs_context_free(struct fs_context *fc)
143632021982SDavid Howells {
143732021982SDavid Howells 	kfree(fc->fs_private);
143832021982SDavid Howells }
143932021982SDavid Howells 
144032021982SDavid Howells static const struct fs_context_operations hugetlbfs_fs_context_ops = {
144132021982SDavid Howells 	.free		= hugetlbfs_fs_context_free,
144232021982SDavid Howells 	.parse_param	= hugetlbfs_parse_param,
144332021982SDavid Howells 	.get_tree	= hugetlbfs_get_tree,
144432021982SDavid Howells };
144532021982SDavid Howells 
144632021982SDavid Howells static int hugetlbfs_init_fs_context(struct fs_context *fc)
144732021982SDavid Howells {
144832021982SDavid Howells 	struct hugetlbfs_fs_context *ctx;
144932021982SDavid Howells 
145032021982SDavid Howells 	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
145132021982SDavid Howells 	if (!ctx)
145232021982SDavid Howells 		return -ENOMEM;
145332021982SDavid Howells 
145432021982SDavid Howells 	ctx->max_hpages	= -1; /* No limit on size by default */
145532021982SDavid Howells 	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
145632021982SDavid Howells 	ctx->uid	= current_fsuid();
145732021982SDavid Howells 	ctx->gid	= current_fsgid();
145832021982SDavid Howells 	ctx->mode	= 0755;
145932021982SDavid Howells 	ctx->hstate	= &default_hstate;
146032021982SDavid Howells 	ctx->min_hpages	= -1; /* No default minimum size */
146132021982SDavid Howells 	ctx->max_val_type = NO_SIZE;
146232021982SDavid Howells 	ctx->min_val_type = NO_SIZE;
146332021982SDavid Howells 	fc->fs_private = ctx;
146432021982SDavid Howells 	fc->ops	= &hugetlbfs_fs_context_ops;
146532021982SDavid Howells 	return 0;
14661da177e4SLinus Torvalds }
14671da177e4SLinus Torvalds 
14681da177e4SLinus Torvalds static struct file_system_type hugetlbfs_fs_type = {
14691da177e4SLinus Torvalds 	.name			= "hugetlbfs",
147032021982SDavid Howells 	.init_fs_context	= hugetlbfs_init_fs_context,
1471d7167b14SAl Viro 	.parameters		= hugetlb_fs_parameters,
14721da177e4SLinus Torvalds 	.kill_sb		= kill_litter_super,
14731da177e4SLinus Torvalds };
14741da177e4SLinus Torvalds 
147542d7395fSAndi Kleen static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
14761da177e4SLinus Torvalds 
1477ef1ff6b8SMel Gorman static int can_do_hugetlb_shm(void)
14781da177e4SLinus Torvalds {
1479a0eb3a05SEric W. Biederman 	kgid_t shm_group;
1480a0eb3a05SEric W. Biederman 	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1481a0eb3a05SEric W. Biederman 	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
14821da177e4SLinus Torvalds }
14831da177e4SLinus Torvalds 
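/*
 * sysctl_hugetlb_shm_group is exposed as vm.hugetlb_shm_group
 * (/proc/sys/vm/hugetlb_shm_group); e.g. "sysctl vm.hugetlb_shm_group=1000"
 * lets members of group 1000 create SHM_HUGETLB segments without
 * CAP_IPC_LOCK.
 */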
148442d7395fSAndi Kleen static int get_hstate_idx(int page_size_log)
148542d7395fSAndi Kleen {
1486af73e4d9SNaoya Horiguchi 	struct hstate *h = hstate_sizelog(page_size_log);
148742d7395fSAndi Kleen 
148842d7395fSAndi Kleen 	if (!h)
148942d7395fSAndi Kleen 		return -1;
149004adbc3fSMiaohe Lin 	return hstate_index(h);
149142d7395fSAndi Kleen }
149242d7395fSAndi Kleen 
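/*
 * page_size_log is the base-2 log of the requested page size; 0 selects
 * the default hstate.  Callers such as the SysV shm code derive it from
 * flag encodings like SHM_HUGE_2MB (== 21 << SHM_HUGE_SHIFT).
 */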
1493af73e4d9SNaoya Horiguchi /*
1494af73e4d9SNaoya Horiguchi  * Note that size should be aligned to the proper hugepage size by the caller;
1495af73e4d9SNaoya Horiguchi  * otherwise hugetlb_reserve_pages reserves one fewer huge page than intended.
1496af73e4d9SNaoya Horiguchi  */
1497af73e4d9SNaoya Horiguchi struct file *hugetlb_file_setup(const char *name, size_t size,
149883c1fd76Szhangyiru 				vm_flags_t acctflag, int creat_flags,
149983c1fd76Szhangyiru 				int page_size_log)
15001da177e4SLinus Torvalds {
15011da177e4SLinus Torvalds 	struct inode *inode;
1502e68375c8SAl Viro 	struct vfsmount *mnt;
150342d7395fSAndi Kleen 	int hstate_idx;
1504e68375c8SAl Viro 	struct file *file;
150542d7395fSAndi Kleen 
150642d7395fSAndi Kleen 	hstate_idx = get_hstate_idx(page_size_log);
150742d7395fSAndi Kleen 	if (hstate_idx < 0)
150842d7395fSAndi Kleen 		return ERR_PTR(-ENODEV);
15091da177e4SLinus Torvalds 
1510e68375c8SAl Viro 	mnt = hugetlbfs_vfsmount[hstate_idx];
1511e68375c8SAl Viro 	if (!mnt)
15125bc98594SAkinobu Mita 		return ERR_PTR(-ENOENT);
15135bc98594SAkinobu Mita 
1514ef1ff6b8SMel Gorman 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
151583c1fd76Szhangyiru 		struct ucounts *ucounts = current_ucounts();
151683c1fd76Szhangyiru 
151783c1fd76Szhangyiru 		if (user_shm_lock(size, ucounts)) {
151883c1fd76Szhangyiru 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
151921a3c273SDavid Rientjes 				current->comm, current->pid);
152083c1fd76Szhangyiru 			user_shm_unlock(size, ucounts);
15212584e517SRavikiran G Thirumalai 		}
152283c1fd76Szhangyiru 		return ERR_PTR(-EPERM);
1523353d5c30SHugh Dickins 	}
15241da177e4SLinus Torvalds 
152539b65252SAnatol Pomozov 	file = ERR_PTR(-ENOSPC);
1526e68375c8SAl Viro 	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
15271da177e4SLinus Torvalds 	if (!inode)
1528e68375c8SAl Viro 		goto out;
1529e1832f29SStephen Smalley 	if (creat_flags == HUGETLB_SHMFS_INODE)
1530e1832f29SStephen Smalley 		inode->i_flags |= S_PRIVATE;
15311da177e4SLinus Torvalds 
15321da177e4SLinus Torvalds 	inode->i_size = size;
15336d6b77f1SMiklos Szeredi 	clear_nlink(inode);
1534ce8d2cdfSDave Hansen 
153533b8f84aSMike Kravetz 	if (!hugetlb_reserve_pages(inode, 0,
1536e68375c8SAl Viro 			size >> huge_page_shift(hstate_inode(inode)), NULL,
1537e68375c8SAl Viro 			acctflag))
1538e68375c8SAl Viro 		file = ERR_PTR(-ENOMEM);
1539e68375c8SAl Viro 	else
1540e68375c8SAl Viro 		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1541ce8d2cdfSDave Hansen 					&hugetlbfs_file_operations);
1542e68375c8SAl Viro 	if (!IS_ERR(file))
15431da177e4SLinus Torvalds 		return file;
15441da177e4SLinus Torvalds 
1545b45b5bd6SDavid Gibson 	iput(inode);
1546e68375c8SAl Viro out:
154739b65252SAnatol Pomozov 	return file;
15481da177e4SLinus Torvalds }
15491da177e4SLinus Torvalds 
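/*
 * hugetlb_file_setup() is what backs SHM_HUGETLB segments.  A userspace
 * sketch (key, size and permissions are arbitrary; assumes a 2 MB
 * default huge page size and a sufficiently large pool):
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000		/* from <linux/shm.h> */
#endif

int main(void)
{
	/* Needs CAP_IPC_LOCK or membership in vm.hugetlb_shm_group. */
	int id = shmget(IPC_PRIVATE, 2UL * 1024 * 1024,
			SHM_HUGETLB | IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return 1;
	}
	char *p = shmat(id, NULL, 0);
	if (p == (void *)-1) {
		perror("shmat");
		return 1;
	}
	p[0] = 1;			/* touch the segment */
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}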
155032021982SDavid Howells static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
155132021982SDavid Howells {
155232021982SDavid Howells 	struct fs_context *fc;
155332021982SDavid Howells 	struct vfsmount *mnt;
155432021982SDavid Howells 
155532021982SDavid Howells 	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
155632021982SDavid Howells 	if (IS_ERR(fc)) {
155732021982SDavid Howells 		mnt = ERR_CAST(fc);
155832021982SDavid Howells 	} else {
155932021982SDavid Howells 		struct hugetlbfs_fs_context *ctx = fc->fs_private;
156032021982SDavid Howells 		ctx->hstate = h;
156132021982SDavid Howells 		mnt = fc_mount(fc);
156232021982SDavid Howells 		put_fs_context(fc);
156332021982SDavid Howells 	}
156432021982SDavid Howells 	if (IS_ERR(mnt))
1565a25fddceSMiaohe Lin 		pr_err("Cannot mount internal hugetlbfs for page size %luK",
1566d0036517SMiaohe Lin 		       huge_page_size(h) / SZ_1K);
156732021982SDavid Howells 	return mnt;
156832021982SDavid Howells }
156932021982SDavid Howells 
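/*
 * One such internal mount is created per hstate at init time (see
 * init_hugetlbfs_fs() below); additional hstates typically come from
 * boot parameters such as "hugepagesz=1G hugepages=2".
 */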
15701da177e4SLinus Torvalds static int __init init_hugetlbfs_fs(void)
15711da177e4SLinus Torvalds {
157232021982SDavid Howells 	struct vfsmount *mnt;
157342d7395fSAndi Kleen 	struct hstate *h;
15741da177e4SLinus Torvalds 	int error;
157542d7395fSAndi Kleen 	int i;
15761da177e4SLinus Torvalds 
1577457c1b27SNishanth Aravamudan 	if (!hugepages_supported()) {
15789b857d26SAndrew Morton 		pr_info("disabling because there are no supported hugepage sizes\n");
1579457c1b27SNishanth Aravamudan 		return -ENOTSUPP;
1580457c1b27SNishanth Aravamudan 	}
1581457c1b27SNishanth Aravamudan 
1582d1d5e05fSHillf Danton 	error = -ENOMEM;
15831da177e4SLinus Torvalds 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
15841da177e4SLinus Torvalds 					sizeof(struct hugetlbfs_inode_info),
15855d097056SVladimir Davydov 					0, SLAB_ACCOUNT, init_once);
15861da177e4SLinus Torvalds 	if (hugetlbfs_inode_cachep == NULL)
15878fc312b3SMike Kravetz 		goto out;
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds 	error = register_filesystem(&hugetlbfs_fs_type);
15901da177e4SLinus Torvalds 	if (error)
15918fc312b3SMike Kravetz 		goto out_free;
15921da177e4SLinus Torvalds 
15938fc312b3SMike Kravetz 	/* default hstate mount is required */
15943b2275a8SMiaohe Lin 	mnt = mount_one_hugetlbfs(&default_hstate);
15958fc312b3SMike Kravetz 	if (IS_ERR(mnt)) {
15968fc312b3SMike Kravetz 		error = PTR_ERR(mnt);
15978fc312b3SMike Kravetz 		goto out_unreg;
15988fc312b3SMike Kravetz 	}
15998fc312b3SMike Kravetz 	hugetlbfs_vfsmount[default_hstate_idx] = mnt;
16008fc312b3SMike Kravetz 
16018fc312b3SMike Kravetz 	/* other hstates are optional */
160242d7395fSAndi Kleen 	i = 0;
160342d7395fSAndi Kleen 	for_each_hstate(h) {
160415f0ec94SJan Stancek 		if (i == default_hstate_idx) {
160515f0ec94SJan Stancek 			i++;
16068fc312b3SMike Kravetz 			continue;
160715f0ec94SJan Stancek 		}
16088fc312b3SMike Kravetz 
160932021982SDavid Howells 		mnt = mount_one_hugetlbfs(h);
16108fc312b3SMike Kravetz 		if (IS_ERR(mnt))
16118fc312b3SMike Kravetz 			hugetlbfs_vfsmount[i] = NULL;
16128fc312b3SMike Kravetz 		else
161332021982SDavid Howells 			hugetlbfs_vfsmount[i] = mnt;
161442d7395fSAndi Kleen 		i++;
161542d7395fSAndi Kleen 	}
161632021982SDavid Howells 
161742d7395fSAndi Kleen 	return 0;
16181da177e4SLinus Torvalds 
16198fc312b3SMike Kravetz  out_unreg:
16208fc312b3SMike Kravetz 	(void)unregister_filesystem(&hugetlbfs_fs_type);
16218fc312b3SMike Kravetz  out_free:
16221da177e4SLinus Torvalds 	kmem_cache_destroy(hugetlbfs_inode_cachep);
16238fc312b3SMike Kravetz  out:
16241da177e4SLinus Torvalds 	return error;
16251da177e4SLinus Torvalds }
16263e89e1c5SPaul Gortmaker fs_initcall(init_hugetlbfs_fs)
1627