/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

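/*
 * The "size" and "min_size" mount options may be given either as an
 * absolute byte count (SIZE_STD) or, with a '%' suffix, as a percentage
 * of the huge page pool (SIZE_PERCENT); NO_SIZE means the option was
 * not specified.  (The option parser itself is outside this excerpt.)
 */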
enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
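
/*
 * Illustrative example (userspace, values are arbitrary): the options
 * above correspond to a mount invocation such as
 *
 *   mount -t hugetlbfs -o uid=1000,gid=1000,mode=0700,pagesize=2M,size=512M \
 *         none /mnt/huge
 */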

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

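/*
 * Drop the page references held by a pagevec and reset it for reuse.
 */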
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
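/*
 * Worked example (64-bit, 4K pages): PAGE_SHIFT == 12 and BITS_PER_LONG
 * == 64, so the mask has the top 13 bits (bits 51..63) set.  A vm_pgoff
 * with any of those bits set would, once shifted left by PAGE_SHIFT to
 * form a byte offset, reach the sign bit of the loff_t or beyond.
 */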

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * that the is_vm_hugetlb_page() tests below unmap_region() go
	 * the right way when do_mmap() unwinds (may be important on
	 * powerpc and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
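/*
 * Generic implementation, used when the architecture does not supply
 * its own: find a free, huge-page-aligned area, searching bottom-up or
 * top-down to match the mm layout.
 */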
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}
#endif

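/*
 * Copy up to @size bytes from a huge page into an iov_iter, walking the
 * compound page in PAGE_SIZE chunks.  Returns the number of bytes
 * copied, which is short if the user buffer faults.
 */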
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and the offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's very similar to generic_file_buffered_read(); we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

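/*
 * hugetlbfs does not support ordinary buffered write(2); pages are only
 * instantiated via mmap() or fallocate().  write_begin fails the generic
 * write path up front, so write_end should never be reached.
 */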
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

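/*
 * Remove a huge page from the page cache, dropping the cache's
 * reference.  The caller must hold the page lock.
 */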
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

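/*
 * Unmap the given range of the file from every vma in the interval
 * tree.  Callers hold i_mmap_rwsem for write.
 */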
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted.  Region/reserve map entries for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch;
 *	this is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail; the subpool and
			 * global reserve usage counts may then need to be
			 * adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

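/*
 * Truncate the file to @offset: publish the new i_size under
 * i_mmap_rwsem so page faults see it, unmap everything beyond it, then
 * remove the now unreachable pages and their reservations.
 */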
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_rwsem */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end  >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
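	/*
	 * Worked example (2MB huge pages): offset = 1MB, len = 2MB gives
	 * start = 0 and end = 2, so huge pages 0 and 1 (bytes 0..4MB) get
	 * preallocated -- the range is widened to whole huge pages.
	 */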
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/*
		 * Allocate page without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma.  However, there could be shared mappings with
		 * reserves for the file at the inode level.  If we fallocate
		 * pages in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		page = alloc_huge_page(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		SetHPageMigratable(page);
		/*
		 * unlock_page() because the page was locked by
		 * huge_add_to_page_cache(); put_page() drops the
		 * reference taken in alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

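/*
 * Size changes must be huge-page aligned and may be refused by seals;
 * all other attributes fall through to the generic copy below.
 */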
static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&init_user_ns, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(&init_user_ns, inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
			   struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
			    struct inode *dir, struct dentry *dentry,
			    umode_t mode, bool excl)
{
	return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
			     struct inode *dir, struct dentry *dentry,
			     const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

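/*
 * Migrate a huge page: move the page cache entry, transfer subpool
 * ownership to the new page, and copy contents/state as the migration
 * mode allows.
 */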
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (hugetlb_page_subpool(page)) {
		hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
		hugetlb_set_page_subpool(page, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

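/*
 * Called from the memory-failure path: drop the poisoned page from the
 * page cache and give its reservation back.
 */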
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

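/*
 * Inode accounting: free_inodes is negative when nr_inodes was not
 * limited at mount time, in which case these helpers are no-ops.
 */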
107896527980SChristoph Hellwig static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
107996527980SChristoph Hellwig {
108096527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
108196527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
108296527980SChristoph Hellwig 		if (unlikely(!sbinfo->free_inodes)) {
108396527980SChristoph Hellwig 			spin_unlock(&sbinfo->stat_lock);
108496527980SChristoph Hellwig 			return 0;
108596527980SChristoph Hellwig 		}
108696527980SChristoph Hellwig 		sbinfo->free_inodes--;
108796527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
108896527980SChristoph Hellwig 	}
108996527980SChristoph Hellwig 
109096527980SChristoph Hellwig 	return 1;
109196527980SChristoph Hellwig }
109296527980SChristoph Hellwig 
109396527980SChristoph Hellwig static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
109496527980SChristoph Hellwig {
109596527980SChristoph Hellwig 	if (sbinfo->free_inodes >= 0) {
109696527980SChristoph Hellwig 		spin_lock(&sbinfo->stat_lock);
109796527980SChristoph Hellwig 		sbinfo->free_inodes++;
109896527980SChristoph Hellwig 		spin_unlock(&sbinfo->stat_lock);
109996527980SChristoph Hellwig 	}
110096527980SChristoph Hellwig }
110196527980SChristoph Hellwig 
1103e18b890bSChristoph Lameter static struct kmem_cache *hugetlbfs_inode_cachep;
11041da177e4SLinus Torvalds 
11051da177e4SLinus Torvalds static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
11061da177e4SLinus Torvalds {
110796527980SChristoph Hellwig 	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
11081da177e4SLinus Torvalds 	struct hugetlbfs_inode_info *p;
11091da177e4SLinus Torvalds 
111096527980SChristoph Hellwig 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
11111da177e4SLinus Torvalds 		return NULL;
1112e94b1766SChristoph Lameter 	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
111396527980SChristoph Hellwig 	if (unlikely(!p)) {
111496527980SChristoph Hellwig 		hugetlbfs_inc_free_inodes(sbinfo);
111596527980SChristoph Hellwig 		return NULL;
11161da177e4SLinus Torvalds 	}
11174742a35dSMike Kravetz 
11184742a35dSMike Kravetz 	/*
11194742a35dSMike Kravetz 	 * Any time after allocation, hugetlbfs_destroy_inode can be called
11204742a35dSMike Kravetz 	 * for the inode.  mpol_free_shared_policy is unconditionally called
11214742a35dSMike Kravetz 	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
11224742a35dSMike Kravetz 	 * in case of a quick call to destroy.
11234742a35dSMike Kravetz 	 *
11244742a35dSMike Kravetz 	 * Note that the policy is initialized even if we are creating a
11254742a35dSMike Kravetz 	 * private inode.  This simplifies hugetlbfs_destroy_inode.
11264742a35dSMike Kravetz 	 */
11274742a35dSMike Kravetz 	mpol_shared_policy_init(&p->policy, NULL);
11284742a35dSMike Kravetz 
112996527980SChristoph Hellwig 	return &p->vfs_inode;
11301da177e4SLinus Torvalds }
11311da177e4SLinus Torvalds 
1132b62de322SAl Viro static void hugetlbfs_free_inode(struct inode *inode)
1133fa0d7e3dSNick Piggin {
1134fa0d7e3dSNick Piggin 	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1135fa0d7e3dSNick Piggin }
1136fa0d7e3dSNick Piggin 
11371da177e4SLinus Torvalds static void hugetlbfs_destroy_inode(struct inode *inode)
11381da177e4SLinus Torvalds {
113996527980SChristoph Hellwig 	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
11401da177e4SLinus Torvalds 	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
11411da177e4SLinus Torvalds }
11421da177e4SLinus Torvalds 
1143f5e54d6eSChristoph Hellwig static const struct address_space_operations hugetlbfs_aops = {
1144800d15a5SNick Piggin 	.write_begin	= hugetlbfs_write_begin,
1145800d15a5SNick Piggin 	.write_end	= hugetlbfs_write_end,
1146a4fa34cdSMike Kravetz 	.set_page_dirty	= __set_page_dirty_no_writeback,
1147290408d4SNaoya Horiguchi 	.migratepage    = hugetlbfs_migrate_page,
114878bb9203SNaoya Horiguchi 	.error_remove_page	= hugetlbfs_error_remove_page,
11491da177e4SLinus Torvalds };
11501da177e4SLinus Torvalds 
115251cc5068SAlexey Dobriyan static void init_once(void *foo)
115396527980SChristoph Hellwig {
115496527980SChristoph Hellwig 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
115596527980SChristoph Hellwig 
115696527980SChristoph Hellwig 	inode_init_once(&ei->vfs_inode);
115796527980SChristoph Hellwig }
115896527980SChristoph Hellwig 
11594b6f5d20SArjan van de Ven const struct file_operations hugetlbfs_file_operations = {
116034d0640eSAl Viro 	.read_iter		= hugetlbfs_read_iter,
11611da177e4SLinus Torvalds 	.mmap			= hugetlbfs_file_mmap,
11621b061d92SChristoph Hellwig 	.fsync			= noop_fsync,
11631da177e4SLinus Torvalds 	.get_unmapped_area	= hugetlb_get_unmapped_area,
11646038f373SArnd Bergmann 	.llseek			= default_llseek,
116570c3547eSMike Kravetz 	.fallocate		= hugetlbfs_fallocate,
11661da177e4SLinus Torvalds };
11671da177e4SLinus Torvalds 
116892e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_dir_inode_operations = {
11691da177e4SLinus Torvalds 	.create		= hugetlbfs_create,
11701da177e4SLinus Torvalds 	.lookup		= simple_lookup,
11711da177e4SLinus Torvalds 	.link		= simple_link,
11721da177e4SLinus Torvalds 	.unlink		= simple_unlink,
11731da177e4SLinus Torvalds 	.symlink	= hugetlbfs_symlink,
11741da177e4SLinus Torvalds 	.mkdir		= hugetlbfs_mkdir,
11751da177e4SLinus Torvalds 	.rmdir		= simple_rmdir,
11761da177e4SLinus Torvalds 	.mknod		= hugetlbfs_mknod,
11771da177e4SLinus Torvalds 	.rename		= simple_rename,
11781da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
11791ab5b82fSPiotr Sarna 	.tmpfile	= hugetlbfs_tmpfile,
11801da177e4SLinus Torvalds };
11811da177e4SLinus Torvalds 
118292e1d5beSArjan van de Ven static const struct inode_operations hugetlbfs_inode_operations = {
11831da177e4SLinus Torvalds 	.setattr	= hugetlbfs_setattr,
11841da177e4SLinus Torvalds };
11851da177e4SLinus Torvalds 
1186ee9b6d61SJosef 'Jeff' Sipek static const struct super_operations hugetlbfs_ops = {
11871da177e4SLinus Torvalds 	.alloc_inode    = hugetlbfs_alloc_inode,
1188b62de322SAl Viro 	.free_inode     = hugetlbfs_free_inode,
11891da177e4SLinus Torvalds 	.destroy_inode  = hugetlbfs_destroy_inode,
11902bbbda30SAl Viro 	.evict_inode	= hugetlbfs_evict_inode,
11911da177e4SLinus Torvalds 	.statfs		= hugetlbfs_statfs,
11921da177e4SLinus Torvalds 	.put_super	= hugetlbfs_put_super,
11934a25220dSDavid Howells 	.show_options	= hugetlbfs_show_options,
11941da177e4SLinus Torvalds };
11951da177e4SLinus Torvalds 
11967ca02d0aSMike Kravetz /*
11977ca02d0aSMike Kravetz  * Convert size option passed from command line to number of huge pages
11987ca02d0aSMike Kravetz  * in the pool specified by hstate.  Size option could be in bytes
11997ca02d0aSMike Kravetz  * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
12007ca02d0aSMike Kravetz  */
12014a25220dSDavid Howells static long
12027ca02d0aSMike Kravetz hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
12034a25220dSDavid Howells 			 enum hugetlbfs_size_type val_type)
12047ca02d0aSMike Kravetz {
12057ca02d0aSMike Kravetz 	if (val_type == NO_SIZE)
12067ca02d0aSMike Kravetz 		return -1;
12077ca02d0aSMike Kravetz 
12087ca02d0aSMike Kravetz 	if (val_type == SIZE_PERCENT) {
12097ca02d0aSMike Kravetz 		size_opt <<= huge_page_shift(h);
12107ca02d0aSMike Kravetz 		size_opt *= h->max_huge_pages;
12117ca02d0aSMike Kravetz 		do_div(size_opt, 100);
12127ca02d0aSMike Kravetz 	}
12137ca02d0aSMike Kravetz 
12147ca02d0aSMike Kravetz 	size_opt >>= huge_page_shift(h);
12157ca02d0aSMike Kravetz 	return size_opt;
12167ca02d0aSMike Kravetz }
12177ca02d0aSMike Kravetz 
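/*
 * Worked example (illustrative values): with 2 MB huge pages
 * (huge_page_shift(h) == 21) and h->max_huge_pages == 1024, "size=50%"
 * reaches hugetlbfs_size_to_hpages() as size_opt == 50, SIZE_PERCENT:
 *
 *	size_opt <<= 21;	-> 104857600
 *	size_opt *= 1024;	-> 107374182400
 *	do_div(size_opt, 100);	-> 1073741824 (50% of the pool, in bytes)
 *	size_opt >>= 21;	-> 512 huge pages
 */
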
121832021982SDavid Howells /*
121932021982SDavid Howells  * Parse one mount parameter.
122032021982SDavid Howells  */
122132021982SDavid Howells static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
12221da177e4SLinus Torvalds {
122332021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
122432021982SDavid Howells 	struct fs_parse_result result;
122532021982SDavid Howells 	char *rest;
122632021982SDavid Howells 	unsigned long ps;
122732021982SDavid Howells 	int opt;
12281da177e4SLinus Torvalds 
1229d7167b14SAl Viro 	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
123032021982SDavid Howells 	if (opt < 0)
123132021982SDavid Howells 		return opt;
123232021982SDavid Howells 
123332021982SDavid Howells 	switch (opt) {
123432021982SDavid Howells 	case Opt_uid:
123532021982SDavid Howells 		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
123632021982SDavid Howells 		if (!uid_valid(ctx->uid))
123732021982SDavid Howells 			goto bad_val;
12381da177e4SLinus Torvalds 		return 0;
12391da177e4SLinus Torvalds 
1240e73a75faSRandy Dunlap 	case Opt_gid:
124132021982SDavid Howells 		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
124232021982SDavid Howells 		if (!gid_valid(ctx->gid))
1243e73a75faSRandy Dunlap 			goto bad_val;
124432021982SDavid Howells 		return 0;
1245e73a75faSRandy Dunlap 
1246e73a75faSRandy Dunlap 	case Opt_mode:
124732021982SDavid Howells 		ctx->mode = result.uint_32 & 01777U;
124832021982SDavid Howells 		return 0;
1249e73a75faSRandy Dunlap 
125032021982SDavid Howells 	case Opt_size:
1251e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
125232021982SDavid Howells 		if (!isdigit(param->string[0]))
1253e73a75faSRandy Dunlap 			goto bad_val;
125432021982SDavid Howells 		ctx->max_size_opt = memparse(param->string, &rest);
125532021982SDavid Howells 		ctx->max_val_type = SIZE_STD;
1256a137e1ccSAndi Kleen 		if (*rest == '%')
125732021982SDavid Howells 			ctx->max_val_type = SIZE_PERCENT;
125832021982SDavid Howells 		return 0;
12591da177e4SLinus Torvalds 
1260e73a75faSRandy Dunlap 	case Opt_nr_inodes:
1261e73a75faSRandy Dunlap 		/* memparse() will accept a K/M/G without a digit */
126232021982SDavid Howells 		if (!isdigit(param->string[0]))
1263e73a75faSRandy Dunlap 			goto bad_val;
126432021982SDavid Howells 		ctx->nr_inodes = memparse(param->string, &rest);
126532021982SDavid Howells 		return 0;
1266e73a75faSRandy Dunlap 
126732021982SDavid Howells 	case Opt_pagesize:
126832021982SDavid Howells 		ps = memparse(param->string, &rest);
126932021982SDavid Howells 		ctx->hstate = size_to_hstate(ps);
127032021982SDavid Howells 		if (!ctx->hstate) {
127132021982SDavid Howells 			pr_err("Unsupported page size %lu MB\n", ps >> 20);
1272a137e1ccSAndi Kleen 			return -EINVAL;
1273a137e1ccSAndi Kleen 		}
127432021982SDavid Howells 		return 0;
1275a137e1ccSAndi Kleen 
127632021982SDavid Howells 	case Opt_min_size:
12777ca02d0aSMike Kravetz 		/* memparse() will accept a K/M/G without a digit */
127832021982SDavid Howells 		if (!isdigit(param->string[0]))
12797ca02d0aSMike Kravetz 			goto bad_val;
128032021982SDavid Howells 		ctx->min_size_opt = memparse(param->string, &rest);
128132021982SDavid Howells 		ctx->min_val_type = SIZE_STD;
12827ca02d0aSMike Kravetz 		if (*rest == '%')
128332021982SDavid Howells 			ctx->min_val_type = SIZE_PERCENT;
128432021982SDavid Howells 		return 0;
12857ca02d0aSMike Kravetz 
1286e73a75faSRandy Dunlap 	default:
1287b4c07bceSLee Schermerhorn 		return -EINVAL;
1288e73a75faSRandy Dunlap 	}
128932021982SDavid Howells 
129032021982SDavid Howells bad_val:
1291b5db30cfSAl Viro 	return invalfc(fc, "Bad value '%s' for mount option '%s'",
129232021982SDavid Howells 		      param->string, param->key);
12931da177e4SLinus Torvalds }
1294a137e1ccSAndi Kleen 
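/*
 * Illustrative userspace sketch (mount point and sizes are hypothetical):
 * the options parsed above arrive as the data string of a mount(2) call:
 *
 *	#include <sys/mount.h>
 *
 *	if (mount("none", "/mnt/huge", "hugetlbfs", 0,
 *		  "pagesize=2M,size=50%,min_size=64M,nr_inodes=1024"))
 *		perror("mount hugetlbfs");
 */
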
12957ca02d0aSMike Kravetz /*
129632021982SDavid Howells  * Validate the parsed options.
129732021982SDavid Howells  */
129832021982SDavid Howells static int hugetlbfs_validate(struct fs_context *fc)
129932021982SDavid Howells {
130032021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
130132021982SDavid Howells 
130232021982SDavid Howells 	/*
13037ca02d0aSMike Kravetz 	 * Use huge page pool size (in hstate) to convert the size
13047ca02d0aSMike Kravetz 	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
13057ca02d0aSMike Kravetz 	 */
130632021982SDavid Howells 	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
130732021982SDavid Howells 						   ctx->max_size_opt,
130832021982SDavid Howells 						   ctx->max_val_type);
130932021982SDavid Howells 	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
131032021982SDavid Howells 						   ctx->min_size_opt,
131132021982SDavid Howells 						   ctx->min_val_type);
13127ca02d0aSMike Kravetz 
13137ca02d0aSMike Kravetz 	/*
13147ca02d0aSMike Kravetz 	 * If max_size was specified, then min_size must not exceed it
13157ca02d0aSMike Kravetz 	 */
131632021982SDavid Howells 	if (ctx->max_val_type > NO_SIZE &&
131732021982SDavid Howells 	    ctx->min_hpages > ctx->max_hpages) {
131832021982SDavid Howells 		pr_err("Minimum size cannot be greater than maximum size\n");
13197ca02d0aSMike Kravetz 		return -EINVAL;
1320a137e1ccSAndi Kleen 	}
1321a137e1ccSAndi Kleen 
13221da177e4SLinus Torvalds 	return 0;
13231da177e4SLinus Torvalds }
13241da177e4SLinus Torvalds 
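/*
 * Example (illustrative): with 2 MB huge pages, "size=64M,min_size=128M"
 * converts to max_hpages == 32 and min_hpages == 64, so the check in
 * hugetlbfs_validate() above rejects the mount with -EINVAL.
 */
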
13251da177e4SLinus Torvalds static int
132632021982SDavid Howells hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
13271da177e4SLinus Torvalds {
132832021982SDavid Howells 	struct hugetlbfs_fs_context *ctx = fc->fs_private;
13291da177e4SLinus Torvalds 	struct hugetlbfs_sb_info *sbinfo;
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds 	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
13321da177e4SLinus Torvalds 	if (!sbinfo)
13331da177e4SLinus Torvalds 		return -ENOMEM;
13341da177e4SLinus Torvalds 	sb->s_fs_info = sbinfo;
13351da177e4SLinus Torvalds 	spin_lock_init(&sbinfo->stat_lock);
133632021982SDavid Howells 	sbinfo->hstate		= ctx->hstate;
133732021982SDavid Howells 	sbinfo->max_inodes	= ctx->nr_inodes;
133832021982SDavid Howells 	sbinfo->free_inodes	= ctx->nr_inodes;
133990481622SDavid Gibson 	sbinfo->spool		= NULL;
134032021982SDavid Howells 	sbinfo->uid		= ctx->uid;
134132021982SDavid Howells 	sbinfo->gid		= ctx->gid;
134232021982SDavid Howells 	sbinfo->mode		= ctx->mode;
13434a25220dSDavid Howells 
13447ca02d0aSMike Kravetz 	/*
13457ca02d0aSMike Kravetz 	 * Allocate and initialize subpool if maximum or minimum size is
1346*1935ebd3SMiaohe Lin 	 * specified.  Any needed reservations (for minimum size) are
13477ca02d0aSMike Kravetz 	 * taken when the subpool is created.
13487ca02d0aSMike Kravetz 	 */
134932021982SDavid Howells 	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
135032021982SDavid Howells 		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
135132021982SDavid Howells 						     ctx->max_hpages,
135232021982SDavid Howells 						     ctx->min_hpages);
135390481622SDavid Gibson 		if (!sbinfo->spool)
135490481622SDavid Gibson 			goto out_free;
135590481622SDavid Gibson 	}
13561da177e4SLinus Torvalds 	sb->s_maxbytes = MAX_LFS_FILESIZE;
135732021982SDavid Howells 	sb->s_blocksize = huge_page_size(ctx->hstate);
135832021982SDavid Howells 	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
13591da177e4SLinus Torvalds 	sb->s_magic = HUGETLBFS_MAGIC;
13601da177e4SLinus Torvalds 	sb->s_op = &hugetlbfs_ops;
13611da177e4SLinus Torvalds 	sb->s_time_gran = 1;
136215568299SMike Kravetz 
136315568299SMike Kravetz 	/*
136415568299SMike Kravetz 	 * Due to the special and limited functionality of hugetlbfs, it does
136515568299SMike Kravetz 	 * not work well as a stacking filesystem.
136615568299SMike Kravetz 	 */
136715568299SMike Kravetz 	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
136832021982SDavid Howells 	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
136948fde701SAl Viro 	if (!sb->s_root)
13701da177e4SLinus Torvalds 		goto out_free;
13711da177e4SLinus Torvalds 	return 0;
13721da177e4SLinus Torvalds out_free:
137390481622SDavid Gibson 	kfree(sbinfo->spool);
13741da177e4SLinus Torvalds 	kfree(sbinfo);
13751da177e4SLinus Torvalds 	return -ENOMEM;
13761da177e4SLinus Torvalds }
13771da177e4SLinus Torvalds 
137832021982SDavid Howells static int hugetlbfs_get_tree(struct fs_context *fc)
13791da177e4SLinus Torvalds {
138032021982SDavid Howells 	int err = hugetlbfs_validate(fc);
138132021982SDavid Howells 	if (err)
138232021982SDavid Howells 		return err;
13832ac295d4SAl Viro 	return get_tree_nodev(fc, hugetlbfs_fill_super);
138432021982SDavid Howells }
138532021982SDavid Howells 
138632021982SDavid Howells static void hugetlbfs_fs_context_free(struct fs_context *fc)
138732021982SDavid Howells {
138832021982SDavid Howells 	kfree(fc->fs_private);
138932021982SDavid Howells }
139032021982SDavid Howells 
139132021982SDavid Howells static const struct fs_context_operations hugetlbfs_fs_context_ops = {
139232021982SDavid Howells 	.free		= hugetlbfs_fs_context_free,
139332021982SDavid Howells 	.parse_param	= hugetlbfs_parse_param,
139432021982SDavid Howells 	.get_tree	= hugetlbfs_get_tree,
139532021982SDavid Howells };
139632021982SDavid Howells 
139732021982SDavid Howells static int hugetlbfs_init_fs_context(struct fs_context *fc)
139832021982SDavid Howells {
139932021982SDavid Howells 	struct hugetlbfs_fs_context *ctx;
140032021982SDavid Howells 
140132021982SDavid Howells 	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
140232021982SDavid Howells 	if (!ctx)
140332021982SDavid Howells 		return -ENOMEM;
140432021982SDavid Howells 
140532021982SDavid Howells 	ctx->max_hpages	= -1; /* No limit on size by default */
140632021982SDavid Howells 	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
140732021982SDavid Howells 	ctx->uid	= current_fsuid();
140832021982SDavid Howells 	ctx->gid	= current_fsgid();
140932021982SDavid Howells 	ctx->mode	= 0755;
141032021982SDavid Howells 	ctx->hstate	= &default_hstate;
141132021982SDavid Howells 	ctx->min_hpages	= -1; /* No default minimum size */
141232021982SDavid Howells 	ctx->max_val_type = NO_SIZE;
141332021982SDavid Howells 	ctx->min_val_type = NO_SIZE;
141432021982SDavid Howells 	fc->fs_private = ctx;
141532021982SDavid Howells 	fc->ops	= &hugetlbfs_fs_context_ops;
141632021982SDavid Howells 	return 0;
14171da177e4SLinus Torvalds }
14181da177e4SLinus Torvalds 
14191da177e4SLinus Torvalds static struct file_system_type hugetlbfs_fs_type = {
14201da177e4SLinus Torvalds 	.name			= "hugetlbfs",
142132021982SDavid Howells 	.init_fs_context	= hugetlbfs_init_fs_context,
1422d7167b14SAl Viro 	.parameters		= hugetlb_fs_parameters,
14231da177e4SLinus Torvalds 	.kill_sb		= kill_litter_super,
14241da177e4SLinus Torvalds };
14251da177e4SLinus Torvalds 
142642d7395fSAndi Kleen static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
14271da177e4SLinus Torvalds 
1428ef1ff6b8SMel Gorman static int can_do_hugetlb_shm(void)
14291da177e4SLinus Torvalds {
1430a0eb3a05SEric W. Biederman 	kgid_t shm_group;
1431a0eb3a05SEric W. Biederman 	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1432a0eb3a05SEric W. Biederman 	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
14331da177e4SLinus Torvalds }
14341da177e4SLinus Torvalds 
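/*
 * The group checked in can_do_hugetlb_shm() is configured through the
 * vm.hugetlb_shm_group sysctl, e.g. (gid value is illustrative):
 *
 *	echo 1001 > /proc/sys/vm/hugetlb_shm_group
 */
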
143542d7395fSAndi Kleen static int get_hstate_idx(int page_size_log)
143642d7395fSAndi Kleen {
1437af73e4d9SNaoya Horiguchi 	struct hstate *h = hstate_sizelog(page_size_log);
143842d7395fSAndi Kleen 
143942d7395fSAndi Kleen 	if (!h)
144042d7395fSAndi Kleen 		return -1;
144142d7395fSAndi Kleen 	return h - hstates;
144242d7395fSAndi Kleen }
144342d7395fSAndi Kleen 
1444af73e4d9SNaoya Horiguchi /*
1445af73e4d9SNaoya Horiguchi  * Note that size should be hugepage-aligned by the caller; otherwise
1446af73e4d9SNaoya Horiguchi  * hugetlb_reserve_pages reserves one fewer huge page than intended.
1447af73e4d9SNaoya Horiguchi  */
1448af73e4d9SNaoya Horiguchi struct file *hugetlb_file_setup(const char *name, size_t size,
1449af73e4d9SNaoya Horiguchi 				vm_flags_t acctflag, struct user_struct **user,
145042d7395fSAndi Kleen 				int creat_flags, int page_size_log)
14511da177e4SLinus Torvalds {
14521da177e4SLinus Torvalds 	struct inode *inode;
1453e68375c8SAl Viro 	struct vfsmount *mnt;
145442d7395fSAndi Kleen 	int hstate_idx;
1455e68375c8SAl Viro 	struct file *file;
145642d7395fSAndi Kleen 
145742d7395fSAndi Kleen 	hstate_idx = get_hstate_idx(page_size_log);
145842d7395fSAndi Kleen 	if (hstate_idx < 0)
145942d7395fSAndi Kleen 		return ERR_PTR(-ENODEV);
14601da177e4SLinus Torvalds 
1461353d5c30SHugh Dickins 	*user = NULL;
1462e68375c8SAl Viro 	mnt = hugetlbfs_vfsmount[hstate_idx];
1463e68375c8SAl Viro 	if (!mnt)
14645bc98594SAkinobu Mita 		return ERR_PTR(-ENOENT);
14655bc98594SAkinobu Mita 
1466ef1ff6b8SMel Gorman 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
1467353d5c30SHugh Dickins 		*user = current_user();
1468353d5c30SHugh Dickins 		if (user_shm_lock(size, *user)) {
146921a3c273SDavid Rientjes 			task_lock(current);
14709b857d26SAndrew Morton 			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
147121a3c273SDavid Rientjes 				current->comm, current->pid);
147221a3c273SDavid Rientjes 			task_unlock(current);
1473353d5c30SHugh Dickins 		} else {
1474353d5c30SHugh Dickins 			*user = NULL;
14751da177e4SLinus Torvalds 			return ERR_PTR(-EPERM);
14762584e517SRavikiran G Thirumalai 		}
1477353d5c30SHugh Dickins 	}
14781da177e4SLinus Torvalds 
147939b65252SAnatol Pomozov 	file = ERR_PTR(-ENOSPC);
1480e68375c8SAl Viro 	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
14811da177e4SLinus Torvalds 	if (!inode)
1482e68375c8SAl Viro 		goto out;
1483e1832f29SStephen Smalley 	if (creat_flags == HUGETLB_SHMFS_INODE)
1484e1832f29SStephen Smalley 		inode->i_flags |= S_PRIVATE;
14851da177e4SLinus Torvalds 
14861da177e4SLinus Torvalds 	inode->i_size = size;
14876d6b77f1SMiklos Szeredi 	clear_nlink(inode);
1488ce8d2cdfSDave Hansen 
148933b8f84aSMike Kravetz 	if (!hugetlb_reserve_pages(inode, 0,
1490e68375c8SAl Viro 			size >> huge_page_shift(hstate_inode(inode)), NULL,
1491e68375c8SAl Viro 			acctflag))
1492e68375c8SAl Viro 		file = ERR_PTR(-ENOMEM);
1493e68375c8SAl Viro 	else
1494e68375c8SAl Viro 		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1495ce8d2cdfSDave Hansen 					&hugetlbfs_file_operations);
1496e68375c8SAl Viro 	if (!IS_ERR(file))
14971da177e4SLinus Torvalds 		return file;
14981da177e4SLinus Torvalds 
1499b45b5bd6SDavid Gibson 	iput(inode);
1500e68375c8SAl Viro out:
1501353d5c30SHugh Dickins 	if (*user) {
1502353d5c30SHugh Dickins 		user_shm_unlock(size, *user);
1503353d5c30SHugh Dickins 		*user = NULL;
1504353d5c30SHugh Dickins 	}
150539b65252SAnatol Pomozov 	return file;
15061da177e4SLinus Torvalds }
15071da177e4SLinus Torvalds 
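/*
 * Illustrative caller-side sketch (the name and flags are hypothetical):
 * per the alignment note above hugetlb_file_setup(), round the request up
 * to the huge page size before calling it:
 *
 *	struct hstate *h = hstate_sizelog(page_size_log);
 *	size_t aligned_size = ALIGN(size, huge_page_size(h));
 *	struct user_struct *user;
 *	struct file *file = hugetlb_file_setup("hypothetical", aligned_size,
 *					       VM_NORESERVE, &user,
 *					       HUGETLB_ANONHUGE_INODE,
 *					       page_size_log);
 */
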
150832021982SDavid Howells static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
150932021982SDavid Howells {
151032021982SDavid Howells 	struct fs_context *fc;
151132021982SDavid Howells 	struct vfsmount *mnt;
151232021982SDavid Howells 
151332021982SDavid Howells 	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
151432021982SDavid Howells 	if (IS_ERR(fc)) {
151532021982SDavid Howells 		mnt = ERR_CAST(fc);
151632021982SDavid Howells 	} else {
151732021982SDavid Howells 		struct hugetlbfs_fs_context *ctx = fc->fs_private;
151832021982SDavid Howells 		ctx->hstate = h;
151932021982SDavid Howells 		mnt = fc_mount(fc);
152032021982SDavid Howells 		put_fs_context(fc);
152132021982SDavid Howells 	}
152232021982SDavid Howells 	if (IS_ERR(mnt))
1523a25fddceSMiaohe Lin 		pr_err("Cannot mount internal hugetlbfs for page size %luK\n",
1524a25fddceSMiaohe Lin 		       huge_page_size(h) >> 10);
152532021982SDavid Howells 	return mnt;
152632021982SDavid Howells }
152732021982SDavid Howells 
15281da177e4SLinus Torvalds static int __init init_hugetlbfs_fs(void)
15291da177e4SLinus Torvalds {
153032021982SDavid Howells 	struct vfsmount *mnt;
153142d7395fSAndi Kleen 	struct hstate *h;
15321da177e4SLinus Torvalds 	int error;
153342d7395fSAndi Kleen 	int i;
15341da177e4SLinus Torvalds 
1535457c1b27SNishanth Aravamudan 	if (!hugepages_supported()) {
15369b857d26SAndrew Morton 		pr_info("disabling because there are no supported hugepage sizes\n");
1537457c1b27SNishanth Aravamudan 		return -ENOTSUPP;
1538457c1b27SNishanth Aravamudan 	}
1539457c1b27SNishanth Aravamudan 
1540d1d5e05fSHillf Danton 	error = -ENOMEM;
15411da177e4SLinus Torvalds 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
15421da177e4SLinus Torvalds 					sizeof(struct hugetlbfs_inode_info),
15435d097056SVladimir Davydov 					0, SLAB_ACCOUNT, init_once);
15441da177e4SLinus Torvalds 	if (hugetlbfs_inode_cachep == NULL)
15458fc312b3SMike Kravetz 		goto out;
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds 	error = register_filesystem(&hugetlbfs_fs_type);
15481da177e4SLinus Torvalds 	if (error)
15498fc312b3SMike Kravetz 		goto out_free;
15501da177e4SLinus Torvalds 
15518fc312b3SMike Kravetz 	/* default hstate mount is required */
15523b2275a8SMiaohe Lin 	mnt = mount_one_hugetlbfs(&default_hstate);
15538fc312b3SMike Kravetz 	if (IS_ERR(mnt)) {
15548fc312b3SMike Kravetz 		error = PTR_ERR(mnt);
15558fc312b3SMike Kravetz 		goto out_unreg;
15568fc312b3SMike Kravetz 	}
15578fc312b3SMike Kravetz 	hugetlbfs_vfsmount[default_hstate_idx] = mnt;
15588fc312b3SMike Kravetz 
15598fc312b3SMike Kravetz 	/* other hstates are optional */
156042d7395fSAndi Kleen 	i = 0;
156142d7395fSAndi Kleen 	for_each_hstate(h) {
156215f0ec94SJan Stancek 		if (i == default_hstate_idx) {
156315f0ec94SJan Stancek 			i++;
15648fc312b3SMike Kravetz 			continue;
156515f0ec94SJan Stancek 		}
15668fc312b3SMike Kravetz 
156732021982SDavid Howells 		mnt = mount_one_hugetlbfs(h);
15688fc312b3SMike Kravetz 		if (IS_ERR(mnt))
15698fc312b3SMike Kravetz 			hugetlbfs_vfsmount[i] = NULL;
15708fc312b3SMike Kravetz 		else
157132021982SDavid Howells 			hugetlbfs_vfsmount[i] = mnt;
157242d7395fSAndi Kleen 		i++;
157342d7395fSAndi Kleen 	}
157432021982SDavid Howells 
157542d7395fSAndi Kleen 	return 0;
15761da177e4SLinus Torvalds 
15778fc312b3SMike Kravetz  out_unreg:
15788fc312b3SMike Kravetz 	(void)unregister_filesystem(&hugetlbfs_fs_type);
15798fc312b3SMike Kravetz  out_free:
15801da177e4SLinus Torvalds 	kmem_cache_destroy(hugetlbfs_inode_cachep);
15818fc312b3SMike Kravetz  out:
15821da177e4SLinus Torvalds 	return error;
15831da177e4SLinus Torvalds }
15843e89e1c5SPaul Gortmaker fs_initcall(init_hugetlbfs_fs)
1585