/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * William Irwin, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t   uid;
	kgid_t   gid;
	umode_t mode;
	long	nr_blocks;
	long	nr_inodes;
	struct hstate *hstate;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static struct backing_dev_info hugetlbfs_backing_dev_info = {
	.name		= "hugetlbfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_err,	NULL},
};

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	hugetlb_prefault_arch_hook(vma->vm_mm);
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}
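
/*
 * Worked example (illustrative): with 2 MB huge pages and a 4 KB base
 * page size, ~huge_page_mask(h) >> PAGE_SHIFT == 511, so the pgoff
 * check in hugetlbfs_file_mmap() rejects any file offset that is not
 * a multiple of 512 base pages, i.e. not 2 MB aligned.
 */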

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size)
		start_addr = mm->free_area_cache;
	else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}

		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}
#endif
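
/*
 * Illustrative note: the length check above means that, with 2 MB huge
 * pages, mmap(NULL, 5 * 1024 * 1024, ...) on a hugetlbfs file fails
 * with -EINVAL because 5 MB is not a multiple of the huge page size;
 * a 4 MB or 6 MB request would be accepted.
 */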

static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			char __user *buf, unsigned long count,
			unsigned long size)
{
	char *kaddr;
	unsigned long left, copied = 0;
	int i, chunksize;

	if (size > count)
		size = count;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		kaddr = kmap(&page[i]);
		left = __copy_to_user(buf, kaddr + offset, chunksize);
		kunmap(&page[i]);
		if (left) {
			copied += (chunksize - left);
			break;
		}
		offset = 0;
		size -= chunksize;
		buf += chunksize;
		copied += chunksize;
		i++;
	}
	return copied ? copied : -EFAULT;
}
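
/*
 * Worked example (illustrative, PAGE_CACHE_SIZE == 4096): copying 6000
 * bytes starting at offset 5000 within a huge page lands in base page
 * i == 1 at offset 904, so the loop above copies 4096 - 904 == 3192
 * bytes from that page, then the remaining 2808 bytes from base page 2.
 */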

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
			      size_t len, loff_t *ppos)
{
	struct hstate *h = hstate_file(filp);
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = *ppos >> huge_page_shift(h);
	unsigned long offset = *ppos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	/* validate length */
	if (len == 0)
		goto out;

	for (;;) {
		struct page *page;
		unsigned long nr, ret;
		int ra;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			goto out;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			ret = len < nr ? len : nr;
			if (clear_user(buf, ret))
				ra = -EFAULT;
			else
				ra = 0;
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to the user space buffer.
			 */
			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
			ret = ra;
			page_cache_release(page);
		}
		if (ra < 0) {
			if (retval == 0)
				retval = ra;
			goto out;
		}

		offset += ret;
		retval += ret;
		len -= ret;
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);

		/* short read or no more work */
		if ((ret != nr) || (len == 0))
			break;
	}
out:
	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
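
/*
 * Illustrative note: a read over a hole never instantiates huge pages.
 * If a file on a 2 MB hstate has a page at index 0 and nothing at
 * index 1, a read() spanning both copies the real data for index 0 and
 * zero-fills the user buffer for index 1 via the clear_user() path.
 */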

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
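
/*
 * Illustrative note: write(2) is not supported on hugetlbfs; there is
 * no .write in hugetlbfs_file_operations, and ->write_begin fails
 * unconditionally, so no generic write path can ever reach ->write_end
 * (hence the BUG()).  Data enters a hugetlbfs file only through stores
 * to an mmap()ed region.
 */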

static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	truncate_hugepages(inode, 0);
	clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		unmap_hugepage_range(vma, vma->vm_start + v_offset,
				     vma->vm_end, NULL);
	}
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	mutex_lock(&mapping->i_mmap_mutex);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	mutex_unlock(&mapping->i_mmap_mutex);
	truncate_hugepages(inode, offset);
	return 0;
}
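
/*
 * Worked example (illustrative): truncation must be huge page aligned,
 * which the setattr path below enforces.  With 2 MB pages,
 * ftruncate(fd, 8 * 1024 * 1024) unmaps and frees everything from huge
 * page index 4 upward, while ftruncate(fd, 3 * 1024 * 1024) fails with
 * -EINVAL.
 */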

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		INIT_LIST_HEAD(&inode->i_mapping->private_list);
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode, because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later, when we
		 * call mpol_free_shared_policy(), it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc)
		return rc;
	migrate_page_copy(newpage, page);

	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(dentry->d_inode);

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits are set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}
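
/*
 * Illustrative example: since f_bsize is the huge page size, the block
 * counts reported here are in huge page units.  For a mount with
 * size=1G on a 2 MB hstate, statfs() reports f_blocks == 512 and df(1)
 * shows a 1 GB filesystem.
 */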

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read			= hugetlbfs_read,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long size = 0;
	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			size = memparse(args[0].from, &rest);
			setsize = SIZE_STD;
			if (*rest == '%')
				setsize = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				printk(KERN_ERR
				"hugetlbfs: Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		default:
			printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
				p);
			return -EINVAL;
		}
	}

	/* Do size after hstate is set up */
	if (setsize > NO_SIZE) {
		struct hstate *h = pconfig->hstate;
		if (setsize == SIZE_PERCENT) {
			size <<= huge_page_shift(h);
			size *= h->max_huge_pages;
			do_div(size, 100);
		}
		pconfig->nr_blocks = (size >> huge_page_shift(h));
	}

	return 0;

bad_val:
	printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
	       args[0].from, p);
	return -EINVAL;
}
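
/*
 * Worked example (illustrative): for size=50% on a 2 MB hstate with
 * max_huge_pages == 1024, the SIZE_PERCENT branch above computes
 * size = (50 << 21) * 1024 / 100, so nr_blocks == size >> 21 == 512
 * huge pages, i.e. half of the configured pool.
 */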

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.nr_blocks = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	if (config.nr_blocks != -1) {
		sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);	/* kfree(NULL) is a no-op */
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount;

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

struct file *hugetlb_file_setup(const char *name, unsigned long addr,
				size_t size, vm_flags_t acctflag,
				struct user_struct **user, int creat_flags)
{
	int error = -ENOMEM;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr quick_string;
	struct hstate *hstate;
	unsigned long num_pages;

	*user = NULL;
	if (!hugetlbfs_vfsmount)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			printk_once(KERN_WARNING
				"%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	root = hugetlbfs_vfsmount->mnt_root;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc(root, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	path.mnt = mntget(hugetlbfs_vfsmount);
	error = -ENOSPC;
	inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	hstate = hstate_inode(inode);
	size += addr & ~huge_page_mask(hstate);
	num_pages = ALIGN(size, huge_page_size(hstate)) >>
			huge_page_shift(hstate);
	error = -ENOMEM;
	if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (!file)
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return ERR_PTR(error);
}
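
/*
 * Illustrative note: this is how SysV shared memory and MAP_HUGETLB
 * obtain an unlinked file on the internal kern_mount() set up below.
 * For example, shmget(key, 8 * 1024 * 1024, SHM_HUGETLB | IPC_CREAT |
 * 0600) arrives here and reserves four huge pages on a 2 MB hstate.
 */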

static int __init init_hugetlbfs_fs(void)
{
	int error;
	struct vfsmount *vfsmount;

	error = bdi_init(&hugetlbfs_backing_dev_info);
	if (error)
		return error;

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	vfsmount = kern_mount(&hugetlbfs_fs_type);

	if (!IS_ERR(vfsmount)) {
		hugetlbfs_vfsmount = vfsmount;
		return 0;
	}

	error = PTR_ERR(vfsmount);
	/* kern_mount() failed: undo the registration before tearing down */
	unregister_filesystem(&hugetlbfs_fs_type);

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	bdi_destroy(&hugetlbfs_backing_dev_info);
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	kern_unmount(hugetlbfs_vfsmount);
	unregister_filesystem(&hugetlbfs_fs_type);
	bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");