/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t   uid;
	kgid_t   gid;
	umode_t  mode;
	long	 nr_blocks;
	long	 nr_inodes;
	struct hstate *hstate;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static struct backing_dev_info hugetlbfs_backing_dev_info = {
	.name		= "hugetlbfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_err,	NULL},
};

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	hugetlb_prefault_arch_hook(vma->vm_mm);
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			char __user *buf, unsigned long count,
			unsigned long size)
{
	char *kaddr;
	unsigned long left, copied = 0;
	int i, chunksize;

	if (size > count)
		size = count;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		kaddr = kmap(&page[i]);
		left = __copy_to_user(buf, kaddr + offset, chunksize);
		kunmap(&page[i]);
		if (left) {
			copied += (chunksize - left);
			break;
		}
		offset = 0;
		size -= chunksize;
		buf += chunksize;
		copied += chunksize;
		i++;
	}
	return copied ? copied : -EFAULT;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
			      size_t len, loff_t *ppos)
{
	struct hstate *h = hstate_file(filp);
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = *ppos >> huge_page_shift(h);
	unsigned long offset = *ppos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	/* validate length */
	if (len == 0)
		goto out;

	for (;;) {
		struct page *page;
		unsigned long nr, ret;
		int ra;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			goto out;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			ret = len < nr ? len : nr;
			if (clear_user(buf, ret))
				ra = -EFAULT;
			else
				ra = 0;
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
			ret = ra;
			page_cache_release(page);
		}
		if (ra < 0) {
			if (retval == 0)
				retval = ra;
			goto out;
		}

		offset += ret;
		retval += ret;
		len -= ret;
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);

		/* short read or no more work */
		if ((ret != nr) || (len == 0))
			break;
	}
out:
	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	truncate_hugepages(inode, 0);
	clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		unmap_hugepage_range(vma, vma->vm_start + v_offset,
				     vma->vm_end, NULL);
	}
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	mutex_lock(&mapping->i_mmap_mutex);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	mutex_unlock(&mapping->i_mmap_mutex);
	truncate_hugepages(inode, offset);
	return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_mutex.
 */
struct lock_class_key hugetlbfs_i_mmap_mutex_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
				&hugetlbfs_i_mmap_mutex_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		INIT_LIST_HEAD(&inode->i_mapping->private_list);
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(), later when we
		 * call mpol_free_shared_policy() it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(dentry->d_inode);

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read			= hugetlbfs_read,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

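/*
 * Example mount invocations exercising the options parsed by
 * hugetlbfs_parse_options() below (illustrative only; the mount point
 * is a placeholder):
 *
 *	mount -t hugetlbfs -o size=1G,nr_inodes=64 none /mnt/huge
 *	mount -t hugetlbfs -o pagesize=2M,size=50%,mode=0770 none /mnt/huge
 *
 * "size" and "nr_inodes" take memparse() suffixes (K/M/G), and "size"
 * may also be given as a percentage of the selected hstate's pool;
 * "pagesize" must match a configured huge page size or the mount fails
 * with -EINVAL.
 */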
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long size = 0;
	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			size = memparse(args[0].from, &rest);
			setsize = SIZE_STD;
			if (*rest == '%')
				setsize = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				printk(KERN_ERR
				"hugetlbfs: Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		default:
			printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
				p);
			return -EINVAL;
			break;
		}
	}

	/* Do size after hstate is set up */
	if (setsize > NO_SIZE) {
		struct hstate *h = pconfig->hstate;
		if (setsize == SIZE_PERCENT) {
			size <<= huge_page_shift(h);
			size *= h->max_huge_pages;
			do_div(size, 100);
		}
		pconfig->nr_blocks = (size >> huge_page_shift(h));
	}

	return 0;

bad_val:
	printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
		args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.nr_blocks = -1;		/* No limit on size by default */
	config.nr_inodes = -1;		/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	if (config.nr_blocks != -1) {
		sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	if (sbinfo->spool)
		kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

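/*
 * page_size_log is log2 of the requested huge page size (e.g. 21 selects
 * 2 MB pages), or 0 to fall back to the default hstate; hstate_sizelog()
 * does the lookup and get_hstate_idx() turns the result into an index
 * into hstates[], or -1 if that page size is not configured.
 */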
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			printk_once(KERN_WARNING
				"%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

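/*
 * Illustrative call of hugetlb_file_setup() (hypothetical values): per the
 * alignment note above, a caller rounds the requested length up to the
 * huge page size itself before calling, e.g.
 *
 *	len = ALIGN(len, huge_page_size(&default_hstate));
 *	file = hugetlb_file_setup("example", len, 0, &user,
 *				  HUGETLB_ANONHUGE_INODE, 0);
 *
 * where page_size_log == 0 selects the default hstate.
 */
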
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	error = bdi_init(&hugetlbfs_backing_dev_info);
	if (error)
		return error;

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("hugetlb: Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	bdi_destroy(&hugetlbfs_backing_dev_info);
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
	bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");