1 /* 2 * hugetlbpage-backed filesystem. Based on ramfs. 3 * 4 * William Irwin, 2002 5 * 6 * Copyright (C) 2002 Linus Torvalds. 7 */ 8 9 #include <linux/module.h> 10 #include <linux/thread_info.h> 11 #include <asm/current.h> 12 #include <linux/sched.h> /* remove ASAP */ 13 #include <linux/fs.h> 14 #include <linux/mount.h> 15 #include <linux/file.h> 16 #include <linux/kernel.h> 17 #include <linux/writeback.h> 18 #include <linux/pagemap.h> 19 #include <linux/highmem.h> 20 #include <linux/init.h> 21 #include <linux/string.h> 22 #include <linux/capability.h> 23 #include <linux/ctype.h> 24 #include <linux/backing-dev.h> 25 #include <linux/hugetlb.h> 26 #include <linux/pagevec.h> 27 #include <linux/parser.h> 28 #include <linux/mman.h> 29 #include <linux/slab.h> 30 #include <linux/dnotify.h> 31 #include <linux/statfs.h> 32 #include <linux/security.h> 33 34 #include <asm/uaccess.h> 35 36 /* some random number */ 37 #define HUGETLBFS_MAGIC 0x958458f6 38 39 static const struct super_operations hugetlbfs_ops; 40 static const struct address_space_operations hugetlbfs_aops; 41 const struct file_operations hugetlbfs_file_operations; 42 static const struct inode_operations hugetlbfs_dir_inode_operations; 43 static const struct inode_operations hugetlbfs_inode_operations; 44 45 static struct backing_dev_info hugetlbfs_backing_dev_info = { 46 .ra_pages = 0, /* No readahead */ 47 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 48 }; 49 50 int sysctl_hugetlb_shm_group; 51 52 enum { 53 Opt_size, Opt_nr_inodes, 54 Opt_mode, Opt_uid, Opt_gid, 55 Opt_pagesize, 56 Opt_err, 57 }; 58 59 static const match_table_t tokens = { 60 {Opt_size, "size=%s"}, 61 {Opt_nr_inodes, "nr_inodes=%s"}, 62 {Opt_mode, "mode=%o"}, 63 {Opt_uid, "uid=%u"}, 64 {Opt_gid, "gid=%u"}, 65 {Opt_pagesize, "pagesize=%s"}, 66 {Opt_err, NULL}, 67 }; 68 69 static void huge_pagevec_release(struct pagevec *pvec) 70 { 71 int i; 72 73 for (i = 0; i < pagevec_count(pvec); ++i) 74 put_page(pvec->pages[i]); 75 76 pagevec_reinit(pvec); 77 } 78 79 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) 80 { 81 struct inode *inode = file->f_path.dentry->d_inode; 82 loff_t len, vma_len; 83 int ret; 84 struct hstate *h = hstate_file(file); 85 86 /* 87 * vma address alignment (but not the pgoff alignment) has 88 * already been checked by prepare_hugepage_range. If you add 89 * any error returns here, do so after setting VM_HUGETLB, so 90 * is_vm_hugetlb_page tests below unmap_region go the right 91 * way when do_mmap_pgoff unwinds (may be important on powerpc 92 * and ia64). 93 */ 94 vma->vm_flags |= VM_HUGETLB | VM_RESERVED; 95 vma->vm_ops = &hugetlb_vm_ops; 96 97 if (vma->vm_pgoff & ~(huge_page_mask(h) >> PAGE_SHIFT)) 98 return -EINVAL; 99 100 vma_len = (loff_t)(vma->vm_end - vma->vm_start); 101 102 mutex_lock(&inode->i_mutex); 103 file_accessed(file); 104 105 ret = -ENOMEM; 106 len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 107 108 if (hugetlb_reserve_pages(inode, 109 vma->vm_pgoff >> huge_page_order(h), 110 len >> huge_page_shift(h), vma, 111 vma->vm_flags)) 112 goto out; 113 114 ret = 0; 115 hugetlb_prefault_arch_hook(vma->vm_mm); 116 if (vma->vm_flags & VM_WRITE && inode->i_size < len) 117 inode->i_size = len; 118 out: 119 mutex_unlock(&inode->i_mutex); 120 121 return ret; 122 } 123 124 /* 125 * Called under down_write(mmap_sem). 126 */ 127 128 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA 129 static unsigned long 130 hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 131 unsigned long len, unsigned long pgoff, unsigned long flags) 132 { 133 struct mm_struct *mm = current->mm; 134 struct vm_area_struct *vma; 135 unsigned long start_addr; 136 struct hstate *h = hstate_file(file); 137 138 if (len & ~huge_page_mask(h)) 139 return -EINVAL; 140 if (len > TASK_SIZE) 141 return -ENOMEM; 142 143 if (flags & MAP_FIXED) { 144 if (prepare_hugepage_range(file, addr, len)) 145 return -EINVAL; 146 return addr; 147 } 148 149 if (addr) { 150 addr = ALIGN(addr, huge_page_size(h)); 151 vma = find_vma(mm, addr); 152 if (TASK_SIZE - len >= addr && 153 (!vma || addr + len <= vma->vm_start)) 154 return addr; 155 } 156 157 start_addr = mm->free_area_cache; 158 159 if (len <= mm->cached_hole_size) 160 start_addr = TASK_UNMAPPED_BASE; 161 162 full_search: 163 addr = ALIGN(start_addr, huge_page_size(h)); 164 165 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 166 /* At this point: (!vma || addr < vma->vm_end). */ 167 if (TASK_SIZE - len < addr) { 168 /* 169 * Start a new search - just in case we missed 170 * some holes. 171 */ 172 if (start_addr != TASK_UNMAPPED_BASE) { 173 start_addr = TASK_UNMAPPED_BASE; 174 goto full_search; 175 } 176 return -ENOMEM; 177 } 178 179 if (!vma || addr + len <= vma->vm_start) 180 return addr; 181 addr = ALIGN(vma->vm_end, huge_page_size(h)); 182 } 183 } 184 #endif 185 186 static int 187 hugetlbfs_read_actor(struct page *page, unsigned long offset, 188 char __user *buf, unsigned long count, 189 unsigned long size) 190 { 191 char *kaddr; 192 unsigned long left, copied = 0; 193 int i, chunksize; 194 195 if (size > count) 196 size = count; 197 198 /* Find which 4k chunk and offset with in that chunk */ 199 i = offset >> PAGE_CACHE_SHIFT; 200 offset = offset & ~PAGE_CACHE_MASK; 201 202 while (size) { 203 chunksize = PAGE_CACHE_SIZE; 204 if (offset) 205 chunksize -= offset; 206 if (chunksize > size) 207 chunksize = size; 208 kaddr = kmap(&page[i]); 209 left = __copy_to_user(buf, kaddr + offset, chunksize); 210 kunmap(&page[i]); 211 if (left) { 212 copied += (chunksize - left); 213 break; 214 } 215 offset = 0; 216 size -= chunksize; 217 buf += chunksize; 218 copied += chunksize; 219 i++; 220 } 221 return copied ? copied : -EFAULT; 222 } 223 224 /* 225 * Support for read() - Find the page attached to f_mapping and copy out the 226 * data. Its *very* similar to do_generic_mapping_read(), we can't use that 227 * since it has PAGE_CACHE_SIZE assumptions. 228 */ 229 static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, 230 size_t len, loff_t *ppos) 231 { 232 struct hstate *h = hstate_file(filp); 233 struct address_space *mapping = filp->f_mapping; 234 struct inode *inode = mapping->host; 235 unsigned long index = *ppos >> huge_page_shift(h); 236 unsigned long offset = *ppos & ~huge_page_mask(h); 237 unsigned long end_index; 238 loff_t isize; 239 ssize_t retval = 0; 240 241 mutex_lock(&inode->i_mutex); 242 243 /* validate length */ 244 if (len == 0) 245 goto out; 246 247 isize = i_size_read(inode); 248 if (!isize) 249 goto out; 250 251 end_index = (isize - 1) >> huge_page_shift(h); 252 for (;;) { 253 struct page *page; 254 unsigned long nr, ret; 255 int ra; 256 257 /* nr is the maximum number of bytes to copy from this page */ 258 nr = huge_page_size(h); 259 if (index >= end_index) { 260 if (index > end_index) 261 goto out; 262 nr = ((isize - 1) & ~huge_page_mask(h)) + 1; 263 if (nr <= offset) { 264 goto out; 265 } 266 } 267 nr = nr - offset; 268 269 /* Find the page */ 270 page = find_get_page(mapping, index); 271 if (unlikely(page == NULL)) { 272 /* 273 * We have a HOLE, zero out the user-buffer for the 274 * length of the hole or request. 275 */ 276 ret = len < nr ? len : nr; 277 if (clear_user(buf, ret)) 278 ra = -EFAULT; 279 else 280 ra = 0; 281 } else { 282 /* 283 * We have the page, copy it to user space buffer. 284 */ 285 ra = hugetlbfs_read_actor(page, offset, buf, len, nr); 286 ret = ra; 287 } 288 if (ra < 0) { 289 if (retval == 0) 290 retval = ra; 291 if (page) 292 page_cache_release(page); 293 goto out; 294 } 295 296 offset += ret; 297 retval += ret; 298 len -= ret; 299 index += offset >> huge_page_shift(h); 300 offset &= ~huge_page_mask(h); 301 302 if (page) 303 page_cache_release(page); 304 305 /* short read or no more work */ 306 if ((ret != nr) || (len == 0)) 307 break; 308 } 309 out: 310 *ppos = ((loff_t)index << huge_page_shift(h)) + offset; 311 mutex_unlock(&inode->i_mutex); 312 return retval; 313 } 314 315 /* 316 * Read a page. Again trivial. If it didn't already exist 317 * in the page cache, it is zero-filled. 318 */ 319 static int hugetlbfs_readpage(struct file *file, struct page * page) 320 { 321 unlock_page(page); 322 return -EINVAL; 323 } 324 325 static int hugetlbfs_write_begin(struct file *file, 326 struct address_space *mapping, 327 loff_t pos, unsigned len, unsigned flags, 328 struct page **pagep, void **fsdata) 329 { 330 return -EINVAL; 331 } 332 333 static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, 334 loff_t pos, unsigned len, unsigned copied, 335 struct page *page, void *fsdata) 336 { 337 BUG(); 338 return -EINVAL; 339 } 340 341 static void truncate_huge_page(struct page *page) 342 { 343 cancel_dirty_page(page, /* No IO accounting for huge pages? */0); 344 ClearPageUptodate(page); 345 remove_from_page_cache(page); 346 put_page(page); 347 } 348 349 static void truncate_hugepages(struct inode *inode, loff_t lstart) 350 { 351 struct hstate *h = hstate_inode(inode); 352 struct address_space *mapping = &inode->i_data; 353 const pgoff_t start = lstart >> huge_page_shift(h); 354 struct pagevec pvec; 355 pgoff_t next; 356 int i, freed = 0; 357 358 pagevec_init(&pvec, 0); 359 next = start; 360 while (1) { 361 if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 362 if (next == start) 363 break; 364 next = start; 365 continue; 366 } 367 368 for (i = 0; i < pagevec_count(&pvec); ++i) { 369 struct page *page = pvec.pages[i]; 370 371 lock_page(page); 372 if (page->index > next) 373 next = page->index; 374 ++next; 375 truncate_huge_page(page); 376 unlock_page(page); 377 freed++; 378 } 379 huge_pagevec_release(&pvec); 380 } 381 BUG_ON(!lstart && mapping->nrpages); 382 hugetlb_unreserve_pages(inode, start, freed); 383 } 384 385 static void hugetlbfs_delete_inode(struct inode *inode) 386 { 387 truncate_hugepages(inode, 0); 388 clear_inode(inode); 389 } 390 391 static void hugetlbfs_forget_inode(struct inode *inode) __releases(inode_lock) 392 { 393 struct super_block *sb = inode->i_sb; 394 395 if (!hlist_unhashed(&inode->i_hash)) { 396 if (!(inode->i_state & (I_DIRTY|I_SYNC))) 397 list_move(&inode->i_list, &inode_unused); 398 inodes_stat.nr_unused++; 399 if (!sb || (sb->s_flags & MS_ACTIVE)) { 400 spin_unlock(&inode_lock); 401 return; 402 } 403 inode->i_state |= I_WILL_FREE; 404 spin_unlock(&inode_lock); 405 /* 406 * write_inode_now is a noop as we set BDI_CAP_NO_WRITEBACK 407 * in our backing_dev_info. 408 */ 409 write_inode_now(inode, 1); 410 spin_lock(&inode_lock); 411 inode->i_state &= ~I_WILL_FREE; 412 inodes_stat.nr_unused--; 413 hlist_del_init(&inode->i_hash); 414 } 415 list_del_init(&inode->i_list); 416 list_del_init(&inode->i_sb_list); 417 inode->i_state |= I_FREEING; 418 inodes_stat.nr_inodes--; 419 spin_unlock(&inode_lock); 420 truncate_hugepages(inode, 0); 421 clear_inode(inode); 422 destroy_inode(inode); 423 } 424 425 static void hugetlbfs_drop_inode(struct inode *inode) 426 { 427 if (!inode->i_nlink) 428 generic_delete_inode(inode); 429 else 430 hugetlbfs_forget_inode(inode); 431 } 432 433 static inline void 434 hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff) 435 { 436 struct vm_area_struct *vma; 437 struct prio_tree_iter iter; 438 439 vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) { 440 unsigned long v_offset; 441 442 /* 443 * Can the expression below overflow on 32-bit arches? 444 * No, because the prio_tree returns us only those vmas 445 * which overlap the truncated area starting at pgoff, 446 * and no vma on a 32-bit arch can span beyond the 4GB. 447 */ 448 if (vma->vm_pgoff < pgoff) 449 v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT; 450 else 451 v_offset = 0; 452 453 __unmap_hugepage_range(vma, 454 vma->vm_start + v_offset, vma->vm_end, NULL); 455 } 456 } 457 458 static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) 459 { 460 pgoff_t pgoff; 461 struct address_space *mapping = inode->i_mapping; 462 struct hstate *h = hstate_inode(inode); 463 464 BUG_ON(offset & ~huge_page_mask(h)); 465 pgoff = offset >> PAGE_SHIFT; 466 467 i_size_write(inode, offset); 468 spin_lock(&mapping->i_mmap_lock); 469 if (!prio_tree_empty(&mapping->i_mmap)) 470 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); 471 spin_unlock(&mapping->i_mmap_lock); 472 truncate_hugepages(inode, offset); 473 return 0; 474 } 475 476 static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) 477 { 478 struct inode *inode = dentry->d_inode; 479 struct hstate *h = hstate_inode(inode); 480 int error; 481 unsigned int ia_valid = attr->ia_valid; 482 483 BUG_ON(!inode); 484 485 error = inode_change_ok(inode, attr); 486 if (error) 487 goto out; 488 489 if (ia_valid & ATTR_SIZE) { 490 error = -EINVAL; 491 if (!(attr->ia_size & ~huge_page_mask(h))) 492 error = hugetlb_vmtruncate(inode, attr->ia_size); 493 if (error) 494 goto out; 495 attr->ia_valid &= ~ATTR_SIZE; 496 } 497 error = inode_setattr(inode, attr); 498 out: 499 return error; 500 } 501 502 static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, 503 gid_t gid, int mode, dev_t dev) 504 { 505 struct inode *inode; 506 507 inode = new_inode(sb); 508 if (inode) { 509 struct hugetlbfs_inode_info *info; 510 inode->i_mode = mode; 511 inode->i_uid = uid; 512 inode->i_gid = gid; 513 inode->i_mapping->a_ops = &hugetlbfs_aops; 514 inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; 515 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 516 INIT_LIST_HEAD(&inode->i_mapping->private_list); 517 info = HUGETLBFS_I(inode); 518 mpol_shared_policy_init(&info->policy, NULL); 519 switch (mode & S_IFMT) { 520 default: 521 init_special_inode(inode, mode, dev); 522 break; 523 case S_IFREG: 524 inode->i_op = &hugetlbfs_inode_operations; 525 inode->i_fop = &hugetlbfs_file_operations; 526 break; 527 case S_IFDIR: 528 inode->i_op = &hugetlbfs_dir_inode_operations; 529 inode->i_fop = &simple_dir_operations; 530 531 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 532 inc_nlink(inode); 533 break; 534 case S_IFLNK: 535 inode->i_op = &page_symlink_inode_operations; 536 break; 537 } 538 } 539 return inode; 540 } 541 542 /* 543 * File creation. Allocate an inode, and we're done.. 544 */ 545 static int hugetlbfs_mknod(struct inode *dir, 546 struct dentry *dentry, int mode, dev_t dev) 547 { 548 struct inode *inode; 549 int error = -ENOSPC; 550 gid_t gid; 551 552 if (dir->i_mode & S_ISGID) { 553 gid = dir->i_gid; 554 if (S_ISDIR(mode)) 555 mode |= S_ISGID; 556 } else { 557 gid = current_fsgid(); 558 } 559 inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(), gid, mode, dev); 560 if (inode) { 561 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 562 d_instantiate(dentry, inode); 563 dget(dentry); /* Extra count - pin the dentry in core */ 564 error = 0; 565 } 566 return error; 567 } 568 569 static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 570 { 571 int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0); 572 if (!retval) 573 inc_nlink(dir); 574 return retval; 575 } 576 577 static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) 578 { 579 return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0); 580 } 581 582 static int hugetlbfs_symlink(struct inode *dir, 583 struct dentry *dentry, const char *symname) 584 { 585 struct inode *inode; 586 int error = -ENOSPC; 587 gid_t gid; 588 589 if (dir->i_mode & S_ISGID) 590 gid = dir->i_gid; 591 else 592 gid = current_fsgid(); 593 594 inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(), 595 gid, S_IFLNK|S_IRWXUGO, 0); 596 if (inode) { 597 int l = strlen(symname)+1; 598 error = page_symlink(inode, symname, l); 599 if (!error) { 600 d_instantiate(dentry, inode); 601 dget(dentry); 602 } else 603 iput(inode); 604 } 605 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 606 607 return error; 608 } 609 610 /* 611 * mark the head page dirty 612 */ 613 static int hugetlbfs_set_page_dirty(struct page *page) 614 { 615 struct page *head = compound_head(page); 616 617 SetPageDirty(head); 618 return 0; 619 } 620 621 static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) 622 { 623 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); 624 struct hstate *h = hstate_inode(dentry->d_inode); 625 626 buf->f_type = HUGETLBFS_MAGIC; 627 buf->f_bsize = huge_page_size(h); 628 if (sbinfo) { 629 spin_lock(&sbinfo->stat_lock); 630 /* If no limits set, just report 0 for max/free/used 631 * blocks, like simple_statfs() */ 632 if (sbinfo->max_blocks >= 0) { 633 buf->f_blocks = sbinfo->max_blocks; 634 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks; 635 buf->f_files = sbinfo->max_inodes; 636 buf->f_ffree = sbinfo->free_inodes; 637 } 638 spin_unlock(&sbinfo->stat_lock); 639 } 640 buf->f_namelen = NAME_MAX; 641 return 0; 642 } 643 644 static void hugetlbfs_put_super(struct super_block *sb) 645 { 646 struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); 647 648 if (sbi) { 649 sb->s_fs_info = NULL; 650 kfree(sbi); 651 } 652 } 653 654 static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) 655 { 656 if (sbinfo->free_inodes >= 0) { 657 spin_lock(&sbinfo->stat_lock); 658 if (unlikely(!sbinfo->free_inodes)) { 659 spin_unlock(&sbinfo->stat_lock); 660 return 0; 661 } 662 sbinfo->free_inodes--; 663 spin_unlock(&sbinfo->stat_lock); 664 } 665 666 return 1; 667 } 668 669 static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) 670 { 671 if (sbinfo->free_inodes >= 0) { 672 spin_lock(&sbinfo->stat_lock); 673 sbinfo->free_inodes++; 674 spin_unlock(&sbinfo->stat_lock); 675 } 676 } 677 678 679 static struct kmem_cache *hugetlbfs_inode_cachep; 680 681 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) 682 { 683 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); 684 struct hugetlbfs_inode_info *p; 685 686 if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) 687 return NULL; 688 p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); 689 if (unlikely(!p)) { 690 hugetlbfs_inc_free_inodes(sbinfo); 691 return NULL; 692 } 693 return &p->vfs_inode; 694 } 695 696 static void hugetlbfs_destroy_inode(struct inode *inode) 697 { 698 hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); 699 mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); 700 kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); 701 } 702 703 static const struct address_space_operations hugetlbfs_aops = { 704 .readpage = hugetlbfs_readpage, 705 .write_begin = hugetlbfs_write_begin, 706 .write_end = hugetlbfs_write_end, 707 .set_page_dirty = hugetlbfs_set_page_dirty, 708 }; 709 710 711 static void init_once(void *foo) 712 { 713 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 714 715 inode_init_once(&ei->vfs_inode); 716 } 717 718 const struct file_operations hugetlbfs_file_operations = { 719 .read = hugetlbfs_read, 720 .mmap = hugetlbfs_file_mmap, 721 .fsync = simple_sync_file, 722 .get_unmapped_area = hugetlb_get_unmapped_area, 723 }; 724 725 static const struct inode_operations hugetlbfs_dir_inode_operations = { 726 .create = hugetlbfs_create, 727 .lookup = simple_lookup, 728 .link = simple_link, 729 .unlink = simple_unlink, 730 .symlink = hugetlbfs_symlink, 731 .mkdir = hugetlbfs_mkdir, 732 .rmdir = simple_rmdir, 733 .mknod = hugetlbfs_mknod, 734 .rename = simple_rename, 735 .setattr = hugetlbfs_setattr, 736 }; 737 738 static const struct inode_operations hugetlbfs_inode_operations = { 739 .setattr = hugetlbfs_setattr, 740 }; 741 742 static const struct super_operations hugetlbfs_ops = { 743 .alloc_inode = hugetlbfs_alloc_inode, 744 .destroy_inode = hugetlbfs_destroy_inode, 745 .statfs = hugetlbfs_statfs, 746 .delete_inode = hugetlbfs_delete_inode, 747 .drop_inode = hugetlbfs_drop_inode, 748 .put_super = hugetlbfs_put_super, 749 .show_options = generic_show_options, 750 }; 751 752 static int 753 hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) 754 { 755 char *p, *rest; 756 substring_t args[MAX_OPT_ARGS]; 757 int option; 758 unsigned long long size = 0; 759 enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE; 760 761 if (!options) 762 return 0; 763 764 while ((p = strsep(&options, ",")) != NULL) { 765 int token; 766 if (!*p) 767 continue; 768 769 token = match_token(p, tokens, args); 770 switch (token) { 771 case Opt_uid: 772 if (match_int(&args[0], &option)) 773 goto bad_val; 774 pconfig->uid = option; 775 break; 776 777 case Opt_gid: 778 if (match_int(&args[0], &option)) 779 goto bad_val; 780 pconfig->gid = option; 781 break; 782 783 case Opt_mode: 784 if (match_octal(&args[0], &option)) 785 goto bad_val; 786 pconfig->mode = option & 01777U; 787 break; 788 789 case Opt_size: { 790 /* memparse() will accept a K/M/G without a digit */ 791 if (!isdigit(*args[0].from)) 792 goto bad_val; 793 size = memparse(args[0].from, &rest); 794 setsize = SIZE_STD; 795 if (*rest == '%') 796 setsize = SIZE_PERCENT; 797 break; 798 } 799 800 case Opt_nr_inodes: 801 /* memparse() will accept a K/M/G without a digit */ 802 if (!isdigit(*args[0].from)) 803 goto bad_val; 804 pconfig->nr_inodes = memparse(args[0].from, &rest); 805 break; 806 807 case Opt_pagesize: { 808 unsigned long ps; 809 ps = memparse(args[0].from, &rest); 810 pconfig->hstate = size_to_hstate(ps); 811 if (!pconfig->hstate) { 812 printk(KERN_ERR 813 "hugetlbfs: Unsupported page size %lu MB\n", 814 ps >> 20); 815 return -EINVAL; 816 } 817 break; 818 } 819 820 default: 821 printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n", 822 p); 823 return -EINVAL; 824 break; 825 } 826 } 827 828 /* Do size after hstate is set up */ 829 if (setsize > NO_SIZE) { 830 struct hstate *h = pconfig->hstate; 831 if (setsize == SIZE_PERCENT) { 832 size <<= huge_page_shift(h); 833 size *= h->max_huge_pages; 834 do_div(size, 100); 835 } 836 pconfig->nr_blocks = (size >> huge_page_shift(h)); 837 } 838 839 return 0; 840 841 bad_val: 842 printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n", 843 args[0].from, p); 844 return -EINVAL; 845 } 846 847 static int 848 hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) 849 { 850 struct inode * inode; 851 struct dentry * root; 852 int ret; 853 struct hugetlbfs_config config; 854 struct hugetlbfs_sb_info *sbinfo; 855 856 save_mount_options(sb, data); 857 858 config.nr_blocks = -1; /* No limit on size by default */ 859 config.nr_inodes = -1; /* No limit on number of inodes by default */ 860 config.uid = current_fsuid(); 861 config.gid = current_fsgid(); 862 config.mode = 0755; 863 config.hstate = &default_hstate; 864 ret = hugetlbfs_parse_options(data, &config); 865 if (ret) 866 return ret; 867 868 sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); 869 if (!sbinfo) 870 return -ENOMEM; 871 sb->s_fs_info = sbinfo; 872 sbinfo->hstate = config.hstate; 873 spin_lock_init(&sbinfo->stat_lock); 874 sbinfo->max_blocks = config.nr_blocks; 875 sbinfo->free_blocks = config.nr_blocks; 876 sbinfo->max_inodes = config.nr_inodes; 877 sbinfo->free_inodes = config.nr_inodes; 878 sb->s_maxbytes = MAX_LFS_FILESIZE; 879 sb->s_blocksize = huge_page_size(config.hstate); 880 sb->s_blocksize_bits = huge_page_shift(config.hstate); 881 sb->s_magic = HUGETLBFS_MAGIC; 882 sb->s_op = &hugetlbfs_ops; 883 sb->s_time_gran = 1; 884 inode = hugetlbfs_get_inode(sb, config.uid, config.gid, 885 S_IFDIR | config.mode, 0); 886 if (!inode) 887 goto out_free; 888 889 root = d_alloc_root(inode); 890 if (!root) { 891 iput(inode); 892 goto out_free; 893 } 894 sb->s_root = root; 895 return 0; 896 out_free: 897 kfree(sbinfo); 898 return -ENOMEM; 899 } 900 901 int hugetlb_get_quota(struct address_space *mapping, long delta) 902 { 903 int ret = 0; 904 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); 905 906 if (sbinfo->free_blocks > -1) { 907 spin_lock(&sbinfo->stat_lock); 908 if (sbinfo->free_blocks - delta >= 0) 909 sbinfo->free_blocks -= delta; 910 else 911 ret = -ENOMEM; 912 spin_unlock(&sbinfo->stat_lock); 913 } 914 915 return ret; 916 } 917 918 void hugetlb_put_quota(struct address_space *mapping, long delta) 919 { 920 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb); 921 922 if (sbinfo->free_blocks > -1) { 923 spin_lock(&sbinfo->stat_lock); 924 sbinfo->free_blocks += delta; 925 spin_unlock(&sbinfo->stat_lock); 926 } 927 } 928 929 static int hugetlbfs_get_sb(struct file_system_type *fs_type, 930 int flags, const char *dev_name, void *data, struct vfsmount *mnt) 931 { 932 return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super, mnt); 933 } 934 935 static struct file_system_type hugetlbfs_fs_type = { 936 .name = "hugetlbfs", 937 .get_sb = hugetlbfs_get_sb, 938 .kill_sb = kill_litter_super, 939 }; 940 941 static struct vfsmount *hugetlbfs_vfsmount; 942 943 static int can_do_hugetlb_shm(void) 944 { 945 return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); 946 } 947 948 struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag) 949 { 950 int error = -ENOMEM; 951 int unlock_shm = 0; 952 struct file *file; 953 struct inode *inode; 954 struct dentry *dentry, *root; 955 struct qstr quick_string; 956 struct user_struct *user = current_user(); 957 958 if (!hugetlbfs_vfsmount) 959 return ERR_PTR(-ENOENT); 960 961 if (!can_do_hugetlb_shm()) { 962 if (user_shm_lock(size, user)) { 963 unlock_shm = 1; 964 WARN_ONCE(1, 965 "Using mlock ulimits for SHM_HUGETLB deprecated\n"); 966 } else 967 return ERR_PTR(-EPERM); 968 } 969 970 root = hugetlbfs_vfsmount->mnt_root; 971 quick_string.name = name; 972 quick_string.len = strlen(quick_string.name); 973 quick_string.hash = 0; 974 dentry = d_alloc(root, &quick_string); 975 if (!dentry) 976 goto out_shm_unlock; 977 978 error = -ENOSPC; 979 inode = hugetlbfs_get_inode(root->d_sb, current_fsuid(), 980 current_fsgid(), S_IFREG | S_IRWXUGO, 0); 981 if (!inode) 982 goto out_dentry; 983 984 error = -ENOMEM; 985 if (hugetlb_reserve_pages(inode, 0, 986 size >> huge_page_shift(hstate_inode(inode)), NULL, 987 acctflag)) 988 goto out_inode; 989 990 d_instantiate(dentry, inode); 991 inode->i_size = size; 992 inode->i_nlink = 0; 993 994 error = -ENFILE; 995 file = alloc_file(hugetlbfs_vfsmount, dentry, 996 FMODE_WRITE | FMODE_READ, 997 &hugetlbfs_file_operations); 998 if (!file) 999 goto out_dentry; /* inode is already attached */ 1000 1001 return file; 1002 1003 out_inode: 1004 iput(inode); 1005 out_dentry: 1006 dput(dentry); 1007 out_shm_unlock: 1008 if (unlock_shm) 1009 user_shm_unlock(size, user); 1010 return ERR_PTR(error); 1011 } 1012 1013 static int __init init_hugetlbfs_fs(void) 1014 { 1015 int error; 1016 struct vfsmount *vfsmount; 1017 1018 error = bdi_init(&hugetlbfs_backing_dev_info); 1019 if (error) 1020 return error; 1021 1022 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", 1023 sizeof(struct hugetlbfs_inode_info), 1024 0, 0, init_once); 1025 if (hugetlbfs_inode_cachep == NULL) 1026 goto out2; 1027 1028 error = register_filesystem(&hugetlbfs_fs_type); 1029 if (error) 1030 goto out; 1031 1032 vfsmount = kern_mount(&hugetlbfs_fs_type); 1033 1034 if (!IS_ERR(vfsmount)) { 1035 hugetlbfs_vfsmount = vfsmount; 1036 return 0; 1037 } 1038 1039 error = PTR_ERR(vfsmount); 1040 1041 out: 1042 if (error) 1043 kmem_cache_destroy(hugetlbfs_inode_cachep); 1044 out2: 1045 bdi_destroy(&hugetlbfs_backing_dev_info); 1046 return error; 1047 } 1048 1049 static void __exit exit_hugetlbfs_fs(void) 1050 { 1051 kmem_cache_destroy(hugetlbfs_inode_cachep); 1052 unregister_filesystem(&hugetlbfs_fs_type); 1053 bdi_destroy(&hugetlbfs_backing_dev_info); 1054 } 1055 1056 module_init(init_hugetlbfs_fs) 1057 module_exit(exit_hugetlbfs_fs) 1058 1059 MODULE_LICENSE("GPL"); 1060