/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
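
/*
 * Worked example (illustrative, not part of the original source): on a
 * 64-bit build with 4 KiB base pages, PAGE_SHIFT == 12 and
 * BITS_PER_LONG == 64, so PGOFF_LOFFT_MAX sets the top 13 bits
 * (bits 51..63).  A vm_pgoff with any of those bits set would, once
 * shifted left by PAGE_SHIFT to form a byte offset, spill into or past
 * the sign bit of the 64-bit loff_t.
 */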

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
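
/*
 * Illustrative note (not part of the original source): align_mask above
 * is the set of page-granular address bits that must be clear for a
 * huge-page-aligned address.  For a 2 MiB hstate with 4 KiB base pages,
 * PAGE_MASK & ~huge_page_mask(h) == 0x1ff000, so vm_unmapped_area()
 * only returns 2 MiB aligned addresses.  The topdown variant below uses
 * the same mask.
 */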

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}
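
/*
 * Worked example (illustrative, not part of the original source): for a
 * 2 MiB huge page and a copy starting at offset 5000 within it,
 * hugetlbfs_read_actor() computes i = 5000 >> 12 = 1 (the second 4k
 * subpage) and offset = 5000 & 0xfff = 904, then copies subpage by
 * subpage until 'size' bytes are done or a short copy occurs.
 */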

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}
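
/*
 * Worked example (illustrative, not part of the original source): with
 * 4 KiB base pages, a vma whose vm_pgoff is 0 covering file pgoffs
 * [0, 1024) and a truncation starting at pgoff 512 gives
 * v_offset = (512 - 0) << 12 = 2 MiB, so the unmap starts 2 MiB into
 * the vma; end == 0 makes v_end = vma->vm_end.
 */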

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  Region/reserv maps for ranges without associated pages
 *	are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count may need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
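
/*
 * Illustrative summary (not part of the original source) of how the
 * callers below drive remove_inode_hugepages():
 *
 *	truncate:	remove_inode_hugepages(inode, offset, LLONG_MAX);
 *	hole punch:	remove_inode_hugepages(inode, hole_start, hole_end);
 *
 * Truncation relies on i_size (updated under i_mmap_rwsem) to stop new
 * faults, so only the hole punch path needs the per-page fault mutex.
 */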

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
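
/*
 * Worked example (illustrative, not part of the original source): with a
 * 2 MiB hstate, punching offset = 3 MiB, len = 6 MiB rounds to
 * hole_start = round_up(3M, 2M) = 4 MiB and
 * hole_end = round_down(9M, 2M) = 8 MiB, so only the fully covered huge
 * pages in [4M, 8M) are unmapped and removed; the partially covered
 * pages at either edge are left intact.
 */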

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		set_page_huge_active(page);
		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page() due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
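
/*
 * Illustrative userspace view (not part of the original source), assuming
 * a 2 MiB hstate: fallocate(fd, 0, 0, 4 << 20) preallocates two huge
 * pages and grows i_size to 4 MiB, while ftruncate(fd, 3 << 20) fails
 * with EINVAL because the new size is not huge page aligned (see the
 * huge_page_mask() check in hugetlbfs_setattr() above).
 */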

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}
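
/*
 * Illustrative lifetime note (not part of the original source): the
 * resv_map allocated above for regular files and symlinks is stashed in
 * i_mapping->private_data and lives until hugetlbfs_evict_inode() drops
 * the final reference via resv_map_release(); device special inodes get
 * no reserve map, which is why the eviction path checks for NULL.
 */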

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct inode *dir,
			struct dentry *dentry, umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is subpool pointer in hugetlb pages.  Transfer to
	 * new page.  PagePrivate is not associated with page_private for
	 * hugetlb pages and cannot be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
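
/*
 * Illustrative output (not part of the original source): for a 2 MiB
 * hstate mounted with size=1G, the options rendered above would read
 * ",pagesize=2M,size=1073741824" -- the page size is scaled to K or M,
 * while size/min_size are printed in bytes.
 */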

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
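
/*
 * Illustrative note (not part of the original source): free_inodes < 0
 * means the mount was created without an inode limit (nr_inodes defaults
 * to -1 in hugetlbfs_init_fs_context()), so the helpers above skip
 * accounting entirely; e.g. a "nr_inodes=64" mount option would make the
 * 65th allocation fail in hugetlbfs_alloc_inode(), surfacing as ENOSPC
 * to the creating caller.
 */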

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
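
/*
 * Worked example (illustrative, not part of the original source): with a
 * 2 MiB hstate, "size=1G" parses as SIZE_STD and converts to
 * 1G >> 21 = 512 huge pages, while "size=50%" parses as SIZE_PERCENT and
 * converts to half of h->max_huge_pages.
 */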

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}
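
/*
 * Illustrative usage (not part of the original source):
 *
 *	mount -t hugetlbfs -o size=1G,min_size=512M none /dev/hugepages
 *
 * passes the validation above (min <= max), whereas
 * "-o size=512M,min_size=1G" is rejected with -EINVAL by the check just
 * before this comment.
 */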

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;

	/*
	 * Due to the special and limited functionality of hugetlbfs, it does
	 * not work well as a stacking filesystem.
	 */
	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}
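
/*
 * Illustrative caller view (not part of the original source): the SysV
 * shm path reaches hugetlb_file_setup() below for shmget(..., SHM_HUGETLB)
 * with creat_flags == HUGETLB_SHMFS_INODE; callers lacking both
 * CAP_IPC_LOCK and sysctl_hugetlb_shm_group membership are then charged
 * against the mlock rlimit via user_shm_lock().
 */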

/*
 * Note that size should be aligned to the proper hugepage size on the
 * caller side, otherwise hugetlb_reserve_pages reserves one huge page
 * less than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)