/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_param_specs[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

static const struct fs_parameter_description hugetlb_fs_parameters = {
	.specs		= hugetlb_param_specs,
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

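/*
 * For example, assuming a 64-bit arch with 4K base pages (PAGE_SHIFT == 12,
 * BITS_PER_LONG == 64), PGOFF_LOFFT_MAX masks the upper 13 bits,
 * 0xfff8000000000000.  A vm_pgoff with any of those bits set describes a
 * file offset of at least 2^51 pages == 2^63 bytes, which would turn
 * negative when converted to the signed loff_t.
 */
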
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

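/*
 * Example for the "must be huge page aligned" check above, assuming 2MB
 * huge pages and 4K base pages: ~huge_page_mask(h) >> PAGE_SHIFT == 0x1ff,
 * so vm_pgoff must be a multiple of 512 base pages, i.e. the mapped file
 * offset must be 2MB aligned.
 */
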
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

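/*
 * Example for hugetlbfs_read_actor() above, assuming 4K base pages: a
 * read of 1000 bytes at offset 4000 within the huge page copies two
 * chunks, the 96 bytes remaining in base page 0 (chunksize 4096 - 4000)
 * and the other 904 bytes from the start of base page 1.
 */
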
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

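/*
 * Example for hugetlb_vmdelete_list() above, assuming 4K base pages: for
 * a vma with vm_pgoff == 0 covering 1024 base pages, a truncate to file
 * page 512 (start == 512, end == 0) gives v_offset == 512 << PAGE_SHIFT
 * == 2MB and v_end == vma->vm_end, so only the second half of the vma,
 * [vma->vm_start + 2MB, vma->vm_end), is unmapped.
 */
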
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

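/*
 * Example for the rounding in hugetlbfs_punch_hole() above, assuming 2MB
 * huge pages: punching offset == 1MB, len == 4MB yields hole_start == 2MB
 * and hole_end == 4MB, so only the single fully-covered huge page in
 * [2MB, 4MB) is removed; the partially-covered pages at either end are
 * left intact.
 */
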
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by huge_add_to_page_cache()
		 * put_page due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

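/*
 * Example for the preallocation rounding in hugetlbfs_fallocate() above,
 * assuming 2MB huge pages: fallocate(fd, 0, 3MB, 3MB) gives start == 1
 * and end == 3, so the huge pages at indices 1 and 2 (file range
 * [2MB, 6MB)) are allocated; every huge page the byte range touches is
 * preallocated.
 */
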
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct inode *dir,
			struct dentry *dentry, umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is subpool pointer in hugetlb pages.  Transfer to
	 * new page.  PagePrivate is not associated with page_private for
	 * hugetlb pages and can not be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

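/*
 * Example output from hugetlbfs_show_options() above, assuming a mount
 * with 2MB huge pages, mode=1777 and size=1G (a 512 huge page subpool):
 * ",mode=1777,pagesize=2M,size=1073741824".  uid/gid are omitted for
 * root and nr_inodes/min_size are omitted when unlimited/unset.
 */
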
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage    = hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode    = hugetlbfs_alloc_inode,
	.free_inode     = hugetlbfs_free_inode,
	.destroy_inode  = hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

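/*
 * Worked example for hugetlbfs_size_to_hpages() above, assuming a 2MB
 * hstate with max_huge_pages == 1024: "size=50%" arrives as size_opt ==
 * 50 with SIZE_PERCENT and converts to ((50 << 21) * 1024 / 100) >> 21
 * == 512 huge pages; "size=512M" arrives as size_opt == 536870912 with
 * SIZE_STD and converts to 536870912 >> 21 == 256 huge pages.
 */
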
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, &hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalf(fc, "hugetlbfs: Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}

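/*
 * Example for hugetlbfs_parse_param() above: "pagesize=2M" is parsed by
 * memparse() into ps == 2097152 and mapped to the matching hstate, while
 * "pagesize=3M" matches no hstate and fails with
 * "Unsupported page size 3 MB".
 */
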
/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must not be greater.
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= &hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

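/*
 * Typical usage of the mount parameters declared above, e.g.:
 *
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,mode=1777 \
 *		none /mnt/huge
 */
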
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one fewer huge page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}

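/*
 * The "%uK" argument in mount_one_hugetlbfs() above is the huge page
 * size in kilobytes: e.g. for a 2MB hstate with 4K base pages,
 * h->order == 9, so 1U << (9 + 12 - 10) == 2048K.
 */
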
static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)