// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;

	generic_fillattr(mnt_userns, inode, stat);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	return 0;
}

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

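	/*
	 * Walk [valid, new_valid) page by page: runs that are still sparse
	 * need no zeroing (a hole reads as zeroes anyway), while allocated
	 * ranges are zeroed through the page cache so that reads past the
	 * old valid size never see stale data.
	 */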
	for (;;) {
		u32 zerofrom, len;
		struct page *page;
		void *fsdata;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
						  NULL);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				loff_t vbo = (loff_t)vcn << bits;
				loff_t to = vbo + ((loff_t)clen << bits);

				if (to <= new_valid) {
					ni->i_valid = to;
					pos = to;
					goto next;
				}

				if (vbo < pos) {
					pos = vbo;
				} else {
					to = (new_valid >> bits) << bits;
					if (pos < to) {
						ni->i_valid = to;
						pos = to;
						goto next;
					}
				}
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
					    &fsdata);
		if (err)
			goto out;

		zero_user_segment(page, zerofrom, PAGE_SIZE);

		/* pagecache_write_end() puts the page in all cases. */
		err = pagecache_write_end(file, mapping, pos, len, len, page,
					  fsdata);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 z_start = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, z_end;
	sector_t iblock;
	struct page *page;

	for (; idx < idx_end; idx += 1, z_start = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
							: PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		page = find_or_create_page(mapping, idx,
					   mapping_gfp_constraint(mapping,
								  ~__GFP_FS));
		if (!page)
			return -ENOMEM;

		if (!page_has_buffers(page))
			create_empty_buffers(page, blocksize, 0);

		bh = head = page_buffers(page);
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= z_start || bh_off >= z_end)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (PageUptodate(page))
				set_buffer_uptodate(bh);

			if (!buffer_uptodate(bh)) {
				lock_buffer(bh);
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(REQ_OP_READ, 0, bh);

				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					unlock_page(page);
					put_page(page);
					err = -EIO;
					goto out;
				}
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		zero_user_segment(page, z_start, z_end);

		unlock_page(page);
		put_page(page);
		cond_resched();
	}
out:
	mark_inode_dirty(inode);
	return err;
}

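/*
 * Illustration of the per-page arithmetic in ntfs_zero_range() above,
 * assuming 4K pages (the numbers are purely illustrative):
 * ntfs_zero_range(inode, 5000, 13000) touches pages 1..3; page 1 is
 * zeroed in [904, 4096), page 2 in [0, 4096) and page 3 in [0, 712).
 */
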
/*
 * ntfs_sparse_cluster - Helper function to zero newly allocated clusters.
 *
 * NOTE: 512 <= cluster size <= 2M
 */
void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
			 CLST len)
{
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	u64 vbo = (u64)vcn << sbi->cluster_bits;
	u64 bytes = (u64)len << sbi->cluster_bits;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx0 = page0 ? page0->index : -1;
	loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
	loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
	pgoff_t idx = vbo_clst >> PAGE_SHIFT;
	u32 from = vbo_clst & (PAGE_SIZE - 1);
	pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	u32 to;
	bool partial;
	struct page *page;

	for (; idx < idx_end; idx += 1, from = 0) {
		page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);

		if (!page)
			continue;

		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > end ? (end - page_off)
						  : PAGE_SIZE;
		partial = false;

		if ((from || PAGE_SIZE != to) &&
		    likely(!page_has_buffers(page))) {
			create_empty_buffers(page, blocksize, 0);
		}

		if (page_has_buffers(page)) {
			struct buffer_head *head, *bh;
			u32 bh_off = 0;

			bh = head = page_buffers(page);
			do {
				u32 bh_next = bh_off + blocksize;

				if (from <= bh_off && bh_next <= to) {
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
				} else if (!buffer_uptodate(bh)) {
					partial = true;
				}
				bh_off = bh_next;
			} while (head != (bh = bh->b_this_page));
		}

		zero_user_segment(page, from, to);

		if (!partial) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
			set_page_dirty(page);
		}

		if (idx != idx0) {
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
	mark_inode_dirty(inode);
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new);
				if (err)
					goto out;

				if (!new)
					continue;
				ntfs_sparse_cluster(inode, NULL, vcn, 1);
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}

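/*
 * ntfs_extend - Extend the file with zeroes as needed.
 *
 * Grows i_size to 'pos + count' and, when 'file' is given and the write
 * starts beyond the initialized size, zeroes [i_valid, pos) first. For
 * sync inodes the affected range is written back immediately.
 */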
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark the rw ntfs volume as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
		inode->i_size = end;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

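/*
 * ntfs_truncate - Shrink a regular file to 'new_size'.
 *
 * Called from ntfs3_setattr() for ATTR_SIZE when the new size does not
 * exceed the old one; it trims the on-disk allocation and clamps the
 * initialized size accordingly.
 */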
static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	ni_lock(ni);

	truncate_setsize(inode, new_size);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode->i_ctime = inode->i_mtime = current_time(inode);
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which is called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set,
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file->f_mapping->host;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, PAGE_SIZE);
	loff_t i_size;
	int err;

	/* No support for directories. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		if (!(mode & FALLOC_FL_KEEP_SIZE)) {
			err = -EINVAL;
			goto out;
		}

		err = filemap_write_and_wait_range(inode->i_mapping, vbo,
						   end - 1);
		if (err)
			goto out;

		err = filemap_write_and_wait_range(inode->i_mapping, end,
						   LLONG_MAX);
		if (err)
			goto out;

		inode_dio_wait(inode);

		truncate_pagecache(inode, vbo_down);

		if (!is_sparsed(ni) && !is_compressed(ni)) {
			/*
			 * Normal file, can't make a hole.
			 * TODO: Try to find a way to save info about the hole.
			 */
			err = -EOPNOTSUPP;
			goto out;
		}

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process the unaligned punch. */
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole. */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
			err = -EINVAL;
			goto out;
		}

		/*
		 * Write out the tail of the last page before the removed
		 * range, since it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
						   vbo);
		if (err)
			goto out;

		/*
		 * Write out the data that will be shifted, so it is preserved
		 * when the page cache is discarded below.
		 */
		err = filemap_write_and_wait_range(inode->i_mapping, end,
						   LLONG_MAX);
		if (err)
			goto out;

		/* Wait for existing dio to complete. */
		inode_dio_wait(inode);

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
	} else {
		/*
		 * Normal file: Allocate clusters, do not change 'valid' size.
		 */
		loff_t new_size = max(end, i_size);

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		err = ntfs_set_size(inode, new_size);
		if (err)
			goto out;

		if (is_sparsed(ni) || is_compressed(ni)) {
			CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
			CLST vcn = vbo >> sbi->cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST lcn, clen;
			bool new;

			/*
			 * Allocate, but do not zero, new clusters (see the
			 * comment below). This has a security cost: stale
			 * on-disk data in the unzeroed clusters becomes
			 * readable. Zeroing these clusters may take too long.
			 * Maybe we should check here for root rights?
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new);
				if (err)
					goto out;
				if (!new || vcn >= vcn_v)
					continue;

				/*
				 * Unwritten area.
				 * NTFS is not able to store several unwritten
				 * areas, so use 'ntfs_sparse_cluster' to zero
				 * newly allocated clusters.
				 *
				 * Dangerous case:
				 * 1G of sparsed clusters + 1 cluster of data =>
				 * valid_size == 1G + 1 cluster;
				 * fallocate(1G) would then zero the whole 1G,
				 * which can take very long.
				 * xfstests 016/086 fail without
				 * 'ntfs_sparse_cluster'.
				 */
				ntfs_sparse_cluster(inode, NULL, vcn,
						    min(vcn_v - vcn, clen));
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* 'true' - Keep preallocated space. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
		}
	}

out:
	if (err == -EFBIG)
		err = -ENOSPC;

	if (!err) {
		inode->i_ctime = inode->i_mtime = current_time(inode);
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

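/*
 * Illustration of the unaligned punch-hole handling above, with purely
 * illustrative numbers: for frame_size == 16K, vbo == 5000 and
 * len == 45000 (end == 50000), mask == 16383, so vbo_a == 16384 and
 * end_a == 49152. The head [5000, 16384) and tail [49152, 50000) are
 * zeroed via ntfs_zero_range(), and only the frame-aligned middle
 * [16384, 49152) is punched with attr_punch_hole().
 */
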
/*
 * ntfs3_setattr - inode_operations::setattr
 */
int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	struct super_block *sb = dentry->d_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (sbi->options->noacsrules) {
		/* "No access rules" - Force any changes of time etc. */
		attr->ia_valid |= ATTR_FORCE;
		/* and disable changing of uid/gid/mode. */
		attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
		ia_valid = attr->ia_valid;
	}

	err = setattr_prepare(mnt_userns, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);

		if (attr->ia_size <= oldsize)
			err = ntfs_truncate(inode, attr->ia_size);
		else
			err = ntfs_extend(inode, attr->ia_size, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	}

	setattr_copy(mnt_userns, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(mnt_userns, inode);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode);
	mark_inode_dirty(inode);
out:
	return err;
}

static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

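/*
 * For LZNT-compressed files, data is handled in "frames" of
 * 2^NTFS_LZNT_CUNIT clusters each (frame_size == 8K for 512-byte
 * clusters, 64K for 4K clusters, as noted in ntfs_compress_write()
 * below). A frame is always read, modified and written back as a whole,
 * so the helpers below operate on 'pages_per_frame' page-cache pages
 * at a time.
 */
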
/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct page *page;

		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page) {
			while (npages--) {
				page = pages[npages];
				unlock_page(page);
				put_page(page);
			}

			return -ENOMEM;
		}

		if (!PageUptodate(page))
			*frame_uptodate = false;

		pages[npages] = page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

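	/*
	 * A write beyond the initialized size must first bring every frame
	 * in [valid, pos) up to date: sparse frames are simply skipped,
	 * while allocated frames are zeroed from 'valid' onward and
	 * recompressed, one whole frame at a time.
	 */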
	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
					  &clen, NULL);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame = pos >> frame_bits;
		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	current->backing_dev_info = NULL;

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;

	return written;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
				: __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

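/*
 * With the 'prealloc' mount option, attr_set_size() is allowed to keep
 * clusters allocated beyond i_size (the 'keep_prealloc' argument, see
 * ntfs_truncate() above). ntfs_file_release() below drops that
 * reservation once the last writer closes the file.
 */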
/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/* If we are the last writer on the inode, drop the block reservation. */
	if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
				       atomic_read(&inode->i_writecount) == 1)) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    inode->i_size, &ni->i_valid, false, NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
	.permission	= ntfs_permission,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};
// clang-format on