// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;

	generic_fillattr(mnt_userns, inode, stat);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	return 0;
}

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

	for (;;) {
		u32 zerofrom, len;
		struct page *page;
		void *fsdata;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
						  NULL);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				loff_t vbo = (loff_t)vcn << bits;
				loff_t to = vbo + ((loff_t)clen << bits);

				if (to <= new_valid) {
					ni->i_valid = to;
					pos = to;
					goto next;
				}

				if (vbo < pos) {
					pos = vbo;
				} else {
					to = (new_valid >> bits) << bits;
					if (pos < to) {
						ni->i_valid = to;
						pos = to;
						goto next;
					}
				}
			}
		}

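		/*
		 * Zero the range through the page cache, at most one
		 * page per iteration of the outer loop.
		 */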
		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
					    &fsdata);
		if (err)
			goto out;

		zero_user_segment(page, zerofrom, PAGE_SIZE);

		/* pagecache_write_end() in any case puts the page. */
		err = pagecache_write_end(file, mapping, pos, len, len, page,
					  fsdata);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 z_start = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, z_end;
	sector_t iblock;
	struct page *page;

	for (; idx < idx_end; idx += 1, z_start = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off)
							: PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		page = find_or_create_page(mapping, idx,
					   mapping_gfp_constraint(mapping,
								  ~__GFP_FS));
		if (!page)
			return -ENOMEM;

		if (!page_has_buffers(page))
			create_empty_buffers(page, blocksize, 0);

		bh = head = page_buffers(page);
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= z_start || bh_off >= z_end)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (PageUptodate(page))
				set_buffer_uptodate(bh);

			if (!buffer_uptodate(bh)) {
				lock_buffer(bh);
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(REQ_OP_READ, 0, bh);

				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					unlock_page(page);
					put_page(page);
					err = -EIO;
					goto out;
				}
			}

			mark_buffer_dirty(bh);

		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		zero_user_segment(page, z_start, z_end);

		unlock_page(page);
		put_page(page);
		cond_resched();
	}
out:
	mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_sparse_cluster - Helper function to zero newly allocated clusters.
 *
 * NOTE: 512 <= cluster size <= 2M
 */
void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
			 CLST len)
{
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	u64 vbo = (u64)vcn << sbi->cluster_bits;
	u64 bytes = (u64)len << sbi->cluster_bits;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx0 = page0 ? page0->index : -1;
	loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
	loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
	pgoff_t idx = vbo_clst >> PAGE_SHIFT;
	u32 from = vbo_clst & (PAGE_SIZE - 1);
	pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	u32 to;
	bool partial;
	struct page *page;

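	/*
	 * Example (an illustration, assuming 4K pages and 64K clusters):
	 * vcn == 2, len == 1 covers file bytes [128K, 192K), i.e. pages
	 * 32..47, each fully zeroed (from == 0, to == PAGE_SIZE). Only
	 * with clusters smaller than a page can 'from'/'to' select part
	 * of a page.
	 */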
	for (; idx < idx_end; idx += 1, from = 0) {
		page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);

		if (!page)
			continue;

		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > end ? (end - page_off)
						  : PAGE_SIZE;
		partial = false;

		if ((from || PAGE_SIZE != to) &&
		    likely(!page_has_buffers(page))) {
			create_empty_buffers(page, blocksize, 0);
		}

		if (page_has_buffers(page)) {
			struct buffer_head *head, *bh;
			u32 bh_off = 0;

			bh = head = page_buffers(page);
			do {
				u32 bh_next = bh_off + blocksize;

				if (from <= bh_off && bh_next <= to) {
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
				} else if (!buffer_uptodate(bh)) {
					partial = true;
				}
				bh_off = bh_next;
			} while (head != (bh = bh->b_this_page));
		}

		zero_user_segment(page, from, to);

		if (!partial) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
			set_page_dirty(page);
		}

		if (idx != idx0) {
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
	mark_inode_dirty(inode);
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for the rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new);
				if (err)
					goto out;

				if (!new)
					continue;
				ntfs_sparse_cluster(inode, NULL, vcn, 1);
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}
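/*
 * ntfs_extend - Extend i_size (and optionally the initialized size).
 *
 * Grows the file to @pos + @count if needed. When @file is given and the
 * write starts beyond i_valid, the gap is zeroed first via
 * ntfs_extend_initialized_size(); compressed files skip that step here and
 * zero the gap in ntfs_compress_write() instead.
 */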
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
		inode->i_size = end;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	ni_lock(ni);

	truncate_setsize(inode, new_size);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, true, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode->i_ctime = inode->i_mtime = current_time(inode);
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file->f_mapping->host;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, PAGE_SIZE);
	loff_t i_size;
	int err;

	/* No support for directories. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/* Return an error if the mode is not supported. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

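	/*
	 * The FALLOC_FL_PUNCH_HOLE branch below punches the frame-aligned
	 * part of the range via attr_punch_hole() and falls back to
	 * ntfs_zero_range() for the unaligned head and tail. For example,
	 * with frame_size == 16K, vbo == 10K, len == 40K: [10K, 16K) and
	 * [48K, 50K) are zeroed in place, and [16K, 48K) becomes a hole.
	 */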
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		if (!(mode & FALLOC_FL_KEEP_SIZE)) {
			err = -EINVAL;
			goto out;
		}

		err = filemap_write_and_wait_range(inode->i_mapping, vbo,
						   end - 1);
		if (err)
			goto out;

		err = filemap_write_and_wait_range(inode->i_mapping, end,
						   LLONG_MAX);
		if (err)
			goto out;

		inode_dio_wait(inode);

		truncate_pagecache(inode, vbo_down);

		if (!is_sparsed(ni) && !is_compressed(ni)) {
			/*
			 * Normal file, can't make hole.
			 * TODO: Try to find a way to save info about the hole.
			 */
			err = -EOPNOTSUPP;
			goto out;
		}

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process the unaligned punch. */
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole. */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
			err = -EINVAL;
			goto out;
		}

		/*
		 * Write the tail of the last page before the removed range,
		 * since it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
						   vbo);
		if (err)
			goto out;

		/*
		 * Write out the data that will be shifted, to preserve it
		 * when the page cache is discarded below.
		 */
		err = filemap_write_and_wait_range(inode->i_mapping, end,
						   LLONG_MAX);
		if (err)
			goto out;

		/* Wait for existing dio to complete. */
		inode_dio_wait(inode);

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
	} else {
		/*
		 * Normal file: Allocate clusters, do not change 'valid' size.
		 */
		err = ntfs_set_size(inode, max(end, i_size));
		if (err)
			goto out;

		if (is_sparsed(ni) || is_compressed(ni)) {
			CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
			CLST vcn = vbo >> sbi->cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST lcn, clen;
			bool new;

			/*
			 * Allocate but do not zero new clusters (see the
			 * comment below). This breaks security: one can read
			 * unused on-disk areas. Zeroing these clusters may
			 * take too long. Maybe we should check for root
			 * rights here?
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new);
				if (err)
					goto out;
				if (!new || vcn >= vcn_v)
					continue;

				/*
				 * Unwritten area.
				 * NTFS is not able to store several unwritten
				 * areas, so call 'ntfs_sparse_cluster' to zero
				 * the newly allocated clusters.
				 *
				 * Dangerous case:
				 * 1G of sparse clusters + 1 cluster of data =>
				 * valid_size == 1G + 1 cluster;
				 * fallocate(1G) would zero the whole 1G, which
				 * can take very long.
				 * xfstest 016/086 fails without
				 * 'ntfs_sparse_cluster'.
				 */
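				/*
				 * Zero only the clusters below i_valid;
				 * reads past i_valid return zeros anyway.
				 */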
				ntfs_sparse_cluster(inode, NULL, vcn,
						    min(vcn_v - vcn, clen));
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
		}
	}

out:
	if (err == -EFBIG)
		err = -ENOSPC;

	if (!err) {
		inode->i_ctime = inode->i_mtime = current_time(inode);
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

/*
 * ntfs3_setattr - inode_operations::setattr
 */
int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	struct super_block *sb = dentry->d_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	if (sbi->options->noacsrules) {
		/* "No access rules" - force any changes of time etc. */
		attr->ia_valid |= ATTR_FORCE;
		/* ... and disallow changing uid/gid/mode. */
		attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
		ia_valid = attr->ia_valid;
	}

	err = setattr_prepare(mnt_userns, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);

		if (attr->ia_size < oldsize)
			err = ntfs_truncate(inode, attr->ia_size);
		else if (attr->ia_size > oldsize)
			err = ntfs_extend(inode, attr->ia_size, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	}

	setattr_copy(mnt_userns, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(mnt_userns, inode);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode);
	mark_inode_dirty(inode);
out:
	return err;
}

static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
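 * On failure, pages grabbed so far are unlocked and released.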
833 */ 834 static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index, 835 struct page **pages, u32 pages_per_frame, 836 bool *frame_uptodate) 837 { 838 gfp_t gfp_mask = mapping_gfp_mask(mapping); 839 u32 npages; 840 841 *frame_uptodate = true; 842 843 for (npages = 0; npages < pages_per_frame; npages++, index++) { 844 struct page *page; 845 846 page = find_or_create_page(mapping, index, gfp_mask); 847 if (!page) { 848 while (npages--) { 849 page = pages[npages]; 850 unlock_page(page); 851 put_page(page); 852 } 853 854 return -ENOMEM; 855 } 856 857 if (!PageUptodate(page)) 858 *frame_uptodate = false; 859 860 pages[npages] = page; 861 } 862 863 return 0; 864 } 865 866 /* 867 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files). 868 */ 869 static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) 870 { 871 int err; 872 struct file *file = iocb->ki_filp; 873 size_t count = iov_iter_count(from); 874 loff_t pos = iocb->ki_pos; 875 struct inode *inode = file_inode(file); 876 loff_t i_size = inode->i_size; 877 struct address_space *mapping = inode->i_mapping; 878 struct ntfs_inode *ni = ntfs_i(inode); 879 u64 valid = ni->i_valid; 880 struct ntfs_sb_info *sbi = ni->mi.sbi; 881 struct page *page, **pages = NULL; 882 size_t written = 0; 883 u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits; 884 u32 frame_size = 1u << frame_bits; 885 u32 pages_per_frame = frame_size >> PAGE_SHIFT; 886 u32 ip, off; 887 CLST frame; 888 u64 frame_vbo; 889 pgoff_t index; 890 bool frame_uptodate; 891 892 if (frame_size < PAGE_SIZE) { 893 /* 894 * frame_size == 8K if cluster 512 895 * frame_size == 64K if cluster 4096 896 */ 897 ntfs_inode_warn(inode, "page size is bigger than frame size"); 898 return -EOPNOTSUPP; 899 } 900 901 pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS); 902 if (!pages) 903 return -ENOMEM; 904 905 current->backing_dev_info = inode_to_bdi(inode); 906 err = file_remove_privs(file); 907 if (err) 908 goto out; 909 910 err = file_update_time(file); 911 if (err) 912 goto out; 913 914 /* Zero range [valid : pos). */ 915 while (valid < pos) { 916 CLST lcn, clen; 917 918 frame = valid >> frame_bits; 919 frame_vbo = valid & ~(frame_size - 1); 920 off = valid & (frame_size - 1); 921 922 err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn, 923 &clen, NULL); 924 if (err) 925 goto out; 926 927 if (lcn == SPARSE_LCN) { 928 ni->i_valid = valid = 929 frame_vbo + ((u64)clen << sbi->cluster_bits); 930 continue; 931 } 932 933 /* Load full frame. 
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
					  &clen, NULL);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame = pos >> frame_bits;
		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

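		/*
		 * Write back the whole modified frame; ni_write_frame()
		 * handles the compression of the frame data. The pages
		 * are released below whether or not that succeeds.
		 */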
		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	current->backing_dev_info = NULL;

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;

	return written;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
				: __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" files if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/*
	 * If we are the last writer on the inode, drop the block
	 * reservation.
	 */
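	/*
	 * Note: the 'false' argument to attr_set_size() below (contrast
	 * the "True - Keep preallocated" call in ntfs_fallocate) trims
	 * the allocation back to i_size.
	 */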
	if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) &&
				       atomic_read(&inode->i_writecount) == 1)) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    inode->i_size, &ni->i_valid, false, NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - file_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
	.permission	= ntfs_permission,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};
// clang-format on