// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * Regular file handling primitives for NTFS-based filesystems.
 *
 */

#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/compat.h>
#include <linux/falloc.h>
#include <linux/fiemap.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
{
	struct fstrim_range __user *user_range;
	struct fstrim_range range;
	struct block_device *dev;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = sbi->sb->s_bdev;
	if (!bdev_max_discard_sectors(dev))
		return -EOPNOTSUPP;

	user_range = (struct fstrim_range __user *)arg;
	if (copy_from_user(&range, user_range, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));

	err = ntfs_trim_fs(sbi, &range);
	if (err < 0)
		return err;

	if (copy_to_user(user_range, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;

	switch (cmd) {
	case FITRIM:
		return ntfs_ioctl_fitrim(sbi, arg);
	}
	return -ENOTTY; /* Inappropriate ioctl for device. */
}

#ifdef CONFIG_COMPAT
static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
{
	return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/*
 * ntfs_getattr - inode_operations::getattr
 */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, u32 flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_compressed(ni))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	if (is_encrypted(ni))
		stat->attributes |= STATX_ATTR_ENCRYPTED;

	stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;

	generic_fillattr(idmap, inode, stat);

	stat->result_mask |= STATX_BTIME;
	stat->btime = ni->i_crtime;
	stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */

	return 0;
}

static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

	for (;;) {
		u32 zerofrom, len;
		struct page *page;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
		if (err)
			goto out;

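		/* Zero the page tail beyond the current valid offset. */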
		zero_user_segment(page, zerofrom, PAGE_SIZE);

		/* This function in any case puts page. */
		err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}

/*
 * ntfs_zero_range - Helper function for punch_hole.
 *
 * It zeroes a range [vbo, vbo_to).
 */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = 1 << inode->i_blkbits;
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1);
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct page *page;

	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		page = find_or_create_page(mapping, idx,
					   mapping_gfp_constraint(mapping,
								  ~__GFP_FS));
		if (!page)
			return -ENOMEM;

		if (!page_has_buffers(page))
			create_empty_buffers(page, blocksize, 0);

		bh = head = page_buffers(page);
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (PageUptodate(page))
				set_buffer_uptodate(bh);

			if (!buffer_uptodate(bh)) {
				err = bh_read(bh, 0);
				if (err < 0) {
					unlock_page(page);
					put_page(page);
					goto out;
				}
			}

			mark_buffer_dirty(bh);

		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		zero_user_segment(page, from, to);

		unlock_page(page);
		put_page(page);
		cond_resched();
	}
out:
	mark_inode_dirty(inode);
	return err;
}

/*
 * ntfs_file_mmap - file_operations::mmap
 */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/*
			 * Allocate clusters for rw map.
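			 * Allocating (and zeroing) them up front means a later
			 * page fault on this writable mapping never lands on an
			 * unallocated cluster.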
			 */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		if (ni->i_valid < to) {
			if (!inode_trylock(inode)) {
				err = -EAGAIN;
				goto out;
			}
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}

static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}

static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode->i_ctime = inode->i_mtime = current_time(inode);
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}

/*
 * ntfs_fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
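 *
 * Only FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE, FALLOC_FL_COLLAPSE_RANGE
 * and FALLOC_FL_INSERT_RANGE are handled here; any other mode bits are
 * rejected with -EOPNOTSUPP.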
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process the unaligned punch. */
		mask = frame_size - 1;
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve it
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/*
		 * Write out all dirty pages.
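		 * The insert shifts data on disk, so the page cache from
		 * vbo_down onward is dropped below and re-read later.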
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
	} else {
		/* Check new size. */
		u8 cluster_bits = sbi->cluster_bits;

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may take a long time.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but do not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
		} else if (new_size > i_size) {
			inode->i_size = new_size;
		}
	}

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode->i_ctime = inode->i_mtime = current_time(inode);
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}

/*
 * ntfs3_setattr - inode_operations::setattr
 */
int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode;
	int err;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = inode->i_size;
		newsize = attr->ia_size;

		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		inode->i_size = newsize;
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/*
		 * Linux 'w' -> Windows 'ro'.
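		 * Any write bit in the new mode clears FILE_ATTRIBUTE_READONLY;
		 * a mode with no write bits sets it.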
		 */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}

static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return generic_file_read_iter(iocb, iter);
}

/*
 * ntfs_get_frame_pages
 *
 * Return: Array of locked pages.
 */
static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
				struct page **pages, u32 pages_per_frame,
				bool *frame_uptodate)
{
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	u32 npages;

	*frame_uptodate = true;

	for (npages = 0; npages < pages_per_frame; npages++, index++) {
		struct page *page;

		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page) {
			while (npages--) {
				page = pages[npages];
				unlock_page(page);
				put_page(page);
			}

			return -ENOMEM;
		}

		if (!PageUptodate(page))
			*frame_uptodate = false;

		pages[npages] = page;
	}

	return 0;
}

/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/*
	 * Zero range [valid : pos).
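	 * Work frame by frame: sparse frames only advance i_valid, mapped
	 * frames are read in if needed, zeroed from the old valid offset to
	 * the frame end, and written back via ni_write_frame().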
	 */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;

	return written;
}

/*
 * ntfs_file_write_iter - file_operations::write_iter
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/*
 * ntfs_file_open - file_operations::open
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}

/*
 * ntfs_file_release - file_operations::release
 */
static int ntfs_file_release(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	int err = 0;

	/*
	 * If we are the last writer on the inode, drop the block reservation.
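	 * Passing 'false' asks attr_set_size() not to keep the preallocation,
	 * trimming the on-disk allocation back to i_size.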
	 */
	if (sbi->options->prealloc &&
	    ((file->f_mode & FMODE_WRITE) &&
	     atomic_read(&inode->i_writecount) == 1)) {
		ni_lock(ni);
		down_write(&ni->file.run_lock);

		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
				    inode->i_size, &ni->i_valid, false, NULL);

		up_write(&ni->file.run_lock);
		ni_unlock(ni);
	}
	return err;
}

/*
 * ntfs_fiemap - inode_operations::fiemap
 */
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
	if (err)
		return err;

	ni_lock(ni);

	err = ni_fiemap(ni, fieinfo, start, len);

	ni_unlock(ni);

	return err;
}

// clang-format off
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
};

const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl	= ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};
// clang-format on