1 /* 2 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. 3 * 4 * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc. 5 * 6 * This program/include file is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License as published 8 * by the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program/include file is distributed in the hope that it will be 12 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty 13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program (in the main directory of the Linux-NTFS 18 * distribution in the file COPYING); if not, write to the Free Software 19 * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 */ 21 22 #include <linux/backing-dev.h> 23 #include <linux/buffer_head.h> 24 #include <linux/gfp.h> 25 #include <linux/pagemap.h> 26 #include <linux/pagevec.h> 27 #include <linux/sched.h> 28 #include <linux/swap.h> 29 #include <linux/uio.h> 30 #include <linux/writeback.h> 31 32 #include <asm/page.h> 33 #include <asm/uaccess.h> 34 35 #include "attrib.h" 36 #include "bitmap.h" 37 #include "inode.h" 38 #include "debug.h" 39 #include "lcnalloc.h" 40 #include "malloc.h" 41 #include "mft.h" 42 #include "ntfs.h" 43 44 /** 45 * ntfs_file_open - called when an inode is about to be opened 46 * @vi: inode to be opened 47 * @filp: file structure describing the inode 48 * 49 * Limit file size to the page cache limit on architectures where unsigned long 50 * is 32-bits. This is the most we can do for now without overflowing the page 51 * cache page index. Doing it this way means we don't run into problems because 52 * of existing too large files. It would be better to allow the user to read 53 * the beginning of the file but I doubt very much anyone is going to hit this 54 * check on a 32-bit architecture, so there is no point in adding the extra 55 * complexity required to support this. 56 * 57 * On 64-bit architectures, the check is hopefully optimized away by the 58 * compiler. 59 * 60 * After the check passes, just call generic_file_open() to do its work. 61 */ 62 static int ntfs_file_open(struct inode *vi, struct file *filp) 63 { 64 if (sizeof(unsigned long) < 8) { 65 if (i_size_read(vi) > MAX_LFS_FILESIZE) 66 return -EOVERFLOW; 67 } 68 return generic_file_open(vi, filp); 69 } 70 71 #ifdef NTFS_RW 72 73 /** 74 * ntfs_attr_extend_initialized - extend the initialized size of an attribute 75 * @ni: ntfs inode of the attribute to extend 76 * @new_init_size: requested new initialized size in bytes 77 * 78 * Extend the initialized size of an attribute described by the ntfs inode @ni 79 * to @new_init_size bytes. This involves zeroing any non-sparse space between 80 * the old initialized size and @new_init_size both in the page cache and on 81 * disk (if relevant complete pages are already uptodate in the page cache then 82 * these are simply marked dirty). 83 * 84 * As a side-effect, the file size (vfs inode->i_size) may be incremented as, 85 * in the resident attribute case, it is tied to the initialized size and, in 86 * the non-resident attribute case, it may not fall below the initialized size. 87 * 88 * Note that if the attribute is resident, we do not need to touch the page 89 * cache at all. This is because if the page cache page is not uptodate we 90 * bring it uptodate later, when doing the write to the mft record since we 91 * then already have the page mapped. And if the page is uptodate, the 92 * non-initialized region will already have been zeroed when the page was 93 * brought uptodate and the region may in fact already have been overwritten 94 * with new data via mmap() based writes, so we cannot just zero it. And since 95 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped 96 * is unspecified, we choose not to do zeroing and thus we do not need to touch 97 * the page at all. For a more detailed explanation see ntfs_truncate() in 98 * fs/ntfs/inode.c. 99 * 100 * Return 0 on success and -errno on error. In the case that an error is 101 * encountered it is possible that the initialized size will already have been 102 * incremented some way towards @new_init_size but it is guaranteed that if 103 * this is the case, the necessary zeroing will also have happened and that all 104 * metadata is self-consistent. 105 * 106 * Locking: i_mutex on the vfs inode corrseponsind to the ntfs inode @ni must be 107 * held by the caller. 108 */ 109 static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size) 110 { 111 s64 old_init_size; 112 loff_t old_i_size; 113 pgoff_t index, end_index; 114 unsigned long flags; 115 struct inode *vi = VFS_I(ni); 116 ntfs_inode *base_ni; 117 MFT_RECORD *m = NULL; 118 ATTR_RECORD *a; 119 ntfs_attr_search_ctx *ctx = NULL; 120 struct address_space *mapping; 121 struct page *page = NULL; 122 u8 *kattr; 123 int err; 124 u32 attr_len; 125 126 read_lock_irqsave(&ni->size_lock, flags); 127 old_init_size = ni->initialized_size; 128 old_i_size = i_size_read(vi); 129 BUG_ON(new_init_size > ni->allocated_size); 130 read_unlock_irqrestore(&ni->size_lock, flags); 131 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, " 132 "old_initialized_size 0x%llx, " 133 "new_initialized_size 0x%llx, i_size 0x%llx.", 134 vi->i_ino, (unsigned)le32_to_cpu(ni->type), 135 (unsigned long long)old_init_size, 136 (unsigned long long)new_init_size, old_i_size); 137 if (!NInoAttr(ni)) 138 base_ni = ni; 139 else 140 base_ni = ni->ext.base_ntfs_ino; 141 /* Use goto to reduce indentation and we need the label below anyway. */ 142 if (NInoNonResident(ni)) 143 goto do_non_resident_extend; 144 BUG_ON(old_init_size != old_i_size); 145 m = map_mft_record(base_ni); 146 if (IS_ERR(m)) { 147 err = PTR_ERR(m); 148 m = NULL; 149 goto err_out; 150 } 151 ctx = ntfs_attr_get_search_ctx(base_ni, m); 152 if (unlikely(!ctx)) { 153 err = -ENOMEM; 154 goto err_out; 155 } 156 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 157 CASE_SENSITIVE, 0, NULL, 0, ctx); 158 if (unlikely(err)) { 159 if (err == -ENOENT) 160 err = -EIO; 161 goto err_out; 162 } 163 m = ctx->mrec; 164 a = ctx->attr; 165 BUG_ON(a->non_resident); 166 /* The total length of the attribute value. */ 167 attr_len = le32_to_cpu(a->data.resident.value_length); 168 BUG_ON(old_i_size != (loff_t)attr_len); 169 /* 170 * Do the zeroing in the mft record and update the attribute size in 171 * the mft record. 172 */ 173 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset); 174 memset(kattr + attr_len, 0, new_init_size - attr_len); 175 a->data.resident.value_length = cpu_to_le32((u32)new_init_size); 176 /* Finally, update the sizes in the vfs and ntfs inodes. */ 177 write_lock_irqsave(&ni->size_lock, flags); 178 i_size_write(vi, new_init_size); 179 ni->initialized_size = new_init_size; 180 write_unlock_irqrestore(&ni->size_lock, flags); 181 goto done; 182 do_non_resident_extend: 183 /* 184 * If the new initialized size @new_init_size exceeds the current file 185 * size (vfs inode->i_size), we need to extend the file size to the 186 * new initialized size. 187 */ 188 if (new_init_size > old_i_size) { 189 m = map_mft_record(base_ni); 190 if (IS_ERR(m)) { 191 err = PTR_ERR(m); 192 m = NULL; 193 goto err_out; 194 } 195 ctx = ntfs_attr_get_search_ctx(base_ni, m); 196 if (unlikely(!ctx)) { 197 err = -ENOMEM; 198 goto err_out; 199 } 200 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 201 CASE_SENSITIVE, 0, NULL, 0, ctx); 202 if (unlikely(err)) { 203 if (err == -ENOENT) 204 err = -EIO; 205 goto err_out; 206 } 207 m = ctx->mrec; 208 a = ctx->attr; 209 BUG_ON(!a->non_resident); 210 BUG_ON(old_i_size != (loff_t) 211 sle64_to_cpu(a->data.non_resident.data_size)); 212 a->data.non_resident.data_size = cpu_to_sle64(new_init_size); 213 flush_dcache_mft_record_page(ctx->ntfs_ino); 214 mark_mft_record_dirty(ctx->ntfs_ino); 215 /* Update the file size in the vfs inode. */ 216 i_size_write(vi, new_init_size); 217 ntfs_attr_put_search_ctx(ctx); 218 ctx = NULL; 219 unmap_mft_record(base_ni); 220 m = NULL; 221 } 222 mapping = vi->i_mapping; 223 index = old_init_size >> PAGE_CACHE_SHIFT; 224 end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 225 do { 226 /* 227 * Read the page. If the page is not present, this will zero 228 * the uninitialized regions for us. 229 */ 230 page = read_mapping_page(mapping, index, NULL); 231 if (IS_ERR(page)) { 232 err = PTR_ERR(page); 233 goto init_err_out; 234 } 235 if (unlikely(PageError(page))) { 236 page_cache_release(page); 237 err = -EIO; 238 goto init_err_out; 239 } 240 /* 241 * Update the initialized size in the ntfs inode. This is 242 * enough to make ntfs_writepage() work. 243 */ 244 write_lock_irqsave(&ni->size_lock, flags); 245 ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT; 246 if (ni->initialized_size > new_init_size) 247 ni->initialized_size = new_init_size; 248 write_unlock_irqrestore(&ni->size_lock, flags); 249 /* Set the page dirty so it gets written out. */ 250 set_page_dirty(page); 251 page_cache_release(page); 252 /* 253 * Play nice with the vm and the rest of the system. This is 254 * very much needed as we can potentially be modifying the 255 * initialised size from a very small value to a really huge 256 * value, e.g. 257 * f = open(somefile, O_TRUNC); 258 * truncate(f, 10GiB); 259 * seek(f, 10GiB); 260 * write(f, 1); 261 * And this would mean we would be marking dirty hundreds of 262 * thousands of pages or as in the above example more than 263 * two and a half million pages! 264 * 265 * TODO: For sparse pages could optimize this workload by using 266 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit. This 267 * would be set in readpage for sparse pages and here we would 268 * not need to mark dirty any pages which have this bit set. 269 * The only caveat is that we have to clear the bit everywhere 270 * where we allocate any clusters that lie in the page or that 271 * contain the page. 272 * 273 * TODO: An even greater optimization would be for us to only 274 * call readpage() on pages which are not in sparse regions as 275 * determined from the runlist. This would greatly reduce the 276 * number of pages we read and make dirty in the case of sparse 277 * files. 278 */ 279 balance_dirty_pages_ratelimited(mapping); 280 cond_resched(); 281 } while (++index < end_index); 282 read_lock_irqsave(&ni->size_lock, flags); 283 BUG_ON(ni->initialized_size != new_init_size); 284 read_unlock_irqrestore(&ni->size_lock, flags); 285 /* Now bring in sync the initialized_size in the mft record. */ 286 m = map_mft_record(base_ni); 287 if (IS_ERR(m)) { 288 err = PTR_ERR(m); 289 m = NULL; 290 goto init_err_out; 291 } 292 ctx = ntfs_attr_get_search_ctx(base_ni, m); 293 if (unlikely(!ctx)) { 294 err = -ENOMEM; 295 goto init_err_out; 296 } 297 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 298 CASE_SENSITIVE, 0, NULL, 0, ctx); 299 if (unlikely(err)) { 300 if (err == -ENOENT) 301 err = -EIO; 302 goto init_err_out; 303 } 304 m = ctx->mrec; 305 a = ctx->attr; 306 BUG_ON(!a->non_resident); 307 a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size); 308 done: 309 flush_dcache_mft_record_page(ctx->ntfs_ino); 310 mark_mft_record_dirty(ctx->ntfs_ino); 311 if (ctx) 312 ntfs_attr_put_search_ctx(ctx); 313 if (m) 314 unmap_mft_record(base_ni); 315 ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.", 316 (unsigned long long)new_init_size, i_size_read(vi)); 317 return 0; 318 init_err_out: 319 write_lock_irqsave(&ni->size_lock, flags); 320 ni->initialized_size = old_init_size; 321 write_unlock_irqrestore(&ni->size_lock, flags); 322 err_out: 323 if (ctx) 324 ntfs_attr_put_search_ctx(ctx); 325 if (m) 326 unmap_mft_record(base_ni); 327 ntfs_debug("Failed. Returning error code %i.", err); 328 return err; 329 } 330 331 static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb, 332 struct iov_iter *from) 333 { 334 loff_t pos; 335 s64 end, ll; 336 ssize_t err; 337 unsigned long flags; 338 struct file *file = iocb->ki_filp; 339 struct inode *vi = file_inode(file); 340 ntfs_inode *base_ni, *ni = NTFS_I(vi); 341 ntfs_volume *vol = ni->vol; 342 343 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos " 344 "0x%llx, count 0x%zx.", vi->i_ino, 345 (unsigned)le32_to_cpu(ni->type), 346 (unsigned long long)iocb->ki_pos, 347 iov_iter_count(from)); 348 err = generic_write_checks(iocb, from); 349 if (unlikely(err <= 0)) 350 goto out; 351 /* 352 * All checks have passed. Before we start doing any writing we want 353 * to abort any totally illegal writes. 354 */ 355 BUG_ON(NInoMstProtected(ni)); 356 BUG_ON(ni->type != AT_DATA); 357 /* If file is encrypted, deny access, just like NT4. */ 358 if (NInoEncrypted(ni)) { 359 /* Only $DATA attributes can be encrypted. */ 360 /* 361 * Reminder for later: Encrypted files are _always_ 362 * non-resident so that the content can always be encrypted. 363 */ 364 ntfs_debug("Denying write access to encrypted file."); 365 err = -EACCES; 366 goto out; 367 } 368 if (NInoCompressed(ni)) { 369 /* Only unnamed $DATA attribute can be compressed. */ 370 BUG_ON(ni->name_len); 371 /* 372 * Reminder for later: If resident, the data is not actually 373 * compressed. Only on the switch to non-resident does 374 * compression kick in. This is in contrast to encrypted files 375 * (see above). 376 */ 377 ntfs_error(vi->i_sb, "Writing to compressed files is not " 378 "implemented yet. Sorry."); 379 err = -EOPNOTSUPP; 380 goto out; 381 } 382 base_ni = ni; 383 if (NInoAttr(ni)) 384 base_ni = ni->ext.base_ntfs_ino; 385 err = file_remove_privs(file); 386 if (unlikely(err)) 387 goto out; 388 /* 389 * Our ->update_time method always succeeds thus file_update_time() 390 * cannot fail either so there is no need to check the return code. 391 */ 392 file_update_time(file); 393 pos = iocb->ki_pos; 394 /* The first byte after the last cluster being written to. */ 395 end = (pos + iov_iter_count(from) + vol->cluster_size_mask) & 396 ~(u64)vol->cluster_size_mask; 397 /* 398 * If the write goes beyond the allocated size, extend the allocation 399 * to cover the whole of the write, rounded up to the nearest cluster. 400 */ 401 read_lock_irqsave(&ni->size_lock, flags); 402 ll = ni->allocated_size; 403 read_unlock_irqrestore(&ni->size_lock, flags); 404 if (end > ll) { 405 /* 406 * Extend the allocation without changing the data size. 407 * 408 * Note we ensure the allocation is big enough to at least 409 * write some data but we do not require the allocation to be 410 * complete, i.e. it may be partial. 411 */ 412 ll = ntfs_attr_extend_allocation(ni, end, -1, pos); 413 if (likely(ll >= 0)) { 414 BUG_ON(pos >= ll); 415 /* If the extension was partial truncate the write. */ 416 if (end > ll) { 417 ntfs_debug("Truncating write to inode 0x%lx, " 418 "attribute type 0x%x, because " 419 "the allocation was only " 420 "partially extended.", 421 vi->i_ino, (unsigned) 422 le32_to_cpu(ni->type)); 423 iov_iter_truncate(from, ll - pos); 424 } 425 } else { 426 err = ll; 427 read_lock_irqsave(&ni->size_lock, flags); 428 ll = ni->allocated_size; 429 read_unlock_irqrestore(&ni->size_lock, flags); 430 /* Perform a partial write if possible or fail. */ 431 if (pos < ll) { 432 ntfs_debug("Truncating write to inode 0x%lx " 433 "attribute type 0x%x, because " 434 "extending the allocation " 435 "failed (error %d).", 436 vi->i_ino, (unsigned) 437 le32_to_cpu(ni->type), 438 (int)-err); 439 iov_iter_truncate(from, ll - pos); 440 } else { 441 if (err != -ENOSPC) 442 ntfs_error(vi->i_sb, "Cannot perform " 443 "write to inode " 444 "0x%lx, attribute " 445 "type 0x%x, because " 446 "extending the " 447 "allocation failed " 448 "(error %ld).", 449 vi->i_ino, (unsigned) 450 le32_to_cpu(ni->type), 451 (long)-err); 452 else 453 ntfs_debug("Cannot perform write to " 454 "inode 0x%lx, " 455 "attribute type 0x%x, " 456 "because there is not " 457 "space left.", 458 vi->i_ino, (unsigned) 459 le32_to_cpu(ni->type)); 460 goto out; 461 } 462 } 463 } 464 /* 465 * If the write starts beyond the initialized size, extend it up to the 466 * beginning of the write and initialize all non-sparse space between 467 * the old initialized size and the new one. This automatically also 468 * increments the vfs inode->i_size to keep it above or equal to the 469 * initialized_size. 470 */ 471 read_lock_irqsave(&ni->size_lock, flags); 472 ll = ni->initialized_size; 473 read_unlock_irqrestore(&ni->size_lock, flags); 474 if (pos > ll) { 475 /* 476 * Wait for ongoing direct i/o to complete before proceeding. 477 * New direct i/o cannot start as we hold i_mutex. 478 */ 479 inode_dio_wait(vi); 480 err = ntfs_attr_extend_initialized(ni, pos); 481 if (unlikely(err < 0)) 482 ntfs_error(vi->i_sb, "Cannot perform write to inode " 483 "0x%lx, attribute type 0x%x, because " 484 "extending the initialized size " 485 "failed (error %d).", vi->i_ino, 486 (unsigned)le32_to_cpu(ni->type), 487 (int)-err); 488 } 489 out: 490 return err; 491 } 492 493 /** 494 * __ntfs_grab_cache_pages - obtain a number of locked pages 495 * @mapping: address space mapping from which to obtain page cache pages 496 * @index: starting index in @mapping at which to begin obtaining pages 497 * @nr_pages: number of page cache pages to obtain 498 * @pages: array of pages in which to return the obtained page cache pages 499 * @cached_page: allocated but as yet unused page 500 * 501 * Obtain @nr_pages locked page cache pages from the mapping @mapping and 502 * starting at index @index. 503 * 504 * If a page is newly created, add it to lru list 505 * 506 * Note, the page locks are obtained in ascending page index order. 507 */ 508 static inline int __ntfs_grab_cache_pages(struct address_space *mapping, 509 pgoff_t index, const unsigned nr_pages, struct page **pages, 510 struct page **cached_page) 511 { 512 int err, nr; 513 514 BUG_ON(!nr_pages); 515 err = nr = 0; 516 do { 517 pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK | 518 FGP_ACCESSED); 519 if (!pages[nr]) { 520 if (!*cached_page) { 521 *cached_page = page_cache_alloc(mapping); 522 if (unlikely(!*cached_page)) { 523 err = -ENOMEM; 524 goto err_out; 525 } 526 } 527 err = add_to_page_cache_lru(*cached_page, mapping, 528 index, 529 mapping_gfp_constraint(mapping, GFP_KERNEL)); 530 if (unlikely(err)) { 531 if (err == -EEXIST) 532 continue; 533 goto err_out; 534 } 535 pages[nr] = *cached_page; 536 *cached_page = NULL; 537 } 538 index++; 539 nr++; 540 } while (nr < nr_pages); 541 out: 542 return err; 543 err_out: 544 while (nr > 0) { 545 unlock_page(pages[--nr]); 546 page_cache_release(pages[nr]); 547 } 548 goto out; 549 } 550 551 static inline int ntfs_submit_bh_for_read(struct buffer_head *bh) 552 { 553 lock_buffer(bh); 554 get_bh(bh); 555 bh->b_end_io = end_buffer_read_sync; 556 return submit_bh(READ, bh); 557 } 558 559 /** 560 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data 561 * @pages: array of destination pages 562 * @nr_pages: number of pages in @pages 563 * @pos: byte position in file at which the write begins 564 * @bytes: number of bytes to be written 565 * 566 * This is called for non-resident attributes from ntfs_file_buffered_write() 567 * with i_mutex held on the inode (@pages[0]->mapping->host). There are 568 * @nr_pages pages in @pages which are locked but not kmap()ped. The source 569 * data has not yet been copied into the @pages. 570 * 571 * Need to fill any holes with actual clusters, allocate buffers if necessary, 572 * ensure all the buffers are mapped, and bring uptodate any buffers that are 573 * only partially being written to. 574 * 575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is 576 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside 577 * the same cluster and that they are the entirety of that cluster, and that 578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole. 579 * 580 * i_size is not to be modified yet. 581 * 582 * Return 0 on success or -errno on error. 583 */ 584 static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, 585 unsigned nr_pages, s64 pos, size_t bytes) 586 { 587 VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend; 588 LCN lcn; 589 s64 bh_pos, vcn_len, end, initialized_size; 590 sector_t lcn_block; 591 struct page *page; 592 struct inode *vi; 593 ntfs_inode *ni, *base_ni = NULL; 594 ntfs_volume *vol; 595 runlist_element *rl, *rl2; 596 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; 597 ntfs_attr_search_ctx *ctx = NULL; 598 MFT_RECORD *m = NULL; 599 ATTR_RECORD *a = NULL; 600 unsigned long flags; 601 u32 attr_rec_len = 0; 602 unsigned blocksize, u; 603 int err, mp_size; 604 bool rl_write_locked, was_hole, is_retry; 605 unsigned char blocksize_bits; 606 struct { 607 u8 runlist_merged:1; 608 u8 mft_attr_mapped:1; 609 u8 mp_rebuilt:1; 610 u8 attr_switched:1; 611 } status = { 0, 0, 0, 0 }; 612 613 BUG_ON(!nr_pages); 614 BUG_ON(!pages); 615 BUG_ON(!*pages); 616 vi = pages[0]->mapping->host; 617 ni = NTFS_I(vi); 618 vol = ni->vol; 619 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page " 620 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.", 621 vi->i_ino, ni->type, pages[0]->index, nr_pages, 622 (long long)pos, bytes); 623 blocksize = vol->sb->s_blocksize; 624 blocksize_bits = vol->sb->s_blocksize_bits; 625 u = 0; 626 do { 627 page = pages[u]; 628 BUG_ON(!page); 629 /* 630 * create_empty_buffers() will create uptodate/dirty buffers if 631 * the page is uptodate/dirty. 632 */ 633 if (!page_has_buffers(page)) { 634 create_empty_buffers(page, blocksize, 0); 635 if (unlikely(!page_has_buffers(page))) 636 return -ENOMEM; 637 } 638 } while (++u < nr_pages); 639 rl_write_locked = false; 640 rl = NULL; 641 err = 0; 642 vcn = lcn = -1; 643 vcn_len = 0; 644 lcn_block = -1; 645 was_hole = false; 646 cpos = pos >> vol->cluster_size_bits; 647 end = pos + bytes; 648 cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits; 649 /* 650 * Loop over each page and for each page over each buffer. Use goto to 651 * reduce indentation. 652 */ 653 u = 0; 654 do_next_page: 655 page = pages[u]; 656 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 657 bh = head = page_buffers(page); 658 do { 659 VCN cdelta; 660 s64 bh_end; 661 unsigned bh_cofs; 662 663 /* Clear buffer_new on all buffers to reinitialise state. */ 664 if (buffer_new(bh)) 665 clear_buffer_new(bh); 666 bh_end = bh_pos + blocksize; 667 bh_cpos = bh_pos >> vol->cluster_size_bits; 668 bh_cofs = bh_pos & vol->cluster_size_mask; 669 if (buffer_mapped(bh)) { 670 /* 671 * The buffer is already mapped. If it is uptodate, 672 * ignore it. 673 */ 674 if (buffer_uptodate(bh)) 675 continue; 676 /* 677 * The buffer is not uptodate. If the page is uptodate 678 * set the buffer uptodate and otherwise ignore it. 679 */ 680 if (PageUptodate(page)) { 681 set_buffer_uptodate(bh); 682 continue; 683 } 684 /* 685 * Neither the page nor the buffer are uptodate. If 686 * the buffer is only partially being written to, we 687 * need to read it in before the write, i.e. now. 688 */ 689 if ((bh_pos < pos && bh_end > pos) || 690 (bh_pos < end && bh_end > end)) { 691 /* 692 * If the buffer is fully or partially within 693 * the initialized size, do an actual read. 694 * Otherwise, simply zero the buffer. 695 */ 696 read_lock_irqsave(&ni->size_lock, flags); 697 initialized_size = ni->initialized_size; 698 read_unlock_irqrestore(&ni->size_lock, flags); 699 if (bh_pos < initialized_size) { 700 ntfs_submit_bh_for_read(bh); 701 *wait_bh++ = bh; 702 } else { 703 zero_user(page, bh_offset(bh), 704 blocksize); 705 set_buffer_uptodate(bh); 706 } 707 } 708 continue; 709 } 710 /* Unmapped buffer. Need to map it. */ 711 bh->b_bdev = vol->sb->s_bdev; 712 /* 713 * If the current buffer is in the same clusters as the map 714 * cache, there is no need to check the runlist again. The 715 * map cache is made up of @vcn, which is the first cached file 716 * cluster, @vcn_len which is the number of cached file 717 * clusters, @lcn is the device cluster corresponding to @vcn, 718 * and @lcn_block is the block number corresponding to @lcn. 719 */ 720 cdelta = bh_cpos - vcn; 721 if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) { 722 map_buffer_cached: 723 BUG_ON(lcn < 0); 724 bh->b_blocknr = lcn_block + 725 (cdelta << (vol->cluster_size_bits - 726 blocksize_bits)) + 727 (bh_cofs >> blocksize_bits); 728 set_buffer_mapped(bh); 729 /* 730 * If the page is uptodate so is the buffer. If the 731 * buffer is fully outside the write, we ignore it if 732 * it was already allocated and we mark it dirty so it 733 * gets written out if we allocated it. On the other 734 * hand, if we allocated the buffer but we are not 735 * marking it dirty we set buffer_new so we can do 736 * error recovery. 737 */ 738 if (PageUptodate(page)) { 739 if (!buffer_uptodate(bh)) 740 set_buffer_uptodate(bh); 741 if (unlikely(was_hole)) { 742 /* We allocated the buffer. */ 743 unmap_underlying_metadata(bh->b_bdev, 744 bh->b_blocknr); 745 if (bh_end <= pos || bh_pos >= end) 746 mark_buffer_dirty(bh); 747 else 748 set_buffer_new(bh); 749 } 750 continue; 751 } 752 /* Page is _not_ uptodate. */ 753 if (likely(!was_hole)) { 754 /* 755 * Buffer was already allocated. If it is not 756 * uptodate and is only partially being written 757 * to, we need to read it in before the write, 758 * i.e. now. 759 */ 760 if (!buffer_uptodate(bh) && bh_pos < end && 761 bh_end > pos && 762 (bh_pos < pos || 763 bh_end > end)) { 764 /* 765 * If the buffer is fully or partially 766 * within the initialized size, do an 767 * actual read. Otherwise, simply zero 768 * the buffer. 769 */ 770 read_lock_irqsave(&ni->size_lock, 771 flags); 772 initialized_size = ni->initialized_size; 773 read_unlock_irqrestore(&ni->size_lock, 774 flags); 775 if (bh_pos < initialized_size) { 776 ntfs_submit_bh_for_read(bh); 777 *wait_bh++ = bh; 778 } else { 779 zero_user(page, bh_offset(bh), 780 blocksize); 781 set_buffer_uptodate(bh); 782 } 783 } 784 continue; 785 } 786 /* We allocated the buffer. */ 787 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); 788 /* 789 * If the buffer is fully outside the write, zero it, 790 * set it uptodate, and mark it dirty so it gets 791 * written out. If it is partially being written to, 792 * zero region surrounding the write but leave it to 793 * commit write to do anything else. Finally, if the 794 * buffer is fully being overwritten, do nothing. 795 */ 796 if (bh_end <= pos || bh_pos >= end) { 797 if (!buffer_uptodate(bh)) { 798 zero_user(page, bh_offset(bh), 799 blocksize); 800 set_buffer_uptodate(bh); 801 } 802 mark_buffer_dirty(bh); 803 continue; 804 } 805 set_buffer_new(bh); 806 if (!buffer_uptodate(bh) && 807 (bh_pos < pos || bh_end > end)) { 808 u8 *kaddr; 809 unsigned pofs; 810 811 kaddr = kmap_atomic(page); 812 if (bh_pos < pos) { 813 pofs = bh_pos & ~PAGE_CACHE_MASK; 814 memset(kaddr + pofs, 0, pos - bh_pos); 815 } 816 if (bh_end > end) { 817 pofs = end & ~PAGE_CACHE_MASK; 818 memset(kaddr + pofs, 0, bh_end - end); 819 } 820 kunmap_atomic(kaddr); 821 flush_dcache_page(page); 822 } 823 continue; 824 } 825 /* 826 * Slow path: this is the first buffer in the cluster. If it 827 * is outside allocated size and is not uptodate, zero it and 828 * set it uptodate. 829 */ 830 read_lock_irqsave(&ni->size_lock, flags); 831 initialized_size = ni->allocated_size; 832 read_unlock_irqrestore(&ni->size_lock, flags); 833 if (bh_pos > initialized_size) { 834 if (PageUptodate(page)) { 835 if (!buffer_uptodate(bh)) 836 set_buffer_uptodate(bh); 837 } else if (!buffer_uptodate(bh)) { 838 zero_user(page, bh_offset(bh), blocksize); 839 set_buffer_uptodate(bh); 840 } 841 continue; 842 } 843 is_retry = false; 844 if (!rl) { 845 down_read(&ni->runlist.lock); 846 retry_remap: 847 rl = ni->runlist.rl; 848 } 849 if (likely(rl != NULL)) { 850 /* Seek to element containing target cluster. */ 851 while (rl->length && rl[1].vcn <= bh_cpos) 852 rl++; 853 lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos); 854 if (likely(lcn >= 0)) { 855 /* 856 * Successful remap, setup the map cache and 857 * use that to deal with the buffer. 858 */ 859 was_hole = false; 860 vcn = bh_cpos; 861 vcn_len = rl[1].vcn - vcn; 862 lcn_block = lcn << (vol->cluster_size_bits - 863 blocksize_bits); 864 cdelta = 0; 865 /* 866 * If the number of remaining clusters touched 867 * by the write is smaller or equal to the 868 * number of cached clusters, unlock the 869 * runlist as the map cache will be used from 870 * now on. 871 */ 872 if (likely(vcn + vcn_len >= cend)) { 873 if (rl_write_locked) { 874 up_write(&ni->runlist.lock); 875 rl_write_locked = false; 876 } else 877 up_read(&ni->runlist.lock); 878 rl = NULL; 879 } 880 goto map_buffer_cached; 881 } 882 } else 883 lcn = LCN_RL_NOT_MAPPED; 884 /* 885 * If it is not a hole and not out of bounds, the runlist is 886 * probably unmapped so try to map it now. 887 */ 888 if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) { 889 if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) { 890 /* Attempt to map runlist. */ 891 if (!rl_write_locked) { 892 /* 893 * We need the runlist locked for 894 * writing, so if it is locked for 895 * reading relock it now and retry in 896 * case it changed whilst we dropped 897 * the lock. 898 */ 899 up_read(&ni->runlist.lock); 900 down_write(&ni->runlist.lock); 901 rl_write_locked = true; 902 goto retry_remap; 903 } 904 err = ntfs_map_runlist_nolock(ni, bh_cpos, 905 NULL); 906 if (likely(!err)) { 907 is_retry = true; 908 goto retry_remap; 909 } 910 /* 911 * If @vcn is out of bounds, pretend @lcn is 912 * LCN_ENOENT. As long as the buffer is out 913 * of bounds this will work fine. 914 */ 915 if (err == -ENOENT) { 916 lcn = LCN_ENOENT; 917 err = 0; 918 goto rl_not_mapped_enoent; 919 } 920 } else 921 err = -EIO; 922 /* Failed to map the buffer, even after retrying. */ 923 bh->b_blocknr = -1; 924 ntfs_error(vol->sb, "Failed to write to inode 0x%lx, " 925 "attribute type 0x%x, vcn 0x%llx, " 926 "vcn offset 0x%x, because its " 927 "location on disk could not be " 928 "determined%s (error code %i).", 929 ni->mft_no, ni->type, 930 (unsigned long long)bh_cpos, 931 (unsigned)bh_pos & 932 vol->cluster_size_mask, 933 is_retry ? " even after retrying" : "", 934 err); 935 break; 936 } 937 rl_not_mapped_enoent: 938 /* 939 * The buffer is in a hole or out of bounds. We need to fill 940 * the hole, unless the buffer is in a cluster which is not 941 * touched by the write, in which case we just leave the buffer 942 * unmapped. This can only happen when the cluster size is 943 * less than the page cache size. 944 */ 945 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { 946 bh_cend = (bh_end + vol->cluster_size - 1) >> 947 vol->cluster_size_bits; 948 if ((bh_cend <= cpos || bh_cpos >= cend)) { 949 bh->b_blocknr = -1; 950 /* 951 * If the buffer is uptodate we skip it. If it 952 * is not but the page is uptodate, we can set 953 * the buffer uptodate. If the page is not 954 * uptodate, we can clear the buffer and set it 955 * uptodate. Whether this is worthwhile is 956 * debatable and this could be removed. 957 */ 958 if (PageUptodate(page)) { 959 if (!buffer_uptodate(bh)) 960 set_buffer_uptodate(bh); 961 } else if (!buffer_uptodate(bh)) { 962 zero_user(page, bh_offset(bh), 963 blocksize); 964 set_buffer_uptodate(bh); 965 } 966 continue; 967 } 968 } 969 /* 970 * Out of bounds buffer is invalid if it was not really out of 971 * bounds. 972 */ 973 BUG_ON(lcn != LCN_HOLE); 974 /* 975 * We need the runlist locked for writing, so if it is locked 976 * for reading relock it now and retry in case it changed 977 * whilst we dropped the lock. 978 */ 979 BUG_ON(!rl); 980 if (!rl_write_locked) { 981 up_read(&ni->runlist.lock); 982 down_write(&ni->runlist.lock); 983 rl_write_locked = true; 984 goto retry_remap; 985 } 986 /* Find the previous last allocated cluster. */ 987 BUG_ON(rl->lcn != LCN_HOLE); 988 lcn = -1; 989 rl2 = rl; 990 while (--rl2 >= ni->runlist.rl) { 991 if (rl2->lcn >= 0) { 992 lcn = rl2->lcn + rl2->length; 993 break; 994 } 995 } 996 rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE, 997 false); 998 if (IS_ERR(rl2)) { 999 err = PTR_ERR(rl2); 1000 ntfs_debug("Failed to allocate cluster, error code %i.", 1001 err); 1002 break; 1003 } 1004 lcn = rl2->lcn; 1005 rl = ntfs_runlists_merge(ni->runlist.rl, rl2); 1006 if (IS_ERR(rl)) { 1007 err = PTR_ERR(rl); 1008 if (err != -ENOMEM) 1009 err = -EIO; 1010 if (ntfs_cluster_free_from_rl(vol, rl2)) { 1011 ntfs_error(vol->sb, "Failed to release " 1012 "allocated cluster in error " 1013 "code path. Run chkdsk to " 1014 "recover the lost cluster."); 1015 NVolSetErrors(vol); 1016 } 1017 ntfs_free(rl2); 1018 break; 1019 } 1020 ni->runlist.rl = rl; 1021 status.runlist_merged = 1; 1022 ntfs_debug("Allocated cluster, lcn 0x%llx.", 1023 (unsigned long long)lcn); 1024 /* Map and lock the mft record and get the attribute record. */ 1025 if (!NInoAttr(ni)) 1026 base_ni = ni; 1027 else 1028 base_ni = ni->ext.base_ntfs_ino; 1029 m = map_mft_record(base_ni); 1030 if (IS_ERR(m)) { 1031 err = PTR_ERR(m); 1032 break; 1033 } 1034 ctx = ntfs_attr_get_search_ctx(base_ni, m); 1035 if (unlikely(!ctx)) { 1036 err = -ENOMEM; 1037 unmap_mft_record(base_ni); 1038 break; 1039 } 1040 status.mft_attr_mapped = 1; 1041 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 1042 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx); 1043 if (unlikely(err)) { 1044 if (err == -ENOENT) 1045 err = -EIO; 1046 break; 1047 } 1048 m = ctx->mrec; 1049 a = ctx->attr; 1050 /* 1051 * Find the runlist element with which the attribute extent 1052 * starts. Note, we cannot use the _attr_ version because we 1053 * have mapped the mft record. That is ok because we know the 1054 * runlist fragment must be mapped already to have ever gotten 1055 * here, so we can just use the _rl_ version. 1056 */ 1057 vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn); 1058 rl2 = ntfs_rl_find_vcn_nolock(rl, vcn); 1059 BUG_ON(!rl2); 1060 BUG_ON(!rl2->length); 1061 BUG_ON(rl2->lcn < LCN_HOLE); 1062 highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn); 1063 /* 1064 * If @highest_vcn is zero, calculate the real highest_vcn 1065 * (which can really be zero). 1066 */ 1067 if (!highest_vcn) 1068 highest_vcn = (sle64_to_cpu( 1069 a->data.non_resident.allocated_size) >> 1070 vol->cluster_size_bits) - 1; 1071 /* 1072 * Determine the size of the mapping pairs array for the new 1073 * extent, i.e. the old extent with the hole filled. 1074 */ 1075 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn, 1076 highest_vcn); 1077 if (unlikely(mp_size <= 0)) { 1078 if (!(err = mp_size)) 1079 err = -EIO; 1080 ntfs_debug("Failed to get size for mapping pairs " 1081 "array, error code %i.", err); 1082 break; 1083 } 1084 /* 1085 * Resize the attribute record to fit the new mapping pairs 1086 * array. 1087 */ 1088 attr_rec_len = le32_to_cpu(a->length); 1089 err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu( 1090 a->data.non_resident.mapping_pairs_offset)); 1091 if (unlikely(err)) { 1092 BUG_ON(err != -ENOSPC); 1093 // TODO: Deal with this by using the current attribute 1094 // and fill it with as much of the mapping pairs 1095 // array as possible. Then loop over each attribute 1096 // extent rewriting the mapping pairs arrays as we go 1097 // along and if when we reach the end we have not 1098 // enough space, try to resize the last attribute 1099 // extent and if even that fails, add a new attribute 1100 // extent. 1101 // We could also try to resize at each step in the hope 1102 // that we will not need to rewrite every single extent. 1103 // Note, we may need to decompress some extents to fill 1104 // the runlist as we are walking the extents... 1105 ntfs_error(vol->sb, "Not enough space in the mft " 1106 "record for the extended attribute " 1107 "record. This case is not " 1108 "implemented yet."); 1109 err = -EOPNOTSUPP; 1110 break ; 1111 } 1112 status.mp_rebuilt = 1; 1113 /* 1114 * Generate the mapping pairs array directly into the attribute 1115 * record. 1116 */ 1117 err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu( 1118 a->data.non_resident.mapping_pairs_offset), 1119 mp_size, rl2, vcn, highest_vcn, NULL); 1120 if (unlikely(err)) { 1121 ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, " 1122 "attribute type 0x%x, because building " 1123 "the mapping pairs failed with error " 1124 "code %i.", vi->i_ino, 1125 (unsigned)le32_to_cpu(ni->type), err); 1126 err = -EIO; 1127 break; 1128 } 1129 /* Update the highest_vcn but only if it was not set. */ 1130 if (unlikely(!a->data.non_resident.highest_vcn)) 1131 a->data.non_resident.highest_vcn = 1132 cpu_to_sle64(highest_vcn); 1133 /* 1134 * If the attribute is sparse/compressed, update the compressed 1135 * size in the ntfs_inode structure and the attribute record. 1136 */ 1137 if (likely(NInoSparse(ni) || NInoCompressed(ni))) { 1138 /* 1139 * If we are not in the first attribute extent, switch 1140 * to it, but first ensure the changes will make it to 1141 * disk later. 1142 */ 1143 if (a->data.non_resident.lowest_vcn) { 1144 flush_dcache_mft_record_page(ctx->ntfs_ino); 1145 mark_mft_record_dirty(ctx->ntfs_ino); 1146 ntfs_attr_reinit_search_ctx(ctx); 1147 err = ntfs_attr_lookup(ni->type, ni->name, 1148 ni->name_len, CASE_SENSITIVE, 1149 0, NULL, 0, ctx); 1150 if (unlikely(err)) { 1151 status.attr_switched = 1; 1152 break; 1153 } 1154 /* @m is not used any more so do not set it. */ 1155 a = ctx->attr; 1156 } 1157 write_lock_irqsave(&ni->size_lock, flags); 1158 ni->itype.compressed.size += vol->cluster_size; 1159 a->data.non_resident.compressed_size = 1160 cpu_to_sle64(ni->itype.compressed.size); 1161 write_unlock_irqrestore(&ni->size_lock, flags); 1162 } 1163 /* Ensure the changes make it to disk. */ 1164 flush_dcache_mft_record_page(ctx->ntfs_ino); 1165 mark_mft_record_dirty(ctx->ntfs_ino); 1166 ntfs_attr_put_search_ctx(ctx); 1167 unmap_mft_record(base_ni); 1168 /* Successfully filled the hole. */ 1169 status.runlist_merged = 0; 1170 status.mft_attr_mapped = 0; 1171 status.mp_rebuilt = 0; 1172 /* Setup the map cache and use that to deal with the buffer. */ 1173 was_hole = true; 1174 vcn = bh_cpos; 1175 vcn_len = 1; 1176 lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits); 1177 cdelta = 0; 1178 /* 1179 * If the number of remaining clusters in the @pages is smaller 1180 * or equal to the number of cached clusters, unlock the 1181 * runlist as the map cache will be used from now on. 1182 */ 1183 if (likely(vcn + vcn_len >= cend)) { 1184 up_write(&ni->runlist.lock); 1185 rl_write_locked = false; 1186 rl = NULL; 1187 } 1188 goto map_buffer_cached; 1189 } while (bh_pos += blocksize, (bh = bh->b_this_page) != head); 1190 /* If there are no errors, do the next page. */ 1191 if (likely(!err && ++u < nr_pages)) 1192 goto do_next_page; 1193 /* If there are no errors, release the runlist lock if we took it. */ 1194 if (likely(!err)) { 1195 if (unlikely(rl_write_locked)) { 1196 up_write(&ni->runlist.lock); 1197 rl_write_locked = false; 1198 } else if (unlikely(rl)) 1199 up_read(&ni->runlist.lock); 1200 rl = NULL; 1201 } 1202 /* If we issued read requests, let them complete. */ 1203 read_lock_irqsave(&ni->size_lock, flags); 1204 initialized_size = ni->initialized_size; 1205 read_unlock_irqrestore(&ni->size_lock, flags); 1206 while (wait_bh > wait) { 1207 bh = *--wait_bh; 1208 wait_on_buffer(bh); 1209 if (likely(buffer_uptodate(bh))) { 1210 page = bh->b_page; 1211 bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) + 1212 bh_offset(bh); 1213 /* 1214 * If the buffer overflows the initialized size, need 1215 * to zero the overflowing region. 1216 */ 1217 if (unlikely(bh_pos + blocksize > initialized_size)) { 1218 int ofs = 0; 1219 1220 if (likely(bh_pos < initialized_size)) 1221 ofs = initialized_size - bh_pos; 1222 zero_user_segment(page, bh_offset(bh) + ofs, 1223 blocksize); 1224 } 1225 } else /* if (unlikely(!buffer_uptodate(bh))) */ 1226 err = -EIO; 1227 } 1228 if (likely(!err)) { 1229 /* Clear buffer_new on all buffers. */ 1230 u = 0; 1231 do { 1232 bh = head = page_buffers(pages[u]); 1233 do { 1234 if (buffer_new(bh)) 1235 clear_buffer_new(bh); 1236 } while ((bh = bh->b_this_page) != head); 1237 } while (++u < nr_pages); 1238 ntfs_debug("Done."); 1239 return err; 1240 } 1241 if (status.attr_switched) { 1242 /* Get back to the attribute extent we modified. */ 1243 ntfs_attr_reinit_search_ctx(ctx); 1244 if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 1245 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) { 1246 ntfs_error(vol->sb, "Failed to find required " 1247 "attribute extent of attribute in " 1248 "error code path. Run chkdsk to " 1249 "recover."); 1250 write_lock_irqsave(&ni->size_lock, flags); 1251 ni->itype.compressed.size += vol->cluster_size; 1252 write_unlock_irqrestore(&ni->size_lock, flags); 1253 flush_dcache_mft_record_page(ctx->ntfs_ino); 1254 mark_mft_record_dirty(ctx->ntfs_ino); 1255 /* 1256 * The only thing that is now wrong is the compressed 1257 * size of the base attribute extent which chkdsk 1258 * should be able to fix. 1259 */ 1260 NVolSetErrors(vol); 1261 } else { 1262 m = ctx->mrec; 1263 a = ctx->attr; 1264 status.attr_switched = 0; 1265 } 1266 } 1267 /* 1268 * If the runlist has been modified, need to restore it by punching a 1269 * hole into it and we then need to deallocate the on-disk cluster as 1270 * well. Note, we only modify the runlist if we are able to generate a 1271 * new mapping pairs array, i.e. only when the mapped attribute extent 1272 * is not switched. 1273 */ 1274 if (status.runlist_merged && !status.attr_switched) { 1275 BUG_ON(!rl_write_locked); 1276 /* Make the file cluster we allocated sparse in the runlist. */ 1277 if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) { 1278 ntfs_error(vol->sb, "Failed to punch hole into " 1279 "attribute runlist in error code " 1280 "path. Run chkdsk to recover the " 1281 "lost cluster."); 1282 NVolSetErrors(vol); 1283 } else /* if (success) */ { 1284 status.runlist_merged = 0; 1285 /* 1286 * Deallocate the on-disk cluster we allocated but only 1287 * if we succeeded in punching its vcn out of the 1288 * runlist. 1289 */ 1290 down_write(&vol->lcnbmp_lock); 1291 if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) { 1292 ntfs_error(vol->sb, "Failed to release " 1293 "allocated cluster in error " 1294 "code path. Run chkdsk to " 1295 "recover the lost cluster."); 1296 NVolSetErrors(vol); 1297 } 1298 up_write(&vol->lcnbmp_lock); 1299 } 1300 } 1301 /* 1302 * Resize the attribute record to its old size and rebuild the mapping 1303 * pairs array. Note, we only can do this if the runlist has been 1304 * restored to its old state which also implies that the mapped 1305 * attribute extent is not switched. 1306 */ 1307 if (status.mp_rebuilt && !status.runlist_merged) { 1308 if (ntfs_attr_record_resize(m, a, attr_rec_len)) { 1309 ntfs_error(vol->sb, "Failed to restore attribute " 1310 "record in error code path. Run " 1311 "chkdsk to recover."); 1312 NVolSetErrors(vol); 1313 } else /* if (success) */ { 1314 if (ntfs_mapping_pairs_build(vol, (u8*)a + 1315 le16_to_cpu(a->data.non_resident. 1316 mapping_pairs_offset), attr_rec_len - 1317 le16_to_cpu(a->data.non_resident. 1318 mapping_pairs_offset), ni->runlist.rl, 1319 vcn, highest_vcn, NULL)) { 1320 ntfs_error(vol->sb, "Failed to restore " 1321 "mapping pairs array in error " 1322 "code path. Run chkdsk to " 1323 "recover."); 1324 NVolSetErrors(vol); 1325 } 1326 flush_dcache_mft_record_page(ctx->ntfs_ino); 1327 mark_mft_record_dirty(ctx->ntfs_ino); 1328 } 1329 } 1330 /* Release the mft record and the attribute. */ 1331 if (status.mft_attr_mapped) { 1332 ntfs_attr_put_search_ctx(ctx); 1333 unmap_mft_record(base_ni); 1334 } 1335 /* Release the runlist lock. */ 1336 if (rl_write_locked) 1337 up_write(&ni->runlist.lock); 1338 else if (rl) 1339 up_read(&ni->runlist.lock); 1340 /* 1341 * Zero out any newly allocated blocks to avoid exposing stale data. 1342 * If BH_New is set, we know that the block was newly allocated above 1343 * and that it has not been fully zeroed and marked dirty yet. 1344 */ 1345 nr_pages = u; 1346 u = 0; 1347 end = bh_cpos << vol->cluster_size_bits; 1348 do { 1349 page = pages[u]; 1350 bh = head = page_buffers(page); 1351 do { 1352 if (u == nr_pages && 1353 ((s64)page->index << PAGE_CACHE_SHIFT) + 1354 bh_offset(bh) >= end) 1355 break; 1356 if (!buffer_new(bh)) 1357 continue; 1358 clear_buffer_new(bh); 1359 if (!buffer_uptodate(bh)) { 1360 if (PageUptodate(page)) 1361 set_buffer_uptodate(bh); 1362 else { 1363 zero_user(page, bh_offset(bh), 1364 blocksize); 1365 set_buffer_uptodate(bh); 1366 } 1367 } 1368 mark_buffer_dirty(bh); 1369 } while ((bh = bh->b_this_page) != head); 1370 } while (++u <= nr_pages); 1371 ntfs_error(vol->sb, "Failed. Returning error code %i.", err); 1372 return err; 1373 } 1374 1375 static inline void ntfs_flush_dcache_pages(struct page **pages, 1376 unsigned nr_pages) 1377 { 1378 BUG_ON(!nr_pages); 1379 /* 1380 * Warning: Do not do the decrement at the same time as the call to 1381 * flush_dcache_page() because it is a NULL macro on i386 and hence the 1382 * decrement never happens so the loop never terminates. 1383 */ 1384 do { 1385 --nr_pages; 1386 flush_dcache_page(pages[nr_pages]); 1387 } while (nr_pages > 0); 1388 } 1389 1390 /** 1391 * ntfs_commit_pages_after_non_resident_write - commit the received data 1392 * @pages: array of destination pages 1393 * @nr_pages: number of pages in @pages 1394 * @pos: byte position in file at which the write begins 1395 * @bytes: number of bytes to be written 1396 * 1397 * See description of ntfs_commit_pages_after_write(), below. 1398 */ 1399 static inline int ntfs_commit_pages_after_non_resident_write( 1400 struct page **pages, const unsigned nr_pages, 1401 s64 pos, size_t bytes) 1402 { 1403 s64 end, initialized_size; 1404 struct inode *vi; 1405 ntfs_inode *ni, *base_ni; 1406 struct buffer_head *bh, *head; 1407 ntfs_attr_search_ctx *ctx; 1408 MFT_RECORD *m; 1409 ATTR_RECORD *a; 1410 unsigned long flags; 1411 unsigned blocksize, u; 1412 int err; 1413 1414 vi = pages[0]->mapping->host; 1415 ni = NTFS_I(vi); 1416 blocksize = vi->i_sb->s_blocksize; 1417 end = pos + bytes; 1418 u = 0; 1419 do { 1420 s64 bh_pos; 1421 struct page *page; 1422 bool partial; 1423 1424 page = pages[u]; 1425 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; 1426 bh = head = page_buffers(page); 1427 partial = false; 1428 do { 1429 s64 bh_end; 1430 1431 bh_end = bh_pos + blocksize; 1432 if (bh_end <= pos || bh_pos >= end) { 1433 if (!buffer_uptodate(bh)) 1434 partial = true; 1435 } else { 1436 set_buffer_uptodate(bh); 1437 mark_buffer_dirty(bh); 1438 } 1439 } while (bh_pos += blocksize, (bh = bh->b_this_page) != head); 1440 /* 1441 * If all buffers are now uptodate but the page is not, set the 1442 * page uptodate. 1443 */ 1444 if (!partial && !PageUptodate(page)) 1445 SetPageUptodate(page); 1446 } while (++u < nr_pages); 1447 /* 1448 * Finally, if we do not need to update initialized_size or i_size we 1449 * are finished. 1450 */ 1451 read_lock_irqsave(&ni->size_lock, flags); 1452 initialized_size = ni->initialized_size; 1453 read_unlock_irqrestore(&ni->size_lock, flags); 1454 if (end <= initialized_size) { 1455 ntfs_debug("Done."); 1456 return 0; 1457 } 1458 /* 1459 * Update initialized_size/i_size as appropriate, both in the inode and 1460 * the mft record. 1461 */ 1462 if (!NInoAttr(ni)) 1463 base_ni = ni; 1464 else 1465 base_ni = ni->ext.base_ntfs_ino; 1466 /* Map, pin, and lock the mft record. */ 1467 m = map_mft_record(base_ni); 1468 if (IS_ERR(m)) { 1469 err = PTR_ERR(m); 1470 m = NULL; 1471 ctx = NULL; 1472 goto err_out; 1473 } 1474 BUG_ON(!NInoNonResident(ni)); 1475 ctx = ntfs_attr_get_search_ctx(base_ni, m); 1476 if (unlikely(!ctx)) { 1477 err = -ENOMEM; 1478 goto err_out; 1479 } 1480 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 1481 CASE_SENSITIVE, 0, NULL, 0, ctx); 1482 if (unlikely(err)) { 1483 if (err == -ENOENT) 1484 err = -EIO; 1485 goto err_out; 1486 } 1487 a = ctx->attr; 1488 BUG_ON(!a->non_resident); 1489 write_lock_irqsave(&ni->size_lock, flags); 1490 BUG_ON(end > ni->allocated_size); 1491 ni->initialized_size = end; 1492 a->data.non_resident.initialized_size = cpu_to_sle64(end); 1493 if (end > i_size_read(vi)) { 1494 i_size_write(vi, end); 1495 a->data.non_resident.data_size = 1496 a->data.non_resident.initialized_size; 1497 } 1498 write_unlock_irqrestore(&ni->size_lock, flags); 1499 /* Mark the mft record dirty, so it gets written back. */ 1500 flush_dcache_mft_record_page(ctx->ntfs_ino); 1501 mark_mft_record_dirty(ctx->ntfs_ino); 1502 ntfs_attr_put_search_ctx(ctx); 1503 unmap_mft_record(base_ni); 1504 ntfs_debug("Done."); 1505 return 0; 1506 err_out: 1507 if (ctx) 1508 ntfs_attr_put_search_ctx(ctx); 1509 if (m) 1510 unmap_mft_record(base_ni); 1511 ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error " 1512 "code %i).", err); 1513 if (err != -ENOMEM) 1514 NVolSetErrors(ni->vol); 1515 return err; 1516 } 1517 1518 /** 1519 * ntfs_commit_pages_after_write - commit the received data 1520 * @pages: array of destination pages 1521 * @nr_pages: number of pages in @pages 1522 * @pos: byte position in file at which the write begins 1523 * @bytes: number of bytes to be written 1524 * 1525 * This is called from ntfs_file_buffered_write() with i_mutex held on the inode 1526 * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are 1527 * locked but not kmap()ped. The source data has already been copied into the 1528 * @page. ntfs_prepare_pages_for_non_resident_write() has been called before 1529 * the data was copied (for non-resident attributes only) and it returned 1530 * success. 1531 * 1532 * Need to set uptodate and mark dirty all buffers within the boundary of the 1533 * write. If all buffers in a page are uptodate we set the page uptodate, too. 1534 * 1535 * Setting the buffers dirty ensures that they get written out later when 1536 * ntfs_writepage() is invoked by the VM. 1537 * 1538 * Finally, we need to update i_size and initialized_size as appropriate both 1539 * in the inode and the mft record. 1540 * 1541 * This is modelled after fs/buffer.c::generic_commit_write(), which marks 1542 * buffers uptodate and dirty, sets the page uptodate if all buffers in the 1543 * page are uptodate, and updates i_size if the end of io is beyond i_size. In 1544 * that case, it also marks the inode dirty. 1545 * 1546 * If things have gone as outlined in 1547 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page 1548 * content modifications here for non-resident attributes. For resident 1549 * attributes we need to do the uptodate bringing here which we combine with 1550 * the copying into the mft record which means we save one atomic kmap. 1551 * 1552 * Return 0 on success or -errno on error. 1553 */ 1554 static int ntfs_commit_pages_after_write(struct page **pages, 1555 const unsigned nr_pages, s64 pos, size_t bytes) 1556 { 1557 s64 end, initialized_size; 1558 loff_t i_size; 1559 struct inode *vi; 1560 ntfs_inode *ni, *base_ni; 1561 struct page *page; 1562 ntfs_attr_search_ctx *ctx; 1563 MFT_RECORD *m; 1564 ATTR_RECORD *a; 1565 char *kattr, *kaddr; 1566 unsigned long flags; 1567 u32 attr_len; 1568 int err; 1569 1570 BUG_ON(!nr_pages); 1571 BUG_ON(!pages); 1572 page = pages[0]; 1573 BUG_ON(!page); 1574 vi = page->mapping->host; 1575 ni = NTFS_I(vi); 1576 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page " 1577 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.", 1578 vi->i_ino, ni->type, page->index, nr_pages, 1579 (long long)pos, bytes); 1580 if (NInoNonResident(ni)) 1581 return ntfs_commit_pages_after_non_resident_write(pages, 1582 nr_pages, pos, bytes); 1583 BUG_ON(nr_pages > 1); 1584 /* 1585 * Attribute is resident, implying it is not compressed, encrypted, or 1586 * sparse. 1587 */ 1588 if (!NInoAttr(ni)) 1589 base_ni = ni; 1590 else 1591 base_ni = ni->ext.base_ntfs_ino; 1592 BUG_ON(NInoNonResident(ni)); 1593 /* Map, pin, and lock the mft record. */ 1594 m = map_mft_record(base_ni); 1595 if (IS_ERR(m)) { 1596 err = PTR_ERR(m); 1597 m = NULL; 1598 ctx = NULL; 1599 goto err_out; 1600 } 1601 ctx = ntfs_attr_get_search_ctx(base_ni, m); 1602 if (unlikely(!ctx)) { 1603 err = -ENOMEM; 1604 goto err_out; 1605 } 1606 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 1607 CASE_SENSITIVE, 0, NULL, 0, ctx); 1608 if (unlikely(err)) { 1609 if (err == -ENOENT) 1610 err = -EIO; 1611 goto err_out; 1612 } 1613 a = ctx->attr; 1614 BUG_ON(a->non_resident); 1615 /* The total length of the attribute value. */ 1616 attr_len = le32_to_cpu(a->data.resident.value_length); 1617 i_size = i_size_read(vi); 1618 BUG_ON(attr_len != i_size); 1619 BUG_ON(pos > attr_len); 1620 end = pos + bytes; 1621 BUG_ON(end > le32_to_cpu(a->length) - 1622 le16_to_cpu(a->data.resident.value_offset)); 1623 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset); 1624 kaddr = kmap_atomic(page); 1625 /* Copy the received data from the page to the mft record. */ 1626 memcpy(kattr + pos, kaddr + pos, bytes); 1627 /* Update the attribute length if necessary. */ 1628 if (end > attr_len) { 1629 attr_len = end; 1630 a->data.resident.value_length = cpu_to_le32(attr_len); 1631 } 1632 /* 1633 * If the page is not uptodate, bring the out of bounds area(s) 1634 * uptodate by copying data from the mft record to the page. 1635 */ 1636 if (!PageUptodate(page)) { 1637 if (pos > 0) 1638 memcpy(kaddr, kattr, pos); 1639 if (end < attr_len) 1640 memcpy(kaddr + end, kattr + end, attr_len - end); 1641 /* Zero the region outside the end of the attribute value. */ 1642 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1643 flush_dcache_page(page); 1644 SetPageUptodate(page); 1645 } 1646 kunmap_atomic(kaddr); 1647 /* Update initialized_size/i_size if necessary. */ 1648 read_lock_irqsave(&ni->size_lock, flags); 1649 initialized_size = ni->initialized_size; 1650 BUG_ON(end > ni->allocated_size); 1651 read_unlock_irqrestore(&ni->size_lock, flags); 1652 BUG_ON(initialized_size != i_size); 1653 if (end > initialized_size) { 1654 write_lock_irqsave(&ni->size_lock, flags); 1655 ni->initialized_size = end; 1656 i_size_write(vi, end); 1657 write_unlock_irqrestore(&ni->size_lock, flags); 1658 } 1659 /* Mark the mft record dirty, so it gets written back. */ 1660 flush_dcache_mft_record_page(ctx->ntfs_ino); 1661 mark_mft_record_dirty(ctx->ntfs_ino); 1662 ntfs_attr_put_search_ctx(ctx); 1663 unmap_mft_record(base_ni); 1664 ntfs_debug("Done."); 1665 return 0; 1666 err_out: 1667 if (err == -ENOMEM) { 1668 ntfs_warning(vi->i_sb, "Error allocating memory required to " 1669 "commit the write."); 1670 if (PageUptodate(page)) { 1671 ntfs_warning(vi->i_sb, "Page is uptodate, setting " 1672 "dirty so the write will be retried " 1673 "later on by the VM."); 1674 /* 1675 * Put the page on mapping->dirty_pages, but leave its 1676 * buffers' dirty state as-is. 1677 */ 1678 __set_page_dirty_nobuffers(page); 1679 err = 0; 1680 } else 1681 ntfs_error(vi->i_sb, "Page is not uptodate. Written " 1682 "data has been lost."); 1683 } else { 1684 ntfs_error(vi->i_sb, "Resident attribute commit write failed " 1685 "with error %i.", err); 1686 NVolSetErrors(ni->vol); 1687 } 1688 if (ctx) 1689 ntfs_attr_put_search_ctx(ctx); 1690 if (m) 1691 unmap_mft_record(base_ni); 1692 return err; 1693 } 1694 1695 /* 1696 * Copy as much as we can into the pages and return the number of bytes which 1697 * were successfully copied. If a fault is encountered then clear the pages 1698 * out to (ofs + bytes) and return the number of bytes which were copied. 1699 */ 1700 static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages, 1701 unsigned ofs, struct iov_iter *i, size_t bytes) 1702 { 1703 struct page **last_page = pages + nr_pages; 1704 size_t total = 0; 1705 struct iov_iter data = *i; 1706 unsigned len, copied; 1707 1708 do { 1709 len = PAGE_CACHE_SIZE - ofs; 1710 if (len > bytes) 1711 len = bytes; 1712 copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, 1713 len); 1714 total += copied; 1715 bytes -= copied; 1716 if (!bytes) 1717 break; 1718 iov_iter_advance(&data, copied); 1719 if (copied < len) 1720 goto err; 1721 ofs = 0; 1722 } while (++pages < last_page); 1723 out: 1724 return total; 1725 err: 1726 /* Zero the rest of the target like __copy_from_user(). */ 1727 len = PAGE_CACHE_SIZE - copied; 1728 do { 1729 if (len > bytes) 1730 len = bytes; 1731 zero_user(*pages, copied, len); 1732 bytes -= len; 1733 copied = 0; 1734 len = PAGE_CACHE_SIZE; 1735 } while (++pages < last_page); 1736 goto out; 1737 } 1738 1739 /** 1740 * ntfs_perform_write - perform buffered write to a file 1741 * @file: file to write to 1742 * @i: iov_iter with data to write 1743 * @pos: byte offset in file at which to begin writing to 1744 */ 1745 static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, 1746 loff_t pos) 1747 { 1748 struct address_space *mapping = file->f_mapping; 1749 struct inode *vi = mapping->host; 1750 ntfs_inode *ni = NTFS_I(vi); 1751 ntfs_volume *vol = ni->vol; 1752 struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER]; 1753 struct page *cached_page = NULL; 1754 VCN last_vcn; 1755 LCN lcn; 1756 size_t bytes; 1757 ssize_t status, written = 0; 1758 unsigned nr_pages; 1759 1760 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos " 1761 "0x%llx, count 0x%lx.", vi->i_ino, 1762 (unsigned)le32_to_cpu(ni->type), 1763 (unsigned long long)pos, 1764 (unsigned long)iov_iter_count(i)); 1765 /* 1766 * If a previous ntfs_truncate() failed, repeat it and abort if it 1767 * fails again. 1768 */ 1769 if (unlikely(NInoTruncateFailed(ni))) { 1770 int err; 1771 1772 inode_dio_wait(vi); 1773 err = ntfs_truncate(vi); 1774 if (err || NInoTruncateFailed(ni)) { 1775 if (!err) 1776 err = -EIO; 1777 ntfs_error(vol->sb, "Cannot perform write to inode " 1778 "0x%lx, attribute type 0x%x, because " 1779 "ntfs_truncate() failed (error code " 1780 "%i).", vi->i_ino, 1781 (unsigned)le32_to_cpu(ni->type), err); 1782 return err; 1783 } 1784 } 1785 /* 1786 * Determine the number of pages per cluster for non-resident 1787 * attributes. 1788 */ 1789 nr_pages = 1; 1790 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) 1791 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; 1792 last_vcn = -1; 1793 do { 1794 VCN vcn; 1795 pgoff_t idx, start_idx; 1796 unsigned ofs, do_pages, u; 1797 size_t copied; 1798 1799 start_idx = idx = pos >> PAGE_CACHE_SHIFT; 1800 ofs = pos & ~PAGE_CACHE_MASK; 1801 bytes = PAGE_CACHE_SIZE - ofs; 1802 do_pages = 1; 1803 if (nr_pages > 1) { 1804 vcn = pos >> vol->cluster_size_bits; 1805 if (vcn != last_vcn) { 1806 last_vcn = vcn; 1807 /* 1808 * Get the lcn of the vcn the write is in. If 1809 * it is a hole, need to lock down all pages in 1810 * the cluster. 1811 */ 1812 down_read(&ni->runlist.lock); 1813 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >> 1814 vol->cluster_size_bits, false); 1815 up_read(&ni->runlist.lock); 1816 if (unlikely(lcn < LCN_HOLE)) { 1817 if (lcn == LCN_ENOMEM) 1818 status = -ENOMEM; 1819 else { 1820 status = -EIO; 1821 ntfs_error(vol->sb, "Cannot " 1822 "perform write to " 1823 "inode 0x%lx, " 1824 "attribute type 0x%x, " 1825 "because the attribute " 1826 "is corrupt.", 1827 vi->i_ino, (unsigned) 1828 le32_to_cpu(ni->type)); 1829 } 1830 break; 1831 } 1832 if (lcn == LCN_HOLE) { 1833 start_idx = (pos & ~(s64) 1834 vol->cluster_size_mask) 1835 >> PAGE_CACHE_SHIFT; 1836 bytes = vol->cluster_size - (pos & 1837 vol->cluster_size_mask); 1838 do_pages = nr_pages; 1839 } 1840 } 1841 } 1842 if (bytes > iov_iter_count(i)) 1843 bytes = iov_iter_count(i); 1844 again: 1845 /* 1846 * Bring in the user page(s) that we will copy from _first_. 1847 * Otherwise there is a nasty deadlock on copying from the same 1848 * page(s) as we are writing to, without it/them being marked 1849 * up-to-date. Note, at present there is nothing to stop the 1850 * pages being swapped out between us bringing them into memory 1851 * and doing the actual copying. 1852 */ 1853 if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) { 1854 status = -EFAULT; 1855 break; 1856 } 1857 /* Get and lock @do_pages starting at index @start_idx. */ 1858 status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages, 1859 pages, &cached_page); 1860 if (unlikely(status)) 1861 break; 1862 /* 1863 * For non-resident attributes, we need to fill any holes with 1864 * actual clusters and ensure all bufferes are mapped. We also 1865 * need to bring uptodate any buffers that are only partially 1866 * being written to. 1867 */ 1868 if (NInoNonResident(ni)) { 1869 status = ntfs_prepare_pages_for_non_resident_write( 1870 pages, do_pages, pos, bytes); 1871 if (unlikely(status)) { 1872 do { 1873 unlock_page(pages[--do_pages]); 1874 page_cache_release(pages[do_pages]); 1875 } while (do_pages); 1876 break; 1877 } 1878 } 1879 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; 1880 copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, 1881 i, bytes); 1882 ntfs_flush_dcache_pages(pages + u, do_pages - u); 1883 status = 0; 1884 if (likely(copied == bytes)) { 1885 status = ntfs_commit_pages_after_write(pages, do_pages, 1886 pos, bytes); 1887 if (!status) 1888 status = bytes; 1889 } 1890 do { 1891 unlock_page(pages[--do_pages]); 1892 page_cache_release(pages[do_pages]); 1893 } while (do_pages); 1894 if (unlikely(status < 0)) 1895 break; 1896 copied = status; 1897 cond_resched(); 1898 if (unlikely(!copied)) { 1899 size_t sc; 1900 1901 /* 1902 * We failed to copy anything. Fall back to single 1903 * segment length write. 1904 * 1905 * This is needed to avoid possible livelock in the 1906 * case that all segments in the iov cannot be copied 1907 * at once without a pagefault. 1908 */ 1909 sc = iov_iter_single_seg_count(i); 1910 if (bytes > sc) 1911 bytes = sc; 1912 goto again; 1913 } 1914 iov_iter_advance(i, copied); 1915 pos += copied; 1916 written += copied; 1917 balance_dirty_pages_ratelimited(mapping); 1918 if (fatal_signal_pending(current)) { 1919 status = -EINTR; 1920 break; 1921 } 1922 } while (iov_iter_count(i)); 1923 if (cached_page) 1924 page_cache_release(cached_page); 1925 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", 1926 written ? "written" : "status", (unsigned long)written, 1927 (long)status); 1928 return written ? written : status; 1929 } 1930 1931 /** 1932 * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock() 1933 * @iocb: IO state structure 1934 * @from: iov_iter with data to write 1935 * 1936 * Basically the same as generic_file_write_iter() except that it ends up 1937 * up calling ntfs_perform_write() instead of generic_perform_write() and that 1938 * O_DIRECT is not implemented. 1939 */ 1940 static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 1941 { 1942 struct file *file = iocb->ki_filp; 1943 struct inode *vi = file_inode(file); 1944 ssize_t written = 0; 1945 ssize_t err; 1946 1947 mutex_lock(&vi->i_mutex); 1948 /* We can write back this queue in page reclaim. */ 1949 current->backing_dev_info = inode_to_bdi(vi); 1950 err = ntfs_prepare_file_for_write(iocb, from); 1951 if (iov_iter_count(from) && !err) 1952 written = ntfs_perform_write(file, from, iocb->ki_pos); 1953 current->backing_dev_info = NULL; 1954 mutex_unlock(&vi->i_mutex); 1955 if (likely(written > 0)) { 1956 err = generic_write_sync(file, iocb->ki_pos, written); 1957 if (err < 0) 1958 written = 0; 1959 } 1960 iocb->ki_pos += written; 1961 return written ? written : err; 1962 } 1963 1964 /** 1965 * ntfs_file_fsync - sync a file to disk 1966 * @filp: file to be synced 1967 * @datasync: if non-zero only flush user data and not metadata 1968 * 1969 * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync 1970 * system calls. This function is inspired by fs/buffer.c::file_fsync(). 1971 * 1972 * If @datasync is false, write the mft record and all associated extent mft 1973 * records as well as the $DATA attribute and then sync the block device. 1974 * 1975 * If @datasync is true and the attribute is non-resident, we skip the writing 1976 * of the mft record and all associated extent mft records (this might still 1977 * happen due to the write_inode_now() call). 1978 * 1979 * Also, if @datasync is true, we do not wait on the inode to be written out 1980 * but we always wait on the page cache pages to be written out. 1981 * 1982 * Locking: Caller must hold i_mutex on the inode. 1983 * 1984 * TODO: We should probably also write all attribute/index inodes associated 1985 * with this inode but since we have no simple way of getting to them we ignore 1986 * this problem for now. 1987 */ 1988 static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end, 1989 int datasync) 1990 { 1991 struct inode *vi = filp->f_mapping->host; 1992 int err, ret = 0; 1993 1994 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); 1995 1996 err = filemap_write_and_wait_range(vi->i_mapping, start, end); 1997 if (err) 1998 return err; 1999 mutex_lock(&vi->i_mutex); 2000 2001 BUG_ON(S_ISDIR(vi->i_mode)); 2002 if (!datasync || !NInoNonResident(NTFS_I(vi))) 2003 ret = __ntfs_write_inode(vi, 1); 2004 write_inode_now(vi, !datasync); 2005 /* 2006 * NOTE: If we were to use mapping->private_list (see ext2 and 2007 * fs/buffer.c) for dirty blocks then we could optimize the below to be 2008 * sync_mapping_buffers(vi->i_mapping). 2009 */ 2010 err = sync_blockdev(vi->i_sb->s_bdev); 2011 if (unlikely(err && !ret)) 2012 ret = err; 2013 if (likely(!ret)) 2014 ntfs_debug("Done."); 2015 else 2016 ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error " 2017 "%u.", datasync ? "data" : "", vi->i_ino, -ret); 2018 mutex_unlock(&vi->i_mutex); 2019 return ret; 2020 } 2021 2022 #endif /* NTFS_RW */ 2023 2024 const struct file_operations ntfs_file_ops = { 2025 .llseek = generic_file_llseek, 2026 .read_iter = generic_file_read_iter, 2027 #ifdef NTFS_RW 2028 .write_iter = ntfs_file_write_iter, 2029 .fsync = ntfs_file_fsync, 2030 #endif /* NTFS_RW */ 2031 .mmap = generic_file_mmap, 2032 .open = ntfs_file_open, 2033 .splice_read = generic_file_splice_read, 2034 }; 2035 2036 const struct inode_operations ntfs_file_inode_ops = { 2037 #ifdef NTFS_RW 2038 .setattr = ntfs_setattr, 2039 #endif /* NTFS_RW */ 2040 }; 2041 2042 const struct file_operations ntfs_empty_file_ops = {}; 2043 2044 const struct inode_operations ntfs_empty_inode_ops = {}; 2045