/*
 * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2006 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"

/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned
 * long is 32-bits.  This is the most we can do for now without overflowing
 * the page cache page index.  Doing it this way means we do not run into
 * problems because of existing files that are too large.  It would be better
 * to allow the user to read the beginning of the file but I doubt very much
 * anyone is going to hit this check on a 32-bit architecture, so there is no
 * point in adding the extra complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EFBIG;
	}
	return generic_file_open(vi, filp);
}

#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 * @cached_page:	store any allocated but unused page here
 * @lru_pvec:		lru-buffering pagevec of the caller
 *
 * Extend the initialized size of an attribute described by the ntfs inode
 * @ni to @new_init_size bytes.  This involves zeroing any non-sparse space
 * between the old initialized size and @new_init_size both in the page cache
 * and on disk (if relevant complete pages are already uptodate in the page
 * cache then these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized
 * size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all.
 * This is because if the page cache page is not uptodate we bring it
 * uptodate later, when doing the write to the mft record since we then
 * already have the page mapped.  And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it.  And
 * since POSIX specifies that the behaviour of resizing a file whilst it is
 * mmap()ped is unspecified, we choose not to do zeroing and thus we do not
 * need to touch the page at all.  For a more detailed explanation see
 * ntfs_truncate() in fs/ntfs/inode.c.
 *
 * @cached_page and @lru_pvec are just optimizations for dealing with multiple
 * pages.
 *
 * Return 0 on success and -errno on error.  In the case that an error is
 * encountered it is possible that the initialized size will already have
 * been incremented some way towards @new_init_size but it is guaranteed that
 * if this is the case, the necessary zeroing will also have happened and
 * that all metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must
 * be held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
		struct page **cached_page, struct pagevec *lru_pvec)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	do {
		/*
		 * Read the page.  If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode.  This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system.  This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages could optimize this workload by using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit.  This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * contain the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist.  This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed.  Returning error code %i.", err);
	return err;
}

/**
 * ntfs_fault_in_pages_readable -
 *
 * Fault a number of userspace pages into pagetables.
 *
 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
 * with more than two userspace pages as well as handling the single page
 * case elegantly.
 *
 * If you find this difficult to understand, then think of the while loop
 * being the following code, except that we do without the integer variable
 * ret:
 *
 *	do {
 *		ret = __get_user(c, uaddr);
 *		uaddr += PAGE_SIZE;
 *	} while (!ret && uaddr < end);
 *
 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
 * this is only a read and not a write, and since it is still in the same
 * page, it should not matter and this makes the code much simpler.
 */
static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
		int bytes)
{
	const char __user *end;
	volatile char c;

	/* Set @end to the first byte outside the last page we care about. */
	end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);

	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
		;
}

/**
 * ntfs_fault_in_pages_readable_iovec -
 *
 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
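 *
 * As an illustrative sketch only (this expansion does not appear in the
 * code), faulting in a write spanning two iovecs is roughly equivalent to:
 *
 *	ntfs_fault_in_pages_readable(iov[0].iov_base + iov_ofs,
 *			iov[0].iov_len - iov_ofs);
 *	ntfs_fault_in_pages_readable(iov[1].iov_base, bytes -
 *			(iov[0].iov_len - iov_ofs));
 *
 * assuming @bytes extends into the second iovec but not beyond it.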
 */
static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
		size_t iov_ofs, int bytes)
{
	do {
		const char __user *buf;
		unsigned len;

		buf = iov->iov_base + iov_ofs;
		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		ntfs_fault_in_pages_readable(buf, len);
		bytes -= len;
		iov++;
		iov_ofs = 0;
	} while (bytes);
}

/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 * @lru_pvec:	lru-buffering pagevec of caller
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec @lru_pvec.
 *
 * This is the same as mm/filemap.c::__grab_cache_page(), except that
 * @nr_pages are obtained at once instead of just one page and that 0 is
 * returned on success and -errno on error.
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page, struct pagevec *lru_pvec)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_lock_page(mapping, index);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache(*cached_page, mapping, index,
					GFP_KERNEL);
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			page_cache_get(*cached_page);
			if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}

static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}

/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host).  There are
 * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
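 *
 * For example (a sketch, not a normative description), with 512-byte blocks,
 * a write of 100 bytes at @pos 700 means the buffer covering bytes 512-1023
 * straddles both the start and the end of the write, so it must either be
 * read in first (if it lies within the initialized size) or have the bytes
 * outside 700-799 zeroed before the new data is copied in.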
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size
 * is greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely
 * inside the same cluster and that they are the entirety of that cluster,
 * and that the cluster is sparse, i.e. we need to allocate a cluster to
 * fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		struct page *page = pages[u];
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each page and for each page over each buffer.  Use goto
	 * to reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;

		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped.  If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate.  If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate.  If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
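			 * (That is, the buffer straddles @pos or @end; a
			 * buffer fully inside the write is about to be
			 * completely overwritten and a buffer fully outside
			 * it is left alone, so neither needs reading.)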
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again.  The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer.  If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it.  On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated.  If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read.  Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user_page(page,
								bh_offset(bh),
								blocksize,
								KM_USER0);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out.  If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else.  Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page, KM_USER0);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster.  If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user_page(page, bh_offset(bh), blocksize,
						KM_USER0);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters touched
				 * by the write is smaller or equal to the
				 * number of cached clusters, unlock the
				 * runlist as the map cache will be used from
				 * now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT.  As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds.  We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the
		 * buffer unmapped.  This can only happen when the cluster
		 * size is less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it.  If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate.  If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate.  Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/*
		 * Map and lock the mft record and get the attribute record.
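		 * Note, if @ni is an attribute inode, it is the mft record
		 * of the base inode that is mapped and the attribute record
		 * is then looked up from there via the search context.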
		 */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts.  Note, we cannot use the _attr_ version because we
		 * have mapped the mft record.  That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible.  Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if, when we reach the end, we do not have
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record.  This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the
		 * attribute record.
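		 * (The mapping pairs array is the compressed on-disk
		 * encoding of the runlist, so this is what rewriting the
		 * extent on disk amounts to.)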
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_page(page, bh_offset(bh) + ofs,
						blocksize - ofs, KM_USER0);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path.  Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well.  Note, we only modify the runlist if we are able to generate
	 * a new mapping pairs array, i.e. only when the mapped attribute
	 * extent is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path.  Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array.
	 * Note, we can only do this if the runlist has been restored to its
	 * old state which also implies that the mapped attribute extent is
	 * not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path.  Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path.  Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed.  Returning error code %i.", err);
	return err;
}

/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static inline size_t ntfs_copy_from_user(struct page **pages,
		unsigned nr_pages, unsigned ofs, const char __user *buf,
		size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *kaddr;
	size_t total = 0;
	unsigned len;
	int left;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
		kunmap_atomic(kaddr, KM_USER0);
		if (unlikely(left)) {
			/* Do it the slow way. */
			kaddr = kmap(*pages);
			left = __copy_from_user(kaddr + ofs, buf, len);
			kunmap(*pages);
			if (unlikely(left))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		buf += len;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += len - left;
	/*
	 * Zero the rest of the target like __copy_from_user().
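	 * This ensures no stale, uninitialized data is left in page cache
	 * pages which may later become visible to user space.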
	 */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user_page(*pages, 0, len, KM_USER0);
	}
	goto out;
}

static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
		const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
	size_t total = 0;

	while (1) {
		const char __user *buf = iov->iov_base + iov_ofs;
		unsigned len;
		size_t left;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		left = __copy_from_user_inatomic(vaddr, buf, len);
		total += len;
		bytes -= len;
		vaddr += len;
		if (unlikely(left)) {
			total -= left;
			break;
		}
		if (!bytes)
			break;
		iov++;
		iov_ofs = 0;
	}
	return total;
}

static inline void ntfs_set_next_iovec(const struct iovec **iovp,
		size_t *iov_ofsp, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t iov_ofs = *iov_ofsp;

	while (bytes) {
		unsigned len;

		len = iov->iov_len - iov_ofs;
		if (len > bytes)
			len = bytes;
		bytes -= len;
		iov_ofs += len;
		if (iov->iov_len == iov_ofs) {
			iov++;
			iov_ofs = 0;
		}
	}
	*iovp = iov;
	*iov_ofsp = iov_ofs;
}

/*
 * This has the same side-effects and return value as ntfs_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
 * single-segment behaviour.
 *
 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
 * atomic and when not atomic.  This is ok because
 * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
 * and it is ok to call this when non-atomic.  In fact, the only difference
 * between __copy_from_user_inatomic() and __copy_from_user() is that the
 * latter calls might_sleep() and the former should not zero the tail of the
 * buffer on error.  And on many architectures __copy_from_user_inatomic() is
 * just defined to __copy_from_user() so it makes no difference at all on
 * those architectures.
 */
static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
		size_t *iov_ofs, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	char *kaddr;
	size_t copied, len, total = 0;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
				*iov, *iov_ofs, len);
		kunmap_atomic(kaddr, KM_USER0);
		if (unlikely(copied != len)) {
			/* Do it the slow way. */
			kaddr = kmap(*pages);
			copied = __ntfs_copy_from_user_iovec_inatomic(kaddr +
					ofs, *iov, *iov_ofs, len);
			/*
			 * Zero the rest of the target like __copy_from_user().
			 */
			memset(kaddr + ofs + copied, 0, len - copied);
			kunmap(*pages);
			if (unlikely(copied != len))
				goto err_out;
		}
		total += len;
		bytes -= len;
		if (!bytes)
			break;
		ntfs_set_next_iovec(iov, iov_ofs, len);
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err_out:
	total += copied;
	/* Zero the rest of the target like __copy_from_user(). */
	while (++pages < last_page) {
		bytes -= len;
		if (!bytes)
			break;
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		zero_user_page(*pages, 0, len, KM_USER0);
	}
	goto out;
}

static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/*
	 * Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a NULL macro on i386 and hence
	 * the decrement never happens so the loop never terminates.
	 */
	do {
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}

/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set
		 * the page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode
	 * and the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}

/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the
 * inode (@pages[0]->mapping->host).  There are @nr_pages pages in @pages
 * which are locked but not kmap()ped.  The source data has already been
 * copied into the @page.  ntfs_prepare_pages_for_non_resident_write() has
 * been called before the data was copied (for non-resident attributes only)
 * and it returned success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of
 * the write.  If all buffers in a page are uptodate we set the page
 * uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate
 * both in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size.
 * In that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes.  For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
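 *
 * For example (sketch only), a 100-byte write at @pos equal to the current
 * attribute value length of a resident attribute is copied straight into
 * the mft record here and both the attribute value length and i_size end
 * up being incremented by 100 bytes.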
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	char *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, implying it is not compressed, encrypted,
	 * or sparse.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page, KM_USER0);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the out of bounds area(s)
	 * uptodate by copying data from the mft record to the page.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr, KM_USER0);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		unsigned long flags;

		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * Put the page on mapping->dirty_pages, but leave
			 * its buffers' dirty state as-is.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

/**
 * ntfs_file_buffered_write - write to a file via the page cache
 *
 * Locking: The vfs is holding ->i_mutex on the inode.
 */
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs,
		loff_t pos, loff_t *ppos, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *vi = mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
	struct page *cached_page = NULL;
	char __user *buf = NULL;
	s64 end, ll;
	VCN last_vcn;
	LCN lcn;
	unsigned long flags;
	size_t bytes, iov_ofs = 0;	/* Offset in the current iovec. */
	ssize_t status, written;
	unsigned nr_pages;
	int err;
	struct pagevec lru_pvec;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"pos 0x%llx, count 0x%lx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)pos, (unsigned long)count);
	if (unlikely(!count))
		return 0;
	BUG_ON(NInoMstProtected(ni));
	/*
	 * If the attribute is not an index root and it is encrypted or
	 * compressed, we cannot write to it yet.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			/*
			 * Reminder for later: Encrypted files are _always_
			 * non-resident so that the content can always be
			 * encrypted.
			 */
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		if (NInoCompressed(ni)) {
			/* Only unnamed $DATA attribute can be compressed. */
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			/*
			 * Reminder for later: If resident, the data is not
			 * actually compressed.  Only on the switch to non-
			 * resident does compression kick in.  This is in
			 * contrast to encrypted files (see above).
			 */
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not implemented yet.  Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
	if (unlikely(NInoTruncateFailed(ni))) {
		down_write(&vi->i_alloc_sem);
		err = ntfs_truncate(vi);
		up_write(&vi->i_alloc_sem);
		if (err || NInoTruncateFailed(ni)) {
			if (!err)
				err = -EIO;
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"ntfs_truncate() failed (error code "
					"%i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			return err;
		}
	}
	/* The first byte after the write. */
	end = pos + count;
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end > ll) {
		/* Extend the allocation without changing the data size. */
		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
		if (likely(ll >= 0)) {
			BUG_ON(pos >= ll);
			/* If the extension was partial, truncate the write. */
			if (end > ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"the allocation was only "
						"partially extended.",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type));
				end = ll;
				count = ll - pos;
			}
		} else {
			err = ll;
			read_lock_irqsave(&ni->size_lock, flags);
			ll = ni->allocated_size;
			read_unlock_irqrestore(&ni->size_lock, flags);
			/* Perform a partial write if possible or fail. */
			if (pos < ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"extending the allocation "
						"failed (error code %i).",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type), err);
				end = ll;
				count = ll - pos;
			} else {
				ntfs_error(vol->sb, "Cannot perform write to "
						"inode 0x%lx, attribute type "
						"0x%x, because extending the "
						"allocation failed (error "
						"code %i).", vi->i_ino,
						(unsigned)
						le32_to_cpu(ni->type), err);
				return err;
			}
		}
	}
	pagevec_init(&lru_pvec, 0);
	written = 0;
	/*
	 * If the write starts beyond the initialized size, extend it up to
	 * the beginning of the write and initialize all non-sparse space
	 * between the old initialized size and the new one.  This
	 * automatically also increments the vfs inode->i_size to keep it
	 * above or equal to the initialized_size.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (pos > ll) {
		err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
				&lru_pvec);
		if (err < 0) {
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"extending the initialized size "
					"failed (error code %i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			status = err;
			goto err_out;
		}
	}
	/*
	 * Determine the number of pages per cluster for non-resident
	 * attributes.
	 */
	nr_pages = 1;
	if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
		nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
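	/*
	 * Worked example (illustrative): with 64KiB clusters and 4KiB
	 * pages, nr_pages = 65536 >> 12 = 16, so a write landing in a hole
	 * must grab and prepare all sixteen pages of the enclosing cluster
	 * in one go.  With clusters no larger than a page, nr_pages stays
	 * 1 and each iteration of the loop below handles a single page.
	 */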
	/* Finally, perform the actual write. */
	last_vcn = -1;
	if (likely(nr_segs == 1))
		buf = iov->iov_base;
	do {
		VCN vcn;
		pgoff_t idx, start_idx;
		unsigned ofs, do_pages, u;
		size_t copied;

		start_idx = idx = pos >> PAGE_CACHE_SHIFT;
		ofs = pos & ~PAGE_CACHE_MASK;
		bytes = PAGE_CACHE_SIZE - ofs;
		do_pages = 1;
		if (nr_pages > 1) {
			vcn = pos >> vol->cluster_size_bits;
			if (vcn != last_vcn) {
				last_vcn = vcn;
				/*
				 * Get the lcn of the vcn the write is in.
				 * If it is a hole, need to lock down all
				 * pages in the cluster.
				 */
				down_read(&ni->runlist.lock);
				lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
						vol->cluster_size_bits, false);
				up_read(&ni->runlist.lock);
				if (unlikely(lcn < LCN_HOLE)) {
					status = -EIO;
					if (lcn == LCN_ENOMEM)
						status = -ENOMEM;
					else
						ntfs_error(vol->sb, "Cannot "
							"perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because the "
							"attribute is "
							"corrupt.", vi->i_ino,
							(unsigned)
							le32_to_cpu(ni->type));
					break;
				}
				if (lcn == LCN_HOLE) {
					start_idx = (pos & ~(s64)
							vol->cluster_size_mask)
							>> PAGE_CACHE_SHIFT;
					bytes = vol->cluster_size - (pos &
							vol->cluster_size_mask);
					do_pages = nr_pages;
				}
			}
		}
		if (bytes > count)
			bytes = count;
		/*
		 * Bring in the user page(s) that we will copy from _first_.
		 * Otherwise there is a nasty deadlock on copying from the
		 * same page(s) as we are writing to, without it/them being
		 * marked up-to-date.  Note, at present there is nothing to
		 * stop the pages being swapped out between us bringing them
		 * into memory and doing the actual copying.
		 */
		if (likely(nr_segs == 1))
			ntfs_fault_in_pages_readable(buf, bytes);
		else
			ntfs_fault_in_pages_readable_iovec(iov, iov_ofs,
					bytes);
		/* Get and lock @do_pages starting at index @start_idx. */
		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
				pages, &cached_page, &lru_pvec);
		if (unlikely(status))
			break;
		/*
		 * For non-resident attributes, we need to fill any holes
		 * with actual clusters and ensure all buffers are mapped.
		 * We also need to bring uptodate any buffers that are only
		 * partially being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				loff_t i_size;

				do {
					unlock_page(pages[--do_pages]);
					page_cache_release(pages[do_pages]);
				} while (do_pages);
				/*
				 * The write preparation may have
				 * instantiated allocated space outside
				 * i_size.  Trim this off again.  We can
				 * ignore any errors in this case as we will
				 * just be wasting a bit of allocated space,
				 * which is not a disaster.
				 */
				i_size = i_size_read(vi);
				if (pos + bytes > i_size)
					vmtruncate(vi, i_size);
				break;
			}
		}
		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
		if (likely(nr_segs == 1)) {
			copied = ntfs_copy_from_user(pages + u, do_pages - u,
					ofs, buf, bytes);
			buf += copied;
		} else
			copied = ntfs_copy_from_user_iovec(pages + u,
					do_pages - u, ofs, &iov, &iov_ofs,
					bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = ntfs_commit_pages_after_write(pages, do_pages, pos,
				bytes);
		if (likely(!status)) {
			written += copied;
			count -= copied;
			pos += copied;
			if (unlikely(copied != bytes))
				status = -EFAULT;
		}
		do {
			unlock_page(pages[--do_pages]);
			mark_page_accessed(pages[do_pages]);
			page_cache_release(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status))
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
err_out:
	*ppos = pos;
	if (cached_page)
		page_cache_release(cached_page);
	/* For now, when the user asks for O_SYNC, we actually give O_DSYNC. */
	if (likely(!status)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(vi))) {
			if (!mapping->a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(vi, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}
	pagevec_lru_add(&lru_pvec);
	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}

/**
 * ntfs_file_aio_write_nolock - write to a file; the caller must hold i_mutex
 */
static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos;
	size_t count;		/* after file limit checks */
	ssize_t written, err;

	count = 0;
	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;
	pos = *ppos;
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* We can write back this queue in page reclaim. */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (!count)
		goto out;
	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;
	file_update_time(file);
	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
			count);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
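
/*
 * Illustrative sketch (disabled, userspace, not part of this file): how a
 * caller typically consumes the short write convention implemented above,
 * where a partially successful write returns the number of bytes written
 * and the error is only reported once no forward progress is made.
 */
#if 0
#include <unistd.h>
#include <errno.h>

static ssize_t write_all(int fd, const char *buf, size_t count)
{
	size_t done = 0;

	while (done < count) {
		ssize_t n = write(fd, buf + done, count - done);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* Interrupted, retry. */
			return done ? (ssize_t)done : -1;
		}
		done += n;
	}
	return done;
}
#endif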

/**
 * ntfs_file_aio_write - write to a file, taking i_mutex around the write
 */
static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);
	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

/**
 * ntfs_file_writev - vectored write to a file
 *
 * Basically the same as generic_file_writev() except that it ends up
 * calling ntfs_file_aio_write_nolock() instead of
 * __generic_file_aio_write_nolock().
 */
static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct kiocb kiocb;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	init_sync_kiocb(&kiocb, file);
	ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	mutex_unlock(&inode->i_mutex);
	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

/**
 * ntfs_file_write - simple wrapper for ntfs_file_writev()
 */
static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = (void __user *)buf,
			.iov_len = count };

	return ntfs_file_writev(file, &local_iov, 1, ppos);
}

/**
 * ntfs_file_fsync - sync a file to disk
 * @filp: file to be synced
 * @dentry: dentry describing the file to sync
 * @datasync: if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and
 * msync system calls.  This function is inspired by fs/buffer.c::file_fsync().
 *
 * If @datasync is false, write the mft record and all associated extent mft
 * records as well as the $DATA attribute and then sync the block device.
 *
 * If @datasync is true and the attribute is non-resident, we skip the
 * writing of the mft record and all associated extent mft records (this
 * might still happen due to the write_inode_now() call).
 *
 * Also, if @datasync is true, we do not wait on the inode to be written
 * out but we always wait on the page cache pages to be written out.
 *
 * Note: In the past @filp could be NULL so we ignore it as we don't need
 * it anyway.
 *
 * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes
 * associated with this inode but since we have no simple way of getting
 * to them we ignore this problem for now.
 */
static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
		int datasync)
{
	struct inode *vi = dentry->d_inode;
	int err, ret = 0;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
	BUG_ON(S_ISDIR(vi->i_mode));
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/*
	 * NOTE: If we were to use mapping->private_list (see ext2 and
	 * fs/buffer.c) for dirty blocks then we could optimize the below
	 * to be sync_mapping_buffers(vi->i_mapping).
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
				"%u.", datasync ? "data" : "", vi->i_ino,
				-ret);
	return ret;
}

#endif /* NTFS_RW */

const struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,	 /* Seek inside file. */
	.read		= do_sync_read,		 /* Read from file. */
	.aio_read	= generic_file_aio_read, /* Async read from file. */
#ifdef NTFS_RW
	.write		= ntfs_file_write,	 /* Write to file. */
	.aio_write	= ntfs_file_aio_write,	 /* Async write to file. */
	/*.release	= ,*/			 /* Last file is closed.  See
						    fs/ext2/file.c::
						    ext2_release_file() for
						    how to use this to discard
						    preallocated space for
						    write opened files. */
	.fsync		= ntfs_file_fsync,	 /* Sync a file to disk. */
	/*.aio_fsync	= ,*/			 /* Sync all outstanding async
						    i/o operations on a
						    kiocb. */
#endif /* NTFS_RW */
	/*.ioctl	= ,*/			 /* Perform function on the
						    mounted filesystem. */
	.mmap		= generic_file_mmap,	 /* Mmap file. */
	.open		= ntfs_file_open,	 /* Open file. */
	.sendfile	= generic_file_sendfile, /* Zero-copy data send with
						    the data source being on
						    the ntfs partition.  We do
						    not need to care about the
						    data destination. */
	/*.sendpage	= ,*/			 /* Zero-copy data send with
						    the data destination being
						    on the ntfs partition.  We
						    do not need to care about
						    the data source. */
};

const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.truncate	= ntfs_truncate_vfs,
	.setattr	= ntfs_setattr,
#endif /* NTFS_RW */
};

const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};
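
/*
 * Illustrative sketch (disabled, userspace, not part of this file): both
 * fsync(2) and fdatasync(2) reach ntfs_file_fsync() above via the .fsync
 * method, the latter with @datasync set, which lets the non-resident case
 * skip writing the mft record(s) when only the file data needs to be made
 * persistent.
 */
#if 0
#include <unistd.h>

static int persist(int fd, int data_only)
{
	return data_only ? fdatasync(fd) : fsync(fd);
}
#endif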