// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be defined externally
 * to tune the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

// 16M
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))

static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}
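
/*
 * For illustration (derived from the defaults above): any size up to 16M
 * is rounded up to a 64K boundary (1 << 16); a 1G request gives
 * align_shift = 15 + __ffs(1G >> 24) = 21, i.e. a 2M boundary; sizes of
 * 16G and above are rounded up to a 64M boundary (1 << 26).
 */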

/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);

	if (run_off > asize)
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}

/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			if (sbi) {
				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
				mark_as_free_ex(sbi, lcn, clen, trim);
			}
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}

/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn, CLST *new_len)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err)
			goto out;

		if (vcn == vcn0) {
			/* Return the first fragment. */
			if (new_lcn)
				*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space'. */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;

		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space'. */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}
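
/*
 * For reference, attr_make_nonresident() below calls this helper with
 * vcn = 0, lcn = 0, pre_alloc = NULL and fr = 0 (no fragment limit),
 * while attr_set_size() limits fragmentation with roughly
 * fr = (record_size - used + 8) / 3 + 1, since a packed run fragment
 * costs about 3 bytes of MFT record space.
 */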

/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non-empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes nonresident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim newly allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}

/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
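
/*
 * Example of the resize math above: growing a 10-byte resident attribute
 * to 20 bytes gives dsize = ALIGN(20, 8) - ALIGN(10, 8) = 24 - 16 = 8,
 * so the tail of the MFT record shifts by 8 bytes and the new data
 * bytes [10, 20) are zeroed.
 */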

/*
 * attr_set_size - Change the size of an attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - Do not deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;
	bool dirty = false;
	u32 align;
	struct MFT_REC *rec;

again:
	alen = 0;
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res) {
			dirty = true;
			goto ok1;
		}

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}
	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
next_le:
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In the simple case we have to:
		 *  - allocate space (vcn, lcn, len)
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocation for sparse/compressed. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc = bytes_to_cluster(
					sbi, get_pre_allocated(new_size)) -
					new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) / 3 + 1,
				NULL, NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto undo_1;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* Keep at least two MFT records to avoid a recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto undo_2;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error layout of records is not changed. */
			if (err)
				goto undo_2;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in a consistent state.
		 * Save this point to restore to if the next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In the simple case we have to:
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 *  - mark and trim clusters as free (vcn, lcn, len)
		 */
		CLST dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with the last entry
			 * (vcn==0), and it is not the first in the entries
			 * array (the list entry for the std attribute is
			 * always first), so it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto bad_inode;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen - the number of really deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)dlen << cluster_bits));
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		/* Update inode_set_bytes. */
		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		/* Don't forget to update duplicate information in parent. */
		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return 0;

undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

undo_1:
	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}
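
/*
 * For reference, attr_collapse_range() below uses this function to
 * truncate at a byte offset:
 *	attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
 *		      &new_valid, true, NULL);
 * where keep_prealloc = true leaves already preallocated clusters in
 * place on shrink.
 */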

/*
 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 *
 * @new == NULL means just to get current mapping for 'vcn'.
 * @new != NULL means allocate real cluster if 'vcn' maps to hole.
 * @zero - zero out newly allocated clusters.
 *
 * NOTE:
 * - @new != NULL is called only for sparse or compressed attributes.
 * - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new, bool zero)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
	CLST alloc, evcn;
	unsigned fr;
	u64 total_size, total_size0;
	int step = 0;

	if (new)
		*new = false;

	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;
	up_read(&ni->file.run_lock);

	if (*len) {
		if (*lcn != SPARSE_LCN || !new)
			return 0; /* Fast normal way without allocation. */
		else if (clen > *len)
			clen = *len;
	}

	/* No cluster in cache or we need to allocate a cluster in a hole. */
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	ni_lock(ni);
	down_write(&ni->file.run_lock);

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
	if (vcn >= asize) {
		if (new) {
			err = -EINVAL;
		} else {
			*len = 1;
			*lcn = SPARSE_LCN;
		}
		goto out;
	}

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	/* Load the actual information into the run cache. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!*len) {
		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
			if (*lcn != SPARSE_LCN || !new)
				goto ok; /* Slow normal way without allocation. */

			if (clen > *len)
				clen = *len;
		} else if (!new) {
			/*
			 * We may return -ENOENT here.
			 * In any case the caller gets zero length.
			 */
			goto ok;
		}
	}

	if (!is_attr_ext(attr_b)) {
		/* The code below is only for sparse or compressed attributes. */
		err = -EINVAL;
		goto out;
	}

	vcn0 = vcn;
	to_alloc = clen;
	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
	/*
	 * Allocate frame-aligned clusters.
	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
	 * ntfs3 uses 1 cluster per frame for newly created sparse files.
	 */
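	/*
	 * Example: with c_unit = 4 (16 clusters per frame), a request for
	 * vcn0 = 5, clen = 3 below is widened to vcn = 0, to_alloc = 16,
	 * so whole frames are allocated and zeroed.
	 */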
	if (attr_b->nres.c_unit) {
		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
		CLST cmask = ~(clst_per_frame - 1);

		/* Get frame-aligned vcn and to_alloc. */
		vcn = vcn0 & cmask;
		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
		if (fr < clst_per_frame)
			fr = clst_per_frame;
		zero = true;

		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
		if (vcn < svcn || evcn1 <= vcn) {
			/* Load attribute for truncated vcn. */
			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
					    &vcn, &mi);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
			err = attr_load_runs(attr, ni, run, NULL);
			if (err)
				goto out;
		}
	}

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	/* Allocate and zero out new clusters. */
	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
				     fr, lcn, len);
	if (err)
		goto out;
	*new = true;
	step = 1;

	end = vcn + alen;
	/* Save 'total_size0' to restore if error. */
	total_size0 = le64_to_cpu(attr_b->nres.total_size);
	total_size = total_size0 + ((u64)alen << cluster_bits);

	if (vcn != vcn0) {
		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
			err = -EINVAL;
			goto out;
		}
		if (*lcn == SPARSE_LCN) {
			/* Internal error. Should not happen. */
			WARN_ON(1);
			err = -EINVAL;
			goto out;
		}
		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
		if (vcn0 + *len > end)
			*len = end - vcn0;
	}

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto undo1;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	/*
	 * The code below may require an additional cluster (to extend the
	 * attribute list) and/or one MFT record.
	 * It is too complex to undo operations if -ENOSPC occurs deep inside
	 * 'ni_insert_nonresident'.
	 * Return -ENOSPC in advance here if there is no free cluster and no
	 * free MFT record.
	 */
	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
		/* Undo step 1. */
		err = -ENOSPC;
		goto undo1;
	}

	step = 2;
	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (!attr) {
		/* Insert new attribute segment. */
		goto ins_ext;
	}

	/* Try to update an existing attribute segment. */
	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
	evcn = le64_to_cpu(attr->nres.evcn);

	if (end < next_svcn)
		end = next_svcn;
	while (end > evcn) {
		/* Remove segment [svcn : evcn). */
		mi_remove_attr(NULL, mi, attr);

		if (!al_remove_le(ni, le)) {
			err = -EINVAL;
			goto out;
		}

		if (evcn + 1 >= alloc) {
			/* Last attribute segment. */
			evcn1 = evcn + 1;
			goto ins_ext;
		}

		if (ni_load_mi(ni, le, &mi)) {
			attr = NULL;
			goto out;
		}

		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

	if (end < svcn)
		end = svcn;

	err = attr_load_runs(attr, ni, run, &end);
	if (err)
		goto out;

	evcn1 = evcn + 1;
	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
	if (err)
		goto out;

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;
	mi->dirty = true;
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (err && step > 1) {
		/* Too complex to restore. */
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	return err;

undo1:
	/* Undo step 1. */
	attr_b->nres.total_size = cpu_to_le64(total_size0);
	inode_set_bytes(&ni->vfs_inode, total_size0);

	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	goto out;
}

int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}

int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}

/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}

/*
 * attr_load_runs_range - Load runs for given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read the header of an Xpress/LZX file to get info about a frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
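	/*
	 * Layout sketch (as implied by the code below): the attribute begins
	 * with an array of per-frame end offsets (__le32 for files smaller
	 * than 4G, __le64 otherwise), so frame i occupies
	 * [off[i - 1], off[i]) of the compressed data that follows the
	 * array, with off[-1] taken as 0 for the first frame.
	 */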
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif

/*
 * attr_is_frame_compressed - Used to detect a compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparse frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn_next != vcn) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data clusters + sparse clusters are
				 * not enough for the frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparse clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}
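
/*
 * Example of attr_is_frame_compressed(): for a compression unit of
 * 16 clusters, a frame stored as 10 allocated clusters followed by
 * 6 sparse clusters is reported as compressed (*clst_data = 10), while
 * 16 allocated clusters mean the frame is stored uncompressed
 * (*clst_data = clst_frame).
 */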

/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;

		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(valid_size);
		mi_b->dirty = true;
	}

	return err;
}
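
/*
 * Note on the alignment checks below: for sparse/compressed attributes
 * the mask is (cluster_size << c_unit) - 1, so e.g. 4K clusters with
 * c_unit = 4 force 64K (one compression frame) granularity for the
 * collapse, punch and insert ranges.
 */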

/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only cluster-aligned ranges can be collapsed. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCN. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN. */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			if (roff > le32_to_cpu(attr->size)) {
				err = -EINVAL;
				goto out;
			}

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}

/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
	u64 total_size, alloc_size;
	u32 mask;
	__le16 a_flags;
	struct runs_tree run2;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;
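
	/*
	 * Example: punching 4K inside a 64K compression frame is rejected
	 * below with E_NTFS_NOTALIGNED and *frame_size = 64K, so the caller
	 * can zero the unaligned part of the range instead.
	 */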
	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero a range(s). */
		if (frame_size == NULL) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}

	down_write(&ni->file.run_lock);
	run_init(&run2);
	run_truncate(run, 0);

	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	hole = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check range [vcn1, vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
		if (err)
			goto done;

		/* Check if required range is already a hole. */
		if (hole2 == hole)
			goto next_attr;

		/* Make a clone of run to undo. */
		err = run_clone(run, &run2);
		if (err)
			goto done;

		/* Make a hole range (sparse) [vcn1, vcn1 + zero). */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
			err = -ENOMEM;
			goto done;
		}

		/* Update run in attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
		if (err)
			goto done;
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    next_svcn,
						    evcn1 - next_svcn, a_flags,
						    &attr, &mi, &le);
			if (err)
				goto undo_punch;

			/* Layout of records may be changed. */
			attr_b = NULL;
		}

		/* Real deallocate. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);

next_attr:
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		/* Get next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

done:
	if (!hole)
		goto out;

	if (!attr_b) {
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}
	}

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_close(&run2);
	up_write(&ni->file.run_lock);
	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_punch:
	/*
	 * Restore packed runs.
	 * 'mi_pack_runs' should not fail because we restore the original.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
		goto bad_inode;

	goto done;
}

/*
 * attr_insert_range - Insert range (hole) in file.
 * Not for normal files.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* It was checked above. See fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo > data_size) {
		/* Inserting a range beyond the file size is not allowed. */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only frame-aligned ranges can be inserted. */
		return -EINVAL;
	}

	/*
	 * valid_size <= data_size <= alloc_size.
	 * Check alloc_size for maximum possible.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
		return -EFBIG;

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err)
			goto out;

		if (!attr_b->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr_b,
					     le16_to_cpu(attr_b->res.data_off));

			memmove(data + bytes, data, bytes);
			memset(data, 0, bytes);
			goto done;
		}

		/* Resident file becomes nonresident. */
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}

	/*
	 * Enumerate all attribute segments and shift start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}

	/* Try to pack in current record as much as possible. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
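
	/*
	 * Example: with 4K clusters, inserting a 128K hole shifts
	 * len = 32 clusters, so svcn/evcn of every following DATA segment
	 * below are increased by 32 (and decreased again on undo).
	 */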
	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. Try to undo. */
			goto undo_insert_range;
		}
	}

	/*
	 * Update primary attribute segment.
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->i_valid may not be equal to valid_size (temporarily). */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

done:
	ni->vfs_inode.i_size += bytes;
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* clear cached values. */

	up_write(&ni->file.run_lock);

	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_insert_range:
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr)
			goto bad_inode;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (attr_load_runs(attr, ni, run, NULL))
		goto bad_inode;

	if (!run_collapse_range(run, vcn, len))
		goto bad_inode;

	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
		goto bad_inode;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_sub_cpu(&attr->nres.svcn, len);
		le64_sub_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	goto out;
}