// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/nls.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * ntfs_read_mft - Read and parse an MFT record.
 */
static struct inode *ntfs_read_mft(struct inode *inode,
				   const struct cpu_str *name,
				   const struct MFT_REF *ref)
{
	int err = 0;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	mode_t mode = 0;
	struct ATTR_STD_INFO5 *std5 = NULL;
	struct ATTR_LIST_ENTRY *le;
	struct ATTRIB *attr;
	bool is_match = false;
	bool is_root = false;
	bool is_dir;
	unsigned long ino = inode->i_ino;
	u32 rp_fa = 0, asize, t32;
	u16 roff, rsize, names = 0;
	const struct ATTR_FILE_NAME *fname = NULL;
	const struct INDEX_ROOT *root;
	struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
	u64 t64;
	struct MFT_REC *rec;
	struct runs_tree *run;
	struct timespec64 ctime;

	inode->i_op = NULL;
	/* Setup 'uid' and 'gid' */
	inode->i_uid = sbi->options->fs_uid;
	inode->i_gid = sbi->options->fs_gid;

	err = mi_init(&ni->mi, sbi, ino);
	if (err)
		goto out;

	if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
		t64 = sbi->mft.lbo >> sbi->cluster_bits;
		t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
		sbi->mft.ni = ni;
		init_rwsem(&ni->file.run_lock);

		if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
			err = -ENOMEM;
			goto out;
		}
	}

	err = mi_read(&ni->mi, ino == MFT_REC_MFT);

	if (err)
		goto out;

	rec = ni->mi.mrec;

	if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
		;
	} else if (ref->seq != rec->seq) {
		err = -EINVAL;
		ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
			 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
		goto out;
	} else if (!is_rec_inuse(rec)) {
		err = -ESTALE;
		ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
		goto out;
	}

	if (le32_to_cpu(rec->total) != sbi->record_size) {
		/* Bad inode? */
		err = -EINVAL;
		goto out;
	}

	if (!is_rec_base(rec)) {
		err = -EINVAL;
		goto out;
	}

	/* Record should contain $I30 root. */
	is_dir = rec->flags & RECORD_FLAG_DIR;

	/* MFT_REC_MFT is not a dir */
	if (is_dir && ino == MFT_REC_MFT) {
		err = -EINVAL;
		goto out;
	}

	inode->i_generation = le16_to_cpu(rec->seq);

	/* Enumerate all attributes of the MFT record. */
	le = NULL;
	attr = NULL;

	/*
	 * To reduce tab pressure use goto instead of
	 * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
	 */
next_attr:
	run = NULL;
	err = -EINVAL;
	attr = ni_enum_attr_ex(ni, attr, &le, NULL);
	if (!attr)
		goto end_enum;

	if (le && le->vcn) {
		/* This is a non-primary attribute segment. Ignore if not MFT. */
		if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
			goto next_attr;

		run = &ni->file.run;
		asize = le32_to_cpu(attr->size);
		goto attr_unpack_run;
	}

	roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
	rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
	asize = le32_to_cpu(attr->size);

	/*
	 * Really this check was done in 'ni_enum_attr_ex' -> ... 'mi_enum_attr'.
	 * It is not critical to check this case again.
	 */
	if (attr->name_len &&
	    sizeof(short) * attr->name_len + le16_to_cpu(attr->name_off) >
		    asize)
		goto out;

	if (attr->non_res) {
		t64 = le64_to_cpu(attr->nres.alloc_size);
		if (le64_to_cpu(attr->nres.data_size) > t64 ||
		    le64_to_cpu(attr->nres.valid_size) > t64)
			goto out;
	}

	switch (attr->type) {
	case ATTR_STD:
		if (attr->non_res ||
		    asize < sizeof(struct ATTR_STD_INFO) + roff ||
		    rsize < sizeof(struct ATTR_STD_INFO))
			goto out;

		if (std5)
			goto next_attr;

		std5 = Add2Ptr(attr, roff);

#ifdef STATX_BTIME
		nt2kernel(std5->cr_time, &ni->i_crtime);
#endif
		nt2kernel(std5->a_time, &inode->i_atime);
		nt2kernel(std5->c_time, &ctime);
		inode_set_ctime_to_ts(inode, ctime);
		nt2kernel(std5->m_time, &inode->i_mtime);

		ni->std_fa = std5->fa;

		if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
		    rsize >= sizeof(struct ATTR_STD_INFO5))
			ni->std_security_id = std5->security_id;
		goto next_attr;

	case ATTR_LIST:
		if (attr->name_len || le || ino == MFT_REC_LOG)
			goto out;

		err = ntfs_load_attr_list(ni, attr);
		if (err)
			goto out;

		le = NULL;
		attr = NULL;
		goto next_attr;

	case ATTR_NAME:
		if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
		    rsize < SIZEOF_ATTRIBUTE_FILENAME)
			goto out;

		fname = Add2Ptr(attr, roff);
		if (fname->type == FILE_NAME_DOS)
			goto next_attr;

		names += 1;
		if (name && name->len == fname->name_len &&
		    !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
					NULL, false))
			is_match = true;

		goto next_attr;

	case ATTR_DATA:
		if (is_dir) {
			/* Ignore data attribute in dir record. */
			goto next_attr;
		}

		if (ino == MFT_REC_BADCLUST && !attr->non_res)
			goto next_attr;

		if (attr->name_len &&
		    ((ino != MFT_REC_BADCLUST || !attr->non_res ||
		      attr->name_len != ARRAY_SIZE(BAD_NAME) ||
		      memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
		     (ino != MFT_REC_SECURE || !attr->non_res ||
		      attr->name_len != ARRAY_SIZE(SDS_NAME) ||
		      memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
			/* File contains a stream attribute. Ignore it. */
			goto next_attr;
		}

		if (is_attr_sparsed(attr))
			ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
		else
			ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;

		if (is_attr_compressed(attr))
			ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
		else
			ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;

		if (is_attr_encrypted(attr))
			ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
		else
			ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;

		if (!attr->non_res) {
			ni->i_valid = inode->i_size = rsize;
			inode_set_bytes(inode, rsize);
		}

		mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);

		if (!attr->non_res) {
			ni->ni_flags |= NI_FLAG_RESIDENT;
			goto next_attr;
		}

		inode_set_bytes(inode, attr_ondisk_size(attr));

		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
		inode->i_size = le64_to_cpu(attr->nres.data_size);
		if (!attr->nres.alloc_size)
			goto next_attr;

		run = ino == MFT_REC_BITMAP ?
			      &sbi->used.bitmap.run :
			      &ni->file.run;
		break;

	case ATTR_ROOT:
		if (attr->non_res)
			goto out;

		root = Add2Ptr(attr, roff);

		if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
			goto next_attr;

		if (root->type != ATTR_NAME ||
		    root->rule != NTFS_COLLATION_TYPE_FILENAME)
			goto out;

		if (!is_dir)
			goto next_attr;

		is_root = true;
		ni->ni_flags |= NI_FLAG_DIR;

		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
		if (err)
			goto out;

		mode = sb->s_root ?
			       (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
			       (S_IFDIR | 0777);
		goto next_attr;

	case ATTR_ALLOC:
		if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
			goto next_attr;

		inode->i_size = le64_to_cpu(attr->nres.data_size);
		ni->i_valid = le64_to_cpu(attr->nres.valid_size);
		inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));

		run = &ni->dir.alloc_run;
		break;

	case ATTR_BITMAP:
		if (ino == MFT_REC_MFT) {
			if (!attr->non_res)
				goto out;
#ifndef CONFIG_NTFS3_64BIT_CLUSTER
			/* 0x20000000 = 2^32 / 8 */
			if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
				goto out;
#endif
			run = &sbi->mft.bitmap.run;
			break;
		} else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
			   !memcmp(attr_name(attr), I30_NAME,
				   sizeof(I30_NAME)) &&
			   attr->non_res) {
			run = &ni->dir.bitmap_run;
			break;
		}
		goto next_attr;

	case ATTR_REPARSE:
		if (attr->name_len)
			goto next_attr;

		rp_fa = ni_parse_reparse(ni, attr, &rp);
		switch (rp_fa) {
		case REPARSE_LINK:
			/*
			 * Normal symlink.
			 * Assume one unicode symbol == one utf8.
			 */
			inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
							    .PrintNameLength) /
					sizeof(u16);

			ni->i_valid = inode->i_size;

			/* Clear directory bit. */
			if (ni->ni_flags & NI_FLAG_DIR) {
				indx_clear(&ni->dir);
				memset(&ni->dir, 0, sizeof(ni->dir));
				ni->ni_flags &= ~NI_FLAG_DIR;
			} else {
				run_close(&ni->file.run);
			}
			mode = S_IFLNK | 0777;
			is_dir = false;
			if (attr->non_res) {
				run = &ni->file.run;
				goto attr_unpack_run; // Double break.
			}
			break;

		case REPARSE_COMPRESSED:
			break;

		case REPARSE_DEDUPLICATED:
			break;
		}
		goto next_attr;

	case ATTR_EA_INFO:
		if (!attr->name_len &&
		    resident_data_ex(attr, sizeof(struct EA_INFO))) {
			ni->ni_flags |= NI_FLAG_EA;
			/*
			 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
			 */
			inode->i_mode = mode;
			ntfs_get_wsl_perm(inode);
			mode = inode->i_mode;
		}
		goto next_attr;

	default:
		goto next_attr;
	}

attr_unpack_run:
	roff = le16_to_cpu(attr->nres.run_off);

	if (roff > asize) {
		err = -EINVAL;
		goto out;
	}

	t64 = le64_to_cpu(attr->nres.svcn);

	err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
			    t64, Add2Ptr(attr, roff), asize - roff);
	if (err < 0)
		goto out;
	err = 0;
	goto next_attr;

end_enum:

	if (!std5)
		goto out;

	if (!is_match && name) {
		/* Reuse rec as buffer for ascii name. */
		err = -ENOENT;
		goto out;
	}

	if (std5->fa & FILE_ATTRIBUTE_READONLY)
		mode &= ~0222;

	if (!names) {
		err = -EINVAL;
		goto out;
	}

	if (names != le16_to_cpu(rec->hard_links)) {
		/* Correct minor error on the fly. Do not mark inode as dirty. */
		rec->hard_links = cpu_to_le16(names);
		ni->mi.dirty = true;
	}

	set_nlink(inode, names);

	if (S_ISDIR(mode)) {
		ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;

		/*
		 * Dot and dot-dot should be included in the count but were not
		 * included in the enumeration.
		 * Hard links to directories are usually disabled.
		 */
		inode->i_op = &ntfs_dir_inode_operations;
		inode->i_fop = &ntfs_dir_operations;
		ni->i_valid = 0;
	} else if (S_ISLNK(mode)) {
		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
		inode->i_op = &ntfs_link_inode_operations;
		inode->i_fop = NULL;
		inode_nohighmem(inode);
	} else if (S_ISREG(mode)) {
		ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
		inode->i_op = &ntfs_file_inode_operations;
		inode->i_fop = &ntfs_file_operations;
		inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
							      &ntfs_aops;
		if (ino != MFT_REC_MFT)
			init_rwsem(&ni->file.run_lock);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
		   S_ISSOCK(mode)) {
		inode->i_op = &ntfs_special_inode_operations;
		init_special_inode(inode, mode, inode->i_rdev);
	} else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
		   fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
		/* Records in $Extend are not files or general directories. */
		inode->i_op = &ntfs_file_inode_operations;
	} else {
		err = -EINVAL;
		goto out;
	}

	if ((sbi->options->sys_immutable &&
	     (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
	    !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
		inode->i_flags |= S_IMMUTABLE;
	} else {
		inode->i_flags &= ~S_IMMUTABLE;
	}

	inode->i_mode = mode;
	if (!(ni->ni_flags & NI_FLAG_EA)) {
		/* If no xattr then no security (stored in xattr). */
		inode->i_flags |= S_NOSEC;
	}

	if (ino == MFT_REC_MFT && !sb->s_root)
		sbi->mft.ni = NULL;

	unlock_new_inode(inode);

	return inode;

out:
	if (ino == MFT_REC_MFT && !sb->s_root)
		sbi->mft.ni = NULL;

	iget_failed(inode);
	return ERR_PTR(err);
}

/*
 * ntfs_test_inode
 *
 * Return: 1 if match.
 */
static int ntfs_test_inode(struct inode *inode, void *data)
{
	struct MFT_REF *ref = data;

	return ino_get(ref) == inode->i_ino;
}

static int ntfs_set_inode(struct inode *inode, void *data)
{
	const struct MFT_REF *ref = data;

	inode->i_ino = ino_get(ref);
	return 0;
}

struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
			 const struct cpu_str *name)
{
	struct inode *inode;

	inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
			     (void *)ref);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);

	/* If this is a freshly allocated inode, need to read it now. */
	if (inode->i_state & I_NEW)
		inode = ntfs_read_mft(inode, name, ref);
	else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
		/* Inode overlaps? */
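		/*
		 * Note: a sequence number mismatch means the MFT slot has
		 * been reused for a different file since this inode was
		 * cached, so mark the cached inode as bad.
		 */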
		_ntfs_bad_inode(inode);
	}

	if (IS_ERR(inode) && name)
		ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR);

	return inode;
}

enum get_block_ctx {
	GET_BLOCK_GENERAL = 0,
	GET_BLOCK_WRITE_BEGIN = 1,
	GET_BLOCK_DIRECT_IO_R = 2,
	GET_BLOCK_DIRECT_IO_W = 3,
	GET_BLOCK_BMAP = 4,
};

static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
				       struct buffer_head *bh, int create,
				       enum get_block_ctx ctx)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct folio *folio = bh->b_folio;
	u8 cluster_bits = sbi->cluster_bits;
	u32 block_size = sb->s_blocksize;
	u64 bytes, lbo, valid;
	u32 off;
	int err;
	CLST vcn, lcn, len;
	bool new;

	/* Clear previous state. */
	clear_buffer_new(bh);
	clear_buffer_uptodate(bh);

	if (is_resident(ni)) {
		ni_lock(ni);
		err = attr_data_read_resident(ni, &folio->page);
		ni_unlock(ni);

		if (!err)
			set_buffer_uptodate(bh);
		bh->b_size = block_size;
		return err;
	}

	vcn = vbo >> cluster_bits;
	off = vbo & sbi->cluster_mask;
	new = false;

	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL,
				  create && sbi->cluster_size > PAGE_SIZE);
	if (err)
		goto out;

	if (!len)
		return 0;

	bytes = ((u64)len << cluster_bits) - off;

	if (lcn == SPARSE_LCN) {
		if (!create) {
			if (bh->b_size > bytes)
				bh->b_size = bytes;
			return 0;
		}
		WARN_ON(1);
	}

	if (new)
		set_buffer_new(bh);

	lbo = ((u64)lcn << cluster_bits) + off;

	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = lbo >> sb->s_blocksize_bits;

	valid = ni->i_valid;

	if (ctx == GET_BLOCK_DIRECT_IO_W) {
		/* ntfs_direct_IO will update ni->i_valid. */
		if (vbo >= valid)
			set_buffer_new(bh);
	} else if (create) {
		/* Normal write. */
		if (bytes > bh->b_size)
			bytes = bh->b_size;

		if (vbo >= valid)
			set_buffer_new(bh);

		if (vbo + bytes > valid) {
			ni->i_valid = vbo + bytes;
			mark_inode_dirty(inode);
		}
	} else if (vbo >= valid) {
		/* Read out of valid data. */
		clear_buffer_mapped(bh);
	} else if (vbo + bytes <= valid) {
		/* Normal read. */
	} else if (vbo + block_size <= valid) {
		/* Normal short read. */
		bytes = block_size;
	} else {
		/*
		 * Read across valid size: vbo < valid && valid < vbo + block_size
		 */
		bytes = block_size;

		if (folio) {
			u32 voff = valid - vbo;

			bh->b_size = block_size;
			off = vbo & (PAGE_SIZE - 1);
			folio_set_bh(bh, folio, off);

			err = bh_read(bh, 0);
			if (err < 0)
				goto out;
			folio_zero_segment(folio, off + voff, off + block_size);
		}
	}

	if (bh->b_size > bytes)
		bh->b_size = bytes;

#ifndef __LP64__
	if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
		static_assert(sizeof(size_t) < sizeof(loff_t));
		if (bytes > 0x40000000u)
			bh->b_size = 0x40000000u;
	}
#endif

	return 0;

out:
	return err;
}

int ntfs_get_block(struct inode *inode, sector_t vbn,
		   struct buffer_head *bh_result, int create)
{
	return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
				  bh_result, create, GET_BLOCK_GENERAL);
}

static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
			       struct buffer_head *bh_result, int create)
{
	return ntfs_get_block_vbo(inode,
				  (u64)vsn << inode->i_sb->s_blocksize_bits,
				  bh_result, create, GET_BLOCK_BMAP);
}

static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
}

static int ntfs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	int err;
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);

	if (is_resident(ni)) {
		ni_lock(ni);
		err = attr_data_read_resident(ni, page);
		ni_unlock(ni);
		if (err != E_NTFS_NONRESIDENT) {
			unlock_page(page);
			return err;
		}
	}

	if (is_compressed(ni)) {
		ni_lock(ni);
		err = ni_readpage_cmpr(ni, page);
		ni_unlock(ni);
		return err;
	}

	/* Normal + sparse files. */
	return mpage_read_folio(folio, ntfs_get_block);
}

static void ntfs_readahead(struct readahead_control *rac)
{
	struct address_space *mapping = rac->mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid;
	loff_t pos;

	if (is_resident(ni)) {
		/* No readahead for resident. */
		return;
	}

	if (is_compressed(ni)) {
		/* No readahead for compressed. */
		return;
	}

	valid = ni->i_valid;
	pos = readahead_pos(rac);

	if (valid < i_size_read(inode) && pos <= valid &&
	    valid < pos + readahead_length(rac)) {
		/* Range crosses 'valid'. Read it page by page. */
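		/*
		 * Note: bailing out here leaves these pages to
		 * ntfs_read_folio(), which zeroes the part of a block that
		 * lies beyond ni->i_valid instead of reading stale data
		 * from disk.
		 */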
		return;
	}

	mpage_readahead(rac, ntfs_get_block);
}

static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
{
	return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
				  bh_result, create, GET_BLOCK_DIRECT_IO_R);
}

static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
{
	return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
				  bh_result, create, GET_BLOCK_DIRECT_IO_W);
}

static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t vbo = iocb->ki_pos;
	loff_t end;
	int wr = iov_iter_rw(iter) & WRITE;
	size_t iter_count = iov_iter_count(iter);
	loff_t valid;
	ssize_t ret;

	if (is_resident(ni)) {
		/* Switch to buffered write. */
		ret = 0;
		goto out;
	}

	ret = blockdev_direct_IO(iocb, inode, iter,
				 wr ? ntfs_get_block_direct_IO_W :
				      ntfs_get_block_direct_IO_R);

	if (ret > 0)
		end = vbo + ret;
	else if (wr && ret == -EIOCBQUEUED)
		end = vbo + iter_count;
	else
		goto out;

	valid = ni->i_valid;
	if (wr) {
		if (end > valid && !S_ISBLK(inode->i_mode)) {
			ni->i_valid = end;
			mark_inode_dirty(inode);
		}
	} else if (vbo < valid && valid < end) {
		/* Fix page. */
		iov_iter_revert(iter, end - valid);
		iov_iter_zero(end - valid, iter);
	}

out:
	return ret;
}

int ntfs_set_size(struct inode *inode, u64 new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err;

	/* Check for maximum file size. */
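	/*
	 * Note: sparse and compressed files are checked against the separate
	 * sbi->maxbytes_sparse limit, since their on-disk allocation can be
	 * much smaller than their logical size.
	 */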
	if (is_sparsed(ni) || is_compressed(ni)) {
		if (new_size > sbi->maxbytes_sparse) {
			err = -EFBIG;
			goto out;
		}
	} else if (new_size > sbi->maxbytes) {
		err = -EFBIG;
		goto out;
	}

	ni_lock(ni);
	down_write(&ni->file.run_lock);

	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &ni->i_valid, true, NULL);

	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	mark_inode_dirty(inode);

out:
	return err;
}

static int ntfs_resident_writepage(struct folio *folio,
				   struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	struct ntfs_inode *ni = ntfs_i(mapping->host);
	int ret;

	ni_lock(ni);
	ret = attr_data_write_resident(ni, &folio->page);
	ni_unlock(ni);

	if (ret != E_NTFS_NONRESIDENT)
		folio_unlock(folio);
	mapping_set_error(mapping, ret);
	return ret;
}

static int ntfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	if (is_resident(ntfs_i(mapping->host)))
		return write_cache_pages(mapping, wbc, ntfs_resident_writepage,
					 mapping);
	return mpage_writepages(mapping, wbc, ntfs_get_block);
}

static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
				      struct buffer_head *bh_result, int create)
{
	return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
				  bh_result, create, GET_BLOCK_WRITE_BEGIN);
}

int ntfs_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, u32 len, struct page **pagep, void **fsdata)
{
	int err;
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);

	*pagep = NULL;
	if (is_resident(ni)) {
		struct page *page =
			grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		ni_lock(ni);
		err = attr_data_read_resident(ni, page);
		ni_unlock(ni);

		if (!err) {
			*pagep = page;
			goto out;
		}
		unlock_page(page);
		put_page(page);

		if (err != E_NTFS_NONRESIDENT)
			goto out;
	}

	err = block_write_begin(mapping, pos, len, pagep,
				ntfs_get_block_write_begin);

out:
	return err;
}

/*
 * ntfs_write_end - Address_space_operations::write_end.
 */
int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
		   u32 len, u32 copied, struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	bool dirty = false;
	int err;

	if (is_resident(ni)) {
		ni_lock(ni);
		err = attr_data_write_resident(ni, page);
		ni_unlock(ni);
		if (!err) {
			dirty = true;
			/* Clear any buffers in page. */
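			/*
			 * Note: for resident data the contents live inside
			 * the MFT record, so buffer heads attached to this
			 * page must not be left dirty/mapped or the block
			 * layer would try to write them out on its own.
			 */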
			if (page_has_buffers(page)) {
				struct buffer_head *head, *bh;

				bh = head = page_buffers(page);
				do {
					clear_buffer_dirty(bh);
					clear_buffer_mapped(bh);
					set_buffer_uptodate(bh);
				} while (head != (bh = bh->b_this_page));
			}
			SetPageUptodate(page);
			err = copied;
		}
		unlock_page(page);
		put_page(page);
	} else {
		err = generic_write_end(file, mapping, pos, len, copied, page,
					fsdata);
	}

	if (err >= 0) {
		if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
			inode->i_mtime = inode_set_ctime_current(inode);
			ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
			dirty = true;
		}

		if (valid != ni->i_valid) {
			/* ni->i_valid is changed in ntfs_get_block_vbo. */
			dirty = true;
		}

		if (pos + err > inode->i_size) {
			inode->i_size = pos + err;
			dirty = true;
		}

		if (dirty)
			mark_inode_dirty(inode);
	}

	return err;
}

int reset_log_file(struct inode *inode)
{
	int err;
	loff_t pos = 0;
	u32 log_size = inode->i_size;
	struct address_space *mapping = inode->i_mapping;

	for (;;) {
		u32 len;
		void *kaddr;
		struct page *page;

		len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;

		err = block_write_begin(mapping, pos, len, &page,
					ntfs_get_block_write_begin);
		if (err)
			goto out;

		kaddr = kmap_atomic(page);
		memset(kaddr, -1, len);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
		if (err < 0)
			goto out;
		pos += len;

		if (pos >= log_size)
			break;
		balance_dirty_pages_ratelimited(mapping);
	}
out:
	mark_inode_dirty_sync(inode);

	return err;
}

int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ntfs_sync_inode(struct inode *inode)
{
	return _ni_write_inode(inode, 1);
}

/*
 * writeback_inode - Helper function for ntfs_flush_inodes().
 *
 * This writes both the inode and the file data blocks, waiting
 * for in flight data blocks before the start of the call. It
 * does not wait for any io started during the call.
 */
static int writeback_inode(struct inode *inode)
{
	int ret = sync_inode_metadata(inode, 0);

	if (!ret)
		ret = filemap_fdatawrite(inode->i_mapping);
	return ret;
}

/*
 * ntfs_flush_inodes
 *
 * Write data and metadata corresponding to i1 and i2. The io is
 * started but we do not wait for any of it to finish.
 *
 * filemap_flush() is used for the block device, so if there is a dirty
 * page for a block already in flight, we will not wait and start the
 * io over again.
 */
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
		      struct inode *i2)
{
	int ret = 0;

	if (i1)
		ret = writeback_inode(i1);
	if (!ret && i2)
		ret = writeback_inode(i2);
	if (!ret)
		ret = sync_blockdev_nowait(sb->s_bdev);
	return ret;
}

int inode_write_data(struct inode *inode, const void *data, size_t bytes)
{
	pgoff_t idx;

	/* Write non resident data. */
	for (idx = 0; bytes; idx++) {
		size_t op = bytes > PAGE_SIZE ?
				    PAGE_SIZE : bytes;
		struct page *page = ntfs_map_page(inode->i_mapping, idx);

		if (IS_ERR(page))
			return PTR_ERR(page);

		lock_page(page);
		WARN_ON(!PageUptodate(page));
		ClearPageUptodate(page);

		memcpy(page_address(page), data, op);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);

		ntfs_unmap_page(page);

		bytes -= op;
		data = Add2Ptr(data, PAGE_SIZE);
	}
	return 0;
}

/*
 * ntfs_reparse_bytes
 *
 * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
 * for unicode string of @uni_len length.
 */
static inline u32 ntfs_reparse_bytes(u32 uni_len)
{
	/* Header + unicode string + decorated unicode string. */
	return sizeof(short) * (2 * uni_len + 4) +
	       offsetof(struct REPARSE_DATA_BUFFER,
			SymbolicLinkReparseBuffer.PathBuffer);
}

static struct REPARSE_DATA_BUFFER *
ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
			   u32 size, u16 *nsize)
{
	int i, err;
	struct REPARSE_DATA_BUFFER *rp;
	__le16 *rp_name;
	typeof(rp->SymbolicLinkReparseBuffer) *rs;

	rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
	if (!rp)
		return ERR_PTR(-ENOMEM);

	rs = &rp->SymbolicLinkReparseBuffer;
	rp_name = rs->PathBuffer;

	/* Convert link name to UTF-16. */
	err = ntfs_nls_to_utf16(sbi, symname, size,
				(struct cpu_str *)(rp_name - 1), 2 * size,
				UTF16_LITTLE_ENDIAN);
	if (err < 0)
		goto out;

	/* err = the length of unicode name of symlink. */
	*nsize = ntfs_reparse_bytes(err);

	if (*nsize > sbi->reparse.max_size) {
		err = -EFBIG;
		goto out;
	}

	/* Translate Linux '/' into Windows '\'. */
	for (i = 0; i < err; i++) {
		if (rp_name[i] == cpu_to_le16('/'))
			rp_name[i] = cpu_to_le16('\\');
	}

	rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
	rp->ReparseDataLength =
		cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
					      SymbolicLinkReparseBuffer));

	/* PrintName + SubstituteName. */
	rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
	rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
	rs->PrintNameLength = rs->SubstituteNameOffset;

	/*
	 * TODO: Use relative path if possible to allow Windows to
	 * parse this path.
	 * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
	 */
	rs->Flags = 0;

	memmove(rp_name + err + 4, rp_name, sizeof(short) * err);

	/* Decorate SubstituteName. */
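	/*
	 * Note: the substitute name is the print name prefixed with '\??\',
	 * which is why ntfs_reparse_bytes() reserves four extra UTF-16 code
	 * units and SubstituteNameLength is PrintNameLength + 8 bytes.
	 */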
	rp_name += err;
	rp_name[0] = cpu_to_le16('\\');
	rp_name[1] = cpu_to_le16('?');
	rp_name[2] = cpu_to_le16('?');
	rp_name[3] = cpu_to_le16('\\');

	return rp;
out:
	kfree(rp);
	return ERR_PTR(err);
}

/*
 * ntfs_create_inode
 *
 * Helper function for:
 * - ntfs_create
 * - ntfs_mknod
 * - ntfs_symlink
 * - ntfs_mkdir
 * - ntfs_atomic_open
 *
 * NOTE: if fnd != NULL (ntfs_atomic_open) then @dir is locked
 */
struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
				struct dentry *dentry,
				const struct cpu_str *uni, umode_t mode,
				dev_t dev, const char *symname, u32 size,
				struct ntfs_fnd *fnd)
{
	int err;
	struct super_block *sb = dir->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	const struct qstr *name = &dentry->d_name;
	CLST ino = 0;
	struct ntfs_inode *dir_ni = ntfs_i(dir);
	struct ntfs_inode *ni = NULL;
	struct inode *inode = NULL;
	struct ATTRIB *attr;
	struct ATTR_STD_INFO5 *std5;
	struct ATTR_FILE_NAME *fname;
	struct MFT_REC *rec;
	u32 asize, dsize, sd_size;
	enum FILE_ATTRIBUTE fa;
	__le32 security_id = SECURITY_ID_INVALID;
	CLST vcn;
	const void *sd;
	u16 t16, nsize = 0, aid = 0;
	struct INDEX_ROOT *root, *dir_root;
	struct NTFS_DE *e, *new_de = NULL;
	struct REPARSE_DATA_BUFFER *rp = NULL;
	bool rp_inserted = false;

	if (!fnd)
		ni_lock_dir(dir_ni);

	dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
	if (!dir_root) {
		err = -EINVAL;
		goto out1;
	}

	if (S_ISDIR(mode)) {
		/* Use parent's directory attributes. */
		fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
		     FILE_ATTRIBUTE_ARCHIVE;
		/*
		 * By default a child directory inherits the parent attributes.
		 * The root directory is hidden + system.
		 * Make an exception for children of the root.
		 */
		if (dir->i_ino == MFT_REC_ROOT)
			fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
	} else if (S_ISLNK(mode)) {
		/* Ideally a link should be of the same type (file/dir) as its target. */
		fa = FILE_ATTRIBUTE_REPARSE_POINT;

		/*
		 * Linux: there are dir/file/symlink and so on.
		 * NTFS: symlinks are "dir + reparse" or "file + reparse".
		 * It would be best to create:
		 * dir + reparse if 'symname' points to a directory
		 * or
		 * file + reparse if 'symname' points to a file.
		 * Unfortunately kern_path hangs if symname contains 'dir'.
		 */

		/*
		 * struct path path;
		 *
		 * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
		 *	struct inode *target = d_inode(path.dentry);
		 *
		 *	if (S_ISDIR(target->i_mode))
		 *		fa |= FILE_ATTRIBUTE_DIRECTORY;
		 *	// if ( target->i_sb == sb ){
		 *	//	use relative path?
		 *	// }
		 *	path_put(&path);
		 * }
		 */
	} else if (S_ISREG(mode)) {
		if (sbi->options->sparse) {
			/* Sparse regular file, because of the 'sparse' mount option. */
			fa = FILE_ATTRIBUTE_SPARSE_FILE |
			     FILE_ATTRIBUTE_ARCHIVE;
		} else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
			/* Compressed regular file, if parent is compressed. */
			fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
		} else {
			/* Regular file, default attributes. */
			fa = FILE_ATTRIBUTE_ARCHIVE;
		}
	} else {
		fa = FILE_ATTRIBUTE_ARCHIVE;
	}

	/* If option "hide_dot_files" then set hidden attribute for dot files. */
	if (sbi->options->hide_dot_files && name->name[0] == '.')
		fa |= FILE_ATTRIBUTE_HIDDEN;

	if (!(mode & 0222))
		fa |= FILE_ATTRIBUTE_READONLY;

	/* Allocate PATH_MAX bytes. */
	new_de = __getname();
	if (!new_de) {
		err = -ENOMEM;
		goto out1;
	}

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	/* Step 1: allocate and fill a new MFT record. */
	err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
	if (err)
		goto out2;

	ni = ntfs_new_inode(sbi, ino, S_ISDIR(mode) ? RECORD_FLAG_DIR : 0);
	if (IS_ERR(ni)) {
		err = PTR_ERR(ni);
		ni = NULL;
		goto out3;
	}
	inode = &ni->vfs_inode;
	inode_init_owner(idmap, inode, dir, mode);
	mode = inode->i_mode;

	ni->i_crtime = current_time(inode);

	rec = ni->mi.mrec;
	rec->hard_links = cpu_to_le16(1);
	attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));

	/* Get default security id. */
	sd = s_default_security;
	sd_size = sizeof(s_default_security);

	if (is_ntfs3(sbi)) {
		security_id = dir_ni->std_security_id;
		if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
			security_id = sbi->security.def_security_id;

			if (security_id == SECURITY_ID_INVALID &&
			    !ntfs_insert_security(sbi, sd, sd_size,
						  &security_id, NULL))
				sbi->security.def_security_id = security_id;
		}
	}

	/* Insert standard info. */
	std5 = Add2Ptr(attr, SIZEOF_RESIDENT);

	if (security_id == SECURITY_ID_INVALID) {
		dsize = sizeof(struct ATTR_STD_INFO);
	} else {
		dsize = sizeof(struct ATTR_STD_INFO5);
		std5->security_id = security_id;
		ni->std_security_id = security_id;
	}
	asize = SIZEOF_RESIDENT + dsize;

	attr->type = ATTR_STD;
	attr->size = cpu_to_le32(asize);
	attr->id = cpu_to_le16(aid++);
	attr->res.data_off = SIZEOF_RESIDENT_LE;
	attr->res.data_size = cpu_to_le32(dsize);

	std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
		kernel2nt(&ni->i_crtime);

	std5->fa = ni->std_fa = fa;

	attr = Add2Ptr(attr, asize);

	/* Insert file name. */
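	/*
	 * Note: new_de serves double duty: the ATTR_FILE_NAME payload is
	 * copied from it here, and the same entry is later inserted into
	 * the parent directory index in step 2.
	 */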
	err = fill_name_de(sbi, new_de, name, uni);
	if (err)
		goto out4;

	mi_get_ref(&ni->mi, &new_de->ref);

	fname = (struct ATTR_FILE_NAME *)(new_de + 1);

	if (sbi->options->windows_names &&
	    !valid_windows_name(sbi, (struct le_str *)&fname->name_len)) {
		err = -EINVAL;
		goto out4;
	}

	mi_get_ref(&dir_ni->mi, &fname->home);
	fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
		fname->dup.a_time = std5->cr_time;
	fname->dup.alloc_size = fname->dup.data_size = 0;
	fname->dup.fa = std5->fa;
	fname->dup.ea_size = fname->dup.reparse = 0;

	dsize = le16_to_cpu(new_de->key_size);
	asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);

	attr->type = ATTR_NAME;
	attr->size = cpu_to_le32(asize);
	attr->res.data_off = SIZEOF_RESIDENT_LE;
	attr->res.flags = RESIDENT_FLAG_INDEXED;
	attr->id = cpu_to_le16(aid++);
	attr->res.data_size = cpu_to_le32(dsize);
	memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);

	attr = Add2Ptr(attr, asize);

	if (security_id == SECURITY_ID_INVALID) {
		/* Insert security attribute. */
		asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);

		attr->type = ATTR_SECURE;
		attr->size = cpu_to_le32(asize);
		attr->id = cpu_to_le16(aid++);
		attr->res.data_off = SIZEOF_RESIDENT_LE;
		attr->res.data_size = cpu_to_le32(sd_size);
		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);

		attr = Add2Ptr(attr, asize);
	}

	attr->id = cpu_to_le16(aid++);
	if (fa & FILE_ATTRIBUTE_DIRECTORY) {
		/*
		 * Regular directory or symlink to directory.
		 * Create root attribute.
		 */
		dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
		asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;

		attr->type = ATTR_ROOT;
		attr->size = cpu_to_le32(asize);

		attr->name_len = ARRAY_SIZE(I30_NAME);
		attr->name_off = SIZEOF_RESIDENT_LE;
		attr->res.data_off =
			cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
		attr->res.data_size = cpu_to_le32(dsize);
		memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
		       sizeof(I30_NAME));

		root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
		memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
		root->ihdr.de_off = cpu_to_le32(sizeof(struct INDEX_HDR));
		root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
					      sizeof(struct NTFS_DE));
		root->ihdr.total = root->ihdr.used;

		e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
		e->size = cpu_to_le16(sizeof(struct NTFS_DE));
		e->flags = NTFS_IE_LAST;
	} else if (S_ISLNK(mode)) {
		/*
		 * Symlink to file.
		 * Create empty resident data attribute.
		 */
		asize = SIZEOF_RESIDENT;

		/* Insert empty ATTR_DATA */
		attr->type = ATTR_DATA;
		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
		attr->name_off = SIZEOF_RESIDENT_LE;
		attr->res.data_off = SIZEOF_RESIDENT_LE;
	} else if (S_ISREG(mode)) {
		/*
		 * Regular file. Create empty non resident data attribute.
		 */
		attr->type = ATTR_DATA;
		attr->non_res = 1;
		attr->nres.evcn = cpu_to_le64(-1ll);
		if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
			attr->flags = ATTR_FLAG_SPARSED;
			asize = SIZEOF_NONRESIDENT_EX + 8;
		} else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
			attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
			attr->flags = ATTR_FLAG_COMPRESSED;
			attr->nres.c_unit = COMPRESSION_UNIT;
			asize = SIZEOF_NONRESIDENT_EX + 8;
		} else {
			attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
			attr->name_off = SIZEOF_NONRESIDENT_LE;
			asize = SIZEOF_NONRESIDENT + 8;
		}
		attr->nres.run_off = attr->name_off;
	} else {
		/*
		 * Node. Create empty resident data attribute.
		 */
		attr->type = ATTR_DATA;
		attr->size = cpu_to_le32(SIZEOF_RESIDENT);
		attr->name_off = SIZEOF_RESIDENT_LE;
		if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
			attr->flags = ATTR_FLAG_SPARSED;
		else if (fa & FILE_ATTRIBUTE_COMPRESSED)
			attr->flags = ATTR_FLAG_COMPRESSED;
		attr->res.data_off = SIZEOF_RESIDENT_LE;
		asize = SIZEOF_RESIDENT;
		ni->ni_flags |= NI_FLAG_RESIDENT;
	}

	if (S_ISDIR(mode)) {
		ni->ni_flags |= NI_FLAG_DIR;
		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
		if (err)
			goto out4;
	} else if (S_ISLNK(mode)) {
		rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);

		if (IS_ERR(rp)) {
			err = PTR_ERR(rp);
			rp = NULL;
			goto out4;
		}

		/*
		 * Insert ATTR_REPARSE.
		 */
		attr = Add2Ptr(attr, asize);
		attr->type = ATTR_REPARSE;
		attr->id = cpu_to_le16(aid++);

		/* Resident or non resident? */
		asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
		t16 = PtrOffset(rec, attr);

		/*
		 * The function 'ntfs_save_wsl_perm' below requires 0x78 bytes.
		 * It is a good idea to keep extended attributes resident.
		 */
		if (asize + t16 + 0x78 + 8 > sbi->record_size) {
			CLST alen;
			CLST clst = bytes_to_cluster(sbi, nsize);

			/* Bytes available for the packed run. */
			t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;

			attr->non_res = 1;
			attr->nres.evcn = cpu_to_le64(clst - 1);
			attr->name_off = SIZEOF_NONRESIDENT_LE;
			attr->nres.run_off = attr->name_off;
			attr->nres.data_size = cpu_to_le64(nsize);
			attr->nres.valid_size = attr->nres.data_size;
			attr->nres.alloc_size =
				cpu_to_le64(ntfs_up_cluster(sbi, nsize));

			err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
						     clst, NULL, ALLOCATE_DEF,
						     &alen, 0, NULL, NULL);
			if (err)
				goto out5;

			err = run_pack(&ni->file.run, 0, clst,
				       Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
				       &vcn);
			if (err < 0)
				goto out5;

			if (vcn != clst) {
				err = -EINVAL;
				goto out5;
			}

			asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
			/* Write non resident data. */
			err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp,
						nsize, 0);
			if (err)
				goto out5;
		} else {
			attr->res.data_off = SIZEOF_RESIDENT_LE;
			attr->res.data_size = cpu_to_le32(nsize);
			memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
		}
		/* Size of symlink equals the length of input string. */
		inode->i_size = size;

		attr->size = cpu_to_le32(asize);

		err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
					  &new_de->ref);
		if (err)
			goto out5;

		rp_inserted = true;
	}

	attr = Add2Ptr(attr, asize);
	attr->type = ATTR_END;

	rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
	rec->next_attr_id = cpu_to_le16(aid);

	inode->i_generation = le16_to_cpu(rec->seq);

	if (S_ISDIR(mode)) {
		inode->i_op = &ntfs_dir_inode_operations;
		inode->i_fop = &ntfs_dir_operations;
	} else if (S_ISLNK(mode)) {
		inode->i_op = &ntfs_link_inode_operations;
		inode->i_fop = NULL;
		inode->i_mapping->a_ops = &ntfs_aops;
		inode->i_size = size;
		inode_nohighmem(inode);
	} else if (S_ISREG(mode)) {
		inode->i_op = &ntfs_file_inode_operations;
		inode->i_fop = &ntfs_file_operations;
		inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
							      &ntfs_aops;
		init_rwsem(&ni->file.run_lock);
	} else {
		inode->i_op = &ntfs_special_inode_operations;
		init_special_inode(inode, mode, dev);
	}

#ifdef CONFIG_NTFS3_FS_POSIX_ACL
	if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
		err = ntfs_init_acl(idmap, inode, dir);
		if (err)
			goto out5;
	} else
#endif
	{
		inode->i_flags |= S_NOSEC;
	}

	/*
	 * ntfs_init_acl and ntfs_save_wsl_perm update extended attributes.
	 * The packed size of the extended attributes is stored in the
	 * directory entry too. 'fname' here points inside new_de.
	 */
	ntfs_save_wsl_perm(inode, &fname->dup.ea_size);

	/*
	 * Update ea_size in the file_name attribute too.
	 * Use ni_find_attr because the layout of the MFT record may have
	 * been changed in ntfs_init_acl and ntfs_save_wsl_perm.
	 */
	attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL);
	if (attr) {
		struct ATTR_FILE_NAME *fn;

		fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
		if (fn)
			fn->dup.ea_size = fname->dup.ea_size;
	}

	/* We do not need to update the parent directory later. */
	ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;

	/* Step 2: Add the new name to the directory index. */
	err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
	if (err)
		goto out6;

	/*
	 * Call 'd_instantiate' after inode->i_op is set
	 * but before finish_open.
	 */
	d_instantiate(dentry, inode);

	/* Set original time. inode times (i_ctime) may be changed in ntfs_init_acl. */
	inode->i_atime = inode->i_mtime =
		inode_set_ctime_to_ts(inode, ni->i_crtime);
	dir->i_mtime = inode_set_ctime_to_ts(dir, ni->i_crtime);

	mark_inode_dirty(dir);
	mark_inode_dirty(inode);

	/* Normal exit. */
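	/*
	 * Note: the success path reuses out2/out1 only to free new_de and rp
	 * and to unlock the parent directory; the error-only labels
	 * out6..out3 are skipped.
	 */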
	goto out2;

out6:
	if (rp_inserted)
		ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);

out5:
	if (!S_ISDIR(mode))
		run_deallocate(sbi, &ni->file.run, false);

out4:
	clear_rec_inuse(rec);
	clear_nlink(inode);
	ni->mi.dirty = false;
	discard_new_inode(inode);
out3:
	ntfs_mark_rec_free(sbi, ino, false);

out2:
	__putname(new_de);
	kfree(rp);

out1:
	if (!fnd)
		ni_unlock(dir_ni);

	if (err)
		return ERR_PTR(err);

	unlock_new_inode(inode);

	return inode;
}

int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
{
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	struct NTFS_DE *de;

	/* Allocate PATH_MAX bytes. */
	de = __getname();
	if (!de)
		return -ENOMEM;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	/* Construct 'de'. */
	err = fill_name_de(sbi, de, &dentry->d_name, NULL);
	if (err)
		goto out;

	err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
out:
	__putname(de);
	return err;
}

/*
 * ntfs_unlink_inode
 *
 * inode_operations::unlink
 * inode_operations::rmdir
 */
int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
{
	int err;
	struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	struct ntfs_inode *dir_ni = ntfs_i(dir);
	struct NTFS_DE *de, *de2 = NULL;
	int undo_remove;

	if (ntfs_is_meta_file(sbi, ni->mi.rno))
		return -EINVAL;

	/* Allocate PATH_MAX bytes. */
	de = __getname();
	if (!de)
		return -ENOMEM;

	ni_lock(ni);

	if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
		err = -ENOTEMPTY;
		goto out;
	}

	err = fill_name_de(sbi, de, &dentry->d_name, NULL);
	if (err < 0)
		goto out;

	undo_remove = 0;
	err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);

	if (!err) {
		drop_nlink(inode);
		dir->i_mtime = inode_set_ctime_current(dir);
		mark_inode_dirty(dir);
		inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
		if (inode->i_nlink)
			mark_inode_dirty(inode);
	} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
		_ntfs_bad_inode(inode);
	} else {
		if (ni_is_dirty(dir))
			mark_inode_dirty(dir);
		if (ni_is_dirty(inode))
			mark_inode_dirty(inode);
	}

out:
	ni_unlock(ni);
	__putname(de);
	return err;
}

void ntfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	ni_clear(ntfs_i(inode));
}

/*
 * ntfs_translate_junction
 *
 * Translate a Windows junction target to the Linux equivalent.
 * On junctions, targets are always absolute (they include the drive
 * letter). We have no way of knowing if the target is for the current
 * mounted device or not so we just assume it is.
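 *
 * For example, a junction at "/dir/sub/link" (relative to the mount point)
 * whose target is "C:/foo/bar" is rewritten as "./../../foo/bar".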
 */
static int ntfs_translate_junction(const struct super_block *sb,
				   const struct dentry *link_de, char *target,
				   int target_len, int target_max)
{
	int tl_len, err = target_len;
	char *link_path_buffer = NULL, *link_path;
	char *translated = NULL;
	char *target_start;
	int copy_len;

	link_path_buffer = kmalloc(PATH_MAX, GFP_NOFS);
	if (!link_path_buffer) {
		err = -ENOMEM;
		goto out;
	}
	/* Get link path, relative to mount point */
	link_path = dentry_path_raw(link_de, link_path_buffer, PATH_MAX);
	if (IS_ERR(link_path)) {
		ntfs_err(sb, "Error getting link path");
		err = -EINVAL;
		goto out;
	}

	translated = kmalloc(PATH_MAX, GFP_NOFS);
	if (!translated) {
		err = -ENOMEM;
		goto out;
	}

	/* Make translated path a relative path to mount point */
	strcpy(translated, "./");
	++link_path;	/* Skip leading / */
	for (tl_len = sizeof("./") - 1; *link_path; ++link_path) {
		if (*link_path == '/') {
			if (PATH_MAX - tl_len < sizeof("../")) {
				ntfs_err(sb,
					 "Link path %s has too many components",
					 link_path);
				err = -EINVAL;
				goto out;
			}
			strcpy(translated + tl_len, "../");
			tl_len += sizeof("../") - 1;
		}
	}

	/* Skip drive letter */
	target_start = target;
	while (*target_start && *target_start != ':')
		++target_start;

	if (!*target_start) {
		ntfs_err(sb, "Link target (%s) missing drive separator",
			 target);
		err = -EINVAL;
		goto out;
	}

	/* Skip drive separator and leading /, if it exists */
	target_start += 1 + (target_start[1] == '/');
	copy_len = target_len - (target_start - target);

	if (PATH_MAX - tl_len <= copy_len) {
		ntfs_err(sb, "Link target %s too large for buffer (%d <= %d)",
			 target_start, PATH_MAX - tl_len, copy_len);
		err = -EINVAL;
		goto out;
	}

	/* translated path has a trailing / and target_start does not */
	strcpy(translated + tl_len, target_start);
	tl_len += copy_len;
	if (target_max <= tl_len) {
		ntfs_err(sb, "Target path %s too large for buffer (%d <= %d)",
			 translated, target_max, tl_len);
		err = -EINVAL;
		goto out;
	}
	strcpy(target, translated);
	err = tl_len;

out:
	kfree(link_path_buffer);
	kfree(translated);
	return err;
}

static noinline int ntfs_readlink_hlp(const struct dentry *link_de,
				      struct inode *inode, char *buffer,
				      int buflen)
{
	int i, err = -EINVAL;
	struct ntfs_inode *ni = ntfs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	u64 size;
	u16 ulen = 0;
	void *to_free = NULL;
	struct REPARSE_DATA_BUFFER *rp;
	const __le16 *uname;
	struct ATTRIB *attr;

	/* Reparse data present. Try to parse it. */
	static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
	static_assert(sizeof(u32) == sizeof(rp->ReparseTag));

	*buffer = 0;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
	if (!attr)
		goto out;

	if (!attr->non_res) {
		rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
		if (!rp)
			goto out;
		size = le32_to_cpu(attr->res.data_size);
	} else {
		size = le64_to_cpu(attr->nres.data_size);
		rp = NULL;
	}

	if (size > sbi->reparse.max_size || size <= sizeof(u32))
		goto out;

	if (!rp) {
		rp = kmalloc(size, GFP_NOFS);
		if (!rp) {
			err = -ENOMEM;
			goto out;
		}
		to_free = rp;
		/* Read into a temporary buffer. */
		err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
		if (err)
			goto out;
	}

	/* Microsoft Tag. */
	switch (rp->ReparseTag) {
	case IO_REPARSE_TAG_MOUNT_POINT:
		/* Mount points and junctions. */
		/* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
				     MountPointReparseBuffer.PathBuffer))
			goto out;
		uname = Add2Ptr(rp,
				offsetof(struct REPARSE_DATA_BUFFER,
					 MountPointReparseBuffer.PathBuffer) +
					le16_to_cpu(rp->MountPointReparseBuffer
							    .PrintNameOffset));
		ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
		break;

	case IO_REPARSE_TAG_SYMLINK:
		/* FolderSymbolicLink */
		/* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
		if (size <= offsetof(struct REPARSE_DATA_BUFFER,
				     SymbolicLinkReparseBuffer.PathBuffer))
			goto out;
		uname = Add2Ptr(
			rp, offsetof(struct REPARSE_DATA_BUFFER,
				     SymbolicLinkReparseBuffer.PathBuffer) +
				    le16_to_cpu(rp->SymbolicLinkReparseBuffer
							.PrintNameOffset));
		ulen = le16_to_cpu(
			rp->SymbolicLinkReparseBuffer.PrintNameLength);
		break;

	case IO_REPARSE_TAG_CLOUD:
	case IO_REPARSE_TAG_CLOUD_1:
	case IO_REPARSE_TAG_CLOUD_2:
	case IO_REPARSE_TAG_CLOUD_3:
	case IO_REPARSE_TAG_CLOUD_4:
	case IO_REPARSE_TAG_CLOUD_5:
	case IO_REPARSE_TAG_CLOUD_6:
	case IO_REPARSE_TAG_CLOUD_7:
	case IO_REPARSE_TAG_CLOUD_8:
	case IO_REPARSE_TAG_CLOUD_9:
	case IO_REPARSE_TAG_CLOUD_A:
	case IO_REPARSE_TAG_CLOUD_B:
	case IO_REPARSE_TAG_CLOUD_C:
	case IO_REPARSE_TAG_CLOUD_D:
	case IO_REPARSE_TAG_CLOUD_E:
	case IO_REPARSE_TAG_CLOUD_F:
		err = sizeof("OneDrive") - 1;
		if (err > buflen)
			err = buflen;
		memcpy(buffer, "OneDrive", err);
		goto out;

	default:
		if (IsReparseTagMicrosoft(rp->ReparseTag)) {
			/* Unknown Microsoft Tag. */
			goto out;
		}
		if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
		    size <= sizeof(struct REPARSE_POINT)) {
			goto out;
		}

		/* User's tag. */
		uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
		ulen = le16_to_cpu(rp->ReparseDataLength) -
		       sizeof(struct REPARSE_POINT);
	}

	/* Convert ulen from bytes to UNICODE chars. */
	ulen >>= 1;

	/* Check that name is available. */
	if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
		goto out;

	/* If the name is already zero terminated then truncate it now. */
	if (!uname[ulen - 1])
		ulen -= 1;

	err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);

	if (err < 0)
		goto out;

	/* Translate Windows '\' into Linux '/'. */
	for (i = 0; i < err; i++) {
		if (buffer[i] == '\\')
			buffer[i] = '/';
	}

	/* Always set last zero. */
	buffer[err] = 0;

	/* If this is a junction, translate the link target. */
	if (rp->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT)
		err = ntfs_translate_junction(sb, link_de, buffer, err, buflen);

out:
	kfree(to_free);
	return err;
}

static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
				 struct delayed_call *done)
{
	int err;
	char *ret;

	if (!de)
		return ERR_PTR(-ECHILD);

	ret = kmalloc(PAGE_SIZE, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	err = ntfs_readlink_hlp(de, inode, ret, PAGE_SIZE);
	if (err < 0) {
		kfree(ret);
		return ERR_PTR(err);
	}

	set_delayed_call(done, kfree_link, ret);

	return ret;
}

// clang-format off
const struct inode_operations ntfs_link_inode_operations = {
	.get_link	= ntfs_get_link,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
};

const struct address_space_operations ntfs_aops = {
	.read_folio	= ntfs_read_folio,
	.readahead	= ntfs_readahead,
	.writepages	= ntfs_writepages,
	.write_begin	= ntfs_write_begin,
	.write_end	= ntfs_write_end,
	.direct_IO	= ntfs_direct_IO,
	.bmap		= ntfs_bmap,
	.dirty_folio	= block_dirty_folio,
	.migrate_folio	= buffer_migrate_folio,
	.invalidate_folio = block_invalidate_folio,
};

const struct address_space_operations ntfs_aops_cmpr = {
	.read_folio	= ntfs_read_folio,
	.readahead	= ntfs_readahead,
};
// clang-format on