// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fs.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
			       const __le16 *name, u8 name_len,
			       const u16 *upcase)
{
	/* First, compare the type codes. */
	int diff = le32_to_cpu(left->type) - le32_to_cpu(type);

	if (diff)
		return diff;

	/* They have the same type code, so we have to compare the names. */
	return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
			      upcase, true);
}

/*
 * mi_new_attt_id
 *
 * Return: Unused attribute id that is less than mrec->next_attr_id.
 */
static __le16 mi_new_attt_id(struct mft_inode *mi)
{
	u16 free_id, max_id, t16;
	struct MFT_REC *rec = mi->mrec;
	struct ATTRIB *attr;
	__le16 id;

	id = rec->next_attr_id;
	free_id = le16_to_cpu(id);
	if (free_id < 0x7FFF) {
		rec->next_attr_id = cpu_to_le16(free_id + 1);
		return id;
	}

	/* One record can store up to 1024/24 ~= 42 attributes. */
	free_id = 0;
	max_id = 0;

	attr = NULL;

	for (;;) {
		attr = mi_enum_attr(mi, attr);
		if (!attr) {
			rec->next_attr_id = cpu_to_le16(max_id + 1);
			mi->dirty = true;
			return cpu_to_le16(free_id);
		}

		t16 = le16_to_cpu(attr->id);
		if (t16 == free_id) {
			free_id += 1;
			attr = NULL;
		} else if (max_id < t16)
			max_id = t16;
	}
}

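/*
 * mi_get - Allocate a new mft_inode and load MFT record @rno into it.
 *
 * On success *@mi holds the new mft_inode; the caller releases it with
 * mi_put().
 */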
int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
{
	int err;
	struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);

	if (!m)
		return -ENOMEM;

	err = mi_init(m, sbi, rno);
	if (err) {
		kfree(m);
		return err;
	}

	err = mi_read(m, false);
	if (err) {
		mi_put(m);
		return err;
	}

	*mi = m;
	return 0;
}

void mi_put(struct mft_inode *mi)
{
	mi_clear(mi);
	kfree(mi);
}

int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
{
	mi->sbi = sbi;
	mi->rno = rno;
	mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
	if (!mi->mrec)
		return -ENOMEM;

	return 0;
}

/*
 * mi_read - Read MFT data.
 */
int mi_read(struct mft_inode *mi, bool is_mft)
{
	int err;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 bpr = sbi->record_size;
	u64 vbo = (u64)mi->rno << sbi->record_bits;
	struct ntfs_inode *mft_ni = sbi->mft.ni;
	struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
	struct rw_semaphore *rw_lock = NULL;

	if (is_mounted(sbi)) {
		if (!is_mft) {
			rw_lock = &mft_ni->file.run_lock;
			down_read(rw_lock);
		}
	}

	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);
	if (!err)
		goto ok;

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}

	if (err != -ENOENT)
		goto out;

	if (rw_lock) {
		ni_lock(mft_ni);
		down_write(rw_lock);
	}
	err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
				 vbo >> sbi->cluster_bits);
	if (rw_lock) {
		up_write(rw_lock);
		ni_unlock(mft_ni);
	}
	if (err)
		goto out;

	if (rw_lock)
		down_read(rw_lock);
	err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
	if (rw_lock)
		up_read(rw_lock);

	if (err == -E_NTFS_FIXUP) {
		mi->dirty = true;
		goto ok;
	}
	if (err)
		goto out;

ok:
	/* Check field 'total' only here. */
	if (le32_to_cpu(rec->total) != bpr) {
		err = -EINVAL;
		goto out;
	}

	return 0;

out:
	return err;
}

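/*
 * mi_enum_attr - Enumerate the attributes of an MFT record.
 *
 * Return: The first attribute if @attr is NULL, otherwise the attribute
 * that follows @attr; NULL at the end of the list or if the record looks
 * corrupted. Basic sanity checks are performed on every returned attribute.
 */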
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
	const struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 t32, off, asize;
	u16 t16;

	if (!attr) {
		u32 total = le32_to_cpu(rec->total);

		off = le16_to_cpu(rec->attr_off);

		if (used > total)
			return NULL;

		if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
		    !IS_ALIGNED(off, 4)) {
			return NULL;
		}

		/* Skip records that are not in use. */
		if (!is_rec_inuse(rec))
			return NULL;

		attr = Add2Ptr(rec, off);
	} else {
		/* Check if the input attr is inside the record. */
		off = PtrOffset(rec, attr);
		if (off >= used)
			return NULL;

		asize = le32_to_cpu(attr->size);
		if (asize < SIZEOF_RESIDENT) {
			/* Impossible, because we should not return such an attribute. */
			return NULL;
		}

		attr = Add2Ptr(attr, asize);
		off += asize;
	}

	asize = le32_to_cpu(attr->size);

	/* Can we use the first field (attr->type)? */
	if (off + 8 > used) {
		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
		return NULL;
	}

	if (attr->type == ATTR_END) {
		/* End of enumeration. */
		return NULL;
	}

	/* 0x100 is the last known attribute type for now. */
	t32 = le32_to_cpu(attr->type);
	if ((t32 & 0xf) || (t32 > 0x100))
		return NULL;

	/* Check boundary. */
	if (off + asize > used)
		return NULL;

	/* Check size of attribute. */
	if (!attr->non_res) {
		if (asize < SIZEOF_RESIDENT)
			return NULL;

		t16 = le16_to_cpu(attr->res.data_off);

		if (t16 > asize)
			return NULL;

		t32 = le32_to_cpu(attr->res.data_size);
		if (t16 + t32 > asize)
			return NULL;

		return attr;
	}

	/* Check some nonresident fields. */
	if (attr->name_len &&
	    le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
		    le16_to_cpu(attr->nres.run_off)) {
		return NULL;
	}

	if (attr->nres.svcn || !is_attr_ext(attr)) {
		if (asize + 8 < SIZEOF_NONRESIDENT)
			return NULL;

		if (attr->nres.c_unit)
			return NULL;
	} else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
		return NULL;

	return attr;
}

/*
 * mi_find_attr - Find the attribute by type, name and id.
 */
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
			    enum ATTR_TYPE type, const __le16 *name,
			    size_t name_len, const __le16 *id)
{
	u32 type_in = le32_to_cpu(type);
	u32 atype;

next_attr:
	attr = mi_enum_attr(mi, attr);
	if (!attr)
		return NULL;

	atype = le32_to_cpu(attr->type);
	if (atype > type_in)
		return NULL;

	if (atype < type_in)
		goto next_attr;

	if (attr->name_len != name_len)
		goto next_attr;

	if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
		goto next_attr;

	if (id && *id != attr->id)
		goto next_attr;

	return attr;
}

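/*
 * mi_write - Write the MFT record back to disk if it is dirty.
 *
 * For records covered by the MFT mirror, NTFS_FLAGS_MFTMIRR is set so the
 * mirror can be refreshed later.
 */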
int mi_write(struct mft_inode *mi, int wait)
{
	struct MFT_REC *rec;
	int err;
	struct ntfs_sb_info *sbi;

	if (!mi->dirty)
		return 0;

	sbi = mi->sbi;
	rec = mi->mrec;

	err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
	if (err)
		return err;

	if (mi->rno < sbi->mft.recs_mirr)
		sbi->flags |= NTFS_FLAGS_MFTMIRR;

	mi->dirty = false;

	return 0;
}

int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
		  __le16 flags, bool is_mft)
{
	int err;
	u16 seq = 1;
	struct MFT_REC *rec;
	u64 vbo = (u64)rno << sbi->record_bits;

	err = mi_init(mi, sbi, rno);
	if (err)
		return err;

	rec = mi->mrec;

	if (rno == MFT_REC_MFT) {
		;
	} else if (rno < MFT_REC_FREE) {
		seq = rno;
	} else if (rno >= sbi->mft.used) {
		;
	} else if (mi_read(mi, is_mft)) {
		;
	} else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
		/* Record is reused. Update its sequence number. */
		seq = le16_to_cpu(rec->seq) + 1;
		if (!seq)
			seq = 1;
	}

	memcpy(rec, sbi->new_rec, sbi->record_size);

	rec->seq = cpu_to_le16(seq);
	rec->flags = RECORD_FLAG_IN_USE | flags;

	mi->dirty = true;

	if (!mi->nb.nbufs) {
		struct ntfs_inode *ni = sbi->mft.ni;
		bool lock = false;

		if (is_mounted(sbi) && !is_mft) {
			down_read(&ni->file.run_lock);
			lock = true;
		}

		err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
				  &mi->nb);
		if (lock)
			up_read(&ni->file.run_lock);
	}

	return err;
}

/*
 * mi_mark_free - Mark the record as unused and mark it as free in the bitmap.
 */
void mi_mark_free(struct mft_inode *mi)
{
	CLST rno = mi->rno;
	struct ntfs_sb_info *sbi = mi->sbi;

	if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
		ntfs_clear_mft_tail(sbi, rno, rno + 1);
		mi->dirty = false;
		return;
	}

	if (mi->mrec) {
		clear_rec_inuse(mi->mrec);
		mi->dirty = true;
		mi_write(mi, 0);
	}
	ntfs_mark_rec_free(sbi, rno);
}

/*
 * mi_insert_attr - Reserve space for a new attribute.
 *
 * Return: Not fully constructed attribute, or NULL if it is not possible
 * to create one.
 */
struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
			      const __le16 *name, u8 name_len, u32 asize,
			      u16 name_off)
{
	size_t tail;
	struct ATTRIB *attr;
	__le16 id;
	struct MFT_REC *rec = mi->mrec;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 used = le32_to_cpu(rec->used);
	const u16 *upcase = sbi->upcase;
	int diff;

	/* Can we insert the attribute into this record? */
	if (used + asize > mi->sbi->record_size)
		return NULL;

	/*
	 * Scan through the list of attributes to find the point
	 * at which we should insert it.
	 */
	attr = NULL;
	while ((attr = mi_enum_attr(mi, attr))) {
		diff = compare_attr(attr, type, name, name_len, upcase);
		if (diff > 0)
			break;
		if (diff < 0)
			continue;

		if (!is_attr_indexed(attr))
			return NULL;
		break;
	}

	if (!attr) {
		tail = 8; /* Not used, just to suppress warning. */
		attr = Add2Ptr(rec, used - 8);
	} else {
		tail = used - PtrOffset(rec, attr);
	}

	id = mi_new_attt_id(mi);

	memmove(Add2Ptr(attr, asize), attr, tail);
	memset(attr, 0, asize);

	attr->type = type;
	attr->size = cpu_to_le32(asize);
	attr->name_len = name_len;
	attr->name_off = cpu_to_le16(name_off);
	attr->id = id;

	memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
	rec->used = cpu_to_le32(used + asize);

	mi->dirty = true;

	return attr;
}

/*
 * mi_remove_attr - Remove the attribute from the record.
 *
 * NOTE: The source attr will point to the next attribute.
 */
bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
		    struct ATTRIB *attr)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);

	if (aoff + asize > used)
		return false;

	if (ni && is_attr_indexed(attr)) {
		le16_add_cpu(&ni->mi.mrec->hard_links, -1);
		ni->mi.dirty = true;
	}

	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;

	return true;
}

/* bytes = "new attribute size" - "old attribute size" */
bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
{
	struct MFT_REC *rec = mi->mrec;
	u32 aoff = PtrOffset(rec, attr);
	u32 total, used = le32_to_cpu(rec->used);
	u32 nsize, asize = le32_to_cpu(attr->size);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	int tail = (int)(used - aoff - asize);
	int dsize;
	char *next;

	if (tail < 0 || aoff >= used)
		return false;

	if (!bytes)
		return true;

	total = le32_to_cpu(rec->total);
	next = Add2Ptr(attr, asize);

	if (bytes > 0) {
		dsize = ALIGN(bytes, 8);
		if (used + dsize > total)
			return false;
		nsize = asize + dsize;
		/* Move the tail. */
		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
		used += dsize;
		rsize += dsize;
	} else {
		dsize = ALIGN(-bytes, 8);
		if (dsize > asize)
			return false;
		nsize = asize - dsize;
		memmove(next - dsize, next, tail);
		used -= dsize;
		rsize -= dsize;
	}

	rec->used = cpu_to_le32(used);
	attr->size = cpu_to_le32(nsize);
	if (!attr->non_res)
		attr->res.data_size = cpu_to_le32(rsize);
	mi->dirty = true;

	return true;
}

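/*
 * mi_pack_runs - Pack runs of @run, starting at attr->nres.svcn and at most
 * @len clusters, into the mapping pairs area of the non-resident @attr,
 * resizing the attribute and the record accordingly.
 */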
int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
		 struct runs_tree *run, CLST len)
{
	int err = 0;
	struct ntfs_sb_info *sbi = mi->sbi;
	u32 new_run_size;
	CLST plen;
	struct MFT_REC *rec = mi->mrec;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	u32 used = le32_to_cpu(rec->used);
	u32 aoff = PtrOffset(rec, attr);
	u32 asize = le32_to_cpu(attr->size);
	char *next = Add2Ptr(attr, asize);
	u16 run_off = le16_to_cpu(attr->nres.run_off);
	u32 run_size = asize - run_off;
	u32 tail = used - aoff - asize;
	u32 dsize = sbi->record_size - used;

	/* Make a maximum gap in the current record. */
	memmove(next + dsize, next, tail);

	/* Pack as much as possible. */
	err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
		       &plen);
	if (err < 0) {
		memmove(next, next + dsize, tail);
		return err;
	}

	new_run_size = ALIGN(err, 8);

	memmove(next + new_run_size - run_size, next + dsize, tail);

	attr->size = cpu_to_le32(asize + new_run_size - run_size);
	attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
	rec->used = cpu_to_le32(used + new_run_size - run_size);
	mi->dirty = true;

	return 0;
}
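
/*
 * A minimal usage sketch (illustrative only, not part of the driver):
 * load record 'rno' and look up its unnamed $DATA attribute. The caller's
 * 'sbi' and 'rno' are assumed context; real callers add locking and attach
 * the loaded subrecord to the owning ntfs_inode.
 *
 *	struct mft_inode *mi;
 *	struct ATTRIB *attr;
 *	int err = mi_get(sbi, rno, &mi);
 *
 *	if (err)
 *		return err;
 *	attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL);
 *	if (!attr)
 *		err = -ENOENT;
 *	...
 *	mi_put(mi);
 *	return err;
 */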