1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * 4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. 5 * 6 */ 7 8 #include <linux/blkdev.h> 9 #include <linux/fs.h> 10 #include <linux/random.h> 11 #include <linux/slab.h> 12 13 #include "debug.h" 14 #include "ntfs.h" 15 #include "ntfs_fs.h" 16 17 /* 18 * LOG FILE structs 19 */ 20 21 // clang-format off 22 23 #define MaxLogFileSize 0x100000000ull 24 #define DefaultLogPageSize 4096 25 #define MinLogRecordPages 0x30 26 27 struct RESTART_HDR { 28 struct NTFS_RECORD_HEADER rhdr; // 'RSTR' 29 __le32 sys_page_size; // 0x10: Page size of the system which initialized the log. 30 __le32 page_size; // 0x14: Log page size used for this log file. 31 __le16 ra_off; // 0x18: 32 __le16 minor_ver; // 0x1A: 33 __le16 major_ver; // 0x1C: 34 __le16 fixups[]; 35 }; 36 37 #define LFS_NO_CLIENT 0xffff 38 #define LFS_NO_CLIENT_LE cpu_to_le16(0xffff) 39 40 struct CLIENT_REC { 41 __le64 oldest_lsn; 42 __le64 restart_lsn; // 0x08: 43 __le16 prev_client; // 0x10: 44 __le16 next_client; // 0x12: 45 __le16 seq_num; // 0x14: 46 u8 align[6]; // 0x16: 47 __le32 name_bytes; // 0x1C: In bytes. 48 __le16 name[32]; // 0x20: Name of client. 49 }; 50 51 static_assert(sizeof(struct CLIENT_REC) == 0x60); 52 53 /* Two copies of these will exist at the beginning of the log file */ 54 struct RESTART_AREA { 55 __le64 current_lsn; // 0x00: Current logical end of log file. 56 __le16 log_clients; // 0x08: Maximum number of clients. 57 __le16 client_idx[2]; // 0x0A: Free/use index into the client record arrays. 58 __le16 flags; // 0x0E: See RESTART_SINGLE_PAGE_IO. 59 __le32 seq_num_bits; // 0x10: The number of bits in sequence number. 60 __le16 ra_len; // 0x14: 61 __le16 client_off; // 0x16: 62 __le64 l_size; // 0x18: Usable log file size. 63 __le32 last_lsn_data_len; // 0x20: 64 __le16 rec_hdr_len; // 0x24: Log page data offset. 65 __le16 data_off; // 0x26: Log page data length. 66 __le32 open_log_count; // 0x28: 67 __le32 align[5]; // 0x2C: 68 struct CLIENT_REC clients[]; // 0x40: 69 }; 70 71 struct LOG_REC_HDR { 72 __le16 redo_op; // 0x00: NTFS_LOG_OPERATION 73 __le16 undo_op; // 0x02: NTFS_LOG_OPERATION 74 __le16 redo_off; // 0x04: Offset to Redo record. 75 __le16 redo_len; // 0x06: Redo length. 76 __le16 undo_off; // 0x08: Offset to Undo record. 77 __le16 undo_len; // 0x0A: Undo length. 78 __le16 target_attr; // 0x0C: 79 __le16 lcns_follow; // 0x0E: 80 __le16 record_off; // 0x10: 81 __le16 attr_off; // 0x12: 82 __le16 cluster_off; // 0x14: 83 __le16 reserved; // 0x16: 84 __le64 target_vcn; // 0x18: 85 __le64 page_lcns[]; // 0x20: 86 }; 87 88 static_assert(sizeof(struct LOG_REC_HDR) == 0x20); 89 90 #define RESTART_ENTRY_ALLOCATED 0xFFFFFFFF 91 #define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF) 92 93 struct RESTART_TABLE { 94 __le16 size; // 0x00: In bytes 95 __le16 used; // 0x02: Entries 96 __le16 total; // 0x04: Entries 97 __le16 res[3]; // 0x06: 98 __le32 free_goal; // 0x0C: 99 __le32 first_free; // 0x10: 100 __le32 last_free; // 0x14: 101 102 }; 103 104 static_assert(sizeof(struct RESTART_TABLE) == 0x18); 105 106 struct ATTR_NAME_ENTRY { 107 __le16 off; // Offset in the Open attribute Table. 
108 __le16 name_bytes; 109 __le16 name[]; 110 }; 111 112 struct OPEN_ATTR_ENRTY { 113 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated 114 __le32 bytes_per_index; // 0x04: 115 enum ATTR_TYPE type; // 0x08: 116 u8 is_dirty_pages; // 0x0C: 117 u8 is_attr_name; // 0x0B: Faked field to manage 'ptr' 118 u8 name_len; // 0x0C: Faked field to manage 'ptr' 119 u8 res; 120 struct MFT_REF ref; // 0x10: File Reference of file containing attribute 121 __le64 open_record_lsn; // 0x18: 122 void *ptr; // 0x20: 123 }; 124 125 /* 32 bit version of 'struct OPEN_ATTR_ENRTY' */ 126 struct OPEN_ATTR_ENRTY_32 { 127 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated 128 __le32 ptr; // 0x04: 129 struct MFT_REF ref; // 0x08: 130 __le64 open_record_lsn; // 0x10: 131 u8 is_dirty_pages; // 0x18: 132 u8 is_attr_name; // 0x19: 133 u8 res1[2]; 134 enum ATTR_TYPE type; // 0x1C: 135 u8 name_len; // 0x20: In wchar 136 u8 res2[3]; 137 __le32 AttributeName; // 0x24: 138 __le32 bytes_per_index; // 0x28: 139 }; 140 141 #define SIZEOF_OPENATTRIBUTEENTRY0 0x2c 142 // static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) ); 143 static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0); 144 145 /* 146 * One entry exists in the Dirty Pages Table for each page which is dirty at 147 * the time the Restart Area is written. 148 */ 149 struct DIR_PAGE_ENTRY { 150 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated 151 __le32 target_attr; // 0x04: Index into the Open attribute Table 152 __le32 transfer_len; // 0x08: 153 __le32 lcns_follow; // 0x0C: 154 __le64 vcn; // 0x10: Vcn of dirty page 155 __le64 oldest_lsn; // 0x18: 156 __le64 page_lcns[]; // 0x20: 157 }; 158 159 static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20); 160 161 /* 32 bit version of 'struct DIR_PAGE_ENTRY' */ 162 struct DIR_PAGE_ENTRY_32 { 163 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated 164 __le32 target_attr; // 0x04: Index into the Open attribute Table 165 __le32 transfer_len; // 0x08: 166 __le32 lcns_follow; // 0x0C: 167 __le32 reserved; // 0x10: 168 __le32 vcn_low; // 0x14: Vcn of dirty page 169 __le32 vcn_hi; // 0x18: Vcn of dirty page 170 __le32 oldest_lsn_low; // 0x1C: 171 __le32 oldest_lsn_hi; // 0x1C: 172 __le32 page_lcns_low; // 0x24: 173 __le32 page_lcns_hi; // 0x24: 174 }; 175 176 static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14); 177 static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c); 178 179 enum transact_state { 180 TransactionUninitialized = 0, 181 TransactionActive, 182 TransactionPrepared, 183 TransactionCommitted 184 }; 185 186 struct TRANSACTION_ENTRY { 187 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated 188 u8 transact_state; // 0x04: 189 u8 reserved[3]; // 0x05: 190 __le64 first_lsn; // 0x08: 191 __le64 prev_lsn; // 0x10: 192 __le64 undo_next_lsn; // 0x18: 193 __le32 undo_records; // 0x20: Number of undo log records pending abort 194 __le32 undo_len; // 0x24: Total undo size 195 }; 196 197 static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28); 198 199 struct NTFS_RESTART { 200 __le32 major_ver; // 0x00: 201 __le32 minor_ver; // 0x04: 202 __le64 check_point_start; // 0x08: 203 __le64 open_attr_table_lsn; // 0x10: 204 __le64 attr_names_lsn; // 0x18: 205 __le64 dirty_pages_table_lsn; // 0x20: 206 __le64 transact_table_lsn; // 0x28: 207 __le32 open_attr_len; // 0x30: In bytes 208 __le32 attr_names_len; // 0x34: In bytes 209 __le32 dirty_pages_len; // 0x38: In bytes 210 __le32 transact_table_len; // 0x3C: In bytes 211 }; 212 213 static_assert(sizeof(struct 
NTFS_RESTART) == 0x40); 214 215 struct NEW_ATTRIBUTE_SIZES { 216 __le64 alloc_size; 217 __le64 valid_size; 218 __le64 data_size; 219 __le64 total_size; 220 }; 221 222 struct BITMAP_RANGE { 223 __le32 bitmap_off; 224 __le32 bits; 225 }; 226 227 struct LCN_RANGE { 228 __le64 lcn; 229 __le64 len; 230 }; 231 232 /* The following type defines the different log record types. */ 233 #define LfsClientRecord cpu_to_le32(1) 234 #define LfsClientRestart cpu_to_le32(2) 235 236 /* This is used to uniquely identify a client for a particular log file. */ 237 struct CLIENT_ID { 238 __le16 seq_num; 239 __le16 client_idx; 240 }; 241 242 /* This is the header that begins every Log Record in the log file. */ 243 struct LFS_RECORD_HDR { 244 __le64 this_lsn; // 0x00: 245 __le64 client_prev_lsn; // 0x08: 246 __le64 client_undo_next_lsn; // 0x10: 247 __le32 client_data_len; // 0x18: 248 struct CLIENT_ID client; // 0x1C: Owner of this log record. 249 __le32 record_type; // 0x20: LfsClientRecord or LfsClientRestart. 250 __le32 transact_id; // 0x24: 251 __le16 flags; // 0x28: LOG_RECORD_MULTI_PAGE 252 u8 align[6]; // 0x2A: 253 }; 254 255 #define LOG_RECORD_MULTI_PAGE cpu_to_le16(1) 256 257 static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30); 258 259 struct LFS_RECORD { 260 __le16 next_record_off; // 0x00: Offset of the free space in the page, 261 u8 align[6]; // 0x02: 262 __le64 last_end_lsn; // 0x08: lsn for the last log record which ends on the page, 263 }; 264 265 static_assert(sizeof(struct LFS_RECORD) == 0x10); 266 267 struct RECORD_PAGE_HDR { 268 struct NTFS_RECORD_HEADER rhdr; // 'RCRD' 269 __le32 rflags; // 0x10: See LOG_PAGE_LOG_RECORD_END 270 __le16 page_count; // 0x14: 271 __le16 page_pos; // 0x16: 272 struct LFS_RECORD record_hdr; // 0x18: 273 __le16 fixups[10]; // 0x28: 274 __le32 file_off; // 0x3c: Used when major version >= 2 275 }; 276 277 // clang-format on 278 279 // Page contains the end of a log record. 280 #define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001) 281 282 static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr) 283 { 284 return hdr->rflags & LOG_PAGE_LOG_RECORD_END; 285 } 286 287 static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c); 288 289 /* 290 * END of NTFS LOG structures 291 */ 292 293 /* Define some tuning parameters to keep the restart tables a reasonable size. 
*/ 294 #define INITIAL_NUMBER_TRANSACTIONS 5 295 296 enum NTFS_LOG_OPERATION { 297 298 Noop = 0x00, 299 CompensationLogRecord = 0x01, 300 InitializeFileRecordSegment = 0x02, 301 DeallocateFileRecordSegment = 0x03, 302 WriteEndOfFileRecordSegment = 0x04, 303 CreateAttribute = 0x05, 304 DeleteAttribute = 0x06, 305 UpdateResidentValue = 0x07, 306 UpdateNonresidentValue = 0x08, 307 UpdateMappingPairs = 0x09, 308 DeleteDirtyClusters = 0x0A, 309 SetNewAttributeSizes = 0x0B, 310 AddIndexEntryRoot = 0x0C, 311 DeleteIndexEntryRoot = 0x0D, 312 AddIndexEntryAllocation = 0x0E, 313 DeleteIndexEntryAllocation = 0x0F, 314 WriteEndOfIndexBuffer = 0x10, 315 SetIndexEntryVcnRoot = 0x11, 316 SetIndexEntryVcnAllocation = 0x12, 317 UpdateFileNameRoot = 0x13, 318 UpdateFileNameAllocation = 0x14, 319 SetBitsInNonresidentBitMap = 0x15, 320 ClearBitsInNonresidentBitMap = 0x16, 321 HotFix = 0x17, 322 EndTopLevelAction = 0x18, 323 PrepareTransaction = 0x19, 324 CommitTransaction = 0x1A, 325 ForgetTransaction = 0x1B, 326 OpenNonresidentAttribute = 0x1C, 327 OpenAttributeTableDump = 0x1D, 328 AttributeNamesDump = 0x1E, 329 DirtyPageTableDump = 0x1F, 330 TransactionTableDump = 0x20, 331 UpdateRecordDataRoot = 0x21, 332 UpdateRecordDataAllocation = 0x22, 333 334 UpdateRelativeDataInIndex = 335 0x23, // NtOfsRestartUpdateRelativeDataInIndex 336 UpdateRelativeDataInIndex2 = 0x24, 337 ZeroEndOfFileRecord = 0x25, 338 }; 339 340 /* 341 * Array for log records which require a target attribute. 342 * A true indicates that the corresponding restart operation 343 * requires a target attribute. 344 */ 345 static const u8 AttributeRequired[] = { 346 0xFC, 0xFB, 0xFF, 0x10, 0x06, 347 }; 348 349 static inline bool is_target_required(u16 op) 350 { 351 bool ret = op <= UpdateRecordDataAllocation && 352 (AttributeRequired[op >> 3] >> (op & 7) & 1); 353 return ret; 354 } 355 356 static inline bool can_skip_action(enum NTFS_LOG_OPERATION op) 357 { 358 switch (op) { 359 case Noop: 360 case DeleteDirtyClusters: 361 case HotFix: 362 case EndTopLevelAction: 363 case PrepareTransaction: 364 case CommitTransaction: 365 case ForgetTransaction: 366 case CompensationLogRecord: 367 case OpenNonresidentAttribute: 368 case OpenAttributeTableDump: 369 case AttributeNamesDump: 370 case DirtyPageTableDump: 371 case TransactionTableDump: 372 return true; 373 default: 374 return false; 375 } 376 } 377 378 enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next }; 379 380 /* Bytes per restart table. */ 381 static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt) 382 { 383 return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) + 384 sizeof(struct RESTART_TABLE); 385 } 386 387 /* Log record length. */ 388 static inline u32 lrh_length(const struct LOG_REC_HDR *lr) 389 { 390 u16 t16 = le16_to_cpu(lr->lcns_follow); 391 392 return struct_size(lr, page_lcns, max_t(u16, 1, t16)); 393 } 394 395 struct lcb { 396 struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn. 397 struct LOG_REC_HDR *log_rec; 398 u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next 399 struct CLIENT_ID client; 400 bool alloc; // If true the we should deallocate 'log_rec'. 401 }; 402 403 static void lcb_put(struct lcb *lcb) 404 { 405 if (lcb->alloc) 406 kfree(lcb->log_rec); 407 kfree(lcb->lrh); 408 kfree(lcb); 409 } 410 411 /* Find the oldest lsn from active clients. 
*/ 412 static inline void oldest_client_lsn(const struct CLIENT_REC *ca, 413 __le16 next_client, u64 *oldest_lsn) 414 { 415 while (next_client != LFS_NO_CLIENT_LE) { 416 const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client); 417 u64 lsn = le64_to_cpu(cr->oldest_lsn); 418 419 /* Ignore this block if it's oldest lsn is 0. */ 420 if (lsn && lsn < *oldest_lsn) 421 *oldest_lsn = lsn; 422 423 next_client = cr->next_client; 424 } 425 } 426 427 static inline bool is_rst_page_hdr_valid(u32 file_off, 428 const struct RESTART_HDR *rhdr) 429 { 430 u32 sys_page = le32_to_cpu(rhdr->sys_page_size); 431 u32 page_size = le32_to_cpu(rhdr->page_size); 432 u32 end_usa; 433 u16 ro; 434 435 if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE || 436 sys_page & (sys_page - 1) || page_size & (page_size - 1)) { 437 return false; 438 } 439 440 /* Check that if the file offset isn't 0, it is the system page size. */ 441 if (file_off && file_off != sys_page) 442 return false; 443 444 /* Check support version 1.1+. */ 445 if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver) 446 return false; 447 448 if (le16_to_cpu(rhdr->major_ver) > 2) 449 return false; 450 451 ro = le16_to_cpu(rhdr->ra_off); 452 if (!IS_ALIGNED(ro, 8) || ro > sys_page) 453 return false; 454 455 end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short); 456 end_usa += le16_to_cpu(rhdr->rhdr.fix_off); 457 458 if (ro < end_usa) 459 return false; 460 461 return true; 462 } 463 464 static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr) 465 { 466 const struct RESTART_AREA *ra; 467 u16 cl, fl, ul; 468 u32 off, l_size, file_dat_bits, file_size_round; 469 u16 ro = le16_to_cpu(rhdr->ra_off); 470 u32 sys_page = le32_to_cpu(rhdr->sys_page_size); 471 472 if (ro + offsetof(struct RESTART_AREA, l_size) > 473 SECTOR_SIZE - sizeof(short)) 474 return false; 475 476 ra = Add2Ptr(rhdr, ro); 477 cl = le16_to_cpu(ra->log_clients); 478 479 if (cl > 1) 480 return false; 481 482 off = le16_to_cpu(ra->client_off); 483 484 if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short)) 485 return false; 486 487 off += cl * sizeof(struct CLIENT_REC); 488 489 if (off > sys_page) 490 return false; 491 492 /* 493 * Check the restart length field and whether the entire 494 * restart area is contained that length. 495 */ 496 if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page || 497 off > le16_to_cpu(ra->ra_len)) { 498 return false; 499 } 500 501 /* 502 * As a final check make sure that the use list and the free list 503 * are either empty or point to a valid client. 504 */ 505 fl = le16_to_cpu(ra->client_idx[0]); 506 ul = le16_to_cpu(ra->client_idx[1]); 507 if ((fl != LFS_NO_CLIENT && fl >= cl) || 508 (ul != LFS_NO_CLIENT && ul >= cl)) 509 return false; 510 511 /* Make sure the sequence number bits match the log file size. */ 512 l_size = le64_to_cpu(ra->l_size); 513 514 file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits); 515 file_size_round = 1u << (file_dat_bits + 3); 516 if (file_size_round != l_size && 517 (file_size_round < l_size || (file_size_round / 2) > l_size)) { 518 return false; 519 } 520 521 /* The log page data offset and record header length must be quad-aligned. 
*/ 522 if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) || 523 !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8)) 524 return false; 525 526 return true; 527 } 528 529 static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr, 530 bool usa_error) 531 { 532 u16 ro = le16_to_cpu(rhdr->ra_off); 533 const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro); 534 u16 ra_len = le16_to_cpu(ra->ra_len); 535 const struct CLIENT_REC *ca; 536 u32 i; 537 538 if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short)) 539 return false; 540 541 /* Find the start of the client array. */ 542 ca = Add2Ptr(ra, le16_to_cpu(ra->client_off)); 543 544 /* 545 * Start with the free list. 546 * Check that all the clients are valid and that there isn't a cycle. 547 * Do the in-use list on the second pass. 548 */ 549 for (i = 0; i < 2; i++) { 550 u16 client_idx = le16_to_cpu(ra->client_idx[i]); 551 bool first_client = true; 552 u16 clients = le16_to_cpu(ra->log_clients); 553 554 while (client_idx != LFS_NO_CLIENT) { 555 const struct CLIENT_REC *cr; 556 557 if (!clients || 558 client_idx >= le16_to_cpu(ra->log_clients)) 559 return false; 560 561 clients -= 1; 562 cr = ca + client_idx; 563 564 client_idx = le16_to_cpu(cr->next_client); 565 566 if (first_client) { 567 first_client = false; 568 if (cr->prev_client != LFS_NO_CLIENT_LE) 569 return false; 570 } 571 } 572 } 573 574 return true; 575 } 576 577 /* 578 * remove_client 579 * 580 * Remove a client record from a client record list an restart area. 581 */ 582 static inline void remove_client(struct CLIENT_REC *ca, 583 const struct CLIENT_REC *cr, __le16 *head) 584 { 585 if (cr->prev_client == LFS_NO_CLIENT_LE) 586 *head = cr->next_client; 587 else 588 ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client; 589 590 if (cr->next_client != LFS_NO_CLIENT_LE) 591 ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client; 592 } 593 594 /* 595 * add_client - Add a client record to the start of a list. 596 */ 597 static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head) 598 { 599 struct CLIENT_REC *cr = ca + index; 600 601 cr->prev_client = LFS_NO_CLIENT_LE; 602 cr->next_client = *head; 603 604 if (*head != LFS_NO_CLIENT_LE) 605 ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index); 606 607 *head = cpu_to_le16(index); 608 } 609 610 static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c) 611 { 612 __le32 *e; 613 u32 bprt; 614 u16 rsize = t ? le16_to_cpu(t->size) : 0; 615 616 if (!c) { 617 if (!t || !t->total) 618 return NULL; 619 e = Add2Ptr(t, sizeof(struct RESTART_TABLE)); 620 } else { 621 e = Add2Ptr(c, rsize); 622 } 623 624 /* Loop until we hit the first one allocated, or the end of the list. */ 625 for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt; 626 e = Add2Ptr(e, rsize)) { 627 if (*e == RESTART_ENTRY_ALLOCATED_LE) 628 return e; 629 } 630 return NULL; 631 } 632 633 /* 634 * find_dp - Search for a @vcn in Dirty Page Table. 
635 */ 636 static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl, 637 u32 target_attr, u64 vcn) 638 { 639 __le32 ta = cpu_to_le32(target_attr); 640 struct DIR_PAGE_ENTRY *dp = NULL; 641 642 while ((dp = enum_rstbl(dptbl, dp))) { 643 u64 dp_vcn = le64_to_cpu(dp->vcn); 644 645 if (dp->target_attr == ta && vcn >= dp_vcn && 646 vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) { 647 return dp; 648 } 649 } 650 return NULL; 651 } 652 653 static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default) 654 { 655 if (use_default) 656 page_size = DefaultLogPageSize; 657 658 /* Round the file size down to a system page boundary. */ 659 *l_size &= ~(page_size - 1); 660 661 /* File should contain at least 2 restart pages and MinLogRecordPages pages. */ 662 if (*l_size < (MinLogRecordPages + 2) * page_size) 663 return 0; 664 665 return page_size; 666 } 667 668 static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr, 669 u32 bytes_per_attr_entry) 670 { 671 u16 t16; 672 673 if (bytes < sizeof(struct LOG_REC_HDR)) 674 return false; 675 if (!tr) 676 return false; 677 678 if ((tr - sizeof(struct RESTART_TABLE)) % 679 sizeof(struct TRANSACTION_ENTRY)) 680 return false; 681 682 if (le16_to_cpu(lr->redo_off) & 7) 683 return false; 684 685 if (le16_to_cpu(lr->undo_off) & 7) 686 return false; 687 688 if (lr->target_attr) 689 goto check_lcns; 690 691 if (is_target_required(le16_to_cpu(lr->redo_op))) 692 return false; 693 694 if (is_target_required(le16_to_cpu(lr->undo_op))) 695 return false; 696 697 check_lcns: 698 if (!lr->lcns_follow) 699 goto check_length; 700 701 t16 = le16_to_cpu(lr->target_attr); 702 if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry) 703 return false; 704 705 check_length: 706 if (bytes < lrh_length(lr)) 707 return false; 708 709 return true; 710 } 711 712 static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes) 713 { 714 u32 ts; 715 u32 i, off; 716 u16 rsize = le16_to_cpu(rt->size); 717 u16 ne = le16_to_cpu(rt->used); 718 u32 ff = le32_to_cpu(rt->first_free); 719 u32 lf = le32_to_cpu(rt->last_free); 720 721 ts = rsize * ne + sizeof(struct RESTART_TABLE); 722 723 if (!rsize || rsize > bytes || 724 rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts || 725 le16_to_cpu(rt->total) > ne || ff > ts || lf > ts || 726 (ff && ff < sizeof(struct RESTART_TABLE)) || 727 (lf && lf < sizeof(struct RESTART_TABLE))) { 728 return false; 729 } 730 731 /* 732 * Verify each entry is either allocated or points 733 * to a valid offset the table. 734 */ 735 for (i = 0; i < ne; i++) { 736 off = le32_to_cpu(*(__le32 *)Add2Ptr( 737 rt, i * rsize + sizeof(struct RESTART_TABLE))); 738 739 if (off != RESTART_ENTRY_ALLOCATED && off && 740 (off < sizeof(struct RESTART_TABLE) || 741 ((off - sizeof(struct RESTART_TABLE)) % rsize))) { 742 return false; 743 } 744 } 745 746 /* 747 * Walk through the list headed by the first entry to make 748 * sure none of the entries are currently being used. 749 */ 750 for (off = ff; off;) { 751 if (off == RESTART_ENTRY_ALLOCATED) 752 return false; 753 754 off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off)); 755 } 756 757 return true; 758 } 759 760 /* 761 * free_rsttbl_idx - Free a previously allocated index a Restart Table. 
762 */ 763 static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off) 764 { 765 __le32 *e; 766 u32 lf = le32_to_cpu(rt->last_free); 767 __le32 off_le = cpu_to_le32(off); 768 769 e = Add2Ptr(rt, off); 770 771 if (off < le32_to_cpu(rt->free_goal)) { 772 *e = rt->first_free; 773 rt->first_free = off_le; 774 if (!lf) 775 rt->last_free = off_le; 776 } else { 777 if (lf) 778 *(__le32 *)Add2Ptr(rt, lf) = off_le; 779 else 780 rt->first_free = off_le; 781 782 rt->last_free = off_le; 783 *e = 0; 784 } 785 786 le16_sub_cpu(&rt->total, 1); 787 } 788 789 static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used) 790 { 791 __le32 *e, *last_free; 792 u32 off; 793 u32 bytes = esize * used + sizeof(struct RESTART_TABLE); 794 u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize; 795 struct RESTART_TABLE *t = kzalloc(bytes, GFP_NOFS); 796 797 if (!t) 798 return NULL; 799 800 t->size = cpu_to_le16(esize); 801 t->used = cpu_to_le16(used); 802 t->free_goal = cpu_to_le32(~0u); 803 t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE)); 804 t->last_free = cpu_to_le32(lf); 805 806 e = (__le32 *)(t + 1); 807 last_free = Add2Ptr(t, lf); 808 809 for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free; 810 e = Add2Ptr(e, esize), off += esize) { 811 *e = cpu_to_le32(off); 812 } 813 return t; 814 } 815 816 static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl, 817 u32 add, u32 free_goal) 818 { 819 u16 esize = le16_to_cpu(tbl->size); 820 __le32 osize = cpu_to_le32(bytes_per_rt(tbl)); 821 u32 used = le16_to_cpu(tbl->used); 822 struct RESTART_TABLE *rt; 823 824 rt = init_rsttbl(esize, used + add); 825 if (!rt) 826 return NULL; 827 828 memcpy(rt + 1, tbl + 1, esize * used); 829 830 rt->free_goal = free_goal == ~0u ? 831 cpu_to_le32(~0u) : 832 cpu_to_le32(sizeof(struct RESTART_TABLE) + 833 free_goal * esize); 834 835 if (tbl->first_free) { 836 rt->first_free = tbl->first_free; 837 *(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize; 838 } else { 839 rt->first_free = osize; 840 } 841 842 rt->total = tbl->total; 843 844 kfree(tbl); 845 return rt; 846 } 847 848 /* 849 * alloc_rsttbl_idx 850 * 851 * Allocate an index from within a previously initialized Restart Table. 852 */ 853 static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl) 854 { 855 u32 off; 856 __le32 *e; 857 struct RESTART_TABLE *t = *tbl; 858 859 if (!t->first_free) { 860 *tbl = t = extend_rsttbl(t, 16, ~0u); 861 if (!t) 862 return NULL; 863 } 864 865 off = le32_to_cpu(t->first_free); 866 867 /* Dequeue this entry and zero it. */ 868 e = Add2Ptr(t, off); 869 870 t->first_free = *e; 871 872 memset(e, 0, le16_to_cpu(t->size)); 873 874 *e = RESTART_ENTRY_ALLOCATED_LE; 875 876 /* If list is going empty, then we fix the last_free as well. */ 877 if (!t->first_free) 878 t->last_free = 0; 879 880 le16_add_cpu(&t->total, 1); 881 882 return Add2Ptr(t, off); 883 } 884 885 /* 886 * alloc_rsttbl_from_idx 887 * 888 * Allocate a specific index from within a previously initialized Restart Table. 889 */ 890 static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo) 891 { 892 u32 off; 893 __le32 *e; 894 struct RESTART_TABLE *rt = *tbl; 895 u32 bytes = bytes_per_rt(rt); 896 u16 esize = le16_to_cpu(rt->size); 897 898 /* If the entry is not the table, we will have to extend the table. */ 899 if (vbo >= bytes) { 900 /* 901 * Extend the size by computing the number of entries between 902 * the existing size and the desired index and adding 1 to that. 
903 */ 904 u32 bytes2idx = vbo - bytes; 905 906 /* 907 * There should always be an integral number of entries 908 * being added. Now extend the table. 909 */ 910 *tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes); 911 if (!rt) 912 return NULL; 913 } 914 915 /* See if the entry is already allocated, and just return if it is. */ 916 e = Add2Ptr(rt, vbo); 917 918 if (*e == RESTART_ENTRY_ALLOCATED_LE) 919 return e; 920 921 /* 922 * Walk through the table, looking for the entry we're 923 * interested and the previous entry. 924 */ 925 off = le32_to_cpu(rt->first_free); 926 e = Add2Ptr(rt, off); 927 928 if (off == vbo) { 929 /* this is a match */ 930 rt->first_free = *e; 931 goto skip_looking; 932 } 933 934 /* 935 * Need to walk through the list looking for the predecessor 936 * of our entry. 937 */ 938 for (;;) { 939 /* Remember the entry just found */ 940 u32 last_off = off; 941 __le32 *last_e = e; 942 943 /* Should never run of entries. */ 944 945 /* Lookup up the next entry the list. */ 946 off = le32_to_cpu(*last_e); 947 e = Add2Ptr(rt, off); 948 949 /* If this is our match we are done. */ 950 if (off == vbo) { 951 *last_e = *e; 952 953 /* 954 * If this was the last entry, we update that 955 * table as well. 956 */ 957 if (le32_to_cpu(rt->last_free) == off) 958 rt->last_free = cpu_to_le32(last_off); 959 break; 960 } 961 } 962 963 skip_looking: 964 /* If the list is now empty, we fix the last_free as well. */ 965 if (!rt->first_free) 966 rt->last_free = 0; 967 968 /* Zero this entry. */ 969 memset(e, 0, esize); 970 *e = RESTART_ENTRY_ALLOCATED_LE; 971 972 le16_add_cpu(&rt->total, 1); 973 974 return e; 975 } 976 977 struct restart_info { 978 u64 last_lsn; 979 struct RESTART_HDR *r_page; 980 u32 vbo; 981 bool chkdsk_was_run; 982 bool valid_page; 983 bool initialized; 984 bool restart; 985 }; 986 987 #define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001) 988 989 #define NTFSLOG_WRAPPED 0x00000001 990 #define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002 991 #define NTFSLOG_NO_LAST_LSN 0x00000004 992 #define NTFSLOG_REUSE_TAIL 0x00000010 993 #define NTFSLOG_NO_OLDEST_LSN 0x00000020 994 995 /* Helper struct to work with NTFS $LogFile. */ 996 struct ntfs_log { 997 struct ntfs_inode *ni; 998 999 u32 l_size; 1000 u32 orig_file_size; 1001 u32 sys_page_size; 1002 u32 sys_page_mask; 1003 u32 page_size; 1004 u32 page_mask; // page_size - 1 1005 u8 page_bits; 1006 struct RECORD_PAGE_HDR *one_page_buf; 1007 1008 struct RESTART_TABLE *open_attr_tbl; 1009 u32 transaction_id; 1010 u32 clst_per_page; 1011 1012 u32 first_page; 1013 u32 next_page; 1014 u32 ra_off; 1015 u32 data_off; 1016 u32 restart_size; 1017 u32 data_size; 1018 u16 record_header_len; 1019 u64 seq_num; 1020 u32 seq_num_bits; 1021 u32 file_data_bits; 1022 u32 seq_num_mask; /* (1 << file_data_bits) - 1 */ 1023 1024 struct RESTART_AREA *ra; /* In-memory image of the next restart area. */ 1025 u32 ra_size; /* The usable size of the restart area. */ 1026 1027 /* 1028 * If true, then the in-memory restart area is to be written 1029 * to the first position on the disk. 1030 */ 1031 bool init_ra; 1032 bool set_dirty; /* True if we need to set dirty flag. */ 1033 1034 u64 oldest_lsn; 1035 1036 u32 oldest_lsn_off; 1037 u64 last_lsn; 1038 1039 u32 total_avail; 1040 u32 total_avail_pages; 1041 u32 total_undo_commit; 1042 u32 max_current_avail; 1043 u32 current_avail; 1044 u32 reserved; 1045 1046 short major_ver; 1047 short minor_ver; 1048 1049 u32 l_flags; /* See NTFSLOG_XXX */ 1050 u32 current_openlog_count; /* On-disk value for open_log_count. 
*/ 1051 1052 struct CLIENT_ID client_id; 1053 u32 client_undo_commit; 1054 1055 struct restart_info rst_info, rst_info2; 1056 }; 1057 1058 static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn) 1059 { 1060 u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3); 1061 1062 return vbo; 1063 } 1064 1065 /* Compute the offset in the log file of the next log page. */ 1066 static inline u32 next_page_off(struct ntfs_log *log, u32 off) 1067 { 1068 off = (off & ~log->sys_page_mask) + log->page_size; 1069 return off >= log->l_size ? log->first_page : off; 1070 } 1071 1072 static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn) 1073 { 1074 return (((u32)lsn) << 3) & log->page_mask; 1075 } 1076 1077 static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq) 1078 { 1079 return (off >> 3) + (Seq << log->file_data_bits); 1080 } 1081 1082 static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn) 1083 { 1084 return lsn >= log->oldest_lsn && 1085 lsn <= le64_to_cpu(log->ra->current_lsn); 1086 } 1087 1088 static inline u32 hdr_file_off(struct ntfs_log *log, 1089 struct RECORD_PAGE_HDR *hdr) 1090 { 1091 if (log->major_ver < 2) 1092 return le64_to_cpu(hdr->rhdr.lsn); 1093 1094 return le32_to_cpu(hdr->file_off); 1095 } 1096 1097 static inline u64 base_lsn(struct ntfs_log *log, 1098 const struct RECORD_PAGE_HDR *hdr, u64 lsn) 1099 { 1100 u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn); 1101 u64 ret = (((h_lsn >> log->file_data_bits) + 1102 (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0)) 1103 << log->file_data_bits) + 1104 ((((is_log_record_end(hdr) && 1105 h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn)) ? 1106 le16_to_cpu(hdr->record_hdr.next_record_off) : 1107 log->page_size) + 1108 lsn) >> 1109 3); 1110 1111 return ret; 1112 } 1113 1114 static inline bool verify_client_lsn(struct ntfs_log *log, 1115 const struct CLIENT_REC *client, u64 lsn) 1116 { 1117 return lsn >= le64_to_cpu(client->oldest_lsn) && 1118 lsn <= le64_to_cpu(log->ra->current_lsn) && lsn; 1119 } 1120 1121 static int read_log_page(struct ntfs_log *log, u32 vbo, 1122 struct RECORD_PAGE_HDR **buffer, bool *usa_error) 1123 { 1124 int err = 0; 1125 u32 page_idx = vbo >> log->page_bits; 1126 u32 page_off = vbo & log->page_mask; 1127 u32 bytes = log->page_size - page_off; 1128 void *to_free = NULL; 1129 u32 page_vbo = page_idx << log->page_bits; 1130 struct RECORD_PAGE_HDR *page_buf; 1131 struct ntfs_inode *ni = log->ni; 1132 bool bBAAD; 1133 1134 if (vbo >= log->l_size) 1135 return -EINVAL; 1136 1137 if (!*buffer) { 1138 to_free = kmalloc(log->page_size, GFP_NOFS); 1139 if (!to_free) 1140 return -ENOMEM; 1141 *buffer = to_free; 1142 } 1143 1144 page_buf = page_off ? 
log->one_page_buf : *buffer; 1145 1146 err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf, 1147 log->page_size, NULL); 1148 if (err) 1149 goto out; 1150 1151 if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE) 1152 ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false); 1153 1154 if (page_buf != *buffer) 1155 memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes); 1156 1157 bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE; 1158 1159 if (usa_error) 1160 *usa_error = bBAAD; 1161 /* Check that the update sequence array for this page is valid */ 1162 /* If we don't allow errors, raise an error status */ 1163 else if (bBAAD) 1164 err = -EINVAL; 1165 1166 out: 1167 if (err && to_free) { 1168 kfree(to_free); 1169 *buffer = NULL; 1170 } 1171 1172 return err; 1173 } 1174 1175 /* 1176 * log_read_rst 1177 * 1178 * It walks through 512 blocks of the file looking for a valid 1179 * restart page header. It will stop the first time we find a 1180 * valid page header. 1181 */ 1182 static int log_read_rst(struct ntfs_log *log, bool first, 1183 struct restart_info *info) 1184 { 1185 u32 skip, vbo; 1186 struct RESTART_HDR *r_page = NULL; 1187 1188 /* Determine which restart area we are looking for. */ 1189 if (first) { 1190 vbo = 0; 1191 skip = 512; 1192 } else { 1193 vbo = 512; 1194 skip = 0; 1195 } 1196 1197 /* Loop continuously until we succeed. */ 1198 for (; vbo < log->l_size; vbo = 2 * vbo + skip, skip = 0) { 1199 bool usa_error; 1200 bool brst, bchk; 1201 struct RESTART_AREA *ra; 1202 1203 /* Read a page header at the current offset. */ 1204 if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page, 1205 &usa_error)) { 1206 /* Ignore any errors. */ 1207 continue; 1208 } 1209 1210 /* Exit if the signature is a log record page. */ 1211 if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) { 1212 info->initialized = true; 1213 break; 1214 } 1215 1216 brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE; 1217 bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE; 1218 1219 if (!bchk && !brst) { 1220 if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) { 1221 /* 1222 * Remember if the signature does not 1223 * indicate uninitialized file. 1224 */ 1225 info->initialized = true; 1226 } 1227 continue; 1228 } 1229 1230 ra = NULL; 1231 info->valid_page = false; 1232 info->initialized = true; 1233 info->vbo = vbo; 1234 1235 /* Let's check the restart area if this is a valid page. */ 1236 if (!is_rst_page_hdr_valid(vbo, r_page)) 1237 goto check_result; 1238 ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off)); 1239 1240 if (!is_rst_area_valid(r_page)) 1241 goto check_result; 1242 1243 /* 1244 * We have a valid restart page header and restart area. 1245 * If chkdsk was run or we have no clients then we have 1246 * no more checking to do. 1247 */ 1248 if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) { 1249 info->valid_page = true; 1250 goto check_result; 1251 } 1252 1253 if (is_client_area_valid(r_page, usa_error)) { 1254 info->valid_page = true; 1255 ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off)); 1256 } 1257 1258 check_result: 1259 /* 1260 * If chkdsk was run then update the caller's 1261 * values and return. 1262 */ 1263 if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) { 1264 info->chkdsk_was_run = true; 1265 info->last_lsn = le64_to_cpu(r_page->rhdr.lsn); 1266 info->restart = true; 1267 info->r_page = r_page; 1268 return 0; 1269 } 1270 1271 /* 1272 * If we have a valid page then copy the values 1273 * we need from it. 
1274 */ 1275 if (info->valid_page) { 1276 info->last_lsn = le64_to_cpu(ra->current_lsn); 1277 info->restart = true; 1278 info->r_page = r_page; 1279 return 0; 1280 } 1281 } 1282 1283 kfree(r_page); 1284 1285 return 0; 1286 } 1287 1288 /* 1289 * Ilog_init_pg_hdr - Init @log from restart page header. 1290 */ 1291 static void log_init_pg_hdr(struct ntfs_log *log, u16 major_ver, u16 minor_ver) 1292 { 1293 log->sys_page_size = log->page_size; 1294 log->sys_page_mask = log->page_mask; 1295 1296 log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits; 1297 if (!log->clst_per_page) 1298 log->clst_per_page = 1; 1299 1300 log->first_page = major_ver >= 2 ? 0x22 * log->page_size : 1301 4 * log->page_size; 1302 log->major_ver = major_ver; 1303 log->minor_ver = minor_ver; 1304 } 1305 1306 /* 1307 * log_create - Init @log in cases when we don't have a restart area to use. 1308 */ 1309 static void log_create(struct ntfs_log *log, const u64 last_lsn, 1310 u32 open_log_count, bool wrapped, bool use_multi_page) 1311 { 1312 /* All file offsets must be quadword aligned. */ 1313 log->file_data_bits = blksize_bits(log->l_size) - 3; 1314 log->seq_num_mask = (8 << log->file_data_bits) - 1; 1315 log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits; 1316 log->seq_num = (last_lsn >> log->file_data_bits) + 2; 1317 log->next_page = log->first_page; 1318 log->oldest_lsn = log->seq_num << log->file_data_bits; 1319 log->oldest_lsn_off = 0; 1320 log->last_lsn = log->oldest_lsn; 1321 1322 log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN; 1323 1324 /* Set the correct flags for the I/O and indicate if we have wrapped. */ 1325 if (wrapped) 1326 log->l_flags |= NTFSLOG_WRAPPED; 1327 1328 if (use_multi_page) 1329 log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO; 1330 1331 /* Compute the log page values. */ 1332 log->data_off = ALIGN( 1333 offsetof(struct RECORD_PAGE_HDR, fixups) + 1334 sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1), 1335 8); 1336 log->data_size = log->page_size - log->data_off; 1337 log->record_header_len = sizeof(struct LFS_RECORD_HDR); 1338 1339 /* Remember the different page sizes for reservation. */ 1340 log->reserved = log->data_size - log->record_header_len; 1341 1342 /* Compute the restart page values. */ 1343 log->ra_off = ALIGN( 1344 offsetof(struct RESTART_HDR, fixups) + 1345 sizeof(short) * 1346 ((log->sys_page_size >> SECTOR_SHIFT) + 1), 1347 8); 1348 log->restart_size = log->sys_page_size - log->ra_off; 1349 log->ra_size = struct_size(log->ra, clients, 1); 1350 log->current_openlog_count = open_log_count; 1351 1352 /* 1353 * The total available log file space is the number of 1354 * log file pages times the space available on each page. 1355 */ 1356 log->total_avail_pages = log->l_size - log->first_page; 1357 log->total_avail = log->total_avail_pages >> log->page_bits; 1358 1359 /* 1360 * We assume that we can't use the end of the page less than 1361 * the file record size. 1362 * Then we won't need to reserve more than the caller asks for. 1363 */ 1364 log->max_current_avail = log->total_avail * log->reserved; 1365 log->total_avail = log->total_avail * log->data_size; 1366 log->current_avail = log->max_current_avail; 1367 } 1368 1369 /* 1370 * log_create_ra - Fill a restart area from the values stored in @log. 
1371 */ 1372 static struct RESTART_AREA *log_create_ra(struct ntfs_log *log) 1373 { 1374 struct CLIENT_REC *cr; 1375 struct RESTART_AREA *ra = kzalloc(log->restart_size, GFP_NOFS); 1376 1377 if (!ra) 1378 return NULL; 1379 1380 ra->current_lsn = cpu_to_le64(log->last_lsn); 1381 ra->log_clients = cpu_to_le16(1); 1382 ra->client_idx[1] = LFS_NO_CLIENT_LE; 1383 if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO) 1384 ra->flags = RESTART_SINGLE_PAGE_IO; 1385 ra->seq_num_bits = cpu_to_le32(log->seq_num_bits); 1386 ra->ra_len = cpu_to_le16(log->ra_size); 1387 ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients)); 1388 ra->l_size = cpu_to_le64(log->l_size); 1389 ra->rec_hdr_len = cpu_to_le16(log->record_header_len); 1390 ra->data_off = cpu_to_le16(log->data_off); 1391 ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1); 1392 1393 cr = ra->clients; 1394 1395 cr->prev_client = LFS_NO_CLIENT_LE; 1396 cr->next_client = LFS_NO_CLIENT_LE; 1397 1398 return ra; 1399 } 1400 1401 static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len) 1402 { 1403 u32 base_vbo = lsn << 3; 1404 u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask; 1405 u32 page_off = base_vbo & log->page_mask; 1406 u32 tail = log->page_size - page_off; 1407 1408 page_off -= 1; 1409 1410 /* Add the length of the header. */ 1411 data_len += log->record_header_len; 1412 1413 /* 1414 * If this lsn is contained this log page we are done. 1415 * Otherwise we need to walk through several log pages. 1416 */ 1417 if (data_len > tail) { 1418 data_len -= tail; 1419 tail = log->data_size; 1420 page_off = log->data_off - 1; 1421 1422 for (;;) { 1423 final_log_off = next_page_off(log, final_log_off); 1424 1425 /* 1426 * We are done if the remaining bytes 1427 * fit on this page. 1428 */ 1429 if (data_len <= tail) 1430 break; 1431 data_len -= tail; 1432 } 1433 } 1434 1435 /* 1436 * We add the remaining bytes to our starting position on this page 1437 * and then add that value to the file offset of this log page. 1438 */ 1439 return final_log_off + data_len + page_off; 1440 } 1441 1442 static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh, 1443 u64 *lsn) 1444 { 1445 int err; 1446 u64 this_lsn = le64_to_cpu(rh->this_lsn); 1447 u32 vbo = lsn_to_vbo(log, this_lsn); 1448 u32 end = 1449 final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len)); 1450 u32 hdr_off = end & ~log->sys_page_mask; 1451 u64 seq = this_lsn >> log->file_data_bits; 1452 struct RECORD_PAGE_HDR *page = NULL; 1453 1454 /* Remember if we wrapped. */ 1455 if (end <= vbo) 1456 seq += 1; 1457 1458 /* Log page header for this page. */ 1459 err = read_log_page(log, hdr_off, &page, NULL); 1460 if (err) 1461 return err; 1462 1463 /* 1464 * If the lsn we were given was not the last lsn on this page, 1465 * then the starting offset for the next lsn is on a quad word 1466 * boundary following the last file offset for the current lsn. 1467 * Otherwise the file offset is the start of the data on the next page. 1468 */ 1469 if (this_lsn == le64_to_cpu(page->rhdr.lsn)) { 1470 /* If we wrapped, we need to increment the sequence number. */ 1471 hdr_off = next_page_off(log, hdr_off); 1472 if (hdr_off == log->first_page) 1473 seq += 1; 1474 1475 vbo = hdr_off + log->data_off; 1476 } else { 1477 vbo = ALIGN(end, 8); 1478 } 1479 1480 /* Compute the lsn based on the file offset and the sequence count. */ 1481 *lsn = vbo_to_lsn(log, vbo, seq); 1482 1483 /* 1484 * If this lsn is within the legal range for the file, we return true. 
1485 * Otherwise false indicates that there are no more lsn's. 1486 */ 1487 if (!is_lsn_in_file(log, *lsn)) 1488 *lsn = 0; 1489 1490 kfree(page); 1491 1492 return 0; 1493 } 1494 1495 /* 1496 * current_log_avail - Calculate the number of bytes available for log records. 1497 */ 1498 static u32 current_log_avail(struct ntfs_log *log) 1499 { 1500 u32 oldest_off, next_free_off, free_bytes; 1501 1502 if (log->l_flags & NTFSLOG_NO_LAST_LSN) { 1503 /* The entire file is available. */ 1504 return log->max_current_avail; 1505 } 1506 1507 /* 1508 * If there is a last lsn the restart area then we know that we will 1509 * have to compute the free range. 1510 * If there is no oldest lsn then start at the first page of the file. 1511 */ 1512 oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN) ? 1513 log->first_page : 1514 (log->oldest_lsn_off & ~log->sys_page_mask); 1515 1516 /* 1517 * We will use the next log page offset to compute the next free page. 1518 * If we are going to reuse this page go to the next page. 1519 * If we are at the first page then use the end of the file. 1520 */ 1521 next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL) ? 1522 log->next_page + log->page_size : 1523 log->next_page == log->first_page ? log->l_size : 1524 log->next_page; 1525 1526 /* If the two offsets are the same then there is no available space. */ 1527 if (oldest_off == next_free_off) 1528 return 0; 1529 /* 1530 * If the free offset follows the oldest offset then subtract 1531 * this range from the total available pages. 1532 */ 1533 free_bytes = 1534 oldest_off < next_free_off ? 1535 log->total_avail_pages - (next_free_off - oldest_off) : 1536 oldest_off - next_free_off; 1537 1538 free_bytes >>= log->page_bits; 1539 return free_bytes * log->reserved; 1540 } 1541 1542 static bool check_subseq_log_page(struct ntfs_log *log, 1543 const struct RECORD_PAGE_HDR *rp, u32 vbo, 1544 u64 seq) 1545 { 1546 u64 lsn_seq; 1547 const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr; 1548 u64 lsn = le64_to_cpu(rhdr->lsn); 1549 1550 if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign) 1551 return false; 1552 1553 /* 1554 * If the last lsn on the page occurs was written after the page 1555 * that caused the original error then we have a fatal error. 1556 */ 1557 lsn_seq = lsn >> log->file_data_bits; 1558 1559 /* 1560 * If the sequence number for the lsn the page is equal or greater 1561 * than lsn we expect, then this is a subsequent write. 1562 */ 1563 return lsn_seq >= seq || 1564 (lsn_seq == seq - 1 && log->first_page == vbo && 1565 vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask)); 1566 } 1567 1568 /* 1569 * last_log_lsn 1570 * 1571 * Walks through the log pages for a file, searching for the 1572 * last log page written to the file. 1573 */ 1574 static int last_log_lsn(struct ntfs_log *log) 1575 { 1576 int err; 1577 bool usa_error = false; 1578 bool replace_page = false; 1579 bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL; 1580 bool wrapped_file, wrapped; 1581 1582 u32 page_cnt = 1, page_pos = 1; 1583 u32 page_off = 0, page_off1 = 0, saved_off = 0; 1584 u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0; 1585 u32 first_file_off = 0, second_file_off = 0; 1586 u32 part_io_count = 0; 1587 u32 tails = 0; 1588 u32 this_off, curpage_off, nextpage_off, remain_pages; 1589 1590 u64 expected_seq, seq_base = 0, lsn_base = 0; 1591 u64 best_lsn, best_lsn1, best_lsn2; 1592 u64 lsn_cur, lsn1, lsn2; 1593 u64 last_ok_lsn = reuse_page ? 
log->last_lsn : 0; 1594 1595 u16 cur_pos, best_page_pos; 1596 1597 struct RECORD_PAGE_HDR *page = NULL; 1598 struct RECORD_PAGE_HDR *tst_page = NULL; 1599 struct RECORD_PAGE_HDR *first_tail = NULL; 1600 struct RECORD_PAGE_HDR *second_tail = NULL; 1601 struct RECORD_PAGE_HDR *tail_page = NULL; 1602 struct RECORD_PAGE_HDR *second_tail_prev = NULL; 1603 struct RECORD_PAGE_HDR *first_tail_prev = NULL; 1604 struct RECORD_PAGE_HDR *page_bufs = NULL; 1605 struct RECORD_PAGE_HDR *best_page; 1606 1607 if (log->major_ver >= 2) { 1608 final_off = 0x02 * log->page_size; 1609 second_off = 0x12 * log->page_size; 1610 1611 // 0x10 == 0x12 - 0x2 1612 page_bufs = kmalloc(log->page_size * 0x10, GFP_NOFS); 1613 if (!page_bufs) 1614 return -ENOMEM; 1615 } else { 1616 second_off = log->first_page - log->page_size; 1617 final_off = second_off - log->page_size; 1618 } 1619 1620 next_tail: 1621 /* Read second tail page (at pos 3/0x12000). */ 1622 if (read_log_page(log, second_off, &second_tail, &usa_error) || 1623 usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) { 1624 kfree(second_tail); 1625 second_tail = NULL; 1626 second_file_off = 0; 1627 lsn2 = 0; 1628 } else { 1629 second_file_off = hdr_file_off(log, second_tail); 1630 lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn); 1631 } 1632 1633 /* Read first tail page (at pos 2/0x2000). */ 1634 if (read_log_page(log, final_off, &first_tail, &usa_error) || 1635 usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) { 1636 kfree(first_tail); 1637 first_tail = NULL; 1638 first_file_off = 0; 1639 lsn1 = 0; 1640 } else { 1641 first_file_off = hdr_file_off(log, first_tail); 1642 lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn); 1643 } 1644 1645 if (log->major_ver < 2) { 1646 int best_page; 1647 1648 first_tail_prev = first_tail; 1649 final_off_prev = first_file_off; 1650 second_tail_prev = second_tail; 1651 second_off_prev = second_file_off; 1652 tails = 1; 1653 1654 if (!first_tail && !second_tail) 1655 goto tail_read; 1656 1657 if (first_tail && second_tail) 1658 best_page = lsn1 < lsn2 ? 1 : 0; 1659 else if (first_tail) 1660 best_page = 0; 1661 else 1662 best_page = 1; 1663 1664 page_off = best_page ? second_file_off : first_file_off; 1665 seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits; 1666 goto tail_read; 1667 } 1668 1669 best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0; 1670 best_lsn2 = second_tail ? 
base_lsn(log, second_tail, second_file_off) : 1671 0; 1672 1673 if (first_tail && second_tail) { 1674 if (best_lsn1 > best_lsn2) { 1675 best_lsn = best_lsn1; 1676 best_page = first_tail; 1677 this_off = first_file_off; 1678 } else { 1679 best_lsn = best_lsn2; 1680 best_page = second_tail; 1681 this_off = second_file_off; 1682 } 1683 } else if (first_tail) { 1684 best_lsn = best_lsn1; 1685 best_page = first_tail; 1686 this_off = first_file_off; 1687 } else if (second_tail) { 1688 best_lsn = best_lsn2; 1689 best_page = second_tail; 1690 this_off = second_file_off; 1691 } else { 1692 goto tail_read; 1693 } 1694 1695 best_page_pos = le16_to_cpu(best_page->page_pos); 1696 1697 if (!tails) { 1698 if (best_page_pos == page_pos) { 1699 seq_base = best_lsn >> log->file_data_bits; 1700 saved_off = page_off = le32_to_cpu(best_page->file_off); 1701 lsn_base = best_lsn; 1702 1703 memmove(page_bufs, best_page, log->page_size); 1704 1705 page_cnt = le16_to_cpu(best_page->page_count); 1706 if (page_cnt > 1) 1707 page_pos += 1; 1708 1709 tails = 1; 1710 } 1711 } else if (seq_base == (best_lsn >> log->file_data_bits) && 1712 saved_off + log->page_size == this_off && 1713 lsn_base < best_lsn && 1714 (page_pos != page_cnt || best_page_pos == page_pos || 1715 best_page_pos == 1) && 1716 (page_pos >= page_cnt || best_page_pos == page_pos)) { 1717 u16 bppc = le16_to_cpu(best_page->page_count); 1718 1719 saved_off += log->page_size; 1720 lsn_base = best_lsn; 1721 1722 memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page, 1723 log->page_size); 1724 1725 tails += 1; 1726 1727 if (best_page_pos != bppc) { 1728 page_cnt = bppc; 1729 page_pos = best_page_pos; 1730 1731 if (page_cnt > 1) 1732 page_pos += 1; 1733 } else { 1734 page_pos = page_cnt = 1; 1735 } 1736 } else { 1737 kfree(first_tail); 1738 kfree(second_tail); 1739 goto tail_read; 1740 } 1741 1742 kfree(first_tail_prev); 1743 first_tail_prev = first_tail; 1744 final_off_prev = first_file_off; 1745 first_tail = NULL; 1746 1747 kfree(second_tail_prev); 1748 second_tail_prev = second_tail; 1749 second_off_prev = second_file_off; 1750 second_tail = NULL; 1751 1752 final_off += log->page_size; 1753 second_off += log->page_size; 1754 1755 if (tails < 0x10) 1756 goto next_tail; 1757 tail_read: 1758 first_tail = first_tail_prev; 1759 final_off = final_off_prev; 1760 1761 second_tail = second_tail_prev; 1762 second_off = second_off_prev; 1763 1764 page_cnt = page_pos = 1; 1765 1766 curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off) : 1767 log->next_page; 1768 1769 wrapped_file = 1770 curpage_off == log->first_page && 1771 !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL)); 1772 1773 expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num; 1774 1775 nextpage_off = curpage_off; 1776 1777 next_page: 1778 tail_page = NULL; 1779 /* Read the next log page. */ 1780 err = read_log_page(log, curpage_off, &page, &usa_error); 1781 1782 /* Compute the next log page offset the file. 
*/ 1783 nextpage_off = next_page_off(log, curpage_off); 1784 wrapped = nextpage_off == log->first_page; 1785 1786 if (tails > 1) { 1787 struct RECORD_PAGE_HDR *cur_page = 1788 Add2Ptr(page_bufs, curpage_off - page_off); 1789 1790 if (curpage_off == saved_off) { 1791 tail_page = cur_page; 1792 goto use_tail_page; 1793 } 1794 1795 if (page_off > curpage_off || curpage_off >= saved_off) 1796 goto use_tail_page; 1797 1798 if (page_off1) 1799 goto use_cur_page; 1800 1801 if (!err && !usa_error && 1802 page->rhdr.sign == NTFS_RCRD_SIGNATURE && 1803 cur_page->rhdr.lsn == page->rhdr.lsn && 1804 cur_page->record_hdr.next_record_off == 1805 page->record_hdr.next_record_off && 1806 ((page_pos == page_cnt && 1807 le16_to_cpu(page->page_pos) == 1) || 1808 (page_pos != page_cnt && 1809 le16_to_cpu(page->page_pos) == page_pos + 1 && 1810 le16_to_cpu(page->page_count) == page_cnt))) { 1811 cur_page = NULL; 1812 goto use_tail_page; 1813 } 1814 1815 page_off1 = page_off; 1816 1817 use_cur_page: 1818 1819 lsn_cur = le64_to_cpu(cur_page->rhdr.lsn); 1820 1821 if (last_ok_lsn != 1822 le64_to_cpu(cur_page->record_hdr.last_end_lsn) && 1823 ((lsn_cur >> log->file_data_bits) + 1824 ((curpage_off < 1825 (lsn_to_vbo(log, lsn_cur) & ~log->page_mask)) ? 1826 1 : 1827 0)) != expected_seq) { 1828 goto check_tail; 1829 } 1830 1831 if (!is_log_record_end(cur_page)) { 1832 tail_page = NULL; 1833 last_ok_lsn = lsn_cur; 1834 goto next_page_1; 1835 } 1836 1837 log->seq_num = expected_seq; 1838 log->l_flags &= ~NTFSLOG_NO_LAST_LSN; 1839 log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn); 1840 log->ra->current_lsn = cur_page->record_hdr.last_end_lsn; 1841 1842 if (log->record_header_len <= 1843 log->page_size - 1844 le16_to_cpu(cur_page->record_hdr.next_record_off)) { 1845 log->l_flags |= NTFSLOG_REUSE_TAIL; 1846 log->next_page = curpage_off; 1847 } else { 1848 log->l_flags &= ~NTFSLOG_REUSE_TAIL; 1849 log->next_page = nextpage_off; 1850 } 1851 1852 if (wrapped_file) 1853 log->l_flags |= NTFSLOG_WRAPPED; 1854 1855 last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn); 1856 goto next_page_1; 1857 } 1858 1859 /* 1860 * If we are at the expected first page of a transfer check to see 1861 * if either tail copy is at this offset. 1862 * If this page is the last page of a transfer, check if we wrote 1863 * a subsequent tail copy. 1864 */ 1865 if (page_cnt == page_pos || page_cnt == page_pos + 1) { 1866 /* 1867 * Check if the offset matches either the first or second 1868 * tail copy. It is possible it will match both. 1869 */ 1870 if (curpage_off == final_off) 1871 tail_page = first_tail; 1872 1873 /* 1874 * If we already matched on the first page then 1875 * check the ending lsn's. 1876 */ 1877 if (curpage_off == second_off) { 1878 if (!tail_page || 1879 (second_tail && 1880 le64_to_cpu(second_tail->record_hdr.last_end_lsn) > 1881 le64_to_cpu(first_tail->record_hdr 1882 .last_end_lsn))) { 1883 tail_page = second_tail; 1884 } 1885 } 1886 } 1887 1888 use_tail_page: 1889 if (tail_page) { 1890 /* We have a candidate for a tail copy. */ 1891 lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn); 1892 1893 if (last_ok_lsn < lsn_cur) { 1894 /* 1895 * If the sequence number is not expected, 1896 * then don't use the tail copy. 1897 */ 1898 if (expected_seq != (lsn_cur >> log->file_data_bits)) 1899 tail_page = NULL; 1900 } else if (last_ok_lsn > lsn_cur) { 1901 /* 1902 * If the last lsn is greater than the one on 1903 * this page then forget this tail. 
1904 */ 1905 tail_page = NULL; 1906 } 1907 } 1908 1909 /* 1910 *If we have an error on the current page, 1911 * we will break of this loop. 1912 */ 1913 if (err || usa_error) 1914 goto check_tail; 1915 1916 /* 1917 * Done if the last lsn on this page doesn't match the previous known 1918 * last lsn or the sequence number is not expected. 1919 */ 1920 lsn_cur = le64_to_cpu(page->rhdr.lsn); 1921 if (last_ok_lsn != lsn_cur && 1922 expected_seq != (lsn_cur >> log->file_data_bits)) { 1923 goto check_tail; 1924 } 1925 1926 /* 1927 * Check that the page position and page count values are correct. 1928 * If this is the first page of a transfer the position must be 1 1929 * and the count will be unknown. 1930 */ 1931 if (page_cnt == page_pos) { 1932 if (page->page_pos != cpu_to_le16(1) && 1933 (!reuse_page || page->page_pos != page->page_count)) { 1934 /* 1935 * If the current page is the first page we are 1936 * looking at and we are reusing this page then 1937 * it can be either the first or last page of a 1938 * transfer. Otherwise it can only be the first. 1939 */ 1940 goto check_tail; 1941 } 1942 } else if (le16_to_cpu(page->page_count) != page_cnt || 1943 le16_to_cpu(page->page_pos) != page_pos + 1) { 1944 /* 1945 * The page position better be 1 more than the last page 1946 * position and the page count better match. 1947 */ 1948 goto check_tail; 1949 } 1950 1951 /* 1952 * We have a valid page the file and may have a valid page 1953 * the tail copy area. 1954 * If the tail page was written after the page the file then 1955 * break of the loop. 1956 */ 1957 if (tail_page && 1958 le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) { 1959 /* Remember if we will replace the page. */ 1960 replace_page = true; 1961 goto check_tail; 1962 } 1963 1964 tail_page = NULL; 1965 1966 if (is_log_record_end(page)) { 1967 /* 1968 * Since we have read this page we know the sequence number 1969 * is the same as our expected value. 1970 */ 1971 log->seq_num = expected_seq; 1972 log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn); 1973 log->ra->current_lsn = page->record_hdr.last_end_lsn; 1974 log->l_flags &= ~NTFSLOG_NO_LAST_LSN; 1975 1976 /* 1977 * If there is room on this page for another header then 1978 * remember we want to reuse the page. 1979 */ 1980 if (log->record_header_len <= 1981 log->page_size - 1982 le16_to_cpu(page->record_hdr.next_record_off)) { 1983 log->l_flags |= NTFSLOG_REUSE_TAIL; 1984 log->next_page = curpage_off; 1985 } else { 1986 log->l_flags &= ~NTFSLOG_REUSE_TAIL; 1987 log->next_page = nextpage_off; 1988 } 1989 1990 /* Remember if we wrapped the log file. */ 1991 if (wrapped_file) 1992 log->l_flags |= NTFSLOG_WRAPPED; 1993 } 1994 1995 /* 1996 * Remember the last page count and position. 1997 * Also remember the last known lsn. 
1998 */ 1999 page_cnt = le16_to_cpu(page->page_count); 2000 page_pos = le16_to_cpu(page->page_pos); 2001 last_ok_lsn = le64_to_cpu(page->rhdr.lsn); 2002 2003 next_page_1: 2004 2005 if (wrapped) { 2006 expected_seq += 1; 2007 wrapped_file = 1; 2008 } 2009 2010 curpage_off = nextpage_off; 2011 kfree(page); 2012 page = NULL; 2013 reuse_page = 0; 2014 goto next_page; 2015 2016 check_tail: 2017 if (tail_page) { 2018 log->seq_num = expected_seq; 2019 log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn); 2020 log->ra->current_lsn = tail_page->record_hdr.last_end_lsn; 2021 log->l_flags &= ~NTFSLOG_NO_LAST_LSN; 2022 2023 if (log->page_size - 2024 le16_to_cpu( 2025 tail_page->record_hdr.next_record_off) >= 2026 log->record_header_len) { 2027 log->l_flags |= NTFSLOG_REUSE_TAIL; 2028 log->next_page = curpage_off; 2029 } else { 2030 log->l_flags &= ~NTFSLOG_REUSE_TAIL; 2031 log->next_page = nextpage_off; 2032 } 2033 2034 if (wrapped) 2035 log->l_flags |= NTFSLOG_WRAPPED; 2036 } 2037 2038 /* Remember that the partial IO will start at the next page. */ 2039 second_off = nextpage_off; 2040 2041 /* 2042 * If the next page is the first page of the file then update 2043 * the sequence number for log records which begon the next page. 2044 */ 2045 if (wrapped) 2046 expected_seq += 1; 2047 2048 /* 2049 * If we have a tail copy or are performing single page I/O we can 2050 * immediately look at the next page. 2051 */ 2052 if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) { 2053 page_cnt = 2; 2054 page_pos = 1; 2055 goto check_valid; 2056 } 2057 2058 if (page_pos != page_cnt) 2059 goto check_valid; 2060 /* 2061 * If the next page causes us to wrap to the beginning of the log 2062 * file then we know which page to check next. 2063 */ 2064 if (wrapped) { 2065 page_cnt = 2; 2066 page_pos = 1; 2067 goto check_valid; 2068 } 2069 2070 cur_pos = 2; 2071 2072 next_test_page: 2073 kfree(tst_page); 2074 tst_page = NULL; 2075 2076 /* Walk through the file, reading log pages. */ 2077 err = read_log_page(log, nextpage_off, &tst_page, &usa_error); 2078 2079 /* 2080 * If we get a USA error then assume that we correctly found 2081 * the end of the original transfer. 2082 */ 2083 if (usa_error) 2084 goto file_is_valid; 2085 2086 /* 2087 * If we were able to read the page, we examine it to see if it 2088 * is the same or different Io block. 2089 */ 2090 if (err) 2091 goto next_test_page_1; 2092 2093 if (le16_to_cpu(tst_page->page_pos) == cur_pos && 2094 check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) { 2095 page_cnt = le16_to_cpu(tst_page->page_count) + 1; 2096 page_pos = le16_to_cpu(tst_page->page_pos); 2097 goto check_valid; 2098 } else { 2099 goto file_is_valid; 2100 } 2101 2102 next_test_page_1: 2103 2104 nextpage_off = next_page_off(log, curpage_off); 2105 wrapped = nextpage_off == log->first_page; 2106 2107 if (wrapped) { 2108 expected_seq += 1; 2109 page_cnt = 2; 2110 page_pos = 1; 2111 } 2112 2113 cur_pos += 1; 2114 part_io_count += 1; 2115 if (!wrapped) 2116 goto next_test_page; 2117 2118 check_valid: 2119 /* Skip over the remaining pages this transfer. */ 2120 remain_pages = page_cnt - page_pos - 1; 2121 part_io_count += remain_pages; 2122 2123 while (remain_pages--) { 2124 nextpage_off = next_page_off(log, curpage_off); 2125 wrapped = nextpage_off == log->first_page; 2126 2127 if (wrapped) 2128 expected_seq += 1; 2129 } 2130 2131 /* Call our routine to check this log page. 
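 *
 * If that page reads cleanly and check_subseq_log_page() still accepts
 * it as a continuation of expected_seq, the end of log found above
 * cannot be right, so the whole scan fails with -EINVAL just below.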
*/ 2132 kfree(tst_page); 2133 tst_page = NULL; 2134 2135 err = read_log_page(log, nextpage_off, &tst_page, &usa_error); 2136 if (!err && !usa_error && 2137 check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) { 2138 err = -EINVAL; 2139 goto out; 2140 } 2141 2142 file_is_valid: 2143 2144 /* We have a valid file. */ 2145 if (page_off1 || tail_page) { 2146 struct RECORD_PAGE_HDR *tmp_page; 2147 2148 if (sb_rdonly(log->ni->mi.sbi->sb)) { 2149 err = -EROFS; 2150 goto out; 2151 } 2152 2153 if (page_off1) { 2154 tmp_page = Add2Ptr(page_bufs, page_off1 - page_off); 2155 tails -= (page_off1 - page_off) / log->page_size; 2156 if (!tail_page) 2157 tails -= 1; 2158 } else { 2159 tmp_page = tail_page; 2160 tails = 1; 2161 } 2162 2163 while (tails--) { 2164 u64 off = hdr_file_off(log, tmp_page); 2165 2166 if (!page) { 2167 page = kmalloc(log->page_size, GFP_NOFS); 2168 if (!page) { 2169 err = -ENOMEM; 2170 goto out; 2171 } 2172 } 2173 2174 /* 2175 * Correct page and copy the data from this page 2176 * into it and flush it to disk. 2177 */ 2178 memcpy(page, tmp_page, log->page_size); 2179 2180 /* Fill last flushed lsn value flush the page. */ 2181 if (log->major_ver < 2) 2182 page->rhdr.lsn = page->record_hdr.last_end_lsn; 2183 else 2184 page->file_off = 0; 2185 2186 page->page_pos = page->page_count = cpu_to_le16(1); 2187 2188 ntfs_fix_pre_write(&page->rhdr, log->page_size); 2189 2190 err = ntfs_sb_write_run(log->ni->mi.sbi, 2191 &log->ni->file.run, off, page, 2192 log->page_size, 0); 2193 2194 if (err) 2195 goto out; 2196 2197 if (part_io_count && second_off == off) { 2198 second_off += log->page_size; 2199 part_io_count -= 1; 2200 } 2201 2202 tmp_page = Add2Ptr(tmp_page, log->page_size); 2203 } 2204 } 2205 2206 if (part_io_count) { 2207 if (sb_rdonly(log->ni->mi.sbi->sb)) { 2208 err = -EROFS; 2209 goto out; 2210 } 2211 } 2212 2213 out: 2214 kfree(second_tail); 2215 kfree(first_tail); 2216 kfree(page); 2217 kfree(tst_page); 2218 kfree(page_bufs); 2219 2220 return err; 2221 } 2222 2223 /* 2224 * read_log_rec_buf - Copy a log record from the file to a buffer. 2225 * 2226 * The log record may span several log pages and may even wrap the file. 2227 */ 2228 static int read_log_rec_buf(struct ntfs_log *log, 2229 const struct LFS_RECORD_HDR *rh, void *buffer) 2230 { 2231 int err; 2232 struct RECORD_PAGE_HDR *ph = NULL; 2233 u64 lsn = le64_to_cpu(rh->this_lsn); 2234 u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask; 2235 u32 off = lsn_to_page_off(log, lsn) + log->record_header_len; 2236 u32 data_len = le32_to_cpu(rh->client_data_len); 2237 2238 /* 2239 * While there are more bytes to transfer, 2240 * we continue to attempt to perform the read. 2241 */ 2242 for (;;) { 2243 bool usa_error; 2244 u32 tail = log->page_size - off; 2245 2246 if (tail >= data_len) 2247 tail = data_len; 2248 2249 data_len -= tail; 2250 2251 err = read_log_page(log, vbo, &ph, &usa_error); 2252 if (err) 2253 goto out; 2254 2255 /* 2256 * The last lsn on this page better be greater or equal 2257 * to the lsn we are copying. 2258 */ 2259 if (lsn > le64_to_cpu(ph->rhdr.lsn)) { 2260 err = -EINVAL; 2261 goto out; 2262 } 2263 2264 memcpy(buffer, Add2Ptr(ph, off), tail); 2265 2266 /* If there are no more bytes to transfer, we exit the loop. 
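 *
 * Each pass copies at most (page_size - off) bytes; after the first
 * page, 'off' is reset to log->data_off, the start of the data area of
 * every following page. When the count reaches zero the page must
 * really end the record: is_log_record_end() and a last_end_lsn not
 * older than the lsn being copied, otherwise -EINVAL.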
*/ 2267 if (!data_len) { 2268 if (!is_log_record_end(ph) || 2269 lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) { 2270 err = -EINVAL; 2271 goto out; 2272 } 2273 break; 2274 } 2275 2276 if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn || 2277 lsn > le64_to_cpu(ph->rhdr.lsn)) { 2278 err = -EINVAL; 2279 goto out; 2280 } 2281 2282 vbo = next_page_off(log, vbo); 2283 off = log->data_off; 2284 2285 /* 2286 * Adjust our pointer the user's buffer to transfer 2287 * the next block to. 2288 */ 2289 buffer = Add2Ptr(buffer, tail); 2290 } 2291 2292 out: 2293 kfree(ph); 2294 return err; 2295 } 2296 2297 static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_, 2298 u64 *lsn) 2299 { 2300 int err; 2301 struct LFS_RECORD_HDR *rh = NULL; 2302 const struct CLIENT_REC *cr = 2303 Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)); 2304 u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn); 2305 u32 len; 2306 struct NTFS_RESTART *rst; 2307 2308 *lsn = 0; 2309 *rst_ = NULL; 2310 2311 /* If the client doesn't have a restart area, go ahead and exit now. */ 2312 if (!lsnc) 2313 return 0; 2314 2315 err = read_log_page(log, lsn_to_vbo(log, lsnc), 2316 (struct RECORD_PAGE_HDR **)&rh, NULL); 2317 if (err) 2318 return err; 2319 2320 rst = NULL; 2321 lsnr = le64_to_cpu(rh->this_lsn); 2322 2323 if (lsnc != lsnr) { 2324 /* If the lsn values don't match, then the disk is corrupt. */ 2325 err = -EINVAL; 2326 goto out; 2327 } 2328 2329 *lsn = lsnr; 2330 len = le32_to_cpu(rh->client_data_len); 2331 2332 if (!len) { 2333 err = 0; 2334 goto out; 2335 } 2336 2337 if (len < sizeof(struct NTFS_RESTART)) { 2338 err = -EINVAL; 2339 goto out; 2340 } 2341 2342 rst = kmalloc(len, GFP_NOFS); 2343 if (!rst) { 2344 err = -ENOMEM; 2345 goto out; 2346 } 2347 2348 /* Copy the data into the 'rst' buffer. */ 2349 err = read_log_rec_buf(log, rh, rst); 2350 if (err) 2351 goto out; 2352 2353 *rst_ = rst; 2354 rst = NULL; 2355 2356 out: 2357 kfree(rh); 2358 kfree(rst); 2359 2360 return err; 2361 } 2362 2363 static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb) 2364 { 2365 int err; 2366 struct LFS_RECORD_HDR *rh = lcb->lrh; 2367 u32 rec_len, len; 2368 2369 /* Read the record header for this lsn. */ 2370 if (!rh) { 2371 err = read_log_page(log, lsn_to_vbo(log, lsn), 2372 (struct RECORD_PAGE_HDR **)&rh, NULL); 2373 2374 lcb->lrh = rh; 2375 if (err) 2376 return err; 2377 } 2378 2379 /* 2380 * If the lsn the log record doesn't match the desired 2381 * lsn then the disk is corrupt. 2382 */ 2383 if (lsn != le64_to_cpu(rh->this_lsn)) 2384 return -EINVAL; 2385 2386 len = le32_to_cpu(rh->client_data_len); 2387 2388 /* 2389 * Check that the length field isn't greater than the total 2390 * available space the log file. 2391 */ 2392 rec_len = len + log->record_header_len; 2393 if (rec_len >= log->total_avail) 2394 return -EINVAL; 2395 2396 /* 2397 * If the entire log record is on this log page, 2398 * put a pointer to the log record the context block. 2399 */ 2400 if (rh->flags & LOG_RECORD_MULTI_PAGE) { 2401 void *lr = kmalloc(len, GFP_NOFS); 2402 2403 if (!lr) 2404 return -ENOMEM; 2405 2406 lcb->log_rec = lr; 2407 lcb->alloc = true; 2408 2409 /* Copy the data into the buffer returned. */ 2410 err = read_log_rec_buf(log, rh, lr); 2411 if (err) 2412 return err; 2413 } else { 2414 /* If beyond the end of the current page -> an error. 
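 *
 * In the non-allocated case lcb->log_rec points directly into the page
 * buffer held by lcb->lrh, so the record data stays valid only as long
 * as that header is kept.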
*/ 2415 u32 page_off = lsn_to_page_off(log, lsn); 2416 2417 if (page_off + len + log->record_header_len > log->page_size) 2418 return -EINVAL; 2419 2420 lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR)); 2421 lcb->alloc = false; 2422 } 2423 2424 return 0; 2425 } 2426 2427 /* 2428 * read_log_rec_lcb - Init the query operation. 2429 */ 2430 static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode, 2431 struct lcb **lcb_) 2432 { 2433 int err; 2434 const struct CLIENT_REC *cr; 2435 struct lcb *lcb; 2436 2437 switch (ctx_mode) { 2438 case lcb_ctx_undo_next: 2439 case lcb_ctx_prev: 2440 case lcb_ctx_next: 2441 break; 2442 default: 2443 return -EINVAL; 2444 } 2445 2446 /* Check that the given lsn is the legal range for this client. */ 2447 cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)); 2448 2449 if (!verify_client_lsn(log, cr, lsn)) 2450 return -EINVAL; 2451 2452 lcb = kzalloc(sizeof(struct lcb), GFP_NOFS); 2453 if (!lcb) 2454 return -ENOMEM; 2455 lcb->client = log->client_id; 2456 lcb->ctx_mode = ctx_mode; 2457 2458 /* Find the log record indicated by the given lsn. */ 2459 err = find_log_rec(log, lsn, lcb); 2460 if (err) 2461 goto out; 2462 2463 *lcb_ = lcb; 2464 return 0; 2465 2466 out: 2467 lcb_put(lcb); 2468 *lcb_ = NULL; 2469 return err; 2470 } 2471 2472 /* 2473 * find_client_next_lsn 2474 * 2475 * Attempt to find the next lsn to return to a client based on the context mode. 2476 */ 2477 static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn) 2478 { 2479 int err; 2480 u64 next_lsn; 2481 struct LFS_RECORD_HDR *hdr; 2482 2483 hdr = lcb->lrh; 2484 *lsn = 0; 2485 2486 if (lcb_ctx_next != lcb->ctx_mode) 2487 goto check_undo_next; 2488 2489 /* Loop as long as another lsn can be found. */ 2490 for (;;) { 2491 u64 current_lsn; 2492 2493 err = next_log_lsn(log, hdr, ¤t_lsn); 2494 if (err) 2495 goto out; 2496 2497 if (!current_lsn) 2498 break; 2499 2500 if (hdr != lcb->lrh) 2501 kfree(hdr); 2502 2503 hdr = NULL; 2504 err = read_log_page(log, lsn_to_vbo(log, current_lsn), 2505 (struct RECORD_PAGE_HDR **)&hdr, NULL); 2506 if (err) 2507 goto out; 2508 2509 if (memcmp(&hdr->client, &lcb->client, 2510 sizeof(struct CLIENT_ID))) { 2511 /*err = -EINVAL; */ 2512 } else if (LfsClientRecord == hdr->record_type) { 2513 kfree(lcb->lrh); 2514 lcb->lrh = hdr; 2515 *lsn = current_lsn; 2516 return 0; 2517 } 2518 } 2519 2520 out: 2521 if (hdr != lcb->lrh) 2522 kfree(hdr); 2523 return err; 2524 2525 check_undo_next: 2526 if (lcb_ctx_undo_next == lcb->ctx_mode) 2527 next_lsn = le64_to_cpu(hdr->client_undo_next_lsn); 2528 else if (lcb_ctx_prev == lcb->ctx_mode) 2529 next_lsn = le64_to_cpu(hdr->client_prev_lsn); 2530 else 2531 return 0; 2532 2533 if (!next_lsn) 2534 return 0; 2535 2536 if (!verify_client_lsn( 2537 log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)), 2538 next_lsn)) 2539 return 0; 2540 2541 hdr = NULL; 2542 err = read_log_page(log, lsn_to_vbo(log, next_lsn), 2543 (struct RECORD_PAGE_HDR **)&hdr, NULL); 2544 if (err) 2545 return err; 2546 kfree(lcb->lrh); 2547 lcb->lrh = hdr; 2548 2549 *lsn = next_lsn; 2550 2551 return 0; 2552 } 2553 2554 static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn) 2555 { 2556 int err; 2557 2558 err = find_client_next_lsn(log, lcb, lsn); 2559 if (err) 2560 return err; 2561 2562 if (!*lsn) 2563 return 0; 2564 2565 if (lcb->alloc) 2566 kfree(lcb->log_rec); 2567 2568 lcb->log_rec = NULL; 2569 lcb->alloc = false; 2570 kfree(lcb->lrh); 2571 lcb->lrh = NULL; 2572 2573 return find_log_rec(log, *lsn, 
lcb); 2574 } 2575 2576 bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes) 2577 { 2578 __le16 mask; 2579 u32 min_de, de_off, used, total; 2580 const struct NTFS_DE *e; 2581 2582 if (hdr_has_subnode(hdr)) { 2583 min_de = sizeof(struct NTFS_DE) + sizeof(u64); 2584 mask = NTFS_IE_HAS_SUBNODES; 2585 } else { 2586 min_de = sizeof(struct NTFS_DE); 2587 mask = 0; 2588 } 2589 2590 de_off = le32_to_cpu(hdr->de_off); 2591 used = le32_to_cpu(hdr->used); 2592 total = le32_to_cpu(hdr->total); 2593 2594 if (de_off > bytes - min_de || used > bytes || total > bytes || 2595 de_off + min_de > used || used > total) { 2596 return false; 2597 } 2598 2599 e = Add2Ptr(hdr, de_off); 2600 for (;;) { 2601 u16 esize = le16_to_cpu(e->size); 2602 struct NTFS_DE *next = Add2Ptr(e, esize); 2603 2604 if (esize < min_de || PtrOffset(hdr, next) > used || 2605 (e->flags & NTFS_IE_HAS_SUBNODES) != mask) { 2606 return false; 2607 } 2608 2609 if (de_is_last(e)) 2610 break; 2611 2612 e = next; 2613 } 2614 2615 return true; 2616 } 2617 2618 static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes) 2619 { 2620 u16 fo; 2621 const struct NTFS_RECORD_HEADER *r = &ib->rhdr; 2622 2623 if (r->sign != NTFS_INDX_SIGNATURE) 2624 return false; 2625 2626 fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short)); 2627 2628 if (le16_to_cpu(r->fix_off) > fo) 2629 return false; 2630 2631 if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes) 2632 return false; 2633 2634 return check_index_header(&ib->ihdr, 2635 bytes - offsetof(struct INDEX_BUFFER, ihdr)); 2636 } 2637 2638 static inline bool check_index_root(const struct ATTRIB *attr, 2639 struct ntfs_sb_info *sbi) 2640 { 2641 bool ret; 2642 const struct INDEX_ROOT *root = resident_data(attr); 2643 u8 index_bits = le32_to_cpu(root->index_block_size) >= 2644 sbi->cluster_size ? 2645 sbi->cluster_bits : 2646 SECTOR_SHIFT; 2647 u8 block_clst = root->index_block_clst; 2648 2649 if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) || 2650 (root->type != ATTR_NAME && root->type != ATTR_ZERO) || 2651 (root->type == ATTR_NAME && 2652 root->rule != NTFS_COLLATION_TYPE_FILENAME) || 2653 (le32_to_cpu(root->index_block_size) != 2654 (block_clst << index_bits)) || 2655 (block_clst != 1 && block_clst != 2 && block_clst != 4 && 2656 block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 && 2657 block_clst != 0x40 && block_clst != 0x80)) { 2658 return false; 2659 } 2660 2661 ret = check_index_header(&root->ihdr, 2662 le32_to_cpu(attr->res.data_size) - 2663 offsetof(struct INDEX_ROOT, ihdr)); 2664 return ret; 2665 } 2666 2667 static inline bool check_attr(const struct MFT_REC *rec, 2668 const struct ATTRIB *attr, 2669 struct ntfs_sb_info *sbi) 2670 { 2671 u32 asize = le32_to_cpu(attr->size); 2672 u32 rsize = 0; 2673 u64 dsize, svcn, evcn; 2674 u16 run_off; 2675 2676 /* Check the fixed part of the attribute record header. */ 2677 if (asize >= sbi->record_size || 2678 asize + PtrOffset(rec, attr) >= sbi->record_size || 2679 (attr->name_len && 2680 le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) > 2681 asize)) { 2682 return false; 2683 } 2684 2685 /* Check the attribute fields. 
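 *
 * Note that the non-resident case returns as soon as the run list has
 * been unpacked, so the per-type checks in the switch further down are
 * applied to resident attributes only.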
*/ 2686 switch (attr->non_res) { 2687 case 0: 2688 rsize = le32_to_cpu(attr->res.data_size); 2689 if (rsize >= asize || 2690 le16_to_cpu(attr->res.data_off) + rsize > asize) { 2691 return false; 2692 } 2693 break; 2694 2695 case 1: 2696 dsize = le64_to_cpu(attr->nres.data_size); 2697 svcn = le64_to_cpu(attr->nres.svcn); 2698 evcn = le64_to_cpu(attr->nres.evcn); 2699 run_off = le16_to_cpu(attr->nres.run_off); 2700 2701 if (svcn > evcn + 1 || run_off >= asize || 2702 le64_to_cpu(attr->nres.valid_size) > dsize || 2703 dsize > le64_to_cpu(attr->nres.alloc_size)) { 2704 return false; 2705 } 2706 2707 if (run_off > asize) 2708 return false; 2709 2710 if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn, 2711 Add2Ptr(attr, run_off), asize - run_off) < 0) { 2712 return false; 2713 } 2714 2715 return true; 2716 2717 default: 2718 return false; 2719 } 2720 2721 switch (attr->type) { 2722 case ATTR_NAME: 2723 if (fname_full_size(Add2Ptr( 2724 attr, le16_to_cpu(attr->res.data_off))) > asize) { 2725 return false; 2726 } 2727 break; 2728 2729 case ATTR_ROOT: 2730 return check_index_root(attr, sbi); 2731 2732 case ATTR_STD: 2733 if (rsize < sizeof(struct ATTR_STD_INFO5) && 2734 rsize != sizeof(struct ATTR_STD_INFO)) { 2735 return false; 2736 } 2737 break; 2738 2739 case ATTR_LIST: 2740 case ATTR_ID: 2741 case ATTR_SECURE: 2742 case ATTR_LABEL: 2743 case ATTR_VOL_INFO: 2744 case ATTR_DATA: 2745 case ATTR_ALLOC: 2746 case ATTR_BITMAP: 2747 case ATTR_REPARSE: 2748 case ATTR_EA_INFO: 2749 case ATTR_EA: 2750 case ATTR_PROPERTYSET: 2751 case ATTR_LOGGED_UTILITY_STREAM: 2752 break; 2753 2754 default: 2755 return false; 2756 } 2757 2758 return true; 2759 } 2760 2761 static inline bool check_file_record(const struct MFT_REC *rec, 2762 const struct MFT_REC *rec2, 2763 struct ntfs_sb_info *sbi) 2764 { 2765 const struct ATTRIB *attr; 2766 u16 fo = le16_to_cpu(rec->rhdr.fix_off); 2767 u16 fn = le16_to_cpu(rec->rhdr.fix_num); 2768 u16 ao = le16_to_cpu(rec->attr_off); 2769 u32 rs = sbi->record_size; 2770 2771 /* Check the file record header for consistency. */ 2772 if (rec->rhdr.sign != NTFS_FILE_SIGNATURE || 2773 fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) || 2774 (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 || 2775 ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) || 2776 le32_to_cpu(rec->total) != rs) { 2777 return false; 2778 } 2779 2780 /* Loop to check all of the attributes. 
*/ 2781 for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END; 2782 attr = Add2Ptr(attr, le32_to_cpu(attr->size))) { 2783 if (check_attr(rec, attr, sbi)) 2784 continue; 2785 return false; 2786 } 2787 2788 return true; 2789 } 2790 2791 static inline int check_lsn(const struct NTFS_RECORD_HEADER *hdr, 2792 const u64 *rlsn) 2793 { 2794 u64 lsn; 2795 2796 if (!rlsn) 2797 return true; 2798 2799 lsn = le64_to_cpu(hdr->lsn); 2800 2801 if (hdr->sign == NTFS_HOLE_SIGNATURE) 2802 return false; 2803 2804 if (*rlsn > lsn) 2805 return true; 2806 2807 return false; 2808 } 2809 2810 static inline bool check_if_attr(const struct MFT_REC *rec, 2811 const struct LOG_REC_HDR *lrh) 2812 { 2813 u16 ro = le16_to_cpu(lrh->record_off); 2814 u16 o = le16_to_cpu(rec->attr_off); 2815 const struct ATTRIB *attr = Add2Ptr(rec, o); 2816 2817 while (o < ro) { 2818 u32 asize; 2819 2820 if (attr->type == ATTR_END) 2821 break; 2822 2823 asize = le32_to_cpu(attr->size); 2824 if (!asize) 2825 break; 2826 2827 o += asize; 2828 attr = Add2Ptr(attr, asize); 2829 } 2830 2831 return o == ro; 2832 } 2833 2834 static inline bool check_if_index_root(const struct MFT_REC *rec, 2835 const struct LOG_REC_HDR *lrh) 2836 { 2837 u16 ro = le16_to_cpu(lrh->record_off); 2838 u16 o = le16_to_cpu(rec->attr_off); 2839 const struct ATTRIB *attr = Add2Ptr(rec, o); 2840 2841 while (o < ro) { 2842 u32 asize; 2843 2844 if (attr->type == ATTR_END) 2845 break; 2846 2847 asize = le32_to_cpu(attr->size); 2848 if (!asize) 2849 break; 2850 2851 o += asize; 2852 attr = Add2Ptr(attr, asize); 2853 } 2854 2855 return o == ro && attr->type == ATTR_ROOT; 2856 } 2857 2858 static inline bool check_if_root_index(const struct ATTRIB *attr, 2859 const struct INDEX_HDR *hdr, 2860 const struct LOG_REC_HDR *lrh) 2861 { 2862 u16 ao = le16_to_cpu(lrh->attr_off); 2863 u32 de_off = le32_to_cpu(hdr->de_off); 2864 u32 o = PtrOffset(attr, hdr) + de_off; 2865 const struct NTFS_DE *e = Add2Ptr(hdr, de_off); 2866 u32 asize = le32_to_cpu(attr->size); 2867 2868 while (o < ao) { 2869 u16 esize; 2870 2871 if (o >= asize) 2872 break; 2873 2874 esize = le16_to_cpu(e->size); 2875 if (!esize) 2876 break; 2877 2878 o += esize; 2879 e = Add2Ptr(e, esize); 2880 } 2881 2882 return o == ao; 2883 } 2884 2885 static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr, 2886 u32 attr_off) 2887 { 2888 u32 de_off = le32_to_cpu(hdr->de_off); 2889 u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off; 2890 const struct NTFS_DE *e = Add2Ptr(hdr, de_off); 2891 u32 used = le32_to_cpu(hdr->used); 2892 2893 while (o < attr_off) { 2894 u16 esize; 2895 2896 if (de_off >= used) 2897 break; 2898 2899 esize = le16_to_cpu(e->size); 2900 if (!esize) 2901 break; 2902 2903 o += esize; 2904 de_off += esize; 2905 e = Add2Ptr(e, esize); 2906 } 2907 2908 return o == attr_off; 2909 } 2910 2911 static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr, 2912 u32 nsize) 2913 { 2914 u32 asize = le32_to_cpu(attr->size); 2915 int dsize = nsize - asize; 2916 u8 *next = Add2Ptr(attr, asize); 2917 u32 used = le32_to_cpu(rec->used); 2918 2919 memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next)); 2920 2921 rec->used = cpu_to_le32(used + dsize); 2922 attr->size = cpu_to_le32(nsize); 2923 } 2924 2925 struct OpenAttr { 2926 struct ATTRIB *attr; 2927 struct runs_tree *run1; 2928 struct runs_tree run0; 2929 struct ntfs_inode *ni; 2930 // CLST rno; 2931 }; 2932 2933 /* 2934 * cmp_type_and_name 2935 * 2936 * Return: 0 if 'attr' has the same type and name. 
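 *
 * A non-zero result only means "different" (type, name length or the
 * name bytes themselves); it does not define an ordering.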
2937 */ 2938 static inline int cmp_type_and_name(const struct ATTRIB *a1, 2939 const struct ATTRIB *a2) 2940 { 2941 return a1->type != a2->type || a1->name_len != a2->name_len || 2942 (a1->name_len && memcmp(attr_name(a1), attr_name(a2), 2943 a1->name_len * sizeof(short))); 2944 } 2945 2946 static struct OpenAttr *find_loaded_attr(struct ntfs_log *log, 2947 const struct ATTRIB *attr, CLST rno) 2948 { 2949 struct OPEN_ATTR_ENRTY *oe = NULL; 2950 2951 while ((oe = enum_rstbl(log->open_attr_tbl, oe))) { 2952 struct OpenAttr *op_attr; 2953 2954 if (ino_get(&oe->ref) != rno) 2955 continue; 2956 2957 op_attr = (struct OpenAttr *)oe->ptr; 2958 if (!cmp_type_and_name(op_attr->attr, attr)) 2959 return op_attr; 2960 } 2961 return NULL; 2962 } 2963 2964 static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi, 2965 enum ATTR_TYPE type, u64 size, 2966 const u16 *name, size_t name_len, 2967 __le16 flags) 2968 { 2969 struct ATTRIB *attr; 2970 u32 name_size = ALIGN(name_len * sizeof(short), 8); 2971 bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED); 2972 u32 asize = name_size + 2973 (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT); 2974 2975 attr = kzalloc(asize, GFP_NOFS); 2976 if (!attr) 2977 return NULL; 2978 2979 attr->type = type; 2980 attr->size = cpu_to_le32(asize); 2981 attr->flags = flags; 2982 attr->non_res = 1; 2983 attr->name_len = name_len; 2984 2985 attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1); 2986 attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size)); 2987 attr->nres.data_size = cpu_to_le64(size); 2988 attr->nres.valid_size = attr->nres.data_size; 2989 if (is_ext) { 2990 attr->name_off = SIZEOF_NONRESIDENT_EX_LE; 2991 if (is_attr_compressed(attr)) 2992 attr->nres.c_unit = COMPRESSION_UNIT; 2993 2994 attr->nres.run_off = 2995 cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size); 2996 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name, 2997 name_len * sizeof(short)); 2998 } else { 2999 attr->name_off = SIZEOF_NONRESIDENT_LE; 3000 attr->nres.run_off = 3001 cpu_to_le16(SIZEOF_NONRESIDENT + name_size); 3002 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name, 3003 name_len * sizeof(short)); 3004 } 3005 3006 return attr; 3007 } 3008 3009 /* 3010 * do_action - Common routine for the Redo and Undo Passes. 3011 * @rlsn: If it is NULL then undo. 
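 *
 * The first switch below loads the target of the operation, either an
 * MFT record or a non-resident buffer read through the open attribute
 * entry; the second switch applies the redo/undo data. A failed
 * consistency check jumps to 'dirty_vol', which marks the volume dirty
 * instead of aborting the whole replay.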
3012 */ 3013 static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe, 3014 const struct LOG_REC_HDR *lrh, u32 op, void *data, 3015 u32 dlen, u32 rec_len, const u64 *rlsn) 3016 { 3017 int err = 0; 3018 struct ntfs_sb_info *sbi = log->ni->mi.sbi; 3019 struct inode *inode = NULL, *inode_parent; 3020 struct mft_inode *mi = NULL, *mi2_child = NULL; 3021 CLST rno = 0, rno_base = 0; 3022 struct INDEX_BUFFER *ib = NULL; 3023 struct MFT_REC *rec = NULL; 3024 struct ATTRIB *attr = NULL, *attr2; 3025 struct INDEX_HDR *hdr; 3026 struct INDEX_ROOT *root; 3027 struct NTFS_DE *e, *e1, *e2; 3028 struct NEW_ATTRIBUTE_SIZES *new_sz; 3029 struct ATTR_FILE_NAME *fname; 3030 struct OpenAttr *oa, *oa2; 3031 u32 nsize, t32, asize, used, esize, off, bits; 3032 u16 id, id2; 3033 u32 record_size = sbi->record_size; 3034 u64 t64; 3035 u16 roff = le16_to_cpu(lrh->record_off); 3036 u16 aoff = le16_to_cpu(lrh->attr_off); 3037 u64 lco = 0; 3038 u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT; 3039 u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits; 3040 u64 vbo = cbo + tvo; 3041 void *buffer_le = NULL; 3042 u32 bytes = 0; 3043 bool a_dirty = false; 3044 u16 data_off; 3045 3046 oa = oe->ptr; 3047 3048 /* Big switch to prepare. */ 3049 switch (op) { 3050 /* ============================================================ 3051 * Process MFT records, as described by the current log record. 3052 * ============================================================ 3053 */ 3054 case InitializeFileRecordSegment: 3055 case DeallocateFileRecordSegment: 3056 case WriteEndOfFileRecordSegment: 3057 case CreateAttribute: 3058 case DeleteAttribute: 3059 case UpdateResidentValue: 3060 case UpdateMappingPairs: 3061 case SetNewAttributeSizes: 3062 case AddIndexEntryRoot: 3063 case DeleteIndexEntryRoot: 3064 case SetIndexEntryVcnRoot: 3065 case UpdateFileNameRoot: 3066 case UpdateRecordDataRoot: 3067 case ZeroEndOfFileRecord: 3068 rno = vbo >> sbi->record_bits; 3069 inode = ilookup(sbi->sb, rno); 3070 if (inode) { 3071 mi = &ntfs_i(inode)->mi; 3072 } else if (op == InitializeFileRecordSegment) { 3073 mi = kzalloc(sizeof(struct mft_inode), GFP_NOFS); 3074 if (!mi) 3075 return -ENOMEM; 3076 err = mi_format_new(mi, sbi, rno, 0, false); 3077 if (err) 3078 goto out; 3079 } else { 3080 /* Read from disk. 
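 *
 * The record is loaded with mi_get() when the inode is not cached;
 * whatever is taken here is released again at 'out' via iput() or
 * mi_put().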
*/ 3081 err = mi_get(sbi, rno, &mi); 3082 if (err) 3083 return err; 3084 } 3085 rec = mi->mrec; 3086 3087 if (op == DeallocateFileRecordSegment) 3088 goto skip_load_parent; 3089 3090 if (InitializeFileRecordSegment != op) { 3091 if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE) 3092 goto dirty_vol; 3093 if (!check_lsn(&rec->rhdr, rlsn)) 3094 goto out; 3095 if (!check_file_record(rec, NULL, sbi)) 3096 goto dirty_vol; 3097 attr = Add2Ptr(rec, roff); 3098 } 3099 3100 if (is_rec_base(rec) || InitializeFileRecordSegment == op) { 3101 rno_base = rno; 3102 goto skip_load_parent; 3103 } 3104 3105 rno_base = ino_get(&rec->parent_ref); 3106 inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL); 3107 if (IS_ERR(inode_parent)) 3108 goto skip_load_parent; 3109 3110 if (is_bad_inode(inode_parent)) { 3111 iput(inode_parent); 3112 goto skip_load_parent; 3113 } 3114 3115 if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) { 3116 iput(inode_parent); 3117 } else { 3118 if (mi2_child->mrec != mi->mrec) 3119 memcpy(mi2_child->mrec, mi->mrec, 3120 sbi->record_size); 3121 3122 if (inode) 3123 iput(inode); 3124 else if (mi) 3125 mi_put(mi); 3126 3127 inode = inode_parent; 3128 mi = mi2_child; 3129 rec = mi2_child->mrec; 3130 attr = Add2Ptr(rec, roff); 3131 } 3132 3133 skip_load_parent: 3134 inode_parent = NULL; 3135 break; 3136 3137 /* 3138 * Process attributes, as described by the current log record. 3139 */ 3140 case UpdateNonresidentValue: 3141 case AddIndexEntryAllocation: 3142 case DeleteIndexEntryAllocation: 3143 case WriteEndOfIndexBuffer: 3144 case SetIndexEntryVcnAllocation: 3145 case UpdateFileNameAllocation: 3146 case SetBitsInNonresidentBitMap: 3147 case ClearBitsInNonresidentBitMap: 3148 case UpdateRecordDataAllocation: 3149 attr = oa->attr; 3150 bytes = UpdateNonresidentValue == op ? dlen : 0; 3151 lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits; 3152 3153 if (attr->type == ATTR_ALLOC) { 3154 t32 = le32_to_cpu(oe->bytes_per_index); 3155 if (bytes < t32) 3156 bytes = t32; 3157 } 3158 3159 if (!bytes) 3160 bytes = lco - cbo; 3161 3162 bytes += roff; 3163 if (attr->type == ATTR_ALLOC) 3164 bytes = (bytes + 511) & ~511; // align 3165 3166 buffer_le = kmalloc(bytes, GFP_NOFS); 3167 if (!buffer_le) 3168 return -ENOMEM; 3169 3170 err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes, 3171 NULL); 3172 if (err) 3173 goto out; 3174 3175 if (attr->type == ATTR_ALLOC && *(int *)buffer_le) 3176 ntfs_fix_post_read(buffer_le, bytes, false); 3177 break; 3178 3179 default: 3180 WARN_ON(1); 3181 } 3182 3183 /* Big switch to do operation. 
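 *
 * MFT record based operations modify 'rec' in place and set mi->dirty,
 * non-resident ones modify 'buffer_le' and set a_dirty; both are
 * written back after the switch (mi_write() / ntfs_sb_write_run()),
 * and on redo (rlsn != NULL) the applied lsn is stamped into the
 * header of whatever was changed.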
*/ 3184 switch (op) { 3185 case InitializeFileRecordSegment: 3186 if (roff + dlen > record_size) 3187 goto dirty_vol; 3188 3189 memcpy(Add2Ptr(rec, roff), data, dlen); 3190 mi->dirty = true; 3191 break; 3192 3193 case DeallocateFileRecordSegment: 3194 clear_rec_inuse(rec); 3195 le16_add_cpu(&rec->seq, 1); 3196 mi->dirty = true; 3197 break; 3198 3199 case WriteEndOfFileRecordSegment: 3200 attr2 = (struct ATTRIB *)data; 3201 if (!check_if_attr(rec, lrh) || roff + dlen > record_size) 3202 goto dirty_vol; 3203 3204 memmove(attr, attr2, dlen); 3205 rec->used = cpu_to_le32(ALIGN(roff + dlen, 8)); 3206 3207 mi->dirty = true; 3208 break; 3209 3210 case CreateAttribute: 3211 attr2 = (struct ATTRIB *)data; 3212 asize = le32_to_cpu(attr2->size); 3213 used = le32_to_cpu(rec->used); 3214 3215 if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT || 3216 !IS_ALIGNED(asize, 8) || 3217 Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) || 3218 dlen > record_size - used) { 3219 goto dirty_vol; 3220 } 3221 3222 memmove(Add2Ptr(attr, asize), attr, used - roff); 3223 memcpy(attr, attr2, asize); 3224 3225 rec->used = cpu_to_le32(used + asize); 3226 id = le16_to_cpu(rec->next_attr_id); 3227 id2 = le16_to_cpu(attr2->id); 3228 if (id <= id2) 3229 rec->next_attr_id = cpu_to_le16(id2 + 1); 3230 if (is_attr_indexed(attr)) 3231 le16_add_cpu(&rec->hard_links, 1); 3232 3233 oa2 = find_loaded_attr(log, attr, rno_base); 3234 if (oa2) { 3235 void *p2 = kmemdup(attr, le32_to_cpu(attr->size), 3236 GFP_NOFS); 3237 if (p2) { 3238 // run_close(oa2->run1); 3239 kfree(oa2->attr); 3240 oa2->attr = p2; 3241 } 3242 } 3243 3244 mi->dirty = true; 3245 break; 3246 3247 case DeleteAttribute: 3248 asize = le32_to_cpu(attr->size); 3249 used = le32_to_cpu(rec->used); 3250 3251 if (!check_if_attr(rec, lrh)) 3252 goto dirty_vol; 3253 3254 rec->used = cpu_to_le32(used - asize); 3255 if (is_attr_indexed(attr)) 3256 le16_add_cpu(&rec->hard_links, -1); 3257 3258 memmove(attr, Add2Ptr(attr, asize), used - asize - roff); 3259 3260 mi->dirty = true; 3261 break; 3262 3263 case UpdateResidentValue: 3264 nsize = aoff + dlen; 3265 3266 if (!check_if_attr(rec, lrh)) 3267 goto dirty_vol; 3268 3269 asize = le32_to_cpu(attr->size); 3270 used = le32_to_cpu(rec->used); 3271 3272 if (lrh->redo_len == lrh->undo_len) { 3273 if (nsize > asize) 3274 goto dirty_vol; 3275 goto move_data; 3276 } 3277 3278 if (nsize > asize && nsize - asize > record_size - used) 3279 goto dirty_vol; 3280 3281 nsize = ALIGN(nsize, 8); 3282 data_off = le16_to_cpu(attr->res.data_off); 3283 3284 if (nsize < asize) { 3285 memmove(Add2Ptr(attr, aoff), data, dlen); 3286 data = NULL; // To skip below memmove(). 
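/*
 * Shrinking: the new payload has already been copied above, so clearing
 * 'data' skips the copy at 'move_data' while the code below still moves
 * the record tail and updates the sizes.
 */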
3287 } 3288 3289 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize), 3290 used - le16_to_cpu(lrh->record_off) - asize); 3291 3292 rec->used = cpu_to_le32(used + nsize - asize); 3293 attr->size = cpu_to_le32(nsize); 3294 attr->res.data_size = cpu_to_le32(aoff + dlen - data_off); 3295 3296 move_data: 3297 if (data) 3298 memmove(Add2Ptr(attr, aoff), data, dlen); 3299 3300 oa2 = find_loaded_attr(log, attr, rno_base); 3301 if (oa2) { 3302 void *p2 = kmemdup(attr, le32_to_cpu(attr->size), 3303 GFP_NOFS); 3304 if (p2) { 3305 // run_close(&oa2->run0); 3306 oa2->run1 = &oa2->run0; 3307 kfree(oa2->attr); 3308 oa2->attr = p2; 3309 } 3310 } 3311 3312 mi->dirty = true; 3313 break; 3314 3315 case UpdateMappingPairs: 3316 nsize = aoff + dlen; 3317 asize = le32_to_cpu(attr->size); 3318 used = le32_to_cpu(rec->used); 3319 3320 if (!check_if_attr(rec, lrh) || !attr->non_res || 3321 aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize || 3322 (nsize > asize && nsize - asize > record_size - used)) { 3323 goto dirty_vol; 3324 } 3325 3326 nsize = ALIGN(nsize, 8); 3327 3328 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize), 3329 used - le16_to_cpu(lrh->record_off) - asize); 3330 rec->used = cpu_to_le32(used + nsize - asize); 3331 attr->size = cpu_to_le32(nsize); 3332 memmove(Add2Ptr(attr, aoff), data, dlen); 3333 3334 if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn), 3335 attr_run(attr), &t64)) { 3336 goto dirty_vol; 3337 } 3338 3339 attr->nres.evcn = cpu_to_le64(t64); 3340 oa2 = find_loaded_attr(log, attr, rno_base); 3341 if (oa2 && oa2->attr->non_res) 3342 oa2->attr->nres.evcn = attr->nres.evcn; 3343 3344 mi->dirty = true; 3345 break; 3346 3347 case SetNewAttributeSizes: 3348 new_sz = data; 3349 if (!check_if_attr(rec, lrh) || !attr->non_res) 3350 goto dirty_vol; 3351 3352 attr->nres.alloc_size = new_sz->alloc_size; 3353 attr->nres.data_size = new_sz->data_size; 3354 attr->nres.valid_size = new_sz->valid_size; 3355 3356 if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES)) 3357 attr->nres.total_size = new_sz->total_size; 3358 3359 oa2 = find_loaded_attr(log, attr, rno_base); 3360 if (oa2) { 3361 void *p2 = kmemdup(attr, le32_to_cpu(attr->size), 3362 GFP_NOFS); 3363 if (p2) { 3364 kfree(oa2->attr); 3365 oa2->attr = p2; 3366 } 3367 } 3368 mi->dirty = true; 3369 break; 3370 3371 case AddIndexEntryRoot: 3372 e = (struct NTFS_DE *)data; 3373 esize = le16_to_cpu(e->size); 3374 root = resident_data(attr); 3375 hdr = &root->ihdr; 3376 used = le32_to_cpu(hdr->used); 3377 3378 if (!check_if_index_root(rec, lrh) || 3379 !check_if_root_index(attr, hdr, lrh) || 3380 Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) || 3381 esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) { 3382 goto dirty_vol; 3383 } 3384 3385 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); 3386 3387 change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize); 3388 3389 memmove(Add2Ptr(e1, esize), e1, 3390 PtrOffset(e1, Add2Ptr(hdr, used))); 3391 memmove(e1, e, esize); 3392 3393 le32_add_cpu(&attr->res.data_size, esize); 3394 hdr->used = cpu_to_le32(used + esize); 3395 le32_add_cpu(&hdr->total, esize); 3396 3397 mi->dirty = true; 3398 break; 3399 3400 case DeleteIndexEntryRoot: 3401 root = resident_data(attr); 3402 hdr = &root->ihdr; 3403 used = le32_to_cpu(hdr->used); 3404 3405 if (!check_if_index_root(rec, lrh) || 3406 !check_if_root_index(attr, hdr, lrh)) { 3407 goto dirty_vol; 3408 } 3409 3410 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); 3411 esize = le16_to_cpu(e1->size); 3412 e2 = Add2Ptr(e1, esize); 3413 3414 memmove(e1, e2, PtrOffset(e2, 
Add2Ptr(hdr, used))); 3415 3416 le32_sub_cpu(&attr->res.data_size, esize); 3417 hdr->used = cpu_to_le32(used - esize); 3418 le32_sub_cpu(&hdr->total, esize); 3419 3420 change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize); 3421 3422 mi->dirty = true; 3423 break; 3424 3425 case SetIndexEntryVcnRoot: 3426 root = resident_data(attr); 3427 hdr = &root->ihdr; 3428 3429 if (!check_if_index_root(rec, lrh) || 3430 !check_if_root_index(attr, hdr, lrh)) { 3431 goto dirty_vol; 3432 } 3433 3434 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); 3435 3436 de_set_vbn_le(e, *(__le64 *)data); 3437 mi->dirty = true; 3438 break; 3439 3440 case UpdateFileNameRoot: 3441 root = resident_data(attr); 3442 hdr = &root->ihdr; 3443 3444 if (!check_if_index_root(rec, lrh) || 3445 !check_if_root_index(attr, hdr, lrh)) { 3446 goto dirty_vol; 3447 } 3448 3449 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); 3450 fname = (struct ATTR_FILE_NAME *)(e + 1); 3451 memmove(&fname->dup, data, sizeof(fname->dup)); // 3452 mi->dirty = true; 3453 break; 3454 3455 case UpdateRecordDataRoot: 3456 root = resident_data(attr); 3457 hdr = &root->ihdr; 3458 3459 if (!check_if_index_root(rec, lrh) || 3460 !check_if_root_index(attr, hdr, lrh)) { 3461 goto dirty_vol; 3462 } 3463 3464 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off)); 3465 3466 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen); 3467 3468 mi->dirty = true; 3469 break; 3470 3471 case ZeroEndOfFileRecord: 3472 if (roff + dlen > record_size) 3473 goto dirty_vol; 3474 3475 memset(attr, 0, dlen); 3476 mi->dirty = true; 3477 break; 3478 3479 case UpdateNonresidentValue: 3480 if (lco < cbo + roff + dlen) 3481 goto dirty_vol; 3482 3483 memcpy(Add2Ptr(buffer_le, roff), data, dlen); 3484 3485 a_dirty = true; 3486 if (attr->type == ATTR_ALLOC) 3487 ntfs_fix_pre_write(buffer_le, bytes); 3488 break; 3489 3490 case AddIndexEntryAllocation: 3491 ib = Add2Ptr(buffer_le, roff); 3492 hdr = &ib->ihdr; 3493 e = data; 3494 esize = le16_to_cpu(e->size); 3495 e1 = Add2Ptr(ib, aoff); 3496 3497 if (is_baad(&ib->rhdr)) 3498 goto dirty_vol; 3499 if (!check_lsn(&ib->rhdr, rlsn)) 3500 goto out; 3501 3502 used = le32_to_cpu(hdr->used); 3503 3504 if (!check_index_buffer(ib, bytes) || 3505 !check_if_alloc_index(hdr, aoff) || 3506 Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) || 3507 used + esize > le32_to_cpu(hdr->total)) { 3508 goto dirty_vol; 3509 } 3510 3511 memmove(Add2Ptr(e1, esize), e1, 3512 PtrOffset(e1, Add2Ptr(hdr, used))); 3513 memcpy(e1, e, esize); 3514 3515 hdr->used = cpu_to_le32(used + esize); 3516 3517 a_dirty = true; 3518 3519 ntfs_fix_pre_write(&ib->rhdr, bytes); 3520 break; 3521 3522 case DeleteIndexEntryAllocation: 3523 ib = Add2Ptr(buffer_le, roff); 3524 hdr = &ib->ihdr; 3525 e = Add2Ptr(ib, aoff); 3526 esize = le16_to_cpu(e->size); 3527 3528 if (is_baad(&ib->rhdr)) 3529 goto dirty_vol; 3530 if (!check_lsn(&ib->rhdr, rlsn)) 3531 goto out; 3532 3533 if (!check_index_buffer(ib, bytes) || 3534 !check_if_alloc_index(hdr, aoff)) { 3535 goto dirty_vol; 3536 } 3537 3538 e1 = Add2Ptr(e, esize); 3539 nsize = esize; 3540 used = le32_to_cpu(hdr->used); 3541 3542 memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used))); 3543 3544 hdr->used = cpu_to_le32(used - nsize); 3545 3546 a_dirty = true; 3547 3548 ntfs_fix_pre_write(&ib->rhdr, bytes); 3549 break; 3550 3551 case WriteEndOfIndexBuffer: 3552 ib = Add2Ptr(buffer_le, roff); 3553 hdr = &ib->ihdr; 3554 e = Add2Ptr(ib, aoff); 3555 3556 if (is_baad(&ib->rhdr)) 3557 goto dirty_vol; 3558 if (!check_lsn(&ib->rhdr, rlsn)) 3559 goto out; 3560 if 
(!check_index_buffer(ib, bytes) || 3561 !check_if_alloc_index(hdr, aoff) || 3562 aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) + 3563 le32_to_cpu(hdr->total)) { 3564 goto dirty_vol; 3565 } 3566 3567 hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e)); 3568 memmove(e, data, dlen); 3569 3570 a_dirty = true; 3571 ntfs_fix_pre_write(&ib->rhdr, bytes); 3572 break; 3573 3574 case SetIndexEntryVcnAllocation: 3575 ib = Add2Ptr(buffer_le, roff); 3576 hdr = &ib->ihdr; 3577 e = Add2Ptr(ib, aoff); 3578 3579 if (is_baad(&ib->rhdr)) 3580 goto dirty_vol; 3581 3582 if (!check_lsn(&ib->rhdr, rlsn)) 3583 goto out; 3584 if (!check_index_buffer(ib, bytes) || 3585 !check_if_alloc_index(hdr, aoff)) { 3586 goto dirty_vol; 3587 } 3588 3589 de_set_vbn_le(e, *(__le64 *)data); 3590 3591 a_dirty = true; 3592 ntfs_fix_pre_write(&ib->rhdr, bytes); 3593 break; 3594 3595 case UpdateFileNameAllocation: 3596 ib = Add2Ptr(buffer_le, roff); 3597 hdr = &ib->ihdr; 3598 e = Add2Ptr(ib, aoff); 3599 3600 if (is_baad(&ib->rhdr)) 3601 goto dirty_vol; 3602 3603 if (!check_lsn(&ib->rhdr, rlsn)) 3604 goto out; 3605 if (!check_index_buffer(ib, bytes) || 3606 !check_if_alloc_index(hdr, aoff)) { 3607 goto dirty_vol; 3608 } 3609 3610 fname = (struct ATTR_FILE_NAME *)(e + 1); 3611 memmove(&fname->dup, data, sizeof(fname->dup)); 3612 3613 a_dirty = true; 3614 ntfs_fix_pre_write(&ib->rhdr, bytes); 3615 break; 3616 3617 case SetBitsInNonresidentBitMap: 3618 off = le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off); 3619 bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits); 3620 3621 if (cbo + (off + 7) / 8 > lco || 3622 cbo + ((off + bits + 7) / 8) > lco) { 3623 goto dirty_vol; 3624 } 3625 3626 ntfs_bitmap_set_le(Add2Ptr(buffer_le, roff), off, bits); 3627 a_dirty = true; 3628 break; 3629 3630 case ClearBitsInNonresidentBitMap: 3631 off = le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off); 3632 bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits); 3633 3634 if (cbo + (off + 7) / 8 > lco || 3635 cbo + ((off + bits + 7) / 8) > lco) { 3636 goto dirty_vol; 3637 } 3638 3639 ntfs_bitmap_clear_le(Add2Ptr(buffer_le, roff), off, bits); 3640 a_dirty = true; 3641 break; 3642 3643 case UpdateRecordDataAllocation: 3644 ib = Add2Ptr(buffer_le, roff); 3645 hdr = &ib->ihdr; 3646 e = Add2Ptr(ib, aoff); 3647 3648 if (is_baad(&ib->rhdr)) 3649 goto dirty_vol; 3650 3651 if (!check_lsn(&ib->rhdr, rlsn)) 3652 goto out; 3653 if (!check_index_buffer(ib, bytes) || 3654 !check_if_alloc_index(hdr, aoff)) { 3655 goto dirty_vol; 3656 } 3657 3658 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen); 3659 3660 a_dirty = true; 3661 ntfs_fix_pre_write(&ib->rhdr, bytes); 3662 break; 3663 3664 default: 3665 WARN_ON(1); 3666 } 3667 3668 if (rlsn) { 3669 __le64 t64 = cpu_to_le64(*rlsn); 3670 3671 if (rec) 3672 rec->rhdr.lsn = t64; 3673 if (ib) 3674 ib->rhdr.lsn = t64; 3675 } 3676 3677 if (mi && mi->dirty) { 3678 err = mi_write(mi, 0); 3679 if (err) 3680 goto out; 3681 } 3682 3683 if (a_dirty) { 3684 attr = oa->attr; 3685 err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 3686 0); 3687 if (err) 3688 goto out; 3689 } 3690 3691 out: 3692 3693 if (inode) 3694 iput(inode); 3695 else if (mi != mi2_child) 3696 mi_put(mi); 3697 3698 kfree(buffer_le); 3699 3700 return err; 3701 3702 dirty_vol: 3703 log->set_dirty = true; 3704 goto out; 3705 } 3706 3707 /* 3708 * log_replay - Replays log and empties it. 3709 * 3710 * This function is called during mount operation. 3711 * It replays log and empties it. 
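 * Errors are reported as negative codes; an unsupported log file
 * version fails with -EOPNOTSUPP and flags the log as dirty
 * (log->set_dirty).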
3712 * Initialized is set false if logfile contains '-1'. 3713 */ 3714 int log_replay(struct ntfs_inode *ni, bool *initialized) 3715 { 3716 int err; 3717 struct ntfs_sb_info *sbi = ni->mi.sbi; 3718 struct ntfs_log *log; 3719 3720 u64 rec_lsn, checkpt_lsn = 0, rlsn = 0; 3721 struct ATTR_NAME_ENTRY *attr_names = NULL; 3722 struct RESTART_TABLE *dptbl = NULL; 3723 struct RESTART_TABLE *trtbl = NULL; 3724 const struct RESTART_TABLE *rt; 3725 struct RESTART_TABLE *oatbl = NULL; 3726 struct inode *inode; 3727 struct OpenAttr *oa; 3728 struct ntfs_inode *ni_oe; 3729 struct ATTRIB *attr = NULL; 3730 u64 size, vcn, undo_next_lsn; 3731 CLST rno, lcn, lcn0, len0, clen; 3732 void *data; 3733 struct NTFS_RESTART *rst = NULL; 3734 struct lcb *lcb = NULL; 3735 struct OPEN_ATTR_ENRTY *oe; 3736 struct TRANSACTION_ENTRY *tr; 3737 struct DIR_PAGE_ENTRY *dp; 3738 u32 i, bytes_per_attr_entry; 3739 u32 vbo, tail, off, dlen; 3740 u32 saved_len, rec_len, transact_id; 3741 bool use_second_page; 3742 struct RESTART_AREA *ra2, *ra = NULL; 3743 struct CLIENT_REC *ca, *cr; 3744 __le16 client; 3745 struct RESTART_HDR *rh; 3746 const struct LFS_RECORD_HDR *frh; 3747 const struct LOG_REC_HDR *lrh; 3748 bool is_mapped; 3749 bool is_ro = sb_rdonly(sbi->sb); 3750 u64 t64; 3751 u16 t16; 3752 u32 t32; 3753 3754 log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS); 3755 if (!log) 3756 return -ENOMEM; 3757 3758 log->ni = ni; 3759 log->l_size = log->orig_file_size = ni->vfs_inode.i_size; 3760 3761 /* Get the size of page. NOTE: To replay we can use default page. */ 3762 #if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2 3763 log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, true); 3764 #else 3765 log->page_size = norm_file_page(PAGE_SIZE, &log->l_size, false); 3766 #endif 3767 if (!log->page_size) { 3768 err = -EINVAL; 3769 goto out; 3770 } 3771 3772 log->one_page_buf = kmalloc(log->page_size, GFP_NOFS); 3773 if (!log->one_page_buf) { 3774 err = -ENOMEM; 3775 goto out; 3776 } 3777 3778 log->page_mask = log->page_size - 1; 3779 log->page_bits = blksize_bits(log->page_size); 3780 3781 /* Look for a restart area on the disk. */ 3782 err = log_read_rst(log, true, &log->rst_info); 3783 if (err) 3784 goto out; 3785 3786 /* remember 'initialized' */ 3787 *initialized = log->rst_info.initialized; 3788 3789 if (!log->rst_info.restart) { 3790 if (log->rst_info.initialized) { 3791 /* No restart area but the file is not initialized. */ 3792 err = -EINVAL; 3793 goto out; 3794 } 3795 3796 log_init_pg_hdr(log, 1, 1); 3797 log_create(log, 0, get_random_u32(), false, false); 3798 3799 ra = log_create_ra(log); 3800 if (!ra) { 3801 err = -ENOMEM; 3802 goto out; 3803 } 3804 log->ra = ra; 3805 log->init_ra = true; 3806 3807 goto process_log; 3808 } 3809 3810 /* 3811 * If the restart offset above wasn't zero then we won't 3812 * look for a second restart. 3813 */ 3814 if (log->rst_info.vbo) 3815 goto check_restart_area; 3816 3817 err = log_read_rst(log, false, &log->rst_info2); 3818 if (err) 3819 goto out; 3820 3821 /* Determine which restart area to use. 
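 *
 * The second copy is preferred only when it exists and records a larger
 * last_lsn; when chkdsk was run, a page carrying NTFS_CHKD_SIGNATURE at
 * offset page_size makes us fall back to the first copy anyway.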
*/ 3822 if (!log->rst_info2.restart || 3823 log->rst_info2.last_lsn <= log->rst_info.last_lsn) 3824 goto use_first_page; 3825 3826 use_second_page = true; 3827 3828 if (log->rst_info.chkdsk_was_run && 3829 log->page_size != log->rst_info.vbo) { 3830 struct RECORD_PAGE_HDR *sp = NULL; 3831 bool usa_error; 3832 3833 if (!read_log_page(log, log->page_size, &sp, &usa_error) && 3834 sp->rhdr.sign == NTFS_CHKD_SIGNATURE) { 3835 use_second_page = false; 3836 } 3837 kfree(sp); 3838 } 3839 3840 if (use_second_page) { 3841 kfree(log->rst_info.r_page); 3842 memcpy(&log->rst_info, &log->rst_info2, 3843 sizeof(struct restart_info)); 3844 log->rst_info2.r_page = NULL; 3845 } 3846 3847 use_first_page: 3848 kfree(log->rst_info2.r_page); 3849 3850 check_restart_area: 3851 /* 3852 * If the restart area is at offset 0, we want 3853 * to write the second restart area first. 3854 */ 3855 log->init_ra = !!log->rst_info.vbo; 3856 3857 /* If we have a valid page then grab a pointer to the restart area. */ 3858 ra2 = log->rst_info.valid_page ? 3859 Add2Ptr(log->rst_info.r_page, 3860 le16_to_cpu(log->rst_info.r_page->ra_off)) : 3861 NULL; 3862 3863 if (log->rst_info.chkdsk_was_run || 3864 (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) { 3865 bool wrapped = false; 3866 bool use_multi_page = false; 3867 u32 open_log_count; 3868 3869 /* Do some checks based on whether we have a valid log page. */ 3870 open_log_count = log->rst_info.valid_page ? 3871 le32_to_cpu(ra2->open_log_count) : 3872 get_random_u32(); 3873 3874 log_init_pg_hdr(log, 1, 1); 3875 3876 log_create(log, log->rst_info.last_lsn, open_log_count, wrapped, 3877 use_multi_page); 3878 3879 ra = log_create_ra(log); 3880 if (!ra) { 3881 err = -ENOMEM; 3882 goto out; 3883 } 3884 log->ra = ra; 3885 3886 /* Put the restart areas and initialize 3887 * the log file as required. 3888 */ 3889 goto process_log; 3890 } 3891 3892 if (!ra2) { 3893 err = -EINVAL; 3894 goto out; 3895 } 3896 3897 /* 3898 * If the log page or the system page sizes have changed, we can't 3899 * use the log file. We must use the system page size instead of the 3900 * default size if there is not a clean shutdown. 3901 */ 3902 t32 = le32_to_cpu(log->rst_info.r_page->sys_page_size); 3903 if (log->page_size != t32) { 3904 log->l_size = log->orig_file_size; 3905 log->page_size = norm_file_page(t32, &log->l_size, 3906 t32 == DefaultLogPageSize); 3907 } 3908 3909 if (log->page_size != t32 || 3910 log->page_size != le32_to_cpu(log->rst_info.r_page->page_size)) { 3911 err = -EINVAL; 3912 goto out; 3913 } 3914 3915 /* If the file size has shrunk then we won't mount it. 
*/ 3916 if (log->l_size < le64_to_cpu(ra2->l_size)) { 3917 err = -EINVAL; 3918 goto out; 3919 } 3920 3921 log_init_pg_hdr(log, le16_to_cpu(log->rst_info.r_page->major_ver), 3922 le16_to_cpu(log->rst_info.r_page->minor_ver)); 3923 3924 log->l_size = le64_to_cpu(ra2->l_size); 3925 log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits); 3926 log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits; 3927 log->seq_num_mask = (8 << log->file_data_bits) - 1; 3928 log->last_lsn = le64_to_cpu(ra2->current_lsn); 3929 log->seq_num = log->last_lsn >> log->file_data_bits; 3930 log->ra_off = le16_to_cpu(log->rst_info.r_page->ra_off); 3931 log->restart_size = log->sys_page_size - log->ra_off; 3932 log->record_header_len = le16_to_cpu(ra2->rec_hdr_len); 3933 log->ra_size = le16_to_cpu(ra2->ra_len); 3934 log->data_off = le16_to_cpu(ra2->data_off); 3935 log->data_size = log->page_size - log->data_off; 3936 log->reserved = log->data_size - log->record_header_len; 3937 3938 vbo = lsn_to_vbo(log, log->last_lsn); 3939 3940 if (vbo < log->first_page) { 3941 /* This is a pseudo lsn. */ 3942 log->l_flags |= NTFSLOG_NO_LAST_LSN; 3943 log->next_page = log->first_page; 3944 goto find_oldest; 3945 } 3946 3947 /* Find the end of this log record. */ 3948 off = final_log_off(log, log->last_lsn, 3949 le32_to_cpu(ra2->last_lsn_data_len)); 3950 3951 /* If we wrapped the file then increment the sequence number. */ 3952 if (off <= vbo) { 3953 log->seq_num += 1; 3954 log->l_flags |= NTFSLOG_WRAPPED; 3955 } 3956 3957 /* Now compute the next log page to use. */ 3958 vbo &= ~log->sys_page_mask; 3959 tail = log->page_size - (off & log->page_mask) - 1; 3960 3961 /* 3962 * If we can fit another log record on the page, 3963 * move back a page in the log file. 3964 */ 3965 if (tail >= log->record_header_len) { 3966 log->l_flags |= NTFSLOG_REUSE_TAIL; 3967 log->next_page = vbo; 3968 } else { 3969 log->next_page = next_page_off(log, vbo); 3970 } 3971 3972 find_oldest: 3973 /* 3974 * Find the oldest client lsn. Use the last 3975 * flushed lsn as a starting point.
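 *
 * oldest_client_lsn() updates log->oldest_lsn from the client records;
 * if the resulting file offset lies in front of the first log page
 * there is no usable oldest lsn and NTFSLOG_NO_OLDEST_LSN is set.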
3976 */ 3977 log->oldest_lsn = log->last_lsn; 3978 oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)), 3979 ra2->client_idx[1], &log->oldest_lsn); 3980 log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn); 3981 3982 if (log->oldest_lsn_off < log->first_page) 3983 log->l_flags |= NTFSLOG_NO_OLDEST_LSN; 3984 3985 if (!(ra2->flags & RESTART_SINGLE_PAGE_IO)) 3986 log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO; 3987 3988 log->current_openlog_count = le32_to_cpu(ra2->open_log_count); 3989 log->total_avail_pages = log->l_size - log->first_page; 3990 log->total_avail = log->total_avail_pages >> log->page_bits; 3991 log->max_current_avail = log->total_avail * log->reserved; 3992 log->total_avail = log->total_avail * log->data_size; 3993 3994 log->current_avail = current_log_avail(log); 3995 3996 ra = kzalloc(log->restart_size, GFP_NOFS); 3997 if (!ra) { 3998 err = -ENOMEM; 3999 goto out; 4000 } 4001 log->ra = ra; 4002 4003 t16 = le16_to_cpu(ra2->client_off); 4004 if (t16 == offsetof(struct RESTART_AREA, clients)) { 4005 memcpy(ra, ra2, log->ra_size); 4006 } else { 4007 memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients)); 4008 memcpy(ra->clients, Add2Ptr(ra2, t16), 4009 le16_to_cpu(ra2->ra_len) - t16); 4010 4011 log->current_openlog_count = get_random_u32(); 4012 ra->open_log_count = cpu_to_le32(log->current_openlog_count); 4013 log->ra_size = offsetof(struct RESTART_AREA, clients) + 4014 sizeof(struct CLIENT_REC); 4015 ra->client_off = 4016 cpu_to_le16(offsetof(struct RESTART_AREA, clients)); 4017 ra->ra_len = cpu_to_le16(log->ra_size); 4018 } 4019 4020 le32_add_cpu(&ra->open_log_count, 1); 4021 4022 /* Now we need to walk through looking for the last lsn. */ 4023 err = last_log_lsn(log); 4024 if (err) 4025 goto out; 4026 4027 log->current_avail = current_log_avail(log); 4028 4029 /* Remember which restart area to write first. */ 4030 log->init_ra = log->rst_info.vbo; 4031 4032 process_log: 4033 /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values. */ 4034 switch ((log->major_ver << 16) + log->minor_ver) { 4035 case 0x10000: 4036 case 0x10001: 4037 case 0x20000: 4038 break; 4039 default: 4040 ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported", 4041 log->major_ver, log->minor_ver); 4042 err = -EOPNOTSUPP; 4043 log->set_dirty = true; 4044 goto out; 4045 } 4046 4047 /* One client "NTFS" per logfile. */ 4048 ca = Add2Ptr(ra, le16_to_cpu(ra->client_off)); 4049 4050 for (client = ra->client_idx[1];; client = cr->next_client) { 4051 if (client == LFS_NO_CLIENT_LE) { 4052 /* Insert "NTFS" client LogFile. */ 4053 client = ra->client_idx[0]; 4054 if (client == LFS_NO_CLIENT_LE) { 4055 err = -EINVAL; 4056 goto out; 4057 } 4058 4059 t16 = le16_to_cpu(client); 4060 cr = ca + t16; 4061 4062 remove_client(ca, cr, &ra->client_idx[0]); 4063 4064 cr->restart_lsn = 0; 4065 cr->oldest_lsn = cpu_to_le64(log->oldest_lsn); 4066 cr->name_bytes = cpu_to_le32(8); 4067 cr->name[0] = cpu_to_le16('N'); 4068 cr->name[1] = cpu_to_le16('T'); 4069 cr->name[2] = cpu_to_le16('F'); 4070 cr->name[3] = cpu_to_le16('S'); 4071 4072 add_client(ca, t16, &ra->client_idx[1]); 4073 break; 4074 } 4075 4076 cr = ca + le16_to_cpu(client); 4077 4078 if (cpu_to_le32(8) == cr->name_bytes && 4079 cpu_to_le16('N') == cr->name[0] && 4080 cpu_to_le16('T') == cr->name[1] && 4081 cpu_to_le16('F') == cr->name[2] && 4082 cpu_to_le16('S') == cr->name[3]) 4083 break; 4084 } 4085 4086 /* Update the client handle with the client block information. 
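 *
 * The "NTFS" client record found (or just inserted) above supplies the
 * sequence number and client index that are used to match log records
 * to this client for the rest of the replay.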
*/ 4087 log->client_id.seq_num = cr->seq_num; 4088 log->client_id.client_idx = client; 4089 4090 err = read_rst_area(log, &rst, &checkpt_lsn); 4091 if (err) 4092 goto out; 4093 4094 if (!rst) 4095 goto out; 4096 4097 bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28; 4098 4099 if (rst->check_point_start) 4100 checkpt_lsn = le64_to_cpu(rst->check_point_start); 4101 4102 /* Allocate and Read the Transaction Table. */ 4103 if (!rst->transact_table_len) 4104 goto check_dirty_page_table; 4105 4106 t64 = le64_to_cpu(rst->transact_table_lsn); 4107 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); 4108 if (err) 4109 goto out; 4110 4111 lrh = lcb->log_rec; 4112 frh = lcb->lrh; 4113 rec_len = le32_to_cpu(frh->client_data_len); 4114 4115 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), 4116 bytes_per_attr_entry)) { 4117 err = -EINVAL; 4118 goto out; 4119 } 4120 4121 t16 = le16_to_cpu(lrh->redo_off); 4122 4123 rt = Add2Ptr(lrh, t16); 4124 t32 = rec_len - t16; 4125 4126 /* Now check that this is a valid restart table. */ 4127 if (!check_rstbl(rt, t32)) { 4128 err = -EINVAL; 4129 goto out; 4130 } 4131 4132 trtbl = kmemdup(rt, t32, GFP_NOFS); 4133 if (!trtbl) { 4134 err = -ENOMEM; 4135 goto out; 4136 } 4137 4138 lcb_put(lcb); 4139 lcb = NULL; 4140 4141 check_dirty_page_table: 4142 /* The next record back should be the Dirty Pages Table. */ 4143 if (!rst->dirty_pages_len) 4144 goto check_attribute_names; 4145 4146 t64 = le64_to_cpu(rst->dirty_pages_table_lsn); 4147 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); 4148 if (err) 4149 goto out; 4150 4151 lrh = lcb->log_rec; 4152 frh = lcb->lrh; 4153 rec_len = le32_to_cpu(frh->client_data_len); 4154 4155 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), 4156 bytes_per_attr_entry)) { 4157 err = -EINVAL; 4158 goto out; 4159 } 4160 4161 t16 = le16_to_cpu(lrh->redo_off); 4162 4163 rt = Add2Ptr(lrh, t16); 4164 t32 = rec_len - t16; 4165 4166 /* Now check that this is a valid restart table. */ 4167 if (!check_rstbl(rt, t32)) { 4168 err = -EINVAL; 4169 goto out; 4170 } 4171 4172 dptbl = kmemdup(rt, t32, GFP_NOFS); 4173 if (!dptbl) { 4174 err = -ENOMEM; 4175 goto out; 4176 } 4177 4178 /* Convert Ra version '0' into version '1'. */ 4179 if (rst->major_ver) 4180 goto end_conv_1; 4181 4182 dp = NULL; 4183 while ((dp = enum_rstbl(dptbl, dp))) { 4184 struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp; 4185 // NOTE: Danger. Check for of boundary. 4186 memmove(&dp->vcn, &dp0->vcn_low, 4187 2 * sizeof(u64) + 4188 le32_to_cpu(dp->lcns_follow) * sizeof(u64)); 4189 } 4190 4191 end_conv_1: 4192 lcb_put(lcb); 4193 lcb = NULL; 4194 4195 /* 4196 * Go through the table and remove the duplicates, 4197 * remembering the oldest lsn values. 4198 */ 4199 if (sbi->cluster_size <= log->page_size) 4200 goto trace_dp_table; 4201 4202 dp = NULL; 4203 while ((dp = enum_rstbl(dptbl, dp))) { 4204 struct DIR_PAGE_ENTRY *next = dp; 4205 4206 while ((next = enum_rstbl(dptbl, next))) { 4207 if (next->target_attr == dp->target_attr && 4208 next->vcn == dp->vcn) { 4209 if (le64_to_cpu(next->oldest_lsn) < 4210 le64_to_cpu(dp->oldest_lsn)) { 4211 dp->oldest_lsn = next->oldest_lsn; 4212 } 4213 4214 free_rsttbl_idx(dptbl, PtrOffset(dptbl, next)); 4215 } 4216 } 4217 } 4218 trace_dp_table: 4219 check_attribute_names: 4220 /* The next record should be the Attribute Names. 
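 *
 * As with the tables above: the record is located by the lsn stored in
 * the restart record, validated with check_log_rec() and kept as a
 * kmemdup() copy; the names are attached to the open attribute entries
 * at 'check_attribute_names2' below.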
*/ 4221 if (!rst->attr_names_len) 4222 goto check_attr_table; 4223 4224 t64 = le64_to_cpu(rst->attr_names_lsn); 4225 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); 4226 if (err) 4227 goto out; 4228 4229 lrh = lcb->log_rec; 4230 frh = lcb->lrh; 4231 rec_len = le32_to_cpu(frh->client_data_len); 4232 4233 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), 4234 bytes_per_attr_entry)) { 4235 err = -EINVAL; 4236 goto out; 4237 } 4238 4239 t32 = lrh_length(lrh); 4240 rec_len -= t32; 4241 4242 attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS); 4243 if (!attr_names) { 4244 err = -ENOMEM; 4245 goto out; 4246 } 4247 4248 lcb_put(lcb); 4249 lcb = NULL; 4250 4251 check_attr_table: 4252 /* The next record should be the attribute Table. */ 4253 if (!rst->open_attr_len) 4254 goto check_attribute_names2; 4255 4256 t64 = le64_to_cpu(rst->open_attr_table_lsn); 4257 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb); 4258 if (err) 4259 goto out; 4260 4261 lrh = lcb->log_rec; 4262 frh = lcb->lrh; 4263 rec_len = le32_to_cpu(frh->client_data_len); 4264 4265 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id), 4266 bytes_per_attr_entry)) { 4267 err = -EINVAL; 4268 goto out; 4269 } 4270 4271 t16 = le16_to_cpu(lrh->redo_off); 4272 4273 rt = Add2Ptr(lrh, t16); 4274 t32 = rec_len - t16; 4275 4276 if (!check_rstbl(rt, t32)) { 4277 err = -EINVAL; 4278 goto out; 4279 } 4280 4281 oatbl = kmemdup(rt, t32, GFP_NOFS); 4282 if (!oatbl) { 4283 err = -ENOMEM; 4284 goto out; 4285 } 4286 4287 log->open_attr_tbl = oatbl; 4288 4289 /* Clear all of the Attr pointers. */ 4290 oe = NULL; 4291 while ((oe = enum_rstbl(oatbl, oe))) { 4292 if (!rst->major_ver) { 4293 struct OPEN_ATTR_ENRTY_32 oe0; 4294 4295 /* Really 'oe' points to OPEN_ATTR_ENRTY_32. */ 4296 memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0); 4297 4298 oe->bytes_per_index = oe0.bytes_per_index; 4299 oe->type = oe0.type; 4300 oe->is_dirty_pages = oe0.is_dirty_pages; 4301 oe->name_len = 0; 4302 oe->ref = oe0.ref; 4303 oe->open_record_lsn = oe0.open_record_lsn; 4304 } 4305 4306 oe->is_attr_name = 0; 4307 oe->ptr = NULL; 4308 } 4309 4310 lcb_put(lcb); 4311 lcb = NULL; 4312 4313 check_attribute_names2: 4314 if (rst->attr_names_len && oatbl) { 4315 struct ATTR_NAME_ENTRY *ane = attr_names; 4316 while (ane->off) { 4317 /* TODO: Clear table on exit! */ 4318 oe = Add2Ptr(oatbl, le16_to_cpu(ane->off)); 4319 t16 = le16_to_cpu(ane->name_bytes); 4320 oe->name_len = t16 / sizeof(short); 4321 oe->ptr = ane->name; 4322 oe->is_attr_name = 2; 4323 ane = Add2Ptr(ane, 4324 sizeof(struct ATTR_NAME_ENTRY) + t16); 4325 } 4326 } 4327 4328 /* 4329 * If the checkpt_lsn is zero, then this is a freshly 4330 * formatted disk and we have no work to do. 4331 */ 4332 if (!checkpt_lsn) { 4333 err = 0; 4334 goto out; 4335 } 4336 4337 if (!oatbl) { 4338 oatbl = init_rsttbl(bytes_per_attr_entry, 8); 4339 if (!oatbl) { 4340 err = -ENOMEM; 4341 goto out; 4342 } 4343 } 4344 4345 log->open_attr_tbl = oatbl; 4346 4347 /* Start the analysis pass from the Checkpoint lsn. */ 4348 rec_lsn = checkpt_lsn; 4349 4350 /* Read the first lsn. */ 4351 err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb); 4352 if (err) 4353 goto out; 4354 4355 /* Loop to read all subsequent records to the end of the log file. 
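	 *
	 * This is the Analysis Pass: starting at checkpt_lsn every client
	 * record is examined, transactions are rebuilt in trtbl, dirty pages
	 * are collected in dptbl, open attributes in oatbl, and rlsn (after
	 * the table scan further down) becomes the lsn at which the Redo Pass
	 * will start.  In outline (a sketch, not the literal control flow):
	 *
	 *	while (!read_next_log_rec(log, lcb, &rec_lsn) && rec_lsn) {
	 *		note_transaction(frh);		// update trtbl
	 *		switch (lrh->redo_op) {
	 *			... update dptbl / oatbl ...
	 *		}
	 *	}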
*/ 4356 next_log_record_analyze: 4357 err = read_next_log_rec(log, lcb, &rec_lsn); 4358 if (err) 4359 goto out; 4360 4361 if (!rec_lsn) 4362 goto end_log_records_enumerate; 4363 4364 frh = lcb->lrh; 4365 transact_id = le32_to_cpu(frh->transact_id); 4366 rec_len = le32_to_cpu(frh->client_data_len); 4367 lrh = lcb->log_rec; 4368 4369 if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) { 4370 err = -EINVAL; 4371 goto out; 4372 } 4373 4374 /* 4375 * The first lsn after the previous lsn remembered 4376 * the checkpoint is the first candidate for the rlsn. 4377 */ 4378 if (!rlsn) 4379 rlsn = rec_lsn; 4380 4381 if (LfsClientRecord != frh->record_type) 4382 goto next_log_record_analyze; 4383 4384 /* 4385 * Now update the Transaction Table for this transaction. If there 4386 * is no entry present or it is unallocated we allocate the entry. 4387 */ 4388 if (!trtbl) { 4389 trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY), 4390 INITIAL_NUMBER_TRANSACTIONS); 4391 if (!trtbl) { 4392 err = -ENOMEM; 4393 goto out; 4394 } 4395 } 4396 4397 tr = Add2Ptr(trtbl, transact_id); 4398 4399 if (transact_id >= bytes_per_rt(trtbl) || 4400 tr->next != RESTART_ENTRY_ALLOCATED_LE) { 4401 tr = alloc_rsttbl_from_idx(&trtbl, transact_id); 4402 if (!tr) { 4403 err = -ENOMEM; 4404 goto out; 4405 } 4406 tr->transact_state = TransactionActive; 4407 tr->first_lsn = cpu_to_le64(rec_lsn); 4408 } 4409 4410 tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn); 4411 4412 /* 4413 * If this is a compensation log record, then change 4414 * the undo_next_lsn to be the undo_next_lsn of this record. 4415 */ 4416 if (lrh->undo_op == cpu_to_le16(CompensationLogRecord)) 4417 tr->undo_next_lsn = frh->client_undo_next_lsn; 4418 4419 /* Dispatch to handle log record depending on type. */ 4420 switch (le16_to_cpu(lrh->redo_op)) { 4421 case InitializeFileRecordSegment: 4422 case DeallocateFileRecordSegment: 4423 case WriteEndOfFileRecordSegment: 4424 case CreateAttribute: 4425 case DeleteAttribute: 4426 case UpdateResidentValue: 4427 case UpdateNonresidentValue: 4428 case UpdateMappingPairs: 4429 case SetNewAttributeSizes: 4430 case AddIndexEntryRoot: 4431 case DeleteIndexEntryRoot: 4432 case AddIndexEntryAllocation: 4433 case DeleteIndexEntryAllocation: 4434 case WriteEndOfIndexBuffer: 4435 case SetIndexEntryVcnRoot: 4436 case SetIndexEntryVcnAllocation: 4437 case UpdateFileNameRoot: 4438 case UpdateFileNameAllocation: 4439 case SetBitsInNonresidentBitMap: 4440 case ClearBitsInNonresidentBitMap: 4441 case UpdateRecordDataRoot: 4442 case UpdateRecordDataAllocation: 4443 case ZeroEndOfFileRecord: 4444 t16 = le16_to_cpu(lrh->target_attr); 4445 t64 = le64_to_cpu(lrh->target_vcn); 4446 dp = find_dp(dptbl, t16, t64); 4447 4448 if (dp) 4449 goto copy_lcns; 4450 4451 /* 4452 * Calculate the number of clusters per page the system 4453 * which wrote the checkpoint, possibly creating the table. 
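		 *
		 * Each Dirty Page entry ends with lcns_follow Lcns, so the
		 * per-entry size stored in the logged table gives the cluster
		 * count back:
		 *
		 *	t32 = (le16_to_cpu(dptbl->size) -
		 *	       sizeof(struct DIR_PAGE_ENTRY)) / sizeof(u64);
		 *
		 * e.g. a 0x28-byte entry (0x20 header plus one Lcn) describes
		 * one cluster per page.  If no dirty page table was logged at
		 * all, log->clst_per_page is used and a fresh table is
		 * allocated instead.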
4454 */ 4455 if (dptbl) { 4456 t32 = (le16_to_cpu(dptbl->size) - 4457 sizeof(struct DIR_PAGE_ENTRY)) / 4458 sizeof(u64); 4459 } else { 4460 t32 = log->clst_per_page; 4461 kfree(dptbl); 4462 dptbl = init_rsttbl(struct_size(dp, page_lcns, t32), 4463 32); 4464 if (!dptbl) { 4465 err = -ENOMEM; 4466 goto out; 4467 } 4468 } 4469 4470 dp = alloc_rsttbl_idx(&dptbl); 4471 if (!dp) { 4472 err = -ENOMEM; 4473 goto out; 4474 } 4475 dp->target_attr = cpu_to_le32(t16); 4476 dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits); 4477 dp->lcns_follow = cpu_to_le32(t32); 4478 dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1)); 4479 dp->oldest_lsn = cpu_to_le64(rec_lsn); 4480 4481 copy_lcns: 4482 /* 4483 * Copy the Lcns from the log record into the Dirty Page Entry. 4484 * TODO: For different page size support, must somehow make 4485 * whole routine a loop, case Lcns do not fit below. 4486 */ 4487 t16 = le16_to_cpu(lrh->lcns_follow); 4488 for (i = 0; i < t16; i++) { 4489 size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) - 4490 le64_to_cpu(dp->vcn)); 4491 dp->page_lcns[j + i] = lrh->page_lcns[i]; 4492 } 4493 4494 goto next_log_record_analyze; 4495 4496 case DeleteDirtyClusters: { 4497 u32 range_count = 4498 le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE); 4499 const struct LCN_RANGE *r = 4500 Add2Ptr(lrh, le16_to_cpu(lrh->redo_off)); 4501 4502 /* Loop through all of the Lcn ranges this log record. */ 4503 for (i = 0; i < range_count; i++, r++) { 4504 u64 lcn0 = le64_to_cpu(r->lcn); 4505 u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1; 4506 4507 dp = NULL; 4508 while ((dp = enum_rstbl(dptbl, dp))) { 4509 u32 j; 4510 4511 t32 = le32_to_cpu(dp->lcns_follow); 4512 for (j = 0; j < t32; j++) { 4513 t64 = le64_to_cpu(dp->page_lcns[j]); 4514 if (t64 >= lcn0 && t64 <= lcn_e) 4515 dp->page_lcns[j] = 0; 4516 } 4517 } 4518 } 4519 goto next_log_record_analyze; 4520 ; 4521 } 4522 4523 case OpenNonresidentAttribute: 4524 t16 = le16_to_cpu(lrh->target_attr); 4525 if (t16 >= bytes_per_rt(oatbl)) { 4526 /* 4527 * Compute how big the table needs to be. 4528 * Add 10 extra entries for some cushion. 4529 */ 4530 u32 new_e = t16 / le16_to_cpu(oatbl->size); 4531 4532 new_e += 10 - le16_to_cpu(oatbl->used); 4533 4534 oatbl = extend_rsttbl(oatbl, new_e, ~0u); 4535 log->open_attr_tbl = oatbl; 4536 if (!oatbl) { 4537 err = -ENOMEM; 4538 goto out; 4539 } 4540 } 4541 4542 /* Point to the entry being opened. */ 4543 oe = alloc_rsttbl_from_idx(&oatbl, t16); 4544 log->open_attr_tbl = oatbl; 4545 if (!oe) { 4546 err = -ENOMEM; 4547 goto out; 4548 } 4549 4550 /* Initialize this entry from the log record. */ 4551 t16 = le16_to_cpu(lrh->redo_off); 4552 if (!rst->major_ver) { 4553 /* Convert version '0' into version '1'. 
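			 *
			 * A major_ver == 0 checkpoint stores the entry in the
			 * on-disk struct OPEN_ATTR_ENRTY_32 layout, so its
			 * fields are copied one by one into the in-memory
			 * struct OPEN_ATTR_ENRTY; name_len is cleared here and
			 * set again below if the log record carries the
			 * attribute name in its undo data.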
			 */
			struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);

			oe->bytes_per_index = oe0->bytes_per_index;
			oe->type = oe0->type;
			oe->is_dirty_pages = oe0->is_dirty_pages;
			oe->name_len = 0; //oe0.name_len;
			oe->ref = oe0->ref;
			oe->open_record_lsn = oe0->open_record_lsn;
		} else {
			memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
		}

		t16 = le16_to_cpu(lrh->undo_len);
		if (t16) {
			oe->ptr = kmalloc(t16, GFP_NOFS);
			if (!oe->ptr) {
				err = -ENOMEM;
				goto out;
			}
			oe->name_len = t16 / sizeof(short);
			memcpy(oe->ptr,
			       Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
			oe->is_attr_name = 1;
		} else {
			oe->ptr = NULL;
			oe->is_attr_name = 0;
		}

		goto next_log_record_analyze;

	case HotFix:
		t16 = le16_to_cpu(lrh->target_attr);
		t64 = le64_to_cpu(lrh->target_vcn);
		dp = find_dp(dptbl, t16, t64);
		if (dp) {
			size_t j = le64_to_cpu(lrh->target_vcn) -
				   le64_to_cpu(dp->vcn);
			if (dp->page_lcns[j])
				dp->page_lcns[j] = lrh->page_lcns[0];
		}
		goto next_log_record_analyze;

	case EndTopLevelAction:
		tr = Add2Ptr(trtbl, transact_id);
		tr->prev_lsn = cpu_to_le64(rec_lsn);
		tr->undo_next_lsn = frh->client_undo_next_lsn;
		goto next_log_record_analyze;

	case PrepareTransaction:
		tr = Add2Ptr(trtbl, transact_id);
		tr->transact_state = TransactionPrepared;
		goto next_log_record_analyze;

	case CommitTransaction:
		tr = Add2Ptr(trtbl, transact_id);
		tr->transact_state = TransactionCommitted;
		goto next_log_record_analyze;

	case ForgetTransaction:
		free_rsttbl_idx(trtbl, transact_id);
		goto next_log_record_analyze;

	case Noop:
	case OpenAttributeTableDump:
	case AttributeNamesDump:
	case DirtyPageTableDump:
	case TransactionTableDump:
		/* The following cases require no action in the Analysis Pass. */
		goto next_log_record_analyze;

	default:
		/*
		 * All codes will be explicitly handled.
		 * If we see a code we do not expect, then we are in trouble.
		 */
		goto next_log_record_analyze;
	}

end_log_records_enumerate:
	lcb_put(lcb);
	lcb = NULL;

	/*
	 * Scan the Dirty Page Table and Transaction Table for
	 * the lowest lsn, and return it as the Redo lsn.
	 */
	dp = NULL;
	while ((dp = enum_rstbl(dptbl, dp))) {
		t64 = le64_to_cpu(dp->oldest_lsn);
		if (t64 && t64 < rlsn)
			rlsn = t64;
	}

	tr = NULL;
	while ((tr = enum_rstbl(trtbl, tr))) {
		t64 = le64_to_cpu(tr->first_lsn);
		if (t64 && t64 < rlsn)
			rlsn = t64;
	}

	/*
	 * Only proceed if the Dirty Page Table or the Transaction
	 * Table is not empty.
	 */
	if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
		goto end_reply;

	sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
	if (is_ro)
		goto out;

	/* Reopen all of the attributes with dirty pages.
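	 *
	 * For every entry in the Open Attribute Table the inode is loaded by
	 * its MFT reference and the matching attribute is looked up: an
	 * unnamed ATTR_DATA reuses the inode's file run, a directory's
	 * ATTR_ALLOC named I30_NAME reuses the index allocation run, and
	 * anything else gets its run unpacked from the mapping pairs.  If the
	 * inode or the attribute cannot be found, a stand-in non-resident
	 * attribute is created (fake_attr) so the Redo Pass can still resolve
	 * the entry.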
*/ 4666 oe = NULL; 4667 next_open_attribute: 4668 4669 oe = enum_rstbl(oatbl, oe); 4670 if (!oe) { 4671 err = 0; 4672 dp = NULL; 4673 goto next_dirty_page; 4674 } 4675 4676 oa = kzalloc(sizeof(struct OpenAttr), GFP_NOFS); 4677 if (!oa) { 4678 err = -ENOMEM; 4679 goto out; 4680 } 4681 4682 inode = ntfs_iget5(sbi->sb, &oe->ref, NULL); 4683 if (IS_ERR(inode)) 4684 goto fake_attr; 4685 4686 if (is_bad_inode(inode)) { 4687 iput(inode); 4688 fake_attr: 4689 if (oa->ni) { 4690 iput(&oa->ni->vfs_inode); 4691 oa->ni = NULL; 4692 } 4693 4694 attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr, 4695 oe->name_len, 0); 4696 if (!attr) { 4697 kfree(oa); 4698 err = -ENOMEM; 4699 goto out; 4700 } 4701 oa->attr = attr; 4702 oa->run1 = &oa->run0; 4703 goto final_oe; 4704 } 4705 4706 ni_oe = ntfs_i(inode); 4707 oa->ni = ni_oe; 4708 4709 attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len, 4710 NULL, NULL); 4711 4712 if (!attr) 4713 goto fake_attr; 4714 4715 t32 = le32_to_cpu(attr->size); 4716 oa->attr = kmemdup(attr, t32, GFP_NOFS); 4717 if (!oa->attr) 4718 goto fake_attr; 4719 4720 if (!S_ISDIR(inode->i_mode)) { 4721 if (attr->type == ATTR_DATA && !attr->name_len) { 4722 oa->run1 = &ni_oe->file.run; 4723 goto final_oe; 4724 } 4725 } else { 4726 if (attr->type == ATTR_ALLOC && 4727 attr->name_len == ARRAY_SIZE(I30_NAME) && 4728 !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) { 4729 oa->run1 = &ni_oe->dir.alloc_run; 4730 goto final_oe; 4731 } 4732 } 4733 4734 if (attr->non_res) { 4735 u16 roff = le16_to_cpu(attr->nres.run_off); 4736 CLST svcn = le64_to_cpu(attr->nres.svcn); 4737 4738 if (roff > t32) { 4739 kfree(oa->attr); 4740 oa->attr = NULL; 4741 goto fake_attr; 4742 } 4743 4744 err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn, 4745 le64_to_cpu(attr->nres.evcn), svcn, 4746 Add2Ptr(attr, roff), t32 - roff); 4747 if (err < 0) { 4748 kfree(oa->attr); 4749 oa->attr = NULL; 4750 goto fake_attr; 4751 } 4752 err = 0; 4753 } 4754 oa->run1 = &oa->run0; 4755 attr = oa->attr; 4756 4757 final_oe: 4758 if (oe->is_attr_name == 1) 4759 kfree(oe->ptr); 4760 oe->is_attr_name = 0; 4761 oe->ptr = oa; 4762 oe->name_len = attr->name_len; 4763 4764 goto next_open_attribute; 4765 4766 /* 4767 * Now loop through the dirty page table to extract all of the Vcn/Lcn. 4768 * Mapping that we have, and insert it into the appropriate run. 
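	 *
	 * In other words: every non-zero page_lcns[i] of a dirty page entry
	 * is a Vcn -> Lcn pair.  If the run of the reopened attribute does
	 * not already contain that mapping it is added with run_add_entry(),
	 * and the cached attribute sizes are grown so the Redo Pass can reach
	 * those clusters.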
4769 */ 4770 next_dirty_page: 4771 dp = enum_rstbl(dptbl, dp); 4772 if (!dp) 4773 goto do_redo_1; 4774 4775 oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr)); 4776 4777 if (oe->next != RESTART_ENTRY_ALLOCATED_LE) 4778 goto next_dirty_page; 4779 4780 oa = oe->ptr; 4781 if (!oa) 4782 goto next_dirty_page; 4783 4784 i = -1; 4785 next_dirty_page_vcn: 4786 i += 1; 4787 if (i >= le32_to_cpu(dp->lcns_follow)) 4788 goto next_dirty_page; 4789 4790 vcn = le64_to_cpu(dp->vcn) + i; 4791 size = (vcn + 1) << sbi->cluster_bits; 4792 4793 if (!dp->page_lcns[i]) 4794 goto next_dirty_page_vcn; 4795 4796 rno = ino_get(&oe->ref); 4797 if (rno <= MFT_REC_MIRR && 4798 size < (MFT_REC_VOL + 1) * sbi->record_size && 4799 oe->type == ATTR_DATA) { 4800 goto next_dirty_page_vcn; 4801 } 4802 4803 lcn = le64_to_cpu(dp->page_lcns[i]); 4804 4805 if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) || 4806 lcn0 != lcn) && 4807 !run_add_entry(oa->run1, vcn, lcn, 1, false)) { 4808 err = -ENOMEM; 4809 goto out; 4810 } 4811 attr = oa->attr; 4812 if (size > le64_to_cpu(attr->nres.alloc_size)) { 4813 attr->nres.valid_size = attr->nres.data_size = 4814 attr->nres.alloc_size = cpu_to_le64(size); 4815 } 4816 goto next_dirty_page_vcn; 4817 4818 do_redo_1: 4819 /* 4820 * Perform the Redo Pass, to restore all of the dirty pages to the same 4821 * contents that they had immediately before the crash. If the dirty 4822 * page table is empty, then we can skip the entire Redo Pass. 4823 */ 4824 if (!dptbl || !dptbl->total) 4825 goto do_undo_action; 4826 4827 rec_lsn = rlsn; 4828 4829 /* 4830 * Read the record at the Redo lsn, before falling 4831 * into common code to handle each record. 4832 */ 4833 err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb); 4834 if (err) 4835 goto out; 4836 4837 /* 4838 * Now loop to read all of our log records forwards, until 4839 * we hit the end of the file, cleaning up at the end. 4840 */ 4841 do_action_next: 4842 frh = lcb->lrh; 4843 4844 if (LfsClientRecord != frh->record_type) 4845 goto read_next_log_do_action; 4846 4847 transact_id = le32_to_cpu(frh->transact_id); 4848 rec_len = le32_to_cpu(frh->client_data_len); 4849 lrh = lcb->log_rec; 4850 4851 if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) { 4852 err = -EINVAL; 4853 goto out; 4854 } 4855 4856 /* Ignore log records that do not update pages. */ 4857 if (lrh->lcns_follow) 4858 goto find_dirty_page; 4859 4860 goto read_next_log_do_action; 4861 4862 find_dirty_page: 4863 t16 = le16_to_cpu(lrh->target_attr); 4864 t64 = le64_to_cpu(lrh->target_vcn); 4865 dp = find_dp(dptbl, t16, t64); 4866 4867 if (!dp) 4868 goto read_next_log_do_action; 4869 4870 if (rec_lsn < le64_to_cpu(dp->oldest_lsn)) 4871 goto read_next_log_do_action; 4872 4873 t16 = le16_to_cpu(lrh->target_attr); 4874 if (t16 >= bytes_per_rt(oatbl)) { 4875 err = -EINVAL; 4876 goto out; 4877 } 4878 4879 oe = Add2Ptr(oatbl, t16); 4880 4881 if (oe->next != RESTART_ENTRY_ALLOCATED_LE) { 4882 err = -EINVAL; 4883 goto out; 4884 } 4885 4886 oa = oe->ptr; 4887 4888 if (!oa) { 4889 err = -EINVAL; 4890 goto out; 4891 } 4892 attr = oa->attr; 4893 4894 vcn = le64_to_cpu(lrh->target_vcn); 4895 4896 if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) || 4897 lcn == SPARSE_LCN) { 4898 goto read_next_log_do_action; 4899 } 4900 4901 /* Point to the Redo data and get its length. */ 4902 data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off)); 4903 dlen = le16_to_cpu(lrh->redo_len); 4904 4905 /* Shorten length by any Lcns which were deleted. 
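	 *
	 * The loop below walks the record's Lcns from the last one backwards.
	 * Trailing Vcns whose page_lcns[] slot is zero have been deleted, so
	 * the space still allocated shrinks to alen = (i - 1) << cluster_bits,
	 * while voff (record_off + attr_off plus the cluster offset in bytes)
	 * locates the update inside the page; dlen is clipped so the redo
	 * data never reaches past the allocated space.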
	 */
	saved_len = dlen;

	for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
		size_t j;
		u32 alen, voff;

		voff = le16_to_cpu(lrh->record_off) +
		       le16_to_cpu(lrh->attr_off);
		voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;

		/* If the Vcn in question is allocated, we can just get out. */
		j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
		if (dp->page_lcns[j + i - 1])
			break;

		if (!saved_len)
			saved_len = 1;

		/*
		 * Calculate the allocated space left relative to the
		 * log record Vcn, after removing this unallocated Vcn.
		 */
		alen = (i - 1) << sbi->cluster_bits;

		/*
		 * If the update described by this log record goes beyond
		 * the allocated space, then we will have to reduce the length.
		 */
		if (voff >= alen)
			dlen = 0;
		else if (voff + dlen > alen)
			dlen = alen - voff;
	}

	/*
	 * If the resulting dlen from above is now zero,
	 * we can skip this log record.
	 */
	if (!dlen && saved_len)
		goto read_next_log_do_action;

	t16 = le16_to_cpu(lrh->redo_op);
	if (can_skip_action(t16))
		goto read_next_log_do_action;

	/* Apply the Redo operation in a common routine. */
	err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
	if (err)
		goto out;

	/* Keep reading and looping back until end of file. */
read_next_log_do_action:
	err = read_next_log_rec(log, lcb, &rec_lsn);
	if (!err && rec_lsn)
		goto do_action_next;

	lcb_put(lcb);
	lcb = NULL;

do_undo_action:
	/* Scan the Transaction Table. */
	tr = NULL;
transaction_table_next:
	tr = enum_rstbl(trtbl, tr);
	if (!tr)
		goto undo_action_done;

	if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
		free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
		goto transaction_table_next;
	}

	log->transaction_id = PtrOffset(trtbl, tr);
	undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);

	/*
	 * We only have to do anything if the transaction has
	 * something in its undo_next_lsn field.
	 */
	if (!undo_next_lsn)
		goto commit_undo;

	/* Read the first record to be undone by this transaction. */
	err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
	if (err)
		goto out;

	/*
	 * Now loop to read all of the records to be undone for this
	 * transaction, cleaning up at the end.
	 */
undo_action_next:

	lrh = lcb->log_rec;
	frh = lcb->lrh;
	transact_id = le32_to_cpu(frh->transact_id);
	rec_len = le32_to_cpu(frh->client_data_len);

	if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
		err = -EINVAL;
		goto out;
	}

	if (lrh->undo_op == cpu_to_le16(Noop))
		goto read_next_log_undo_action;

	oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
	oa = oe->ptr;

	t16 = le16_to_cpu(lrh->lcns_follow);
	if (!t16)
		goto add_allocated_vcns;

	is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
				     &lcn, &clen, NULL);

	/*
	 * If the mapping isn't already in the table, or the mapping
	 * corresponds to a hole, we need to make sure there is no
	 * partial page already in memory.
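	 *
	 * Whether or not the Vcns were already mapped, the loop below walks
	 * every Vcn named by the record and, where needed, grows the cached
	 * resident or non-resident sizes of the attribute so that the undo
	 * data about to be applied still falls inside the file.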
5026 */ 5027 if (is_mapped && lcn != SPARSE_LCN && clen >= t16) 5028 goto add_allocated_vcns; 5029 5030 vcn = le64_to_cpu(lrh->target_vcn); 5031 vcn &= ~(u64)(log->clst_per_page - 1); 5032 5033 add_allocated_vcns: 5034 for (i = 0, vcn = le64_to_cpu(lrh->target_vcn), 5035 size = (vcn + 1) << sbi->cluster_bits; 5036 i < t16; i++, vcn += 1, size += sbi->cluster_size) { 5037 attr = oa->attr; 5038 if (!attr->non_res) { 5039 if (size > le32_to_cpu(attr->res.data_size)) 5040 attr->res.data_size = cpu_to_le32(size); 5041 } else { 5042 if (size > le64_to_cpu(attr->nres.data_size)) 5043 attr->nres.valid_size = attr->nres.data_size = 5044 attr->nres.alloc_size = 5045 cpu_to_le64(size); 5046 } 5047 } 5048 5049 t16 = le16_to_cpu(lrh->undo_op); 5050 if (can_skip_action(t16)) 5051 goto read_next_log_undo_action; 5052 5053 /* Point to the Redo data and get its length. */ 5054 data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)); 5055 dlen = le16_to_cpu(lrh->undo_len); 5056 5057 /* It is time to apply the undo action. */ 5058 err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL); 5059 5060 read_next_log_undo_action: 5061 /* 5062 * Keep reading and looping back until we have read the 5063 * last record for this transaction. 5064 */ 5065 err = read_next_log_rec(log, lcb, &rec_lsn); 5066 if (err) 5067 goto out; 5068 5069 if (rec_lsn) 5070 goto undo_action_next; 5071 5072 lcb_put(lcb); 5073 lcb = NULL; 5074 5075 commit_undo: 5076 free_rsttbl_idx(trtbl, log->transaction_id); 5077 5078 log->transaction_id = 0; 5079 5080 goto transaction_table_next; 5081 5082 undo_action_done: 5083 5084 ntfs_update_mftmirr(sbi, 0); 5085 5086 sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY; 5087 5088 end_reply: 5089 5090 err = 0; 5091 if (is_ro) 5092 goto out; 5093 5094 rh = kzalloc(log->page_size, GFP_NOFS); 5095 if (!rh) { 5096 err = -ENOMEM; 5097 goto out; 5098 } 5099 5100 rh->rhdr.sign = NTFS_RSTR_SIGNATURE; 5101 rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups)); 5102 t16 = (log->page_size >> SECTOR_SHIFT) + 1; 5103 rh->rhdr.fix_num = cpu_to_le16(t16); 5104 rh->sys_page_size = cpu_to_le32(log->page_size); 5105 rh->page_size = cpu_to_le32(log->page_size); 5106 5107 t16 = ALIGN(offsetof(struct RESTART_HDR, fixups) + sizeof(short) * t16, 5108 8); 5109 rh->ra_off = cpu_to_le16(t16); 5110 rh->minor_ver = cpu_to_le16(1); // 0x1A: 5111 rh->major_ver = cpu_to_le16(1); // 0x1C: 5112 5113 ra2 = Add2Ptr(rh, t16); 5114 memcpy(ra2, ra, sizeof(struct RESTART_AREA)); 5115 5116 ra2->client_idx[0] = 0; 5117 ra2->client_idx[1] = LFS_NO_CLIENT_LE; 5118 ra2->flags = cpu_to_le16(2); 5119 5120 le32_add_cpu(&ra2->open_log_count, 1); 5121 5122 ntfs_fix_pre_write(&rh->rhdr, log->page_size); 5123 5124 err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size, 0); 5125 if (!err) 5126 err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size, 5127 rh, log->page_size, 0); 5128 5129 kfree(rh); 5130 if (err) 5131 goto out; 5132 5133 out: 5134 kfree(rst); 5135 if (lcb) 5136 lcb_put(lcb); 5137 5138 /* 5139 * Scan the Open Attribute Table to close all of 5140 * the open attributes. 
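	 *
	 * This is the common cleanup path, reached on success and on error:
	 * attribute-name buffers still owned by the table are freed, each
	 * OpenAttr has its run closed, its copied attribute freed and its
	 * inode reference dropped, and the in-memory tables and buffers are
	 * released.  A non-zero err sets NTFS_FLAGS_NEED_REPLAY; -EROFS is
	 * then downgraded to success, otherwise a raised log->set_dirty
	 * marks the volume with NTFS_DIRTY_ERROR.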
5141 */ 5142 oe = NULL; 5143 while ((oe = enum_rstbl(oatbl, oe))) { 5144 rno = ino_get(&oe->ref); 5145 5146 if (oe->is_attr_name == 1) { 5147 kfree(oe->ptr); 5148 oe->ptr = NULL; 5149 continue; 5150 } 5151 5152 if (oe->is_attr_name) 5153 continue; 5154 5155 oa = oe->ptr; 5156 if (!oa) 5157 continue; 5158 5159 run_close(&oa->run0); 5160 kfree(oa->attr); 5161 if (oa->ni) 5162 iput(&oa->ni->vfs_inode); 5163 kfree(oa); 5164 } 5165 5166 kfree(trtbl); 5167 kfree(oatbl); 5168 kfree(dptbl); 5169 kfree(attr_names); 5170 kfree(log->rst_info.r_page); 5171 5172 kfree(ra); 5173 kfree(log->one_page_buf); 5174 5175 if (err) 5176 sbi->flags |= NTFS_FLAGS_NEED_REPLAY; 5177 5178 if (err == -EROFS) 5179 err = 0; 5180 else if (log->set_dirty) 5181 ntfs_set_state(sbi, NTFS_DIRTY_ERROR); 5182 5183 kfree(log); 5184 5185 return err; 5186 } 5187