/*
 * .xz Stream decoder
 *
 * Author: Lasse Collin <lasse.collin@tukaani.org>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

#include "xz_private.h"
#include "xz_stream.h"

/* Hash used to validate the Index field */
struct xz_dec_hash {
	vli_type unpadded;
	vli_type uncompressed;
	uint32_t crc32;
};

struct xz_dec {
	/* Position in dec_main() */
	enum {
		SEQ_STREAM_HEADER,
		SEQ_BLOCK_START,
		SEQ_BLOCK_HEADER,
		SEQ_BLOCK_UNCOMPRESS,
		SEQ_BLOCK_PADDING,
		SEQ_BLOCK_CHECK,
		SEQ_INDEX,
		SEQ_INDEX_PADDING,
		SEQ_INDEX_CRC32,
		SEQ_STREAM_FOOTER
	} sequence;

	/* Position in variable-length integers and Check fields */
	uint32_t pos;

	/* Variable-length integer decoded by dec_vli() */
	vli_type vli;

	/* Saved in_pos and out_pos */
	size_t in_start;
	size_t out_start;

	/* CRC32 value in Block or Index */
	uint32_t crc32;

	/* Type of the integrity check calculated from uncompressed data */
	enum xz_check check_type;

	/* Operation mode */
	enum xz_mode mode;

	/*
	 * True if the next call to xz_dec_run() is allowed to return
	 * XZ_BUF_ERROR.
	 */
	bool allow_buf_error;

	/* Information stored in Block Header */
	struct {
		/*
		 * Value stored in the Compressed Size field, or
		 * VLI_UNKNOWN if Compressed Size is not present.
		 */
		vli_type compressed;

		/*
		 * Value stored in the Uncompressed Size field, or
		 * VLI_UNKNOWN if Uncompressed Size is not present.
		 */
		vli_type uncompressed;

		/* Size of the Block Header field */
		uint32_t size;
	} block_header;

	/* Information collected when decoding Blocks */
	struct {
		/* Observed compressed size of the current Block */
		vli_type compressed;

		/* Observed uncompressed size of the current Block */
		vli_type uncompressed;

		/* Number of Blocks decoded so far */
		vli_type count;

		/*
		 * Hash calculated from the Block sizes. This is used to
		 * validate the Index field.
		 */
		struct xz_dec_hash hash;
	} block;

	/* Variables needed when verifying the Index field */
	struct {
		/* Position in dec_index() */
		enum {
			SEQ_INDEX_COUNT,
			SEQ_INDEX_UNPADDED,
			SEQ_INDEX_UNCOMPRESSED
		} sequence;

		/* Size of the Index in bytes */
		vli_type size;

		/* Number of Records (matches block.count in valid files) */
		vli_type count;

		/*
		 * Hash calculated from the Records (matches block.hash in
		 * valid files).
		 */
		struct xz_dec_hash hash;
	} index;

	/*
	 * Temporary buffer needed to hold Stream Header, Block Header,
	 * and Stream Footer. The Block Header is the biggest (1 KiB)
	 * so we reserve space according to that. buf[] has to be aligned
	 * to a multiple of four bytes; the size_t variables before it
	 * should guarantee this.
	 */
	struct {
		size_t pos;
		size_t size;
		uint8_t buf[1024];
	} temp;

	struct xz_dec_lzma2 *lzma2;

#ifdef XZ_DEC_BCJ
	struct xz_dec_bcj *bcj;
	bool bcj_active;
#endif
};

#ifdef XZ_DEC_ANY_CHECK
/* Sizes of the Check field with different Check IDs */
static const uint8_t check_sizes[16] = {
	0,
	4, 4, 4,
	8, 8, 8,
	16, 16, 16,
	32, 32, 32,
	64, 64, 64
};
#endif
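/*
 * For reference (per the .xz format specification): Check ID 0x00 is
 * None (0 bytes), 0x01 is CRC32 (4 bytes), 0x04 is CRC64 (8 bytes),
 * and 0x0A is SHA-256 (32 bytes). The remaining IDs are reserved, but
 * their sizes are defined so that an unsupported Check field can still
 * be skipped over (see check_skip() below).
 */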
/*
 * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller
 * must have set s->temp.size to indicate how much data we are supposed
 * to copy into s->temp.buf. Return true once s->temp.pos has reached
 * s->temp.size.
 */
static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
{
	size_t copy_size = min_t(size_t,
			b->in_size - b->in_pos, s->temp.size - s->temp.pos);

	memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
	b->in_pos += copy_size;
	s->temp.pos += copy_size;

	if (s->temp.pos == s->temp.size) {
		s->temp.pos = 0;
		return true;
	}

	return false;
}

/* Decode a variable-length integer (little-endian base-128 encoding) */
static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
			   size_t *in_pos, size_t in_size)
{
	uint8_t byte;

	if (s->pos == 0)
		s->vli = 0;

	while (*in_pos < in_size) {
		byte = in[*in_pos];
		++*in_pos;

		s->vli |= (vli_type)(byte & 0x7F) << s->pos;

		if ((byte & 0x80) == 0) {
			/* Don't allow non-minimal encodings. */
			if (byte == 0 && s->pos != 0)
				return XZ_DATA_ERROR;

			s->pos = 0;
			return XZ_STREAM_END;
		}

		s->pos += 7;
		if (s->pos == 7 * VLI_BYTES_MAX)
			return XZ_DATA_ERROR;
	}

	return XZ_OK;
}
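/*
 * Worked example of the encoding decoded above (illustrative): the
 * value 300 (0x12C) is stored as the two bytes 0xAC 0x02. The first
 * byte contributes its low seven bits (0x2C), and its set continuation
 * bit (0x80) asks for more input; the second byte contributes 0x02 at
 * bit position 7, and its clear continuation bit ends the integer:
 * 0x2C | (0x02 << 7) == 0x12C.
 */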
/*
 * Decode the Compressed Data field from a Block. Update and validate
 * the observed compressed and uncompressed sizes of the Block so that
 * they don't exceed the values possibly stored in the Block Header
 * (validation assumes that no integer overflow occurs, since vli_type
 * is normally uint64_t). Update the CRC32 if presence of the CRC32
 * field was indicated in Stream Header.
 *
 * Once the decoding is finished, validate that the observed sizes match
 * the sizes possibly stored in the Block Header. Update the hash and
 * Block count, which are later used to validate the Index field.
 */
static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
{
	enum xz_ret ret;

	s->in_start = b->in_pos;
	s->out_start = b->out_pos;

#ifdef XZ_DEC_BCJ
	if (s->bcj_active)
		ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
	else
#endif
		ret = xz_dec_lzma2_run(s->lzma2, b);

	s->block.compressed += b->in_pos - s->in_start;
	s->block.uncompressed += b->out_pos - s->out_start;

	/*
	 * There is no need to separately check for VLI_UNKNOWN, since
	 * the observed sizes are always smaller than VLI_UNKNOWN.
	 */
	if (s->block.compressed > s->block_header.compressed
			|| s->block.uncompressed
				> s->block_header.uncompressed)
		return XZ_DATA_ERROR;

	if (s->check_type == XZ_CHECK_CRC32)
		s->crc32 = xz_crc32(b->out + s->out_start,
				b->out_pos - s->out_start, s->crc32);

	if (ret == XZ_STREAM_END) {
		if (s->block_header.compressed != VLI_UNKNOWN
				&& s->block_header.compressed
					!= s->block.compressed)
			return XZ_DATA_ERROR;

		if (s->block_header.uncompressed != VLI_UNKNOWN
				&& s->block_header.uncompressed
					!= s->block.uncompressed)
			return XZ_DATA_ERROR;

		s->block.hash.unpadded += s->block_header.size
				+ s->block.compressed;

#ifdef XZ_DEC_ANY_CHECK
		s->block.hash.unpadded += check_sizes[s->check_type];
#else
		if (s->check_type == XZ_CHECK_CRC32)
			s->block.hash.unpadded += 4;
#endif

		s->block.hash.uncompressed += s->block.uncompressed;
		s->block.hash.crc32 = xz_crc32(
				(const uint8_t *)&s->block.hash,
				sizeof(s->block.hash), s->block.hash.crc32);

		++s->block.count;
	}

	return ret;
}

/* Update the Index size and the CRC32 value. */
static void index_update(struct xz_dec *s, const struct xz_buf *b)
{
	size_t in_used = b->in_pos - s->in_start;
	s->index.size += in_used;
	s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32);
}

/*
 * Decode the Number of Records, Unpadded Size, and Uncompressed Size
 * fields from the Index field. That is, Index Padding and CRC32 are not
 * decoded by this function.
 *
 * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
 * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
 */
static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
{
	enum xz_ret ret;

	do {
		ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
		if (ret != XZ_STREAM_END) {
			index_update(s, b);
			return ret;
		}

		switch (s->index.sequence) {
		case SEQ_INDEX_COUNT:
			s->index.count = s->vli;

			/*
			 * Validate that the Number of Records field
			 * indicates the same number of Records as
			 * there were Blocks in the Stream.
			 */
			if (s->index.count != s->block.count)
				return XZ_DATA_ERROR;

			s->index.sequence = SEQ_INDEX_UNPADDED;
			break;

		case SEQ_INDEX_UNPADDED:
			s->index.hash.unpadded += s->vli;
			s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
			break;

		case SEQ_INDEX_UNCOMPRESSED:
			s->index.hash.uncompressed += s->vli;
			s->index.hash.crc32 = xz_crc32(
					(const uint8_t *)&s->index.hash,
					sizeof(s->index.hash),
					s->index.hash.crc32);
			--s->index.count;
			s->index.sequence = SEQ_INDEX_UNPADDED;
			break;
		}
	} while (s->index.count > 0);

	return XZ_STREAM_END;
}

/*
 * Validate that the next four input bytes match the value of s->crc32.
 * s->pos must be zero when starting to validate the first byte.
 */
static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b)
{
	do {
		if (b->in_pos == b->in_size)
			return XZ_OK;

		if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++])
			return XZ_DATA_ERROR;

		s->pos += 8;

	} while (s->pos < 32);

	s->crc32 = 0;
	s->pos = 0;

	return XZ_STREAM_END;
}
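/*
 * Byte-order note for the comparison above (illustrative): .xz stores
 * the CRC32 field in little-endian order, so a calculated value of
 * 0x1A2B3C4D must appear in the input as the bytes 0x4D 0x3C 0x2B 0x1A.
 * The (s->crc32 >> s->pos) & 0xFF expression walks the calculated value
 * from its least significant byte upwards to match that order.
 */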
#ifdef XZ_DEC_ANY_CHECK
/*
 * Skip over the Check field when the Check ID is not supported.
 * Returns true once the whole Check field has been skipped over.
 */
static bool check_skip(struct xz_dec *s, struct xz_buf *b)
{
	while (s->pos < check_sizes[s->check_type]) {
		if (b->in_pos == b->in_size)
			return false;

		++b->in_pos;
		++s->pos;
	}

	s->pos = 0;

	return true;
}
#endif

/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
static enum xz_ret dec_stream_header(struct xz_dec *s)
{
	if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
		return XZ_FORMAT_ERROR;

	if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
			!= get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
		return XZ_DATA_ERROR;

	if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
		return XZ_OPTIONS_ERROR;

	/*
	 * Of integrity checks, we support only none (Check ID = 0) and
	 * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined,
	 * we will accept other check types too, but then the check won't
	 * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
	 */
	s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];

#ifdef XZ_DEC_ANY_CHECK
	if (s->check_type > XZ_CHECK_MAX)
		return XZ_OPTIONS_ERROR;

	if (s->check_type > XZ_CHECK_CRC32)
		return XZ_UNSUPPORTED_CHECK;
#else
	if (s->check_type > XZ_CHECK_CRC32)
		return XZ_OPTIONS_ERROR;
#endif

	return XZ_OK;
}

/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
static enum xz_ret dec_stream_footer(struct xz_dec *s)
{
	if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
		return XZ_DATA_ERROR;

	if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
		return XZ_DATA_ERROR;

	/*
	 * Validate Backward Size. Note that we never added the size of the
	 * Index CRC32 field to s->index.size, thus we use s->index.size / 4
	 * instead of s->index.size / 4 - 1.
	 */
	if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
		return XZ_DATA_ERROR;

	if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
		return XZ_DATA_ERROR;

	/*
	 * Use XZ_STREAM_END instead of XZ_OK to be more convenient
	 * for the caller.
	 */
	return XZ_STREAM_END;
}
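/*
 * Worked example for the Backward Size check above (illustrative): the
 * Backward Size field stores real_size / 4 - 1, where real_size
 * includes the Index CRC32 field. For an Index that takes 16 bytes in
 * the file, the field stores 16 / 4 - 1 = 3. Since s->index.size
 * excludes the 4-byte CRC32 (12 bytes here), s->index.size >> 2 is
 * also 3, which is why the comparison needs no "- 1".
 */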
/* Decode the Block Header and initialize the filter chain. */
static enum xz_ret dec_block_header(struct xz_dec *s)
{
	enum xz_ret ret;

	/*
	 * Validate the CRC32. We know that the temp buffer is at least
	 * eight bytes so this is safe.
	 */
	s->temp.size -= 4;
	if (xz_crc32(s->temp.buf, s->temp.size, 0)
			!= get_le32(s->temp.buf + s->temp.size))
		return XZ_DATA_ERROR;

	s->temp.pos = 2;

	/*
	 * Catch unsupported Block Flags. We support only one or two filters
	 * in the chain, so we catch that with the same test.
	 */
#ifdef XZ_DEC_BCJ
	if (s->temp.buf[1] & 0x3E)
#else
	if (s->temp.buf[1] & 0x3F)
#endif
		return XZ_OPTIONS_ERROR;

	/* Compressed Size */
	if (s->temp.buf[1] & 0x40) {
		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
					!= XZ_STREAM_END)
			return XZ_DATA_ERROR;

		s->block_header.compressed = s->vli;
	} else {
		s->block_header.compressed = VLI_UNKNOWN;
	}

	/* Uncompressed Size */
	if (s->temp.buf[1] & 0x80) {
		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
					!= XZ_STREAM_END)
			return XZ_DATA_ERROR;

		s->block_header.uncompressed = s->vli;
	} else {
		s->block_header.uncompressed = VLI_UNKNOWN;
	}

#ifdef XZ_DEC_BCJ
	/* If there are two filters, the first one must be a BCJ filter. */
	s->bcj_active = s->temp.buf[1] & 0x01;
	if (s->bcj_active) {
		if (s->temp.size - s->temp.pos < 2)
			return XZ_OPTIONS_ERROR;

		ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
		if (ret != XZ_OK)
			return ret;

		/*
		 * We don't support a custom start offset,
		 * so Size of Properties must be zero.
		 */
		if (s->temp.buf[s->temp.pos++] != 0x00)
			return XZ_OPTIONS_ERROR;
	}
#endif

	/* Valid Filter Flags always take at least two bytes. */
	if (s->temp.size - s->temp.pos < 2)
		return XZ_DATA_ERROR;

	/* Filter ID = LZMA2 */
	if (s->temp.buf[s->temp.pos++] != 0x21)
		return XZ_OPTIONS_ERROR;

	/* Size of Properties = 1-byte Filter Properties */
	if (s->temp.buf[s->temp.pos++] != 0x01)
		return XZ_OPTIONS_ERROR;

	/* Filter Properties contains LZMA2 dictionary size. */
	if (s->temp.size - s->temp.pos < 1)
		return XZ_DATA_ERROR;

	ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
	if (ret != XZ_OK)
		return ret;

	/* The rest must be Header Padding. */
	while (s->temp.pos < s->temp.size)
		if (s->temp.buf[s->temp.pos++] != 0x00)
			return XZ_OPTIONS_ERROR;

	s->temp.pos = 0;
	s->block.compressed = 0;
	s->block.uncompressed = 0;

	return XZ_OK;
}
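/*
 * For reference, the Block Header layout parsed above (per the .xz
 * format specification):
 *
 *	Block Header Size   1 byte     real size = (value + 1) * 4
 *	Block Flags         1 byte     bits 0-1: number of filters - 1,
 *	                               bits 2-5: reserved (must be zero),
 *	                               bit 6: Compressed Size present,
 *	                               bit 7: Uncompressed Size present
 *	Compressed Size     VLI        present only if bit 6 is set
 *	Uncompressed Size   VLI        present only if bit 7 is set
 *	Filter Flags        variable   Filter ID, Size of Properties,
 *	                               and the Properties themselves
 *	Header Padding      variable   zero bytes up to the declared size
 *	CRC32               4 bytes    calculated over all of the above
 */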
static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
{
	enum xz_ret ret;

	/*
	 * Store the start position for the case when we are in the middle
	 * of the Index field.
	 */
	s->in_start = b->in_pos;

	while (true) {
		switch (s->sequence) {
		case SEQ_STREAM_HEADER:
			/*
			 * Stream Header is copied to s->temp, and then
			 * decoded from there. This way if the caller
			 * gives us only a little input at a time, we can
			 * still keep the Stream Header decoding code
			 * simple. A similar approach is used in many
			 * places in this file.
			 */
			if (!fill_temp(s, b))
				return XZ_OK;

			/*
			 * If dec_stream_header() returns
			 * XZ_UNSUPPORTED_CHECK, it is still possible
			 * to continue decoding if working in multi-call
			 * mode. Thus, update s->sequence before calling
			 * dec_stream_header().
			 */
			s->sequence = SEQ_BLOCK_START;

			ret = dec_stream_header(s);
			if (ret != XZ_OK)
				return ret;

			/* Fall through */

		case SEQ_BLOCK_START:
			/* We need one byte of input to continue. */
			if (b->in_pos == b->in_size)
				return XZ_OK;

			/* See if this is the beginning of the Index field. */
			if (b->in[b->in_pos] == 0) {
				s->in_start = b->in_pos++;
				s->sequence = SEQ_INDEX;
				break;
			}

			/*
			 * Calculate the size of the Block Header and
			 * prepare to decode it.
			 */
			s->block_header.size
				= ((uint32_t)b->in[b->in_pos] + 1) * 4;

			s->temp.size = s->block_header.size;
			s->temp.pos = 0;
			s->sequence = SEQ_BLOCK_HEADER;

			/* Fall through */

		case SEQ_BLOCK_HEADER:
			if (!fill_temp(s, b))
				return XZ_OK;

			ret = dec_block_header(s);
			if (ret != XZ_OK)
				return ret;

			s->sequence = SEQ_BLOCK_UNCOMPRESS;

			/* Fall through */

		case SEQ_BLOCK_UNCOMPRESS:
			ret = dec_block(s, b);
			if (ret != XZ_STREAM_END)
				return ret;

			s->sequence = SEQ_BLOCK_PADDING;

			/* Fall through */

		case SEQ_BLOCK_PADDING:
			/*
			 * Size of Compressed Data + Block Padding
			 * must be a multiple of four. We don't need
			 * s->block.compressed for anything else
			 * anymore, so we use it here to test the size
			 * of the Block Padding field.
			 */
			while (s->block.compressed & 3) {
				if (b->in_pos == b->in_size)
					return XZ_OK;

				if (b->in[b->in_pos++] != 0)
					return XZ_DATA_ERROR;

				++s->block.compressed;
			}

			s->sequence = SEQ_BLOCK_CHECK;

			/* Fall through */

		case SEQ_BLOCK_CHECK:
			if (s->check_type == XZ_CHECK_CRC32) {
				ret = crc32_validate(s, b);
				if (ret != XZ_STREAM_END)
					return ret;
			}
#ifdef XZ_DEC_ANY_CHECK
			else if (!check_skip(s, b)) {
				return XZ_OK;
			}
#endif

			s->sequence = SEQ_BLOCK_START;
			break;

		case SEQ_INDEX:
			ret = dec_index(s, b);
			if (ret != XZ_STREAM_END)
				return ret;

			s->sequence = SEQ_INDEX_PADDING;

			/* Fall through */

		case SEQ_INDEX_PADDING:
			while ((s->index.size + (b->in_pos - s->in_start))
					& 3) {
				if (b->in_pos == b->in_size) {
					index_update(s, b);
					return XZ_OK;
				}

				if (b->in[b->in_pos++] != 0)
					return XZ_DATA_ERROR;
			}

			/* Finish the CRC32 value and Index size. */
			index_update(s, b);

			/* Compare the hashes to validate the Index field. */
			if (!memeq(&s->block.hash, &s->index.hash,
					sizeof(s->block.hash)))
				return XZ_DATA_ERROR;

			s->sequence = SEQ_INDEX_CRC32;

			/* Fall through */

		case SEQ_INDEX_CRC32:
			ret = crc32_validate(s, b);
			if (ret != XZ_STREAM_END)
				return ret;

			s->temp.size = STREAM_HEADER_SIZE;
			s->sequence = SEQ_STREAM_FOOTER;

			/* Fall through */

		case SEQ_STREAM_FOOTER:
			if (!fill_temp(s, b))
				return XZ_OK;

			return dec_stream_footer(s);
		}
	}

	/* Never reached */
}

/*
 * xz_dec_run() is a wrapper for dec_main() to handle some special cases in
 * multi-call and single-call decoding.
 *
 * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
 * are not going to make any progress anymore. This is to prevent the caller
 * from calling us infinitely when the input file is truncated or otherwise
 * corrupt. Since a zlib-style API allows the caller to fill the input buffer
 * only when the decoder doesn't produce any new output, we have to be careful
 * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
 * after the second consecutive call to xz_dec_run() that makes no progress.
 *
 * In single-call mode, if we couldn't decode everything and no error
 * occurred, either the input is truncated or the output buffer is too small.
 * Since we know that the last input byte never produces any output, we know
 * that if all the input was consumed and decoding wasn't finished, the file
 * must be corrupt. Otherwise the output buffer has to be too small or the
 * file is corrupt in a way that decoding it produces too big output.
 *
 * If single-call decoding fails, we reset b->in_pos and b->out_pos back to
 * their original values. This is because with some filter chains there won't
 * be any valid uncompressed data in the output buffer unless the decoding
 * actually succeeds (that's the price to pay for using the output buffer as
 * the workspace).
 */
XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
{
	size_t in_start;
	size_t out_start;
	enum xz_ret ret;

	if (DEC_IS_SINGLE(s->mode))
		xz_dec_reset(s);

	in_start = b->in_pos;
	out_start = b->out_pos;
	ret = dec_main(s, b);

	if (DEC_IS_SINGLE(s->mode)) {
		if (ret == XZ_OK)
			ret = b->in_pos == b->in_size
					? XZ_DATA_ERROR : XZ_BUF_ERROR;

		if (ret != XZ_STREAM_END) {
			b->in_pos = in_start;
			b->out_pos = out_start;
		}

	} else if (ret == XZ_OK && in_start == b->in_pos
			&& out_start == b->out_pos) {
		if (s->allow_buf_error)
			ret = XZ_BUF_ERROR;

		s->allow_buf_error = true;
	} else {
		s->allow_buf_error = false;
	}

	return ret;
}
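/*
 * A minimal multi-call usage sketch (illustrative only; fill_input()
 * and use_output() are hypothetical helpers, and the mode and
 * dictionary limit are just example values):
 *
 *	uint8_t in_buf[4096], out_buf[4096];
 *	struct xz_buf b = {
 *		.in = in_buf, .in_pos = 0, .in_size = 0,
 *		.out = out_buf, .out_pos = 0, .out_size = sizeof(out_buf)
 *	};
 *	struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 1 << 26);
 *	enum xz_ret ret;
 *
 *	do {
 *		if (b.in_pos == b.in_size) {
 *			b.in_size = fill_input(in_buf, sizeof(in_buf));
 *			b.in_pos = 0;
 *		}
 *
 *		ret = xz_dec_run(s, &b);
 *
 *		use_output(out_buf, b.out_pos);
 *		b.out_pos = 0;
 *	} while (ret == XZ_OK);
 *
 *	xz_dec_end(s);
 */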
XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
{
	struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL)
		return NULL;

	s->mode = mode;

#ifdef XZ_DEC_BCJ
	s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
	if (s->bcj == NULL)
		goto error_bcj;
#endif

	s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
	if (s->lzma2 == NULL)
		goto error_lzma2;

	xz_dec_reset(s);
	return s;

error_lzma2:
#ifdef XZ_DEC_BCJ
	xz_dec_bcj_end(s->bcj);
error_bcj:
#endif
	kfree(s);
	return NULL;
}

XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
{
	s->sequence = SEQ_STREAM_HEADER;
	s->allow_buf_error = false;
	s->pos = 0;
	s->crc32 = 0;
	memzero(&s->block, sizeof(s->block));
	memzero(&s->index, sizeof(s->index));
	s->temp.pos = 0;
	s->temp.size = STREAM_HEADER_SIZE;
}

XZ_EXTERN void xz_dec_end(struct xz_dec *s)
{
	if (s != NULL) {
		xz_dec_lzma2_end(s->lzma2);
#ifdef XZ_DEC_BCJ
		xz_dec_bcj_end(s->bcj);
#endif
		kfree(s);
	}
}
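/*
 * Single-call usage sketch for comparison (illustrative; the buffers
 * and their sizes are hypothetical). In XZ_SINGLE mode the whole input
 * and a big enough output buffer must be provided up front, because
 * the output buffer doubles as the decoder's workspace (see the
 * comment above xz_dec_run()):
 *
 *	struct xz_buf b = {
 *		.in = whole_file, .in_pos = 0, .in_size = file_size,
 *		.out = out_buf, .out_pos = 0, .out_size = out_buf_size
 *	};
 *	struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0);
 *	enum xz_ret ret = xz_dec_run(s, &b);
 *	xz_dec_end(s);
 *
 * On success ret is XZ_STREAM_END and b.out_pos tells how much
 * uncompressed data was produced.
 */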