/*
 * LZMA2 decoder
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <https://7-zip.org/>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

#include "xz_private.h"
#include "xz_lzma2.h"

/*
 * Range decoder initialization eats the first five bytes of each LZMA chunk.
 */
#define RC_INIT_BYTES 5

/*
 * Minimum number of usable input buffer bytes needed to safely decode one
 * LZMA symbol. The worst case is that we decode 22 bits using probabilities
 * and 26 direct bits. This may consume at most 20 bytes of input. However,
 * lzma_main() does an extra normalization before returning, thus we
 * need to put 21 here.
 */
#define LZMA_IN_REQUIRED 21

/*
 * Dictionary (history buffer)
 *
 * These are always true:
 *    start <= pos <= full <= end
 *    pos <= limit <= end
 *
 * In multi-call mode, also these are true:
 *    end == size
 *    size <= size_max
 *    allocated <= size
 *
 * Most of these variables are size_t to support single-call mode,
 * in which the dictionary variables address the actual output
 * buffer directly.
 */
struct dictionary {
	/* Beginning of the history buffer */
	uint8_t *buf;

	/* Old position in buf (before decoding more data) */
	size_t start;

	/* Position in buf */
	size_t pos;

	/*
	 * How full the dictionary is. This is used to detect corrupt input
	 * that would read beyond the beginning of the uncompressed stream.
	 */
	size_t full;

	/* Write limit; we don't write to buf[limit] or later bytes. */
	size_t limit;

	/*
	 * End of the dictionary buffer. In multi-call mode, this is
	 * the same as the dictionary size. In single-call mode, this
	 * indicates the size of the output buffer.
	 */
	size_t end;

	/*
	 * Size of the dictionary as specified in Block Header. This is used
	 * together with "full" to detect corrupt input that would make us
	 * read beyond the beginning of the uncompressed stream.
	 */
	uint32_t size;

	/*
	 * Maximum allowed dictionary size in multi-call mode.
	 * This is ignored in single-call mode.
	 */
	uint32_t size_max;

	/*
	 * Amount of memory currently allocated for the dictionary.
	 * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
	 * size_max is always the same as the allocated size.)
	 */
	uint32_t allocated;

	/* Operation mode */
	enum xz_mode mode;
};

/* Range decoder */
struct rc_dec {
	uint32_t range;
	uint32_t code;

	/*
	 * Number of initializing bytes remaining to be read
	 * by rc_read_init().
	 */
	uint32_t init_bytes_left;

	/*
	 * Buffer from which we read our input. It can be either
	 * temp.buf or the caller-provided input buffer.
	 */
	const uint8_t *in;
	size_t in_pos;
	size_t in_limit;
};

/*
 * Probabilities for a length decoder.
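 *
 * A match length is coded in one of three slices: "low" covers lengths
 * 2-9 (MATCH_LEN_MIN is 2), "mid" covers 10-17, and "high" covers
 * 18-273. This is why "choice" below encodes whether the length is at
 * least 10 and "choice2" whether it is at least 18.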
 */
struct lzma_len_dec {
	/* Probability of match length being at least 10 */
	uint16_t choice;

	/* Probability of match length being at least 18 */
	uint16_t choice2;

	/* Probabilities for match lengths 2-9 */
	uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];

	/* Probabilities for match lengths 10-17 */
	uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];

	/* Probabilities for match lengths 18-273 */
	uint16_t high[LEN_HIGH_SYMBOLS];
};

struct lzma_dec {
	/* Distances of latest four matches */
	uint32_t rep0;
	uint32_t rep1;
	uint32_t rep2;
	uint32_t rep3;

	/* Types of the most recently seen LZMA symbols */
	enum lzma_state state;

	/*
	 * Length of a match. This is updated so that dict_repeat can
	 * be called again to finish repeating the whole match.
	 */
	uint32_t len;

	/*
	 * LZMA properties or related bit masks (number of literal
	 * context bits, a mask derived from the number of literal
	 * position bits, and a mask derived from the number of
	 * position bits)
	 */
	uint32_t lc;
	uint32_t literal_pos_mask; /* (1 << lp) - 1 */
	uint32_t pos_mask; /* (1 << pb) - 1 */

	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
	uint16_t is_match[STATES][POS_STATES_MAX];

	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
	uint16_t is_rep[STATES];

	/*
	 * If 0, distance of a repeated match is rep0.
	 * Otherwise check is_rep1.
	 */
	uint16_t is_rep0[STATES];

	/*
	 * If 0, distance of a repeated match is rep1.
	 * Otherwise check is_rep2.
	 */
	uint16_t is_rep1[STATES];

	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
	uint16_t is_rep2[STATES];

	/*
	 * If 0, the repeated match has a length of one byte. Otherwise
	 * the length is decoded from rep_len_dec.
	 */
	uint16_t is_rep0_long[STATES][POS_STATES_MAX];

	/*
	 * Probability tree for the highest two bits of the match
	 * distance. There is a separate probability tree for match
	 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
	 */
	uint16_t dist_slot[DIST_STATES][DIST_SLOTS];

	/*
	 * Probability trees for additional bits for match distance
	 * when the distance is in the range [4, 127].
	 */
	uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];

	/*
	 * Probability tree for the lowest four bits of a match
	 * distance that is equal to or greater than 128.
	 */
	uint16_t dist_align[ALIGN_SIZE];

	/* Length of a normal match */
	struct lzma_len_dec match_len_dec;

	/* Length of a repeated match */
	struct lzma_len_dec rep_len_dec;

	/* Probabilities of literals */
	uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
};

struct lzma2_dec {
	/* Position in xz_dec_lzma2_run(). */
	enum lzma2_seq {
		SEQ_CONTROL,
		SEQ_UNCOMPRESSED_1,
		SEQ_UNCOMPRESSED_2,
		SEQ_COMPRESSED_0,
		SEQ_COMPRESSED_1,
		SEQ_PROPERTIES,
		SEQ_LZMA_PREPARE,
		SEQ_LZMA_RUN,
		SEQ_COPY
	} sequence;

	/*
	 * Next position after decoding the compressed size of the chunk.
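	 * For an LZMA chunk this is SEQ_PROPERTIES or SEQ_LZMA_PREPARE
	 * depending on the control byte; for an uncompressed chunk it is
	 * SEQ_COPY (see the SEQ_CONTROL handling in xz_dec_lzma2_run()).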
	 */
	enum lzma2_seq next_sequence;

	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
	uint32_t uncompressed;

	/*
	 * Compressed size of LZMA chunk or compressed/uncompressed
	 * size of uncompressed chunk (64 KiB at maximum)
	 */
	uint32_t compressed;

	/*
	 * True if dictionary reset is needed. This is true before
	 * the first chunk (LZMA or uncompressed).
	 */
	bool need_dict_reset;

	/*
	 * True if new LZMA properties are needed. This is true
	 * before the first LZMA chunk.
	 */
	bool need_props;
};

struct xz_dec_lzma2 {
	/*
	 * The order below is important on x86 to reduce code size and
	 * it shouldn't hurt on other platforms. Everything up to and
	 * including lzma.pos_mask are in the first 128 bytes on x86-32,
	 * which allows using smaller instructions to access those
	 * variables. On x86-64, fewer variables fit into the first 128
	 * bytes, but this is still the best order without sacrificing
	 * the readability by splitting the structures.
	 */
	struct rc_dec rc;
	struct dictionary dict;
	struct lzma2_dec lzma2;
	struct lzma_dec lzma;

	/*
	 * Temporary buffer which holds a small number of input bytes between
	 * decoder calls. See lzma2_lzma() for details.
	 */
	struct {
		uint32_t size;
		uint8_t buf[3 * LZMA_IN_REQUIRED];
	} temp;
};

/**************
 * Dictionary *
 **************/

/*
 * Reset the dictionary state. When in single-call mode, set up the beginning
 * of the dictionary to point to the actual output buffer.
 */
static void dict_reset(struct dictionary *dict, struct xz_buf *b)
{
	if (DEC_IS_SINGLE(dict->mode)) {
		dict->buf = b->out + b->out_pos;
		dict->end = b->out_size - b->out_pos;
	}

	dict->start = 0;
	dict->pos = 0;
	dict->limit = 0;
	dict->full = 0;
}

/* Set dictionary write limit */
static void dict_limit(struct dictionary *dict, size_t out_max)
{
	if (dict->end - dict->pos <= out_max)
		dict->limit = dict->end;
	else
		dict->limit = dict->pos + out_max;
}

/* Return true if at least one byte can be written into the dictionary. */
static inline bool dict_has_space(const struct dictionary *dict)
{
	return dict->pos < dict->limit;
}

/*
 * Get a byte from the dictionary at the given distance. The distance is
 * assumed to be valid, or as a special case, zero when the dictionary is
 * still empty. This special case is needed for single-call decoding to
 * avoid writing a '\0' to the end of the destination buffer.
 */
static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
{
	size_t offset = dict->pos - dist - 1;

	if (dist >= dict->pos)
		offset += dict->end;

	return dict->full > 0 ? dict->buf[offset] : 0;
}

/*
 * Put one byte into the dictionary. It is assumed that there is space for it.
 */
static inline void dict_put(struct dictionary *dict, uint8_t byte)
{
	dict->buf[dict->pos++] = byte;

	if (dict->full < dict->pos)
		dict->full = dict->pos;
}

/*
 * Repeat the given number of bytes from the given distance. If the distance
 * is invalid, false is returned. On success, true is returned and *len is
 * updated to indicate how many bytes are left to be repeated.
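 *
 * Note that dist is zero-based: dist == 0 refers to the most recently
 * written byte, so e.g. dist == 0 with *len == 5 repeats the latest
 * byte five times (run-length encoding).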
 */
static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
{
	size_t back;
	uint32_t left;

	if (dist >= dict->full || dist >= dict->size)
		return false;

	left = min_t(size_t, dict->limit - dict->pos, *len);
	*len -= left;

	back = dict->pos - dist - 1;
	if (dist >= dict->pos)
		back += dict->end;

	do {
		dict->buf[dict->pos++] = dict->buf[back++];
		if (back == dict->end)
			back = 0;
	} while (--left > 0);

	if (dict->full < dict->pos)
		dict->full = dict->pos;

	return true;
}

/* Copy uncompressed data as is from input to dictionary and output buffers. */
static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
			      uint32_t *left)
{
	size_t copy_size;

	while (*left > 0 && b->in_pos < b->in_size
			&& b->out_pos < b->out_size) {
		copy_size = min(b->in_size - b->in_pos,
				b->out_size - b->out_pos);
		if (copy_size > dict->end - dict->pos)
			copy_size = dict->end - dict->pos;
		if (copy_size > *left)
			copy_size = *left;

		*left -= copy_size;

		/*
		 * If doing in-place decompression in single-call mode and the
		 * uncompressed size of the file is larger than the caller
		 * thought (i.e. it is invalid input!), the buffers below may
		 * overlap and cause undefined behavior with memcpy().
		 * With valid inputs memcpy() would be fine here.
		 */
		memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
		dict->pos += copy_size;

		if (dict->full < dict->pos)
			dict->full = dict->pos;

		if (DEC_IS_MULTI(dict->mode)) {
			if (dict->pos == dict->end)
				dict->pos = 0;

			/*
			 * Like above but for multi-call mode: use memmove()
			 * to avoid undefined behavior with invalid input.
			 */
			memmove(b->out + b->out_pos, b->in + b->in_pos,
					copy_size);
		}

		dict->start = dict->pos;

		b->out_pos += copy_size;
		b->in_pos += copy_size;
	}
}

/*
 * Flush pending data from dictionary to b->out. It is assumed that there is
 * enough space in b->out. This is guaranteed because caller uses dict_limit()
 * before decoding data into the dictionary.
 */
static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
{
	size_t copy_size = dict->pos - dict->start;

	if (DEC_IS_MULTI(dict->mode)) {
		if (dict->pos == dict->end)
			dict->pos = 0;

		/*
		 * These buffers cannot overlap even if doing in-place
		 * decompression because in multi-call mode dict->buf
		 * has been allocated by us in this file; it's not
		 * provided by the caller like in single-call mode.
		 */
		memcpy(b->out + b->out_pos, dict->buf + dict->start,
				copy_size);
	}

	dict->start = dict->pos;
	b->out_pos += copy_size;
	return copy_size;
}

/*****************
 * Range decoder *
 *****************/

/* Reset the range decoder. */
static void rc_reset(struct rc_dec *rc)
{
	rc->range = (uint32_t)-1;
	rc->code = 0;
	rc->init_bytes_left = RC_INIT_BYTES;
}

/*
 * Read the first five initial bytes into rc->code if they haven't been
 * read already. (Yes, the first byte gets completely ignored.)
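 *
 * rc->code holds only 32 bits even though five bytes are read; this
 * works because in a valid stream the first of the five bytes is
 * always 0x00 (the range encoder emits it as such), so it carries no
 * information.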
 */
static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
{
	while (rc->init_bytes_left > 0) {
		if (b->in_pos == b->in_size)
			return false;

		rc->code = (rc->code << 8) + b->in[b->in_pos++];
		--rc->init_bytes_left;
	}

	return true;
}

/* Return true if there may not be enough input for the next decoding loop. */
static inline bool rc_limit_exceeded(const struct rc_dec *rc)
{
	return rc->in_pos > rc->in_limit;
}

/*
 * Return true if it is possible (from the point of view of the range
 * decoder) that we have reached the end of the LZMA chunk.
 */
static inline bool rc_is_finished(const struct rc_dec *rc)
{
	return rc->code == 0;
}

/* Read the next input byte if needed. */
static __always_inline void rc_normalize(struct rc_dec *rc)
{
	if (rc->range < RC_TOP_VALUE) {
		rc->range <<= RC_SHIFT_BITS;
		rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
	}
}

/*
 * Decode one bit. In some versions, this function has been split into three
 * functions so that the compiler is supposed to be able to more easily avoid
 * an extra branch. In this particular version of the LZMA decoder, this
 * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
 * on x86). Using a non-split version results in nicer looking code too.
 *
 * NOTE: This must return an int. Do not make it return a bool or the speed
 * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
 * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
 */
static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
{
	uint32_t bound;
	int bit;

	rc_normalize(rc);
	bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
	if (rc->code < bound) {
		rc->range = bound;
		*prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
		bit = 0;
	} else {
		rc->range -= bound;
		rc->code -= bound;
		*prob -= *prob >> RC_MOVE_BITS;
		bit = 1;
	}

	return bit;
}

/* Decode a bittree starting from the most significant bit. */
static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
					   uint16_t *probs, uint32_t limit)
{
	uint32_t symbol = 1;

	do {
		if (rc_bit(rc, &probs[symbol]))
			symbol = (symbol << 1) + 1;
		else
			symbol <<= 1;
	} while (symbol < limit);

	return symbol;
}

/* Decode a bittree starting from the least significant bit. */
static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
					       uint16_t *probs,
					       uint32_t *dest, uint32_t limit)
{
	uint32_t symbol = 1;
	uint32_t i = 0;

	do {
		if (rc_bit(rc, &probs[symbol])) {
			symbol = (symbol << 1) + 1;
			*dest += 1 << i;
		} else {
			symbol <<= 1;
		}
	} while (++i < limit);
}

/* Decode direct bits (fixed fifty-fifty probability) */
static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
{
	uint32_t mask;

	do {
		rc_normalize(rc);
		rc->range >>= 1;
		rc->code -= rc->range;
		mask = (uint32_t)0 - (rc->code >> 31);
		rc->code += rc->range & mask;
		*dest = (*dest << 1) + (mask + 1);
	} while (--limit > 0);
}

/********
 * LZMA *
 ********/

/*
 * Get pointer to literal coder probability array.
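 *
 * With the common properties lc = 3 and lp = 0 the index reduces to
 * s->lzma.literal[prev_byte >> 5], i.e. the three high bits of the
 * previous byte select one of eight literal coders.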
 */
static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
{
	uint32_t prev_byte = dict_get(&s->dict, 0);
	uint32_t low = prev_byte >> (8 - s->lzma.lc);
	uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
	return s->lzma.literal[low + high];
}

/* Decode a literal (one 8-bit byte) */
static void lzma_literal(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	uint32_t symbol;
	uint32_t match_byte;
	uint32_t match_bit;
	uint32_t offset;
	uint32_t i;

	probs = lzma_literal_probs(s);

	if (lzma_state_is_literal(s->lzma.state)) {
		symbol = rc_bittree(&s->rc, probs, 0x100);
	} else {
		symbol = 1;
		match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
		offset = 0x100;

		do {
			match_bit = match_byte & offset;
			match_byte <<= 1;
			i = offset + match_bit + symbol;

			if (rc_bit(&s->rc, &probs[i])) {
				symbol = (symbol << 1) + 1;
				offset &= match_bit;
			} else {
				symbol <<= 1;
				offset &= ~match_bit;
			}
		} while (symbol < 0x100);
	}

	dict_put(&s->dict, (uint8_t)symbol);
	lzma_state_literal(&s->lzma.state);
}

/* Decode the length of the match into s->lzma.len. */
static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
		     uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t limit;

	if (!rc_bit(&s->rc, &l->choice)) {
		probs = l->low[pos_state];
		limit = LEN_LOW_SYMBOLS;
		s->lzma.len = MATCH_LEN_MIN;
	} else {
		if (!rc_bit(&s->rc, &l->choice2)) {
			probs = l->mid[pos_state];
			limit = LEN_MID_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
		} else {
			probs = l->high;
			limit = LEN_HIGH_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
					+ LEN_MID_SYMBOLS;
		}
	}

	s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
}

/* Decode a match. The distance will be stored in s->lzma.rep0. */
static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t dist_slot;
	uint32_t limit;

	lzma_state_match(&s->lzma.state);

	s->lzma.rep3 = s->lzma.rep2;
	s->lzma.rep2 = s->lzma.rep1;
	s->lzma.rep1 = s->lzma.rep0;

	lzma_len(s, &s->lzma.match_len_dec, pos_state);

	probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
	dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;

	if (dist_slot < DIST_MODEL_START) {
		s->lzma.rep0 = dist_slot;
	} else {
		limit = (dist_slot >> 1) - 1;
		s->lzma.rep0 = 2 + (dist_slot & 1);

		if (dist_slot < DIST_MODEL_END) {
			s->lzma.rep0 <<= limit;
			probs = s->lzma.dist_special + s->lzma.rep0
					- dist_slot - 1;
			rc_bittree_reverse(&s->rc, probs,
					&s->lzma.rep0, limit);
		} else {
			rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
			s->lzma.rep0 <<= ALIGN_BITS;
			rc_bittree_reverse(&s->rc, s->lzma.dist_align,
					&s->lzma.rep0, ALIGN_BITS);
		}
	}
}

/*
 * Decode a repeated match. The distance is one of the four most recently
 * seen matches. The distance will be stored in s->lzma.rep0.
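 *
 * rep0 .. rep3 behave like a move-to-front list: the distance that is
 * picked moves to rep0 and the more recently used distances shift back
 * by one position.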
 */
static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint32_t tmp;

	if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
		if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
				s->lzma.state][pos_state])) {
			lzma_state_short_rep(&s->lzma.state);
			s->lzma.len = 1;
			return;
		}
	} else {
		if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
			tmp = s->lzma.rep1;
		} else {
			if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
				tmp = s->lzma.rep2;
			} else {
				tmp = s->lzma.rep3;
				s->lzma.rep3 = s->lzma.rep2;
			}

			s->lzma.rep2 = s->lzma.rep1;
		}

		s->lzma.rep1 = s->lzma.rep0;
		s->lzma.rep0 = tmp;
	}

	lzma_state_long_rep(&s->lzma.state);
	lzma_len(s, &s->lzma.rep_len_dec, pos_state);
}

/* LZMA decoder core */
static bool lzma_main(struct xz_dec_lzma2 *s)
{
	uint32_t pos_state;

	/*
	 * If the dictionary limit was reached during the previous call,
	 * try to finish the possibly pending repeat in the dictionary.
	 */
	if (dict_has_space(&s->dict) && s->lzma.len > 0)
		dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);

	/*
	 * Decode more LZMA symbols. One iteration may consume up to
	 * LZMA_IN_REQUIRED - 1 bytes.
	 */
	while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
		pos_state = s->dict.pos & s->lzma.pos_mask;

		if (!rc_bit(&s->rc, &s->lzma.is_match[
				s->lzma.state][pos_state])) {
			lzma_literal(s);
		} else {
			if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
				lzma_rep_match(s, pos_state);
			else
				lzma_match(s, pos_state);

			if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
				return false;
		}
	}

	/*
	 * Having the range decoder always normalized when we are outside
	 * this function makes it easier to correctly handle the end of
	 * the chunk.
	 */
	rc_normalize(&s->rc);

	return true;
}

/*
 * Reset the LZMA decoder and range decoder state. The dictionary is not
 * reset here, because the LZMA state may be reset without resetting the
 * dictionary.
 */
static void lzma_reset(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	size_t i;

	s->lzma.state = STATE_LIT_LIT;
	s->lzma.rep0 = 0;
	s->lzma.rep1 = 0;
	s->lzma.rep2 = 0;
	s->lzma.rep3 = 0;

	/*
	 * All probabilities are initialized to the same value. This hack
	 * makes the code smaller by avoiding a separate loop for each
	 * probability array.
	 *
	 * This could be optimized so that only the part of the literal
	 * probabilities that is actually required gets initialized. In
	 * the common case we would write 12 KiB less.
	 */
	probs = s->lzma.is_match[0];
	for (i = 0; i < PROBS_TOTAL; ++i)
		probs[i] = RC_BIT_MODEL_TOTAL / 2;

	rc_reset(&s->rc);
}

/*
 * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
 * from the decoded lp and pb values. On success, the LZMA decoder state is
 * reset and true is returned.
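 *
 * The properties byte encodes props = (pb * 5 + lp) * 9 + lc. For
 * example, the common value 0x5D (93) decodes to lc = 3, lp = 0 and
 * pb = 2, because 93 = (2 * 5 + 0) * 9 + 3.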
 */
static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
{
	if (props > (4 * 5 + 4) * 9 + 8)
		return false;

	s->lzma.pos_mask = 0;
	while (props >= 9 * 5) {
		props -= 9 * 5;
		++s->lzma.pos_mask;
	}

	s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;

	s->lzma.literal_pos_mask = 0;
	while (props >= 9) {
		props -= 9;
		++s->lzma.literal_pos_mask;
	}

	s->lzma.lc = props;

	if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
		return false;

	s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;

	lzma_reset(s);

	return true;
}

/*********
 * LZMA2 *
 *********/

/*
 * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
 * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
 * wrapper function takes care of making the LZMA decoder's assumption safe.
 *
 * As long as there is plenty of input left to be decoded in the current LZMA
 * chunk, we decode directly from the caller-supplied input buffer until
 * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied
 * into s->temp.buf, which (hopefully) gets filled on the next call to this
 * function. We decode a few bytes from the temporary buffer so that we can
 * continue decoding from the caller-supplied input buffer again.
 */
static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
	size_t in_avail;
	uint32_t tmp;

	in_avail = b->in_size - b->in_pos;
	if (s->temp.size > 0 || s->lzma2.compressed == 0) {
		tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
		if (tmp > s->lzma2.compressed - s->temp.size)
			tmp = s->lzma2.compressed - s->temp.size;
		if (tmp > in_avail)
			tmp = in_avail;

		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);

		if (s->temp.size + tmp == s->lzma2.compressed) {
			memzero(s->temp.buf + s->temp.size + tmp,
					sizeof(s->temp.buf)
						- s->temp.size - tmp);
			s->rc.in_limit = s->temp.size + tmp;
		} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
			s->temp.size += tmp;
			b->in_pos += tmp;
			return true;
		} else {
			s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
		}

		s->rc.in = s->temp.buf;
		s->rc.in_pos = 0;

		if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
			return false;

		s->lzma2.compressed -= s->rc.in_pos;

		if (s->rc.in_pos < s->temp.size) {
			s->temp.size -= s->rc.in_pos;
			memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
					s->temp.size);
			return true;
		}

		b->in_pos += s->rc.in_pos - s->temp.size;
		s->temp.size = 0;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail >= LZMA_IN_REQUIRED) {
		s->rc.in = b->in;
		s->rc.in_pos = b->in_pos;

		if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
			s->rc.in_limit = b->in_pos + s->lzma2.compressed;
		else
			s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;

		if (!lzma_main(s))
			return false;

		in_avail = s->rc.in_pos - b->in_pos;
		if (in_avail > s->lzma2.compressed)
			return false;

		s->lzma2.compressed -= in_avail;
		b->in_pos = s->rc.in_pos;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail < LZMA_IN_REQUIRED) {
		if (in_avail > s->lzma2.compressed)
			in_avail = s->lzma2.compressed;

		memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
		s->temp.size = in_avail;
		b->in_pos += in_avail;
	}

	return true;
}
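/*
 * Worked example of the chunk headers parsed below (the byte values are
 * made up for illustration): the header E0 0F FF 01 FF 5D starts an
 * LZMA chunk with dictionary, properties and state reset. Its
 * uncompressed size is ((0xE0 & 0x1F) << 16) + (0x0F << 8) + 0xFF + 1
 * = 4096 bytes, its compressed size is (0x01 << 8) + 0xFF + 1 = 512
 * bytes, and 0x5D is the properties byte handled by lzma_props().
 */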
/*
 * Take care of the LZMA2 control layer, and forward the job of actual LZMA
 * decoding or copying of uncompressed chunks to other functions.
 */
XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
				       struct xz_buf *b)
{
	uint32_t tmp;

	while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
		switch (s->lzma2.sequence) {
		case SEQ_CONTROL:
			/*
			 * LZMA2 control byte
			 *
			 * Exact values:
			 *   0x00   End marker
			 *   0x01   Dictionary reset followed by
			 *          an uncompressed chunk
			 *   0x02   Uncompressed chunk (no dictionary reset)
			 *
			 * Highest three bits (s->control & 0xE0):
			 *   0xE0   Dictionary reset, new properties and state
			 *          reset, followed by LZMA compressed chunk
			 *   0xC0   New properties and state reset, followed
			 *          by LZMA compressed chunk (no dictionary
			 *          reset)
			 *   0xA0   State reset using old properties,
			 *          followed by LZMA compressed chunk (no
			 *          dictionary reset)
			 *   0x80   LZMA chunk (no dictionary or state reset)
			 *
			 * For LZMA compressed chunks, the lowest five bits
			 * (s->control & 0x1F) are the highest bits of the
			 * uncompressed size (bits 16-20).
			 *
			 * A new LZMA2 stream must begin with a dictionary
			 * reset. The first LZMA chunk must set new
			 * properties and reset the LZMA state.
			 *
			 * Values that don't match anything described above
			 * are invalid and we return XZ_DATA_ERROR.
			 */
			tmp = b->in[b->in_pos++];

			if (tmp == 0x00)
				return XZ_STREAM_END;

			if (tmp >= 0xE0 || tmp == 0x01) {
				s->lzma2.need_props = true;
				s->lzma2.need_dict_reset = false;
				dict_reset(&s->dict, b);
			} else if (s->lzma2.need_dict_reset) {
				return XZ_DATA_ERROR;
			}

			if (tmp >= 0x80) {
				s->lzma2.uncompressed = (tmp & 0x1F) << 16;
				s->lzma2.sequence = SEQ_UNCOMPRESSED_1;

				if (tmp >= 0xC0) {
					/*
					 * When there are new properties,
					 * the state reset is done at
					 * SEQ_PROPERTIES.
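					 * (lzma_props() itself calls
					 * lzma_reset() after validating
					 * the new properties.)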
					 */
					s->lzma2.need_props = false;
					s->lzma2.next_sequence
							= SEQ_PROPERTIES;

				} else if (s->lzma2.need_props) {
					return XZ_DATA_ERROR;

				} else {
					s->lzma2.next_sequence
							= SEQ_LZMA_PREPARE;
					if (tmp >= 0xA0)
						lzma_reset(s);
				}
			} else {
				if (tmp > 0x02)
					return XZ_DATA_ERROR;

				s->lzma2.sequence = SEQ_COMPRESSED_0;
				s->lzma2.next_sequence = SEQ_COPY;
			}

			break;

		case SEQ_UNCOMPRESSED_1:
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
			break;

		case SEQ_UNCOMPRESSED_2:
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = SEQ_COMPRESSED_0;
			break;

		case SEQ_COMPRESSED_0:
			s->lzma2.compressed
					= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_COMPRESSED_1;
			break;

		case SEQ_COMPRESSED_1:
			s->lzma2.compressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = s->lzma2.next_sequence;
			break;

		case SEQ_PROPERTIES:
			if (!lzma_props(s, b->in[b->in_pos++]))
				return XZ_DATA_ERROR;

			s->lzma2.sequence = SEQ_LZMA_PREPARE;

			fallthrough;

		case SEQ_LZMA_PREPARE:
			if (s->lzma2.compressed < RC_INIT_BYTES)
				return XZ_DATA_ERROR;

			if (!rc_read_init(&s->rc, b))
				return XZ_OK;

			s->lzma2.compressed -= RC_INIT_BYTES;
			s->lzma2.sequence = SEQ_LZMA_RUN;

			fallthrough;

		case SEQ_LZMA_RUN:
			/*
			 * Set the dictionary limit to indicate how much we
			 * want to be decoded at maximum. Decode new data
			 * into the dictionary. Flush the new data from the
			 * dictionary to b->out. Check if we finished
			 * decoding this chunk. In case the dictionary got
			 * full but we didn't fill the output buffer yet,
			 * we may run this loop multiple times without
			 * changing s->lzma2.sequence.
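			 *
			 * dict_flush() returns the number of newly decoded
			 * bytes; subtracting it from s->lzma2.uncompressed
			 * tracks how much of this chunk is still missing.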
			 */
			dict_limit(&s->dict, min_t(size_t,
					b->out_size - b->out_pos,
					s->lzma2.uncompressed));
			if (!lzma2_lzma(s, b))
				return XZ_DATA_ERROR;

			s->lzma2.uncompressed -= dict_flush(&s->dict, b);

			if (s->lzma2.uncompressed == 0) {
				if (s->lzma2.compressed > 0 || s->lzma.len > 0
						|| !rc_is_finished(&s->rc))
					return XZ_DATA_ERROR;

				rc_reset(&s->rc);
				s->lzma2.sequence = SEQ_CONTROL;

			} else if (b->out_pos == b->out_size
					|| (b->in_pos == b->in_size
						&& s->temp.size
						< s->lzma2.compressed)) {
				return XZ_OK;
			}

			break;

		case SEQ_COPY:
			dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
			if (s->lzma2.compressed > 0)
				return XZ_OK;

			s->lzma2.sequence = SEQ_CONTROL;
			break;
		}
	}

	return XZ_OK;
}

XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
						   uint32_t dict_max)
{
	struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (s == NULL)
		return NULL;

	s->dict.mode = mode;
	s->dict.size_max = dict_max;

	if (DEC_IS_PREALLOC(mode)) {
		s->dict.buf = vmalloc(dict_max);
		if (s->dict.buf == NULL) {
			kfree(s);
			return NULL;
		}
	} else if (DEC_IS_DYNALLOC(mode)) {
		s->dict.buf = NULL;
		s->dict.allocated = 0;
	}

	return s;
}

XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
{
	/* This limits dictionary size to 3 GiB to keep parsing simpler. */
	if (props > 39)
		return XZ_OPTIONS_ERROR;

	s->dict.size = 2 + (props & 1);
	s->dict.size <<= (props >> 1) + 11;

	if (DEC_IS_MULTI(s->dict.mode)) {
		if (s->dict.size > s->dict.size_max)
			return XZ_MEMLIMIT_ERROR;

		s->dict.end = s->dict.size;

		if (DEC_IS_DYNALLOC(s->dict.mode)) {
			if (s->dict.allocated < s->dict.size) {
				s->dict.allocated = s->dict.size;
				vfree(s->dict.buf);
				s->dict.buf = vmalloc(s->dict.size);
				if (s->dict.buf == NULL) {
					s->dict.allocated = 0;
					return XZ_MEM_ERROR;
				}
			}
		}
	}

	s->lzma.len = 0;

	s->lzma2.sequence = SEQ_CONTROL;
	s->lzma2.need_dict_reset = true;

	s->temp.size = 0;

	return XZ_OK;
}

XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
	if (DEC_IS_MULTI(s->dict.mode))
		vfree(s->dict.buf);

	kfree(s);
}
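/*
 * A sketch of the typical call sequence, for illustration only: error
 * handling is omitted, the caller is expected to refill b.in and drain
 * b.out between iterations, and XZ_DYNALLOC, the 64 MiB dict_max and
 * taking the props byte from the Block Header are example choices
 * rather than requirements.
 *
 *	struct xz_buf b = { .in = in, .in_size = in_size,
 *			    .out = out, .out_size = out_size };
 *	struct xz_dec_lzma2 *s = xz_dec_lzma2_create(XZ_DYNALLOC, 1 << 26);
 *	enum xz_ret ret;
 *
 *	xz_dec_lzma2_reset(s, props);
 *	do {
 *		ret = xz_dec_lzma2_run(s, &b);
 *	} while (ret == XZ_OK);
 *	xz_dec_lzma2_end(s);
 */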