/*
 * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "hist.h"
#include "zstd_opt.h"


#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
#define ZSTD_MAX_PRICE      (1<<30)

#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */


/*-*************************************
*  Price functions for optimal parser
***************************************/

#if 0    /* approximation at bit level */
#  define BITCOST_ACCURACY 0
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) ((void)opt, ZSTD_bitWeight(stat))
#elif 0  /* fractional bit accuracy */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
#else    /* opt==approx, ultra==accurate */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif

MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}

MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
    U32 const stat = rawStat + 1;
    U32 const hb = ZSTD_highbit32(stat);
    U32 const BWeight = hb * BITCOST_MULTIPLIER;
    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
    U32 const weight = BWeight + FWeight;
    assert(hb + BITCOST_ACCURACY < 31);
    return weight;
}

#if (DEBUGLEVEL>=2)
/* debugging function,
 * @return price in bytes as fractional value
 * for debug messages only */
MEM_STATIC double ZSTD_fCost(U32 price)
{
    return (double)price / (BITCOST_MULTIPLIER*8);
}
#endif
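
/* Illustration only : a minimal sketch of the fixed-point price model above,
 * kept disabled like the other debug helpers in this file ; the helper name is
 * hypothetical. With BITCOST_ACCURACY==8, WEIGHT() returns costs in units of
 * 1/256th of a bit, and ZSTD_fracWeight(s) ~= (log2(s+1) + 1) * 256 ; the
 * constant "+1 bit" cancels out in price = WEIGHT(sum) - WEIGHT(freq)
 * ~= log2(sum/freq) bits. */
#if 0
static void ZSTD_priceModel_example(void)
{
    assert(ZSTD_fracWeight(0)  ==  256);   /* stat=1  : 0 bits + frac 1.0 */
    assert(ZSTD_fracWeight(1)  ==  512);   /* stat=2  : 1 bit  + frac 1.0 */
    assert(ZSTD_fracWeight(5)  ==  896);   /* stat=6  : 2 bits + frac 1.5 */
    assert(ZSTD_fracWeight(15) == 1280);   /* stat=16 : 4 bits + frac 1.0 */
    /* a symbol with frequency 15 within a total of 255 costs ~log2(256/16) = 4 bits : */
    assert(ZSTD_fracWeight(255) - ZSTD_fracWeight(15) == 4 * BITCOST_MULTIPLIER);
}
#endif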

static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
}

static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
    if (ZSTD_compressedLiterals(optPtr))
        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
}


/* ZSTD_downscaleStat() :
 * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
 * return the resulting sum of elements */
static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
{
    U32 s, sum=0;
    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
    for (s=0; s<lastEltIndex+1; s++) {
        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
        sum += table[s];
    }
    return sum;
}
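
/* Illustration only : how ZSTD_downscaleStat() carries statistics across blocks.
 * A hypothetical sketch, disabled like the debug helpers in this file.
 * Each count is divided by 2^(ZSTD_FREQ_DIV+malus) (here 2^5, with malus==1),
 * then offset by 1 so no symbol ever reaches frequency 0 (its price stays finite). */
#if 0
static void ZSTD_downscaleStat_example(void)
{
    unsigned freq[4] = { 0, 31, 32, 320 };
    U32 const sum = ZSTD_downscaleStat(freq, 3, 1);
    assert(freq[0] == 1);    /* 1 + (0   >> 5) */
    assert(freq[1] == 1);    /* 1 + (31  >> 5) */
    assert(freq[2] == 2);    /* 1 + (32  >> 5) */
    assert(freq[3] == 11);   /* 1 + (320 >> 5) */
    assert(sum == 15);
}
#endif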

/* ZSTD_rescaleFreqs() :
 * if first block (detected by optPtr->litLengthSum == 0) : init statistics
 *    take hints from dictionary if there is one
 *    or init from zero, using src for literals stats, or flat 1 for match symbols
 * otherwise downscale existing stats, to be used as seed for next block.
 */
static void
ZSTD_rescaleFreqs(optState_t* const optPtr,
            const BYTE* const src, size_t const srcSize,
                  int const optLevel)
{
    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
    optPtr->priceType = zop_dynamic;

    if (optPtr->litLengthSum == 0) {  /* first block : init */
        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
            optPtr->priceType = zop_predef;
        }

        assert(optPtr->symbolCosts != NULL);
        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
            /* huffman table presumed generated by dictionary */
            optPtr->priceType = zop_dynamic;

            if (compressedLiterals) {
                unsigned lit;
                assert(optPtr->litFreq != NULL);
                optPtr->litSum = 0;
                for (lit=0; lit<=MaxLit; lit++) {
                    U32 const scaleLog = 11;   /* scale to 2K */
                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
                    assert(bitCost <= scaleLog);
                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litSum += optPtr->litFreq[lit];
            }   }

            {   unsigned ll;
                FSE_CState_t llstate;
                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
                optPtr->litLengthSum = 0;
                for (ll=0; ll<=MaxLL; ll++) {
                    U32 const scaleLog = 10;   /* scale to 1K */
                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
                    assert(bitCost < scaleLog);
                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
            }   }

            {   unsigned ml;
                FSE_CState_t mlstate;
                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
                optPtr->matchLengthSum = 0;
                for (ml=0; ml<=MaxML; ml++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
                    assert(bitCost < scaleLog);
                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
            }   }

            {   unsigned of;
                FSE_CState_t ofstate;
                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
                optPtr->offCodeSum = 0;
                for (of=0; of<=MaxOff; of++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
                    assert(bitCost < scaleLog);
                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
            }   }

        } else {  /* not a dictionary */

            assert(optPtr->litFreq != NULL);
            if (compressedLiterals) {
                unsigned lit = MaxLit;
                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
            }

            {   unsigned ll;
                for (ll=0; ll<=MaxLL; ll++)
                    optPtr->litLengthFreq[ll] = 1;
            }
            optPtr->litLengthSum = MaxLL+1;

            {   unsigned ml;
                for (ml=0; ml<=MaxML; ml++)
                    optPtr->matchLengthFreq[ml] = 1;
            }
            optPtr->matchLengthSum = MaxML+1;

            {   unsigned of;
                for (of=0; of<=MaxOff; of++)
                    optPtr->offCodeFreq[of] = 1;
            }
            optPtr->offCodeSum = MaxOff+1;

        }

    } else {   /* new block : re-use previous statistics, scaled down */

        if (compressedLiterals)
            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
    }

    ZSTD_setBasePrices(optPtr, optLevel);
}
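
/* Illustration only : why the dictionary path above seeds frequencies as
 * 1 << (scaleLog - bitCost). Hypothetical sketch, disabled like other debug code.
 * It inverts the relation price ~= log2(sum/freq) : a symbol costing `bitCost`
 * bits in the dictionary's entropy tables receives frequency 2^(scaleLog-bitCost)
 * inside a virtual total of ~2^scaleLog, so the dynamic pricer reproduces
 * roughly the same cost. */
#if 0
static void ZSTD_dictSeeding_example(void)
{
    U32 const scaleLog = 10;                           /* virtual total ~1K, as in ZSTD_rescaleFreqs() */
    U32 const bitCost  = 3;                            /* symbol costs 3 bits per the dictionary CTable */
    U32 const freq     = 1u << (scaleLog - bitCost);   /* == 128 */
    /* price ~= log2(1024 / 128) == 3 bits, matching the dictionary cost : */
    assert(ZSTD_bitWeight(1024 - 1) - ZSTD_bitWeight(freq - 1) == 3 * BITCOST_MULTIPLIER);
}
#endif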

/* ZSTD_rawLiteralsCost() :
 * price of literals (only) in specified segment (whose length can be 0).
 * does not include price of literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
                                const optState_t* const optPtr,
                                int optLevel)
{
    if (litLength == 0) return 0;

    if (!ZSTD_compressedLiterals(optPtr))
        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */

    if (optPtr->priceType == zop_predef)
        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistic used */

    /* dynamic statistics */
    {   U32 price = litLength * optPtr->litSumBasePrice;
        U32 u;
        for (u=0; u < litLength; u++) {
            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
        }
        return price;
    }
}

/* ZSTD_litLengthPrice() :
 * cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);

    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
             + optPtr->litLengthSumBasePrice
             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
    }
}

/* ZSTD_getMatchPrice() :
 * Provides the cost of the match part (offset + matchLength) of a sequence.
 * Must be combined with the literals cost (ZSTD_rawLiteralsCost() + ZSTD_litLengthPrice())
 * to get the full cost of a sequence.
 * optLevel: when <2, favors small offsets for decompression speed (improved cache efficiency) */
FORCE_INLINE_TEMPLATE U32
ZSTD_getMatchPrice(U32 const offset,
                   U32 const matchLength,
             const optState_t* const optPtr,
                   int const optLevel)
{
    U32 price;
    U32 const offCode = ZSTD_highbit32(offset+1);
    U32 const mlBase = matchLength - MINMATCH;
    assert(matchLength >= MINMATCH);

    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);

    /* dynamic statistics */
    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
    if ((optLevel<2) /*static*/ && offCode >= 20)
        price += (offCode-19)*2 * BITCOST_MULTIPLIER;   /* handicap for long distance offsets, favor decompression speed */

    /* match Length */
    {   U32 const mlCode = ZSTD_MLcode(mlBase);
        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
    }

    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */

    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
    return price;
}
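
/* Illustration only : the shape of a dynamic symbol price, as used by
 * ZSTD_litLengthPrice() and ZSTD_getMatchPrice() above. Hypothetical numbers,
 * disabled like other debug code :
 * price = (extra bits of the code) * BITCOST_MULTIPLIER
 *       + WEIGHT(sum of all frequencies) - WEIGHT(frequency of this code)
 *       ~= extraBits + log2(sum/freq) bits. */
#if 0
static void ZSTD_dynamicPrice_example(void)
{
    U32 const extraBits = 1;     /* e.g. LL_bits[llCode], assumed value */
    U32 const sum  = 1024;       /* e.g. optPtr->litLengthSum */
    U32 const freq = 64;         /* e.g. optPtr->litLengthFreq[llCode] */
    U32 const price = extraBits * BITCOST_MULTIPLIER
                    + ZSTD_bitWeight(sum) - ZSTD_bitWeight(freq);
    /* 1 extra bit + log2(1024/64) = 1 + 4 = 5 bits : */
    assert(price == 5 * BITCOST_MULTIPLIER);
}
#endif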

/* ZSTD_updateStats() :
 * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
                             U32 litLength, const BYTE* literals,
                             U32 offsetCode, U32 matchLength)
{
    /* literals */
    if (ZSTD_compressedLiterals(optPtr)) {
        U32 u;
        for (u=0; u < litLength; u++)
            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
    }

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        optPtr->litLengthFreq[llCode]++;
        optPtr->litLengthSum++;
    }

    /* match offset code (0-2=>repCode; 3+=>offset+2) */
    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
        assert(offCode <= MaxOff);
        optPtr->offCodeFreq[offCode]++;
        optPtr->offCodeSum++;
    }

    /* match Length */
    {   U32 const mlBase = matchLength - MINMATCH;
        U32 const mlCode = ZSTD_MLcode(mlBase);
        optPtr->matchLengthFreq[mlCode]++;
        optPtr->matchLengthSum++;
    }
}


/* ZSTD_readMINMATCH() :
 * function safe only for comparisons
 * assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
    switch (length)
    {
    default :
    case 4 : return MEM_read32(memPtr);
    case 3 : if (MEM_isLittleEndian())
                return MEM_read32(memPtr)<<8;
             else
                return MEM_read32(memPtr)>>8;
    }
}
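
/* Illustration only : why ZSTD_readMINMATCH() shifts by 8 for 3-byte compares.
 * Hypothetical sketch, disabled. Reading 4 bytes b0 b1 b2 b3 then shifting out
 * the lane that holds b3 (high byte on little-endian, low byte on big-endian)
 * yields a value that depends on b0..b2 only, so two such values are equal
 * iff the two 3-byte prefixes are equal. */
#if 0
static void ZSTD_readMINMATCH_example(void)
{
    BYTE const a[4] = { 'a', 'b', 'c', 'X' };
    BYTE const b[4] = { 'a', 'b', 'c', 'Y' };   /* same 3-byte prefix, different 4th byte */
    assert(ZSTD_readMINMATCH(a, 3) == ZSTD_readMINMATCH(b, 3));
    assert(ZSTD_readMINMATCH(a, 4) != ZSTD_readMINMATCH(b, 4));
}
#endif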

/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
                                              U32* nextToUpdate3,
                                              const BYTE* const ip)
{
    U32* const hashTable3 = ms->hashTable3;
    U32 const hashLog3 = ms->hashLog3;
    const BYTE* const base = ms->window.base;
    U32 idx = *nextToUpdate3;
    U32 const target = (U32)(ip - base);
    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
    assert(hashLog3 > 0);

    while(idx < target) {
        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
        idx++;
    }

    *nextToUpdate3 = target;
    return hashTable3[hash3];
}


/*-*************************************
*  Binary Tree search
***************************************/
/* ZSTD_insertBt1() : add one or multiple positions to tree.
 * ip : assumed <= iend-8 .
 * @return : nb of positions added */
static U32 ZSTD_insertBt1(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                U32 const mls, const int extDict)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 curr = (U32)(ip-base);
    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;
    U32 matchEndIdx = curr+8+1;
    size_t bestLength = 8;
    U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);

    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = curr;    /* Update Hash Table */

    assert(windowLow > 0);
    for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < curr);

#ifdef ZSTD_C_PREDICT   /* note : can create issues if hashLog is small (<= 11) */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif

        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            bestLength = matchLength;
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
        }

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop, to guarantee consistency ; misses a bit of compression, but other solutions can corrupt the tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
    {   U32 positions = 0;
        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
        assert(matchEndIdx > curr + 8);
        return MAX(positions, matchEndIdx - (curr + 8));
    }
}
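
/* Illustration only : the layout of the sorted binary tree used above, restated
 * with hypothetical helper names (disabled). ms->chainTable is recycled as a
 * "roll buffer" of tree nodes : each position p owns two adjacent slots, holding
 * the roots of the subtrees of positions whose suffixes sort below / above p's. */
#if 0
static U32* ZSTD_btSmallerSlot(U32* bt, U32 btMask, U32 pos) { return bt + 2*(pos & btMask); }
static U32* ZSTD_btLargerSlot (U32* bt, U32 btMask, U32 pos) { return bt + 2*(pos & btMask) + 1; }
/* ZSTD_insertBt1() descends from hashTable[h] : when a candidate suffix sorts
 * below ip's suffix it is attached through the pending "smaller" slot and the
 * walk continues into the candidate's own larger slot, keeping the tree sorted. */
#endif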

FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;
    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
                idx, target, dictMode);

    while(idx < target) {
        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
        assert(idx < (U32)(idx + forward));
        idx += forward;
    }
    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
    ms->nextToUpdate = target;
}

void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}

FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
                    ZSTD_match_t* matches,  /* store result (found matches) in this table (presumed large enough) */
                    ZSTD_matchState_t* ms,
                    U32* nextToUpdate3,
                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
                    const U32 rep[ZSTD_REP_NUM],
                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
                    const U32 lengthToBeat,
                    U32 const mls /* template */)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    const BYTE* const base = ms->window.base;
    U32 const curr = (U32)(ip-base);
    U32 const hashLog = cParams->hashLog;
    U32 const minMatch = (mls==3) ? 3 : 4;
    U32* const hashTable = ms->hashTable;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32 matchIndex  = hashTable[h];
    U32* const bt   = ms->chainTable;
    U32 const btLog = cParams->chainLog - 1;
    U32 const btMask= (1U << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const dictBase = ms->window.dictBase;
    U32 const dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
    U32 const matchLow = windowLow ? windowLow : 1;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
    U32 dummy32;   /* to be nullified at the end */
    U32 mnum = 0;
    U32 nbCompares = 1U << cParams->searchLog;

    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
    const ZSTD_compressionParameters* const dmsCParams =
                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;

    size_t bestLength = lengthToBeat-1;
    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);

    /* check repCode */
    assert(ll0 <= 1);   /* necessarily 1 or 0 */
    {   U32 const lastR = ZSTD_REP_NUM + ll0;
        U32 repCode;
        for (repCode = ll0; repCode < lastR; repCode++) {
            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            U32 const repIndex = curr - repOffset;
            U32 repLen = 0;
            assert(curr >= dictLimit);
            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
                /* We must validate the repcode offset because when we're using a dictionary the
                 * valid offset range shrinks when the dictionary goes out of bounds.
                 */
                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                }
            } else {  /* repIndex < dictLimit || repIndex >= curr */
                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
                                             dmsBase + repIndex - dmsIndexDelta :
                                             dictBase + repIndex;
                assert(curr >= windowLow);
                if ( dictMode == ZSTD_extDict
                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
                }
                if (dictMode == ZSTD_dictMatchState
                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
            }   }
            /* save longer solution */
            if (repLen > bestLength) {
                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
                            repCode, ll0, repOffset, repLen);
                bestLength = repLen;
                matches[mnum].off = repCode - ll0;
                matches[mnum].len = (U32)repLen;
                mnum++;
                if ( (repLen > sufficient_len)
                   | (ip+repLen == iLimit) ) {  /* best possible */
                    return mnum;
    }   }   }   }

    /* HC3 match finder */
    if ((mls == 3) /*static*/ && (bestLength < mls)) {
        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
        if ((matchIndex3 >= matchLow)
          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
            size_t mlen;
            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
                const BYTE* const match = base + matchIndex3;
                mlen = ZSTD_count(ip, match, iLimit);
            } else {
                const BYTE* const match = dictBase + matchIndex3;
                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
            }

            /* save best solution */
            if (mlen >= mls /* == 3 > bestLength */) {
                DEBUGLOG(8, "found small match with hlog3, of length %u",
                            (U32)mlen);
                bestLength = mlen;
                assert(curr > matchIndex3);
                assert(mnum==0);  /* no prior solution */
                matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
                matches[0].len = (U32)mlen;
                mnum = 1;
                if ( (mlen > sufficient_len) |
                     (ip+mlen == iLimit) ) {  /* best possible length */
                    ms->nextToUpdate = curr+1;  /* skip insertion */
                    return 1;
        }   }   }
        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
    }

    hashTable[h] = curr;   /* Update Hash Table */

    for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        const BYTE* match;
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(curr > matchIndex);

        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
            match = base + matchIndex;
            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
        } else {
            match = dictBase + matchIndex;
            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* prepare for match[matchLength] read */
        }

        if (matchLength > bestLength) {
            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
                    (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
            assert(matchEndIdx > matchIndex);
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
            bestLength = matchLength;
            matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
            matches[mnum].len = (U32)matchLength;
            mnum++;
            if ( (matchLength > ZSTD_OPT_NUM)
               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            /* match smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
        } else {
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;

    assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX));   /* Check we haven't underflowed. */
    if (dictMode == ZSTD_dictMatchState && nbCompares) {
        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
        U32 dictMatchIndex = dms->hashTable[dmsH];
        const U32* const dmsBt = dms->chainTable;
        commonLengthSmaller = commonLengthLarger = 0;
        for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match = dmsBase + dictMatchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
            if (dictMatchIndex+matchLength >= dmsHighLimit)
                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */

            if (matchLength > bestLength) {
                matchIndex = dictMatchIndex + dmsIndexDelta;
                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
                        (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                bestLength = matchLength;
                matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
                matches[mnum].len = (U32)matchLength;
                mnum++;
                if ( (matchLength > ZSTD_OPT_NUM)
                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
            if (match[matchLength] < ip[matchLength]) {
                commonLengthSmaller = matchLength;   /* all smaller will now have at least this guaranteed common length */
                dictMatchIndex = nextPtr[1];         /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                commonLengthLarger = matchLength;
                dictMatchIndex = nextPtr[0];
            }
        }
    }

    assert(matchEndIdx > curr+8);
    ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
    return mnum;
}


FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
                        ZSTD_matchState_t* ms,
                        U32* nextToUpdate3,
                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
                        const U32 rep[ZSTD_REP_NUM],
                        U32 const ll0,
                        U32 const lengthToBeat)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32 const matchLengthSearch = cParams->minMatch;
    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
    switch(matchLengthSearch)
    {
    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
    default :
    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
    case 7 :
    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
    }
}
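
/* Illustration only : the `(repOffset-1) < curr - dictLimit` idiom used by the
 * repcode validation above. Hypothetical sketch, disabled.
 * With unsigned arithmetic, `x-1 < bound` rejects x==0 (which wraps to UINT_MAX)
 * in the same comparison that enforces x <= bound, turning the two-sided test
 * `curr > repIndex >= dictLimit` into a single branch. */
#if 0
static int ZSTD_validRepOffset_example(U32 repOffset, U32 curr, U32 dictLimit)
{
    int const oneBranch = (repOffset-1) < (curr - dictLimit);   /* intentional overflow when repOffset==0 */
    U32 const repIndex  = curr - repOffset;
    int const twoBranch = (repOffset > 0) && (repIndex >= dictLimit) && (repIndex < curr);
    assert(curr >= dictLimit);
    assert(oneBranch == twoBranch);
    return oneBranch;
}
#endif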

/* ***********************
*  LDM helper functions  *
*************************/

/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
    rawSeqStore_t seqStore;   /* External match candidates store for this block */
    U32 startPosInBlock;      /* Start position of the current match candidate */
    U32 endPosInBlock;        /* End position of the current match candidate */
    U32 offset;               /* Offset of the match candidate */
} ZSTD_optLdm_t;

/* ZSTD_optLdm_skipRawSeqStoreBytes():
 * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
 */
static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}
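
/* Illustration only : a walk-through of ZSTD_optLdm_skipRawSeqStoreBytes() with
 * hypothetical sequences, disabled ; assumes the rawSeqStore_t field order
 * (seq, pos, posInSequence, size, capacity) and rawSeq field order
 * (offset, litLength, matchLength) from zstd_compress_internal.h.
 * Each rawSeq spans litLength+matchLength bytes : skipping consumes whole
 * sequences while advancing 'pos', then leaves the remainder in 'posInSequence'. */
#if 0
static void ZSTD_skipRawSeqStore_example(void)
{
    rawSeq seqs[2] = { { /*offset*/ 100, /*litLength*/ 5, /*matchLength*/ 10 },
                       { /*offset*/ 200, /*litLength*/ 3, /*matchLength*/  8 } };
    rawSeqStore_t store = { seqs, /*pos*/ 0, /*posInSequence*/ 0, /*size*/ 2, /*capacity*/ 2 };
    ZSTD_optLdm_skipRawSeqStoreBytes(&store, 18);   /* 15 bytes of seq 0 + 3 bytes into seq 1 */
    assert(store.pos == 1);
    assert(store.posInSequence == 3);
}
#endif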

/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
 * Calculates the beginning and end of the next match in the current block.
 * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
 */
static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
                                                   U32 blockBytesRemaining) {
    rawSeq currSeq;
    U32 currBlockEndPos;
    U32 literalsBytesRemaining;
    U32 matchBytesRemaining;

    /* Setting match end position to MAX to ensure we never use an LDM during this block */
    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
        optLdm->startPosInBlock = UINT_MAX;
        optLdm->endPosInBlock = UINT_MAX;
        return;
    }
    /* Calculate appropriate bytes left in matchLength and litLength after adjusting
       based on ldmSeqStore->posInSequence */
    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
    currBlockEndPos = currPosInBlock + blockBytesRemaining;
    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
            0;
    matchBytesRemaining = (literalsBytesRemaining == 0) ?
            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
            currSeq.matchLength;

    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
    if (literalsBytesRemaining >= blockBytesRemaining) {
        optLdm->startPosInBlock = UINT_MAX;
        optLdm->endPosInBlock = UINT_MAX;
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
        return;
    }

    /* Matches may be < MINMATCH by this process. In that case, we will reject them
       when we are deciding whether or not to add the ldm */
    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
    optLdm->offset = currSeq.offset;

    if (optLdm->endPosInBlock > currBlockEndPos) {
        /* Match ends after the block ends, we can't use the whole match */
        optLdm->endPosInBlock = currBlockEndPos;
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
    } else {
        /* Consume nb of bytes equal to size of sequence left */
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
    }
}
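
/* Illustration only : how a long-distance match gets clipped at the block
 * boundary by the function above. Hypothetical numbers, disabled.
 * Assume the store's current candidate has 4 literal bytes then a 100-byte match,
 * the parser sits at block position 10, and only 40 bytes remain in the block :
 * the match is usable from position 14 up to the block end (position 50) only,
 * and the seq store is advanced by the 40 bytes the block can still cover. */
#if 0
static void ZSTD_optLdm_clipping_example(ZSTD_optLdm_t* optLdm)
{
    ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, /*currPosInBlock*/ 10, /*blockBytesRemaining*/ 40);
    assert(optLdm->startPosInBlock == 14);   /* 10 + 4 literals */
    assert(optLdm->endPosInBlock   == 50);   /* clipped to currBlockEndPos == 10 + 40 */
}
#endif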

/* ZSTD_optLdm_maybeAddMatch():
 * Adds a match if it's long enough, based on its 'matchStartPosInBlock'
 * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
 */
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
                                      ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
    U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
    U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
    U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;

    /* Ensure that current block position is not outside of the match */
    if (currPosInBlock < optLdm->startPosInBlock
      || currPosInBlock >= optLdm->endPosInBlock
      || candidateMatchLength < MINMATCH) {
        return;
    }

    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
                 candidateOffCode, candidateMatchLength, currPosInBlock);
        matches[*nbMatches].len = candidateMatchLength;
        matches[*nbMatches].off = candidateOffCode;
        (*nbMatches)++;
    }
}

/* ZSTD_optLdm_processMatchCandidate():
 * Wrapper function to update ldm seq store and call ldm functions as necessary.
 */
static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
                                              U32 currPosInBlock, U32 remainingBytes) {
    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
        return;
    }

    if (currPosInBlock >= optLdm->endPosInBlock) {
        if (currPosInBlock > optLdm->endPosInBlock) {
            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
             * at the end of a match from the ldm seq store, and will often be some bytes
             * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
             */
            U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
        }
        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
    }
    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
}
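
/* Illustration only : how the optimal parser below drives the LDM helpers.
 * A hedged sketch with a hypothetical wrapper name, disabled. At every parsed
 * position the candidate store is re-synchronized, then the current LDM
 * candidate, if it covers this position, is appended behind the matches found
 * by the binary tree (the 'matches' table stays sorted by increasing length). */
#if 0
static void ZSTD_optLdm_usage_example(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
                                      const BYTE* ip, const BYTE* istart, const BYTE* iend)
{
    ZSTD_optLdm_processMatchCandidate(optLdm, matches, nbMatches,
                                      (U32)(ip-istart),   /* currPosInBlock */
                                      (U32)(iend-ip));    /* remainingBytes */
}
#endif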

/*-*******************************
*  Optimal parser
*********************************/


static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
{
    return sol.litlen + sol.mlen;
}

#if 0 /* debug */

static void
listStats(const U32* table, int lastEltID)
{
    int const nbElts = lastEltID + 1;
    int enb;
    for (enb=0; enb < nbElts; enb++) {
        (void)table;
        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
        RAWLOG(2, "%4i,", table[enb]);
    }
    RAWLOG(2, " \n");
}

#endif

FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                               seqStore_t* seqStore,
                               U32 rep[ZSTD_REP_NUM],
                         const void* src, size_t srcSize,
                         const int optLevel,
                         const ZSTD_dictMode_e dictMode)
{
    optState_t* const optStatePtr = &ms->opt;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    const ZSTD_compressionParameters* const cParams = &ms->cParams;

    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
    U32 nextToUpdate3 = ms->nextToUpdate;

    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
    ZSTD_match_t* const matches = optStatePtr->matchTable;
    ZSTD_optimal_t lastSequence;
    ZSTD_optLdm_t optLdm;

    optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
    optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
    ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
    assert(optLevel <= 2);
    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
    ip += (ip==prefixStart);

    /* Match Loop */
    while (ip < ilimit) {
        U32 cur, last_pos = 0;

        /* find first match */
        {   U32 const litlen = (U32)(ip - anchor);
            U32 const ll0 = !litlen;
            U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                              (U32)(ip-istart), (U32)(iend - ip));
            if (!nbMatches) { ip++; continue; }

            /* initialize opt[0] */
            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
            opt[0].mlen = 0;  /* means is_a_literal */
            opt[0].litlen = litlen;
            /* We don't need to include the actual price of the literals because
             * it is static for the duration of the forward pass, and is included
             * in every price. We include the literal length to avoid negative
             * prices when we subtract the previous literal length.
             */
            opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);

            /* large match -> immediate encoding */
            {   U32 const maxML = matches[nbMatches-1].len;
                U32 const maxOffset = matches[nbMatches-1].off;
                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));

                if (maxML > sufficient_len) {
                    lastSequence.litlen = litlen;
                    lastSequence.mlen = maxML;
                    lastSequence.off = maxOffset;
                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
                                maxML, sufficient_len);
                    cur = 0;
                    last_pos = ZSTD_totalLen(lastSequence);
                    goto _shortestPath;
            }   }

            /* set prices for first matches starting position == 0 */
            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 pos;
                U32 matchNb;
                for (pos = 1; pos < minMatch; pos++) {
                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
                }
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const end = matches[matchNb].len;
                    for ( ; pos <= end ; pos++ ) {
                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
                        U32 const sequencePrice = literalsPrice + matchPrice;
                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
                                    pos, ZSTD_fCost(sequencePrice));
                        opt[pos].mlen = pos;
                        opt[pos].off = offset;
                        opt[pos].litlen = litlen;
                        opt[pos].price = sequencePrice;
                }   }
                last_pos = pos-1;
            }
        }

        /* check further positions */
        for (cur = 1; cur <= last_pos; cur++) {
            const BYTE* const inr = ip + cur;
            assert(cur < ZSTD_OPT_NUM);
            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)

            /* Fix current position with one literal if cheaper */
            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
                int const price = opt[cur-1].price
                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                assert(price < 1000000000); /* overflow check */
                if (price <= opt[cur].price) {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
                    opt[cur].mlen = 0;
                    opt[cur].off = 0;
                    opt[cur].litlen = litlen;
                    opt[cur].price = price;
                } else {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
                }
            }

            /* Set the repcodes of the current position. We must do it here
             * because we rely on the repcodes of the 2nd to last sequence being
             * correct to set the next chunk's repcodes during the backward
             * traversal.
             */
            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
            assert(cur >= opt[cur].mlen);
            if (opt[cur].mlen != 0) {
                U32 const prev = cur - opt[cur].mlen;
                repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
            } else {
                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
            }

            /* last match must start at a minimum distance of 8 from oend */
            if (inr > ilimit) continue;

            if (cur == last_pos) break;

            if ( (optLevel==0) /*static_test*/
              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
            }

            {   U32 const ll0 = (opt[cur].mlen != 0);
                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
                U32 const previousPrice = opt[cur].price;
                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
                U32 matchNb;

                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                                  (U32)(inr-istart), (U32)(iend-inr));

                if (!nbMatches) {
                    DEBUGLOG(7, "rPos:%u : no match found", cur);
                    continue;
                }

                {   U32 const maxML = matches[nbMatches-1].len;
                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
                                inr-istart, cur, nbMatches, maxML);

                    if ( (maxML > sufficient_len)
                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
                        lastSequence.mlen = maxML;
                        lastSequence.off = matches[nbMatches-1].off;
                        lastSequence.litlen = litlen;
                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
                        last_pos = cur + ZSTD_totalLen(lastSequence);
                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
                        goto _shortestPath;
                }   }

                /* set prices using matches found at position == cur */
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const lastML = matches[matchNb].len;
                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
                    U32 mlen;

                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
                                matchNb, matches[matchNb].off, lastML, litlen);

                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
                        U32 const pos = cur + mlen;
                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);

                        if ((pos > last_pos) || (price < opt[pos].price)) {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
                            opt[pos].mlen = mlen;
                            opt[pos].off = offset;
                            opt[pos].litlen = litlen;
                            opt[pos].price = price;
                        } else {
                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
                        }
            }   }   }
        }  /* for (cur = 1; cur <= last_pos; cur++) */

        lastSequence = opt[last_pos];
        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
        assert(cur < ZSTD_OPT_NUM);  /* control overflow*/

_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
        assert(opt[0].mlen == 0);

        /* Set the next chunk's repcodes based on the repcodes of the beginning
         * of the last match, and the last sequence. This avoids us having to
         * update them while traversing the sequences.
         */
        if (lastSequence.mlen != 0) {
            repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
            ZSTD_memcpy(rep, &reps, sizeof(reps));
        } else {
            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
        }

        {   U32 const storeEnd = cur + 1;
            U32 storeStart = storeEnd;
            U32 seqPos = cur;

            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
                        last_pos, cur); (void)last_pos;
            assert(storeEnd < ZSTD_OPT_NUM);
            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
            opt[storeEnd] = lastSequence;
            while (seqPos > 0) {
                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
                storeStart--;
                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
                opt[storeStart] = opt[seqPos];
                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
            }

            /* save sequences */
            DEBUGLOG(6, "sending selected sequences into seqStore")
            {   U32 storePos;
                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
                    U32 const llen = opt[storePos].litlen;
                    U32 const mlen = opt[storePos].mlen;
                    U32 const offCode = opt[storePos].off;
                    U32 const advance = llen + mlen;
                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
                                anchor - istart, (unsigned)llen, (unsigned)mlen);

                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
                        assert(storePos == storeEnd);   /* must be last sequence */
                        ip = anchor + llen;   /* last "sequence" is a bunch of literals => don't progress anchor */
                        continue;   /* will finish */
                    }

                    assert(anchor + llen <= iend);
                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
                    anchor += advance;
                    ip = anchor;
            }   }
            ZSTD_setBasePrices(optStatePtr, optLevel);
        }
    }   /* while (ip < ilimit) */

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


size_t ZSTD_compressBlock_btopt(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
}


/* used in 2-pass strategy */
static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
{
    U32 s, sum=0;
    assert(ZSTD_FREQ_DIV+bonus >= 0);
    for (s=0; s<lastEltIndex+1; s++) {
        table[s] <<= ZSTD_FREQ_DIV+bonus;
        table[s]--;
        sum += table[s];
    }
    return sum;
}

/* used in 2-pass strategy */
MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
{
    if (ZSTD_compressedLiterals(optPtr))
        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
}
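
/* Illustration only : ZSTD_upscaleStat() approximately inverts ZSTD_downscaleStat(),
 * so statistics gathered by the first pass dominate the second pass instead of
 * being drowned by fresh counts. Hypothetical values, disabled :
 * `table[s] << 4` then `-1` undoes `1 + (table[s] >> 4)` up to rounding. */
#if 0
static void ZSTD_upscaleStat_example(void)
{
    unsigned freq[2] = { 1, 11 };   /* e.g. as produced by ZSTD_downscaleStat(..., 0) */
    U32 const sum = ZSTD_upscaleStat(freq, 1, 0);
    assert(freq[0] == 15);          /* (1  << 4) - 1 */
    assert(freq[1] == 175);         /* (11 << 4) - 1 */
    assert(sum == 190);
}
#endif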

/* ZSTD_initStats_ultra():
 * make a first compression pass, just to seed stats with more accurate starting values.
 * only works on first block, with no dictionary and no ldm.
 * this function cannot fail, hence its contract must be respected.
 */
static void
ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
                     seqStore_t* seqStore,
                     U32 rep[ZSTD_REP_NUM],
               const void* src, size_t srcSize)
{
    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));

    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
    assert(ms->opt.litLengthSum == 0);    /* first block */
    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as two's complement) */

    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/

    /* invalidate first scan from history */
    ZSTD_resetSeqStore(seqStore);
    ms->window.base -= srcSize;
    ms->window.dictLimit += (U32)srcSize;
    ms->window.lowLimit = ms->window.dictLimit;
    ms->nextToUpdate = ms->window.dictLimit;

    /* reinforce weight of collected statistics */
    ZSTD_upscaleStats(&ms->opt);
}

size_t ZSTD_compressBlock_btultra(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btultra2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);

    /* 2-pass strategy:
     * this strategy makes a first pass over the first block, to collect statistics
     * in order to seed the next round's statistics with them.
     * After the 1st pass, the function forgets everything, and starts a new block.
     * Consequently, this can only work if no data has been previously loaded into the tables,
     * aka, no dictionary, no prefix, no ldm preprocessing.
     * The compression ratio gain is generally small (~0.5% on the first block),
     * the cost is 2x cpu time on the first block. */
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    if ( (ms->opt.litLengthSum==0)   /* first block */
      && (seqStore->sequences == seqStore->sequencesStart)   /* no ldm */
      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
      && (curr == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
      && (srcSize > ZSTD_PREDEF_THRESHOLD)
      ) {
        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
    }

    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btopt_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btultra_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_btopt_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
}

size_t ZSTD_compressBlock_btultra_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
}

/* note : no btultra2 variant for extDict nor dictMatchState,
 * because btultra2 is not meant to work with dictionaries
 * and is only specific for the first block (no prefix) */