You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

zstd_ldm.c 26KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3
  1. /*
  2. * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. */
  9. #include "zstd_ldm.h"
  10. #include "zstd_fast.h" /* ZSTD_fillHashTable() */
  11. #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
  12. #define LDM_BUCKET_SIZE_LOG 3
  13. #define LDM_MIN_MATCH_LENGTH 64
  14. #define LDM_HASH_LOG 20
  15. #define LDM_HASH_CHAR_OFFSET 10
  16. size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm)
  17. {
  18. ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
  19. params->enableLdm = enableLdm>0;
  20. params->hashLog = LDM_HASH_LOG;
  21. params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
  22. params->minMatchLength = LDM_MIN_MATCH_LENGTH;
  23. params->hashEveryLog = ZSTD_LDM_HASHEVERYLOG_NOTSET;
  24. return 0;
  25. }
  26. void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog)
  27. {
  28. if (params->hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET) {
  29. params->hashEveryLog =
  30. windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
  31. }
  32. params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
  33. }
  34. size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog) {
  35. size_t const ldmHSize = ((size_t)1) << hashLog;
  36. size_t const ldmBucketSizeLog = MIN(bucketSizeLog, hashLog);
  37. size_t const ldmBucketSize =
  38. ((size_t)1) << (hashLog - ldmBucketSizeLog);
  39. return ldmBucketSize + (ldmHSize * (sizeof(ldmEntry_t)));
  40. }
  41. /** ZSTD_ldm_getSmallHash() :
  42. * numBits should be <= 32
  43. * If numBits==0, returns 0.
  44. * @return : the most significant numBits of value. */
  45. static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
  46. {
  47. assert(numBits <= 32);
  48. return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
  49. }
  50. /** ZSTD_ldm_getChecksum() :
  51. * numBitsToDiscard should be <= 32
  52. * @return : the next most significant 32 bits after numBitsToDiscard */
  53. static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
  54. {
  55. assert(numBitsToDiscard <= 32);
  56. return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
  57. }
  58. /** ZSTD_ldm_getTag() ;
  59. * Given the hash, returns the most significant numTagBits bits
  60. * after (32 + hbits) bits.
  61. *
  62. * If there are not enough bits remaining, return the last
  63. * numTagBits bits. */
  64. static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
  65. {
  66. assert(numTagBits < 32 && hbits <= 32);
  67. if (32 - hbits < numTagBits) {
  68. return hash & (((U32)1 << numTagBits) - 1);
  69. } else {
  70. return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
  71. }
  72. }
  73. /** ZSTD_ldm_getBucket() :
  74. * Returns a pointer to the start of the bucket associated with hash. */
  75. static ldmEntry_t* ZSTD_ldm_getBucket(
  76. ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
  77. {
  78. return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
  79. }
  80. /** ZSTD_ldm_insertEntry() :
  81. * Insert the entry with corresponding hash into the hash table */
  82. static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
  83. size_t const hash, const ldmEntry_t entry,
  84. ldmParams_t const ldmParams)
  85. {
  86. BYTE* const bucketOffsets = ldmState->bucketOffsets;
  87. *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
  88. bucketOffsets[hash]++;
  89. bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
  90. }
  91. /** ZSTD_ldm_makeEntryAndInsertByTag() :
  92. *
  93. * Gets the small hash, checksum, and tag from the rollingHash.
  94. *
  95. * If the tag matches (1 << ldmParams.hashEveryLog)-1, then
  96. * creates an ldmEntry from the offset, and inserts it into the hash table.
  97. *
  98. * hBits is the length of the small hash, which is the most significant hBits
  99. * of rollingHash. The checksum is the next 32 most significant bits, followed
  100. * by ldmParams.hashEveryLog bits that make up the tag. */
  101. static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
  102. U64 const rollingHash,
  103. U32 const hBits,
  104. U32 const offset,
  105. ldmParams_t const ldmParams)
  106. {
  107. U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
  108. U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
  109. if (tag == tagMask) {
  110. U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
  111. U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
  112. ldmEntry_t entry;
  113. entry.offset = offset;
  114. entry.checksum = checksum;
  115. ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
  116. }
  117. }
  118. /** ZSTD_ldm_getRollingHash() :
  119. * Get a 64-bit hash using the first len bytes from buf.
  120. *
  121. * Giving bytes s = s_1, s_2, ... s_k, the hash is defined to be
  122. * H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
  123. *
  124. * where the constant a is defined to be prime8bytes.
  125. *
  126. * The implementation adds an offset to each byte, so
  127. * H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
  128. static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
  129. {
  130. U64 ret = 0;
  131. U32 i;
  132. for (i = 0; i < len; i++) {
  133. ret *= prime8bytes;
  134. ret += buf[i] + LDM_HASH_CHAR_OFFSET;
  135. }
  136. return ret;
  137. }
  138. /** ZSTD_ldm_ipow() :
  139. * Return base^exp. */
  140. static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
  141. {
  142. U64 ret = 1;
  143. while (exp) {
  144. if (exp & 1) { ret *= base; }
  145. exp >>= 1;
  146. base *= base;
  147. }
  148. return ret;
  149. }
  150. U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
  151. assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
  152. return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
  153. }
  154. /** ZSTD_ldm_updateHash() :
  155. * Updates hash by removing toRemove and adding toAdd. */
  156. static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
  157. {
  158. hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
  159. hash *= prime8bytes;
  160. hash += toAdd + LDM_HASH_CHAR_OFFSET;
  161. return hash;
  162. }
  163. /** ZSTD_ldm_countBackwardsMatch() :
  164. * Returns the number of bytes that match backwards before pIn and pMatch.
  165. *
  166. * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
  167. static size_t ZSTD_ldm_countBackwardsMatch(
  168. const BYTE* pIn, const BYTE* pAnchor,
  169. const BYTE* pMatch, const BYTE* pBase)
  170. {
  171. size_t matchLength = 0;
  172. while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
  173. pIn--;
  174. pMatch--;
  175. matchLength++;
  176. }
  177. return matchLength;
  178. }
  179. /** ZSTD_ldm_fillFastTables() :
  180. *
  181. * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
  182. * This is similar to ZSTD_loadDictionaryContent.
  183. *
  184. * The tables for the other strategies are filled within their
  185. * block compressors. */
  186. static size_t ZSTD_ldm_fillFastTables(ZSTD_CCtx* zc, const void* end)
  187. {
  188. const BYTE* const iend = (const BYTE*)end;
  189. const U32 mls = zc->appliedParams.cParams.searchLength;
  190. switch(zc->appliedParams.cParams.strategy)
  191. {
  192. case ZSTD_fast:
  193. ZSTD_fillHashTable(zc, iend, mls);
  194. zc->nextToUpdate = (U32)(iend - zc->base);
  195. break;
  196. case ZSTD_dfast:
  197. ZSTD_fillDoubleHashTable(zc, iend, mls);
  198. zc->nextToUpdate = (U32)(iend - zc->base);
  199. break;
  200. case ZSTD_greedy:
  201. case ZSTD_lazy:
  202. case ZSTD_lazy2:
  203. case ZSTD_btlazy2:
  204. case ZSTD_btopt:
  205. case ZSTD_btultra:
  206. break;
  207. default:
  208. assert(0); /* not possible : not a valid strategy id */
  209. }
  210. return 0;
  211. }
  212. /** ZSTD_ldm_fillLdmHashTable() :
  213. *
  214. * Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
  215. * lastHash is the rolling hash that corresponds to lastHashed.
  216. *
  217. * Returns the rolling hash corresponding to position iend-1. */
  218. static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
  219. U64 lastHash, const BYTE* lastHashed,
  220. const BYTE* iend, const BYTE* base,
  221. U32 hBits, ldmParams_t const ldmParams)
  222. {
  223. U64 rollingHash = lastHash;
  224. const BYTE* cur = lastHashed + 1;
  225. while (cur < iend) {
  226. rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
  227. cur[ldmParams.minMatchLength-1],
  228. state->hashPower);
  229. ZSTD_ldm_makeEntryAndInsertByTag(state,
  230. rollingHash, hBits,
  231. (U32)(cur - base), ldmParams);
  232. ++cur;
  233. }
  234. return rollingHash;
  235. }
  236. /** ZSTD_ldm_limitTableUpdate() :
  237. *
  238. * Sets cctx->nextToUpdate to a position corresponding closer to anchor
  239. * if it is far way
  240. * (after a long match, only update tables a limited amount). */
  241. static void ZSTD_ldm_limitTableUpdate(ZSTD_CCtx* cctx, const BYTE* anchor)
  242. {
  243. U32 const current = (U32)(anchor - cctx->base);
  244. if (current > cctx->nextToUpdate + 1024) {
  245. cctx->nextToUpdate =
  246. current - MIN(512, current - cctx->nextToUpdate - 1024);
  247. }
  248. }
/* Function-pointer type for a single-block compressor; the LDM code
 * delegates the stretches between long matches to one of these. */
typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
/* defined in zstd_compress.c */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
/** ZSTD_compressBlock_ldm_generic() :
 *
 *  Long-distance-matching block compressor for the single-segment
 *  (non-extDict) case.  Slides a rolling hash over [src, src+srcSize),
 *  looks up tag-selected positions in the LDM hash table, and when a
 *  long match is found:
 *    - compresses the preceding literals with the configured block
 *      compressor,
 *    - stores the long match as a sequence,
 *    - then checks for immediate repcode matches.
 *  seqStorePtr->rep is saved on entry and restored on exit; the working
 *  rep codes live in repToConfirm.
 * @return : size of the last literals (the tail that the final block
 *           compressor call left unstored). */
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
                                      const void* src, size_t srcSize)
{
    ldmState_t* const ldmState = &(cctx->ldmState);
    const ldmParams_t ldmParams = cctx->appliedParams.ldmParams;
    const U64 hashPower = ldmState->hashPower;
    /* small-hash width: hashLog minus the bits consumed by bucket indexing */
    const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
    const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
    const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
    seqStore_t* const seqStorePtr = &(cctx->seqStore);
    const BYTE* const base = cctx->base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowestIndex = cctx->dictLimit;
    const BYTE* const lowest = base + lowestIndex;
    const BYTE* const iend = istart + srcSize;
    /* stop early enough that a full rolling-hash window always fits */
    const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
    const ZSTD_blockCompressor blockCompressor =
        ZSTD_selectBlockCompressor(cctx->appliedParams.cParams.strategy, 0);
    U32* const repToConfirm = seqStorePtr->repToConfirm;
    U32 savedRep[ZSTD_REP_NUM];
    U64 rollingHash = 0;
    const BYTE* lastHashed = NULL;
    size_t i, lastLiterals;

    /* Save seqStorePtr->rep and copy repToConfirm */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];

    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        U32 const current = (U32)(ip - base);
        size_t forwardMatchLength = 0, backwardMatchLength = 0;
        ldmEntry_t* bestEntry = NULL;
        /* Advance the rolling hash to the current position (or compute it
         * from scratch on the first iteration). */
        if (ip != istart) {
            rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
                                              lastHashed[ldmParams.minMatchLength],
                                              hashPower);
        } else {
            rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
        }
        lastHashed = ip;

        /* Tag subsampling: positions whose tag misses the mask are neither
         * inserted nor searched.  Do not insert and do not look for a match. */
        if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
                ldmTagMask) {
            ip++;
            continue;
        }

        /* Get the best entry and compute the match lengths */
        {
            ldmEntry_t* const bucket =
                ZSTD_ldm_getBucket(ldmState,
                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
                                   ldmParams);
            ldmEntry_t* cur;
            size_t bestMatchLength = 0;
            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
                const BYTE* const pMatch = cur->offset + base;
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                /* reject checksum collisions and out-of-window offsets */
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
                if (curForwardMatchLength < ldmParams.minMatchLength) {
                    continue;
                }
                /* extend the match backwards into the pending literals */
                curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
                                             ip, anchor, pMatch, lowest);
                curTotalMatchLength = curForwardMatchLength +
                                      curBackwardMatchLength;
                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }
        }

        /* No match found -- continue searching */
        if (bestEntry == NULL) {
            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
                                             hBits, current,
                                             ldmParams);
            ip++;
            continue;
        }

        /* Match found */
        mLength = forwardMatchLength + backwardMatchLength;
        ip -= backwardMatchLength;

        /* Call the block compressor on the remaining literals */
        {
            U32 const matchIndex = bestEntry->offset;
            const BYTE* const match = base + matchIndex - backwardMatchLength;
            U32 const offset = (U32)(ip - match);

            /* Overwrite rep codes so the block compressor sees the
             * up-to-date working reps */
            for (i = 0; i < ZSTD_REP_NUM; i++)
                seqStorePtr->rep[i] = repToConfirm[i];

            /* Fill tables for block compressor */
            ZSTD_ldm_limitTableUpdate(cctx, anchor);
            ZSTD_ldm_fillFastTables(cctx, anchor);

            /* Call block compressor and get remaining literals */
            lastLiterals = blockCompressor(cctx, anchor, ip - anchor);
            cctx->nextToUpdate = (U32)(ip - base);

            /* Update repToConfirm with the new offset (standard zstd
             * rep-code rotation) */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                repToConfirm[i] = repToConfirm[i-1];
            repToConfirm[0] = offset;

            /* Store the sequence with the leftover literals */
            ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
                          offset + ZSTD_REP_MOVE, mLength - MINMATCH);
        }

        /* Insert the current entry into the hash table */
        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                         (U32)(lastHashed - base),
                                         ldmParams);

        assert(ip + backwardMatchLength == lastHashed);

        /* Fill the hash table from lastHashed+1 to ip+mLength */
        /* Heuristic: don't need to fill the entire table at end of block */
        if (ip + mLength < ilimit) {
            rollingHash = ZSTD_ldm_fillLdmHashTable(
                              ldmState, rollingHash, lastHashed,
                              ip + mLength, base, hBits, ldmParams);
            lastHashed = ip + mLength - 1;
        }
        ip += mLength;
        anchor = ip;

        /* Check immediate repcode */
        while ( (ip < ilimit)
             && ( (repToConfirm[1] > 0) && (repToConfirm[1] <= (U32)(ip-lowest))
               && (MEM_read32(ip) == MEM_read32(ip - repToConfirm[1])) )) {
            size_t const rLength = ZSTD_count(ip+4, ip+4-repToConfirm[1],
                                              iend) + 4;
            /* Swap repToConfirm[1] <=> repToConfirm[0] */
            {
                U32 const tmpOff = repToConfirm[1];
                repToConfirm[1] = repToConfirm[0];
                repToConfirm[0] = tmpOff;
            }
            /* repcode sequence: 0 literals, offset code 0 */
            ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
            /* Fill the hash table from lastHashed+1 to ip+rLength */
            if (ip + rLength < ilimit) {
                rollingHash = ZSTD_ldm_fillLdmHashTable(
                                  ldmState, rollingHash, lastHashed,
                                  ip + rLength, base, hBits, ldmParams);
                lastHashed = ip + rLength - 1;
            }
            ip += rLength;
            anchor = ip;
        }
    }

    /* Overwrite rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = repToConfirm[i];

    ZSTD_ldm_limitTableUpdate(cctx, anchor);
    ZSTD_ldm_fillFastTables(cctx, anchor);

    /* Compress whatever remains after the last long match */
    lastLiterals = blockCompressor(cctx, anchor, iend - anchor);
    cctx->nextToUpdate = (U32)(iend - base);

    /* Restore seqStorePtr->rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = savedRep[i];

    /* Return the last literals size */
    return lastLiterals;
}
/** ZSTD_compressBlock_ldm() :
 *  Public entry point for LDM compression in the single-segment case;
 *  thin wrapper around ZSTD_compressBlock_ldm_generic().
 * @return : size of the last literals, as returned by the generic impl. */
size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* ctx,
                              const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_ldm_generic(ctx, src, srcSize);
}
/** ZSTD_compressBlock_ldm_extDict_generic() :
 *
 *  Long-distance-matching block compressor for the external-dictionary
 *  case: match candidates may live either in the current prefix (base)
 *  or in the dictionary segment (dictBase), so every candidate selects
 *  its base/end/low-bound pointers by comparing its offset to dictLimit.
 *  Otherwise the structure mirrors ZSTD_compressBlock_ldm_generic():
 *  roll the hash, probe the LDM table at tag-selected positions, flush
 *  pending literals through the block compressor when a long match is
 *  found, then scan for immediate repcodes.
 * @return : size of the last literals (the tail left for the caller). */
static size_t ZSTD_compressBlock_ldm_extDict_generic(
                                 ZSTD_CCtx* ctx,
                                 const void* src, size_t srcSize)
{
    ldmState_t* const ldmState = &(ctx->ldmState);
    const ldmParams_t ldmParams = ctx->appliedParams.ldmParams;
    const U64 hashPower = ldmState->hashPower;
    /* small-hash width: hashLog minus the bits consumed by bucket indexing */
    const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
    const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
    const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
    seqStore_t* const seqStorePtr = &(ctx->seqStore);
    const BYTE* const base = ctx->base;
    const BYTE* const dictBase = ctx->dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 lowestIndex = ctx->lowLimit;
    const BYTE* const dictStart = dictBase + lowestIndex;
    const U32 dictLimit = ctx->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const iend = istart + srcSize;
    /* stop early enough that a full rolling-hash window always fits */
    const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
    const ZSTD_blockCompressor blockCompressor =
        ZSTD_selectBlockCompressor(ctx->appliedParams.cParams.strategy, 1);
    U32* const repToConfirm = seqStorePtr->repToConfirm;
    U32 savedRep[ZSTD_REP_NUM];
    U64 rollingHash = 0;
    const BYTE* lastHashed = NULL;
    size_t i, lastLiterals;

    /* Save seqStorePtr->rep and copy repToConfirm */
    for (i = 0; i < ZSTD_REP_NUM; i++) {
        savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
    }

    /* Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because (ip+1) */
        size_t mLength;
        const U32 current = (U32)(ip-base);
        size_t forwardMatchLength = 0, backwardMatchLength = 0;
        ldmEntry_t* bestEntry = NULL;
        /* Advance the rolling hash to the current position (or compute it
         * from scratch on the first iteration). */
        if (ip != istart) {
            rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
                                              lastHashed[ldmParams.minMatchLength],
                                              hashPower);
        } else {
            rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
        }
        lastHashed = ip;

        if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
                ldmTagMask) {
            /* Don't insert and don't look for a match */
            ip++;
            continue;
        }

        /* Get the best entry and compute the match lengths */
        {
            ldmEntry_t* const bucket =
                ZSTD_ldm_getBucket(ldmState,
                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
                                   ldmParams);
            ldmEntry_t* cur;
            size_t bestMatchLength = 0;
            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
                /* candidate lives in the dict segment iff offset < dictLimit */
                const BYTE* const curMatchBase =
                    cur->offset < dictLimit ? dictBase : base;
                const BYTE* const pMatch = curMatchBase + cur->offset;
                const BYTE* const matchEnd =
                    cur->offset < dictLimit ? dictEnd : iend;
                const BYTE* const lowMatchPtr =
                    cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                /* reject checksum collisions and out-of-window offsets */
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                /* forward count may cross from the dict segment into the prefix */
                curForwardMatchLength = ZSTD_count_2segments(
                                            ip, pMatch, iend,
                                            matchEnd, lowPrefixPtr);
                if (curForwardMatchLength < ldmParams.minMatchLength) {
                    continue;
                }
                /* extend the match backwards into the pending literals */
                curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
                                             ip, anchor, pMatch, lowMatchPtr);
                curTotalMatchLength = curForwardMatchLength +
                                      curBackwardMatchLength;
                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }
        }

        /* No match found -- continue searching */
        if (bestEntry == NULL) {
            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                             (U32)(lastHashed - base),
                                             ldmParams);
            ip++;
            continue;
        }

        /* Match found */
        mLength = forwardMatchLength + backwardMatchLength;
        ip -= backwardMatchLength;

        /* Call the block compressor on the remaining literals */
        {
            /* ip = current - backwardMatchLength
             * The match is at (bestEntry->offset - backwardMatchLength) */
            U32 const matchIndex = bestEntry->offset;
            U32 const offset = current - matchIndex;

            /* Overwrite rep codes so the block compressor sees the
             * up-to-date working reps */
            for (i = 0; i < ZSTD_REP_NUM; i++)
                seqStorePtr->rep[i] = repToConfirm[i];

            /* Fill the hash table for the block compressor */
            ZSTD_ldm_limitTableUpdate(ctx, anchor);
            ZSTD_ldm_fillFastTables(ctx, anchor);

            /* Call block compressor and get remaining literals */
            lastLiterals = blockCompressor(ctx, anchor, ip - anchor);
            ctx->nextToUpdate = (U32)(ip - base);

            /* Update repToConfirm with the new offset (standard zstd
             * rep-code rotation) */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                repToConfirm[i] = repToConfirm[i-1];
            repToConfirm[0] = offset;

            /* Store the sequence with the leftover literals */
            ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
                          offset + ZSTD_REP_MOVE, mLength - MINMATCH);
        }

        /* Insert the current entry into the hash table */
        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
                                         (U32)(lastHashed - base),
                                         ldmParams);

        /* Fill the hash table from lastHashed+1 to ip+mLength */
        assert(ip + backwardMatchLength == lastHashed);
        if (ip + mLength < ilimit) {
            rollingHash = ZSTD_ldm_fillLdmHashTable(
                              ldmState, rollingHash, lastHashed,
                              ip + mLength, base, hBits,
                              ldmParams);
            lastHashed = ip + mLength - 1;
        }
        ip += mLength;
        anchor = ip;

        /* check immediate repcode */
        while (ip < ilimit) {
            U32 const current2 = (U32)(ip-base);
            U32 const repIndex2 = current2 - repToConfirm[1];
            const BYTE* repMatch2 = repIndex2 < dictLimit ?
                                    dictBase + repIndex2 : base + repIndex2;
            if ( (((U32)((dictLimit-1) - repIndex2) >= 3) &
                    (repIndex2 > lowestIndex))  /* intentional overflow */
               && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                const BYTE* const repEnd2 = repIndex2 < dictLimit ?
                                            dictEnd : iend;
                size_t const repLength2 =
                    ZSTD_count_2segments(ip+4, repMatch2+4, iend,
                                         repEnd2, lowPrefixPtr) + 4;
                /* swap repToConfirm[1] <=> repToConfirm[0] */
                U32 tmpOffset = repToConfirm[1];
                repToConfirm[1] = repToConfirm[0];
                repToConfirm[0] = tmpOffset;

                /* repcode sequence: 0 literals, offset code 0 */
                ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);

                /* Fill the hash table from lastHashed+1 to ip+repLength2 */
                if (ip + repLength2 < ilimit) {
                    rollingHash = ZSTD_ldm_fillLdmHashTable(
                                      ldmState, rollingHash, lastHashed,
                                      ip + repLength2, base, hBits,
                                      ldmParams);
                    lastHashed = ip + repLength2 - 1;
                }
                ip += repLength2;
                anchor = ip;
                continue;
            }
            break;
        }
    }

    /* Overwrite rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = repToConfirm[i];

    ZSTD_ldm_limitTableUpdate(ctx, anchor);
    ZSTD_ldm_fillFastTables(ctx, anchor);

    /* Call the block compressor one last time on the last literals */
    lastLiterals = blockCompressor(ctx, anchor, iend - anchor);
    ctx->nextToUpdate = (U32)(iend - base);

    /* Restore seqStorePtr->rep */
    for (i = 0; i < ZSTD_REP_NUM; i++)
        seqStorePtr->rep[i] = savedRep[i];

    /* Return the last literals size */
    return lastLiterals;
}
/** ZSTD_compressBlock_ldm_extDict() :
 *  Public entry point for LDM compression when an external dictionary
 *  segment is in use; thin wrapper around the extDict generic impl.
 * @return : size of the last literals, as returned by the generic impl. */
size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
                                      const void* src, size_t srcSize)
{
    return ZSTD_compressBlock_ldm_extDict_generic(ctx, src, srcSize);
}