mirror of https://github.com/facebook/zstd.git

Commit 45c03c564f (parent 4266c0a2fd)

fixed corruption with inter-blocks repeated offsets
@@ -292,16 +292,17 @@ ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
                                            const void* dict,size_t dictSize,
                                            ZSTD_parameters params);
 
-/*- Advanced Decompression functions -*/
+
+/*--- Advanced Decompression functions ---*/
 
 /*! ZSTD_createDCtx_advanced() :
  * Create a ZSTD decompression context using external alloc and free functions */
 ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
 
 
-/* **************************************
-*  Streaming functions (direct mode)
-****************************************/
+/* ****************************************************************
+*  Streaming functions (direct mode - synchronous and buffer-less)
+******************************************************************/
 ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
 ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
 ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize);
@@ -311,10 +312,8 @@ ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstC
 ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity);
 
 /*
   Streaming compression, synchronous mode (bufferless)
 
   A ZSTD_CCtx object is required to track streaming operations.
-  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage it.
+  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
   ZSTD_CCtx object can be re-used multiple times within successive compression operations.
 
   Start by initializing a context.
@@ -323,12 +322,13 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
   It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
 
   Then, consume your input using ZSTD_compressContinue().
-  The interface is synchronous, so all input will be consumed and produce a compressed output.
+  ZSTD_compressContinue() presumes prior data is still accessible and unmodified (up to maximum distance size, see WindowLog).
+  The interface is synchronous, so input will be entirely consumed and produce associated compressed output.
   You must ensure there is enough space in destination buffer to store compressed data under worst case scenario.
   Worst case evaluation is provided by ZSTD_compressBound().
 
   Finish a frame with ZSTD_compressEnd(), which will write the epilogue.
-  Without the epilogue, frames will be considered incomplete by decoder.
+  Without epilogue, frames will be considered unfinished (broken) by decoders.
 
   You can then reuse ZSTD_CCtx to compress some new frame.
 */
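The comment block above fully describes the buffer-less compression loop: begin a frame, feed chunks with ZSTD_compressContinue(), then write the epilogue with ZSTD_compressEnd(). The standalone sketch below (not part of the patch) shows one plausible way to drive it. It assumes the prototypes shown in these hunks are visible via zstd.h or the static-linking header of this era, that the destination buffer leaves at least ZSTD_compressBound() of room for each chunk, and it keeps the whole input in one contiguous buffer so that already-consumed data stays accessible and unmodified, as required above.

    /* Illustrative sketch only, not zstd's own example code. Error handling is minimal. */
    #include "zstd.h"   /* assumed to expose ZSTD_compressBegin/Continue/End at this point in history */

    /* Compress `src` as a single frame, fed in two chunks.
     * Returns the compressed size, or a ZSTD error code (check with ZSTD_isError()). */
    static size_t compress_in_two_chunks(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize,
                                         int compressionLevel)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t dstPos = 0;
        size_t const half = srcSize / 2;

        if (cctx == NULL) return (size_t)-1;   /* treated as an error value by ZSTD_isError() */

        {   size_t const r = ZSTD_compressBegin(cctx, compressionLevel);
            if (ZSTD_isError(r)) { ZSTD_freeCCtx(cctx); return r; }
        }
        /* first chunk : this input must remain valid and unmodified afterwards */
        {   size_t const r = ZSTD_compressContinue(cctx, (char*)dst + dstPos, dstCapacity - dstPos,
                                                   src, half);
            if (ZSTD_isError(r)) { ZSTD_freeCCtx(cctx); return r; }
            dstPos += r;
        }
        /* second chunk, contiguous with the first */
        {   size_t const r = ZSTD_compressContinue(cctx, (char*)dst + dstPos, dstCapacity - dstPos,
                                                   (const char*)src + half, srcSize - half);
            if (ZSTD_isError(r)) { ZSTD_freeCCtx(cctx); return r; }
            dstPos += r;
        }
        /* write the epilogue; without it the frame is considered unfinished */
        {   size_t const r = ZSTD_compressEnd(cctx, (char*)dst + dstPos, dstCapacity - dstPos);
            if (ZSTD_isError(r)) { ZSTD_freeCCtx(cctx); return r; }
            dstPos += r;
        }
        ZSTD_freeCCtx(cctx);
        return dstPos;
    }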
@@ -933,9 +933,9 @@ _check_compressibility:
     `offsetCode` : distance to match, or 0 == repCode.
     `matchCode` : matchLength - MINMATCH
 */
-MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, size_t offsetCode, size_t matchCode)
+MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, size_t offsetCode, size_t matchCode)
 {
-#if 1 /* for debug */
+#if 0 /* for debug */
     static const BYTE* g_start = NULL;
     const U32 pos = (U32)(literals - g_start);
     if (g_start==NULL) g_start = literals;
@@ -943,7 +943,7 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const B
     printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
            pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
 #endif
-    ZSTD_statsUpdatePrices(&seqStorePtr->stats, litLength, literals, offsetCode, matchCode);
+    ZSTD_statsUpdatePrices(&seqStorePtr->stats, litLength, literals, offsetCode, matchCode); /* debug only */
 
     /* copy Literals */
     ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
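The two documentation lines at the top of this hunk define the sequence convention: `offsetCode` is either a real distance or 0 meaning "repeat code", and `matchCode` is matchLength - MINMATCH. As a reader aid only, here is a tiny self-contained toy of that convention; it is not zstd's internal code and all names in it are made up.

    /* Toy illustration of the (litLength, offsetCode, matchCode) convention documented above. */
    #include <stdio.h>

    #define MINMATCH 3

    typedef struct {
        unsigned litLength;    /* literals emitted before the match */
        unsigned offsetCode;   /* distance to match, or 0 == repCode */
        unsigned matchCode;    /* matchLength - MINMATCH */
    } toy_sequence;

    static toy_sequence toy_storeSeq(unsigned litLength, unsigned offset, unsigned matchLength)
    {
        toy_sequence s;
        s.litLength  = litLength;
        s.offsetCode = offset;                  /* pass 0 to mean "reuse the repeat offset" */
        s.matchCode  = matchLength - MINMATCH;  /* matches shorter than MINMATCH are never stored */
        return s;
    }

    int main(void)
    {
        toy_sequence const s = toy_storeSeq(5, 42, 7);   /* 5 literals, then a 7-byte match at distance 42 */
        printf("litLength=%u offsetCode=%u matchCode=%u (matchLength=%u)\n",
               s.litLength, s.offsetCode, s.matchCode, s.matchCode + MINMATCH);
        return 0;
    }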
@@ -2346,7 +2346,6 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* zc, const void* dict, size_t
 
     { size_t const hufHeaderSize = HUF_readCTable(zc->hufTable, 255, dict, dictSize);
       if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
-      zc->flagStaticTables = 1;
       dict = (const char*)dict + hufHeaderSize;
       dictSize -= hufHeaderSize;
     }
@@ -2380,6 +2379,7 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* zc, const void* dict, size_t
         dictSize -= litlengthHeaderSize;
     }
 
+    zc->flagStaticTables = 1;
     return (dictSizeStart-dictSize);
 }
 
@@ -468,12 +468,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
     ZSTD_resetSeqStore(seqStorePtr);
     ZSTD_rescaleFreqs(seqStorePtr);
     ip += (ip==prefixStart);
-    { U32 i;
-      U32 const maxRep = (ip-prefixStart);
-      for (i=0; i<ZSTD_REP_INIT; i++) {
-          rep[i]=ctx->rep[i];
-          if (rep[i]>maxRep) rep[i]=0;
-    } }
+    { U32 i; for (i=0; i<ZSTD_REP_INIT; i++) rep[i]=ctx->rep[i]; }
 
     ZSTD_LOG_BLOCK("%d: COMPBLOCK_OPT_GENERIC srcSz=%d maxSrch=%d mls=%d sufLen=%d\n", (int)(ip-base), (int)srcSize, maxSearches, mls, sufficient_len);
 
@@ -488,7 +483,8 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
         /* check repCode */
         { U32 i;
           for (i=0; i<ZSTD_REP_NUM; i++) {
-              if ((rep[i]>0) & (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - rep[i], minMatch))) {
+              if ((rep[i]<(U32)(ip-prefixStart))
+                 && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - rep[i], minMatch))) {
                   mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-rep[i], iend) + minMatch;
                   ZSTD_LOG_PARSER("%d: start try REP rep[%d]=%d mlen=%d\n", (int)(ip-base), i, (int)rep[i], (int)mlen);
                   if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
@@ -578,7 +574,8 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
            best_mlen = minMatch;
            { U32 i;
              for (i=0; i<ZSTD_REP_NUM; i++) {
-                 if ((rep[i]>0) & (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - opt[cur].rep[i], minMatch))) { /* check rep */
+                 if ((rep[i]<(inr-prefixStart))
+                    && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - opt[cur].rep[i], minMatch))) { /* check rep */
                      mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - opt[cur].rep[i], iend) + minMatch;
                      ZSTD_LOG_PARSER("%d: Found REP %d/%d mlen=%d off=%d rep=%d opt[%d].off=%d\n", (int)(inr-base), i, ZSTD_REP_NUM, mlen, i, opt[cur].rep[i], cur, opt[cur].off);
 
@@ -718,14 +715,10 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
     } } /* for (cur=0; cur < last_pos; ) */
 
     /* Save reps for next block */
-    { int i;
-      for (i=0; i<ZSTD_REP_NUM; i++) {
-          if (!rep[i]) rep[i] = (U32)(iend-base); /* in case some zero are left */
-          ctx->savedRep[i] = rep[i];
-    } }
+    { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->savedRep[i] = rep[i]; }
 
     /* Last Literals */
-    { size_t lastLLSize = iend - anchor;
+    { size_t const lastLLSize = iend - anchor;
       ZSTD_LOG_ENCODE("%d: lastLLSize literals=%u\n", (int)(ip-base), (U32)lastLLSize);
       memcpy(seqStorePtr->lit, anchor, lastLLSize);
       seqStorePtr->lit += lastLLSize;
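The zstd_opt.h hunks above carry the substance of the commit title. Previously, repeat offsets inherited from the preceding block were zeroed at block start whenever they exceeded the current distance to prefixStart, and any remaining zeroes were patched with an artificial value when saved for the next block. The patch instead copies the inherited offsets unchanged and validates each candidate at its use site with `rep[i] < (ip - prefixStart)` (or `inr - prefixStart` inside the price loop). The standalone sketch below restates that per-use validity rule; it uses made-up names and is an illustration, not zstd's code.

    /* Illustration only: when repeat offsets are carried across blocks, each
     * candidate must be validated where it is used, because an offset that is
     * too large at the first positions of a block can become legal again once
     * enough bytes of the block have been processed. */
    #include <stddef.h>

    /* `distanceToPrefixStart` plays the role of (ip - prefixStart) in the patch. */
    static int rep_offset_usable(size_t repOffset, size_t distanceToPrefixStart)
    {
        return (repOffset > 0) && (repOffset < distanceToPrefixStart);
    }

With the check performed per use, the zero-substitution in the "Save reps for next block" block is no longer needed, which is presumably why that hunk collapses to a plain copy of the current repeat offsets.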