Merge pull request #347 from lz4/negativeCLevels

Negative Compression levels
Yann Collet 2017-04-11 14:43:11 -07:00 committed by GitHub
commit 31e9ed612b
5 changed files with 35 additions and 22 deletions

NEWS

@@ -2,6 +2,7 @@ v1.7.6
cli : fix : do not modify /dev/null permissions, reported by @Maokaman1
cli : added GNU separator -- specifying that all following arguments are files
API : added LZ4_compress_HC_destSize(), by Oleg (@remittor)
+ API : lz4frame : negative compression levels trigger fast acceleration
API : fix : expose obsolete decoding functions, reported by Chen Yufei
build : dragonFlyBSD, OpenBSD, NetBSD supported
build : LZ4_MEMORY_USAGE can be modified at compile time, through external define

lz4.h

@@ -256,19 +256,19 @@ LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, in
/*! LZ4_compress_fast_continue() :
* Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve compression ratio.
- * Important : Previous data blocks are assumed to still be present and unmodified !
+ * Important : Previous data blocks are assumed to remain present and unmodified !
* 'dst' buffer must be already allocated.
- * If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
* If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function returns a zero.
* After an error, the stream status is invalid, and it can only be reset or freed.
*/
- LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration);
+ LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
/*! LZ4_saveDict() :
- * If previously compressed data block is not guaranteed to remain available at its memory location,
+ * If previously compressed data block is not guaranteed to remain available at its current memory location,
* save it into a safer place (char* safeBuffer).
- * Note : you don't need to call LZ4_loadDict() afterwards,
- * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
- * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ * Note : it's not necessary to call LZ4_loadDict() after LZ4_saveDict(), dictionary is immediately usable.
+ * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
*/
LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize);

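The reworded comments above describe the streaming contract: previous blocks must stay addressable, or be copied aside with LZ4_saveDict(). As a rough usage sketch (not part of this patch; the helper name, buffer sizes and acceleration value are illustrative), a caller might chain the two calls like this:

#include <string.h>
#include "lz4.h"

/* Sketch : compress two dependent blocks with LZ4_compress_fast_continue(),
 * saving the dictionary into a stable buffer in between, in case the first
 * input buffer gets recycled. Error handling kept minimal on purpose. */
int compress_two_chunks(const char* chunk1, int size1,
                        const char* chunk2, int size2,
                        char* dst, int dstCapacity)
{
    LZ4_stream_t* const stream = LZ4_createStream();
    char safeDict[64 * 1024];   /* holds up to 64 KB of history */
    int written = 0;
    if (stream == NULL) return -1;

    /* first block : acceleration==1 selects default fast mode */
    {   int const c1 = LZ4_compress_fast_continue(stream, chunk1, dst, size1, dstCapacity, 1);
        if (c1 <= 0) { LZ4_freeStream(stream); return -1; }
        written += c1;
    }

    /* chunk1's memory may be reused : keep the dictionary somewhere safe */
    LZ4_saveDict(stream, safeDict, (int)sizeof(safeDict));

    /* second block still benefits from the previous history */
    {   int const c2 = LZ4_compress_fast_continue(stream, chunk2, dst + written, size2, dstCapacity - written, 1);
        if (c2 <= 0) { LZ4_freeStream(stream); return -1; }
        written += c2;
    }

    LZ4_freeStream(stream);
    return written;
}
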
lz4frame.c

@@ -316,11 +316,11 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBu
BYTE* dstPtr = dstStart;
BYTE* const dstEnd = dstStart + dstCapacity;
- memset(&cctxI, 0, sizeof(cctxI)); /* works because no allocation */
+ memset(&cctxI, 0, sizeof(cctxI));
memset(&options, 0, sizeof(options));
cctxI.version = LZ4F_VERSION;
- cctxI.maxBufferSize = 5 MB; /* mess with real buffer size to prevent allocation; works because autoflush==1 & stableSrc==1 */
+ cctxI.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works because autoflush==1 & stableSrc==1 */
if (preferencesPtr!=NULL)
prefs = *preferencesPtr;
@@ -332,7 +332,7 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBu
if (prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
cctxI.lz4CtxPtr = &lz4ctx;
cctxI.lz4CtxLevel = 1;
- }
+ } /* otherwise : will be created within LZ4F_compressBegin */
prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
prefs.autoFlush = 1;
@@ -341,7 +341,7 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBu
options.stableSrc = 1;
- if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs))
+ if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs)) /* condition to guarantee success */
return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
{ size_t const headerSize = LZ4F_compressBegin(&cctxI, dstBuffer, dstCapacity, &prefs); /* write header */
@@ -356,7 +356,7 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity, const void* srcBu
if (LZ4F_isError(tailSize)) return tailSize;
dstPtr += tailSize; }
- if (prefs.compressionLevel >= LZ4HC_CLEVEL_MIN) /* no allocation done with lz4 fast */
+ if (prefs.compressionLevel >= LZ4HC_CLEVEL_MIN) /* Ctx allocation only for lz4hc */
FREEMEM(cctxI.lz4CtxPtr);
return (dstPtr - dstStart);
@@ -423,7 +423,7 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacit
if (preferencesPtr == NULL) preferencesPtr = &prefNull;
cctxPtr->prefs = *preferencesPtr;
- /* ctx Management */
+ /* Ctx Management */
{ U32 const tableID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2; /* 0:nothing ; 1:LZ4 table ; 2:HC tables */
if (cctxPtr->lz4CtxLevel < tableID) {
FREEMEM(cctxPtr->lz4CtxPtr);
@@ -431,6 +431,7 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacit
cctxPtr->lz4CtxPtr = (void*)LZ4_createStream();
else
cctxPtr->lz4CtxPtr = (void*)LZ4_createStreamHC();
+ if (cctxPtr->lz4CtxPtr == NULL) return err0r(LZ4F_ERROR_allocation_failed);
cctxPtr->lz4CtxLevel = tableID;
}
}
@@ -444,10 +445,11 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapacit
requiredBuffSize = (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) * 64 KB; /* just needs dict */
if (cctxPtr->maxBufferSize < requiredBuffSize) {
- cctxPtr->maxBufferSize = requiredBuffSize;
+ cctxPtr->maxBufferSize = 0;
FREEMEM(cctxPtr->tmpBuff);
cctxPtr->tmpBuff = (BYTE*)ALLOCATOR(requiredBuffSize);
if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed);
+ cctxPtr->maxBufferSize = requiredBuffSize;
}
cctxPtr->tmpIn = cctxPtr->tmpBuff;
cctxPtr->tmpInSize = 0;
@@ -515,14 +517,14 @@ static size_t LZ4F_compressBlock(void* dst, const void* src, size_t srcSize, com
static int LZ4F_localLZ4_compress_limitedOutput_withState(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level)
{
- (void) level;
- return LZ4_compress_fast_extState(ctx, src, dst, srcSize, dstCapacity, 1);
+ int const acceleration = (level < -1) ? -level : 1;
+ return LZ4_compress_fast_extState(ctx, src, dst, srcSize, dstCapacity, acceleration);
}
static int LZ4F_localLZ4_compress_limitedOutput_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level)
{
- (void) level;
- return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, 1);
+ int const acceleration = (level < -1) ? -level : 1;
+ return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
}
static int LZ4F_localLZ4_compressHC_limitedOutput_continue(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level)
@@ -617,7 +619,7 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapaci
if (compressOptionsPtr->stableSrc) {
cctxPtr->tmpIn = cctxPtr->tmpBuff;
} else {
- int realDictSize = LZ4F_localSaveDict(cctxPtr);
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC);
cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
}
@@ -627,7 +629,7 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr, void* dstBuffer, size_t dstCapaci
if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */
&& !(cctxPtr->prefs.autoFlush))
{
- int realDictSize = LZ4F_localSaveDict(cctxPtr);
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
}

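Taken together, the lz4frame.c changes route negative compression levels into LZ4's acceleration parameter instead of clamping them to 0. The effective mapping, paraphrased from the hunks above (illustrative helper, not library code):

/* How lz4frame now interprets LZ4F_preferences_t.compressionLevel :
 *   level >= LZ4HC_CLEVEL_MIN          : HC path, level clamped to LZ4HC_CLEVEL_MAX
 *   -1 <= level < LZ4HC_CLEVEL_MIN     : fast path, acceleration 1 (default)
 *   level < -1                         : fast path, acceleration = -level (e.g. -3 => 3)
 */
static int effective_acceleration(int compressionLevel)
{
    return (compressionLevel < -1) ? -compressionLevel : 1;
}
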
lz4frame.h

@@ -167,7 +167,7 @@ typedef struct {
* All reserved fields must be set to zero. */
typedef struct {
LZ4F_frameInfo_t frameInfo;
- int compressionLevel; /* 0 == default (fast mode); values above LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values below 0 count as 0 */
+ int compressionLevel; /* 0 == default (fast mode); values above LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values below 0 trigger "fast acceleration", proportional to value */
unsigned autoFlush; /* 1 == always flush (reduce usage of tmp buffer) */
unsigned reserved[4]; /* must be zero for forward compatibility */
} LZ4F_preferences_t;

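For callers, the updated comment means a frame can be produced in fast-acceleration mode simply by passing a negative compressionLevel; everything else in the preferences can stay zeroed. A minimal sketch (function name, level choice and error handling are illustrative; the new basicTests case below exercises the same path):

#include <stdlib.h>
#include <string.h>
#include "lz4frame.h"

/* Sketch : compress srcSize bytes from src into a malloc'ed LZ4 frame,
 * requesting fast acceleration via a negative compression level. */
static void* compress_frame_fast(const void* src, size_t srcSize, size_t* dstSizePtr)
{
    LZ4F_preferences_t prefs;
    memset(&prefs, 0, sizeof(prefs));   /* reserved fields must be zero */
    prefs.compressionLevel = -3;        /* below 0 : fast acceleration, proportional to value */

    {   size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, &prefs);
        void* const dst = malloc(dstCapacity);
        if (dst == NULL) return NULL;
        {   size_t const cSize = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, &prefs);
            if (LZ4F_isError(cSize)) { free(dst); return NULL; }
            *dstSizePtr = cSize;
            return dst;
    }   }
}
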
frametest.c

@@ -218,6 +218,16 @@ int basicTests(U32 seed, double compressibility)
/* test one-pass frame compression */
testSize = COMPRESSIBLE_NOISE_LENGTH;
DISPLAYLEVEL(3, "LZ4F_compressFrame, using fast level -3 : ");
{ LZ4F_preferences_t fastCompressPrefs;
memset(&fastCompressPrefs, 0, sizeof(fastCompressPrefs));
fastCompressPrefs.compressionLevel = -3;
cSize = LZ4F_compressFrame(compressedBuffer, LZ4F_compressFrameBound(testSize, NULL), CNBuffer, testSize, &fastCompressPrefs);
if (LZ4F_isError(cSize)) goto _output_error;
DISPLAYLEVEL(3, "Compressed %u bytes into a %u bytes frame \n", (U32)testSize, (U32)cSize);
}
DISPLAYLEVEL(3, "LZ4F_compressFrame, using default preferences : ");
cSize = LZ4F_compressFrame(compressedBuffer, LZ4F_compressFrameBound(testSize, NULL), CNBuffer, testSize, NULL);
if (LZ4F_isError(cSize)) goto _output_error;
@@ -618,7 +628,7 @@ int fuzzerTests(U32 seed, unsigned nbTests, unsigned startTest, double compressi
prefs.frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)(FUZ_rand(&randState) & 1);
prefs.frameInfo.contentSize = ((FUZ_rand(&randState) & 0xF) == 1) ? srcSize : 0;
prefs.autoFlush = neverFlush ? 0 : (FUZ_rand(&randState) & 7) == 2;
- prefs.compressionLevel = FUZ_rand(&randState) % 5;
+ prefs.compressionLevel = -5 + (int)(FUZ_rand(&randState) % 11);
if ((FUZ_rand(&randState) & 0xF) == 1) prefsPtr = NULL;
DISPLAYUPDATE(2, "\r%5u ", testNb);