lz4: Revert "lz4: remove unused functions"

- This reverts commit 9f4c0c41c6d2 as EROFS uses LZ4_decompress_safe_partial().

Signed-off-by: Cyber Knight <cyberknight755@gmail.com>
Signed-off-by: azrim <mirzaspc@gmail.com>
This commit is contained in:
John Galt 2022-05-03 16:15:19 -04:00 committed by azrim
parent 90e7e976f1
commit 607eff5412
No known key found for this signature in database
GPG Key ID: 497F8FB059B45D1C
4 changed files with 1141 additions and 6 deletions

View File

@ -133,6 +133,23 @@ typedef union {
LZ4HC_CCtx_internal internal_donotuse;
} LZ4_streamHC_t;
/*
 * LZ4_streamDecode_t - information structure to track an
 * LZ4 stream during decompression.
 *
 * init this structure using LZ4_setStreamDecode (or memset()) before first use
 */
typedef struct {
/* dictionary held outside the decode prefix; NOTE(review): field
 * semantics inferred from names - confirm against lz4_decompress.c users
 */
const uint8_t *externalDict;
size_t extDictSize;
const uint8_t *prefixEnd;
size_t prefixSize;
} LZ4_streamDecode_t_internal;
/*
 * Public decode-stream object: opaque storage sized in 64-bit words,
 * overlaid with the internal tracking structure. Callers must not
 * touch internal_donotuse directly.
 */
typedef union {
unsigned long long table[LZ4_STREAMDECODESIZE_U64];
LZ4_streamDecode_t_internal internal_donotuse;
} LZ4_streamDecode_t;
/*-************************************************************************
* SIZE OF STATE
**************************************************************************/
@ -180,6 +197,54 @@ static inline int LZ4_compressBound(size_t isize)
int LZ4_compress_default(const char *source, char *dest, int inputSize,
int maxOutputSize, void *wrkmem);
/**
* LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param
* @source: source address of the original data
* @dest: output buffer address of the compressed data
* @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
* @maxOutputSize: full or partial size of buffer 'dest'
* which must be already allocated
* @acceleration: acceleration factor
* @wrkmem: address of the working memory.
* This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
*
* Same as LZ4_compress_default(), but allows to select an "acceleration"
* factor. The larger the acceleration value, the faster the algorithm,
* but also the lesser the compression. It's a trade-off. It can be fine tuned,
* with each successive value providing roughly +~3% to speed.
* An acceleration value of "1" is the same as regular LZ4_compress_default()
* Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1.
*
* Return: Number of bytes written into buffer 'dest'
* (necessarily <= maxOutputSize) or 0 if compression fails
*/
static int LZ4_compress_fast(const char *source, char *dest, int inputSize,
int maxOutputSize, int acceleration, void *wrkmem);
/**
* LZ4_compress_destSize() - Compress as much data as possible
* from source to dest
* @source: source address of the original data
* @dest: output buffer address of the compressed data
* @sourceSizePtr: will be modified to indicate how many bytes were read
* from 'source' to fill 'dest'. New value is necessarily <= old value.
* @targetDestSize: Size of buffer 'dest' which must be already allocated
* @wrkmem: address of the working memory.
* This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
*
* Reverse the logic, by compressing as much data as possible
* from 'source' buffer into already allocated buffer 'dest'
* of size 'targetDestSize'.
* This function either compresses the entire 'source' content into 'dest'
* if it's large enough, or fill 'dest' buffer completely with as much data as
* possible from 'source'.
*
* Return: Number of bytes written into 'dest' (necessarily <= targetDestSize)
* or 0 if compression fails
*/
static int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr,
int targetDestSize, void *wrkmem);
/*-************************************************************************
* Decompression Functions
**************************************************************************/
@ -227,6 +292,34 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
int maxDecompressedSize);
/**
* LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize'
* at position 'source' into buffer 'dest'
* @source: source address of the compressed data
* @dest: output buffer address of the decompressed data which must be
* already allocated
* @compressedSize: is the precise full size of the compressed block.
* @targetOutputSize: the decompression operation will try
* to stop as soon as 'targetOutputSize' has been reached
* @maxDecompressedSize: is the size of destination buffer
*
* This function decompresses a compressed block of size 'compressedSize'
* at position 'source' into destination buffer 'dest'
* of size 'maxDecompressedSize'.
* The function tries to stop decompressing operation as soon as
* 'targetOutputSize' has been reached, reducing decompression time.
* This function never writes outside of output buffer,
* and never reads outside of input buffer.
* It is therefore protected against malicious data packets.
*
* Return: the number of bytes decoded in the destination buffer
* (necessarily <= maxDecompressedSize)
* or a negative result in case of error
*
*/
static int LZ4_decompress_safe_partial(const char *source, char *dest,
int compressedSize, int targetOutputSize, int maxDecompressedSize);
/*-************************************************************************
* LZ4 HC Compression
**************************************************************************/
@ -253,4 +346,303 @@ int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity,
int compressionLevel, void *wrkmem);
/**
* LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure
* @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure
* @compressionLevel: Recommended values are between 4 and 9, although any
* value between 1 and LZ4HC_MAX_CLEVEL will work.
* Values >LZ4HC_MAX_CLEVEL behave the same as 16.
*
* An LZ4_streamHC_t structure can be allocated once
* and re-used multiple times.
* Use this function to init an allocated `LZ4_streamHC_t` structure
* and start a new compression.
*/
static void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel);
/**
* LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC
* @streamHCPtr: pointer to the LZ4HC_stream_t
* @dictionary: dictionary to load
* @dictSize: size of dictionary
*
* Use this function to load a static dictionary into LZ4HC_stream.
* Any previous data will be forgotten, only 'dictionary'
* will remain in memory.
* Loading a size of 0 is allowed.
*
* Return : dictionary size, in bytes (necessarily <= 64 KB)
*/
static int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary,
int dictSize);
/**
* LZ4_compress_HC_continue() - Compress 'src' using data from previously
* compressed blocks as a dictionary using the HC algorithm
* @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure
* @src: source address of the original data
* @dst: output buffer address of the compressed data,
* which must be already allocated
* @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
* @maxDstSize: full or partial size of buffer 'dest'
* which must be already allocated
*
* These functions compress data in successive blocks of any size, using
* previous blocks as dictionary. One key assumption is that previous
* blocks (up to 64 KB) remain read-accessible while
* compressing next blocks. There is an exception for ring buffers,
* which can be smaller than 64 KB.
* Ring buffers scenario is automatically detected and handled by
* LZ4_compress_HC_continue().
* Before starting compression, state must be properly initialized,
* using LZ4_resetStreamHC().
* A first "fictional block" can then be designated as
* initial dictionary, using LZ4_loadDictHC() (Optional).
* Then, use LZ4_compress_HC_continue()
* to compress each successive block. Previous memory blocks
* (including initial dictionary when present) must remain accessible
* and unmodified during compression.
* 'dst' buffer should be sized to handle worst case scenarios, using
* LZ4_compressBound(), to ensure operation success.
* If, for any reason, previous data blocks can't be preserved unmodified
* in memory during next compression block,
* you must save it to a safer memory space, using LZ4_saveDictHC().
* Return value of LZ4_saveDictHC() is the size of dictionary
* effectively saved into 'safeBuffer'.
*
* Return: Number of bytes written into buffer 'dst' or 0 if compression fails
*/
static int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src,
char *dst, int srcSize, int maxDstSize);
/**
* LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream
* @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure
* @safeBuffer: buffer to save dictionary to, must be already allocated
* @maxDictSize: size of 'safeBuffer'
*
* If previously compressed data block is not guaranteed
* to remain available at its memory location,
* save it into a safer place (char *safeBuffer).
* Note : you don't need to call LZ4_loadDictHC() afterwards,
* dictionary is immediately usable, you can therefore call
* LZ4_compress_HC_continue().
*
* Return : saved dictionary size in bytes (necessarily <= maxDictSize),
* or 0 if error.
*/
static int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer,
int maxDictSize);
/*-*********************************************
* Streaming Compression Functions
***********************************************/
/**
* LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure
* @LZ4_stream: pointer to the 'LZ4_stream_t' structure
*
* An LZ4_stream_t structure can be allocated once
* and re-used multiple times.
* Use this function to init an allocated `LZ4_stream_t` structure
* and start a new compression.
*/
static __always_inline void LZ4_resetStream(LZ4_stream_t *LZ4_stream);
/**
* LZ4_loadDict() - Load a static dictionary into LZ4_stream
* @streamPtr: pointer to the LZ4_stream_t
* @dictionary: dictionary to load
* @dictSize: size of dictionary
*
* Use this function to load a static dictionary into LZ4_stream.
* Any previous data will be forgotten, only 'dictionary'
* will remain in memory.
* Loading a size of 0 is allowed.
*
* Return : dictionary size, in bytes (necessarily <= 64 KB)
*/
int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary,
int dictSize);
/**
* LZ4_saveDict() - Save static dictionary from LZ4_stream
* @streamPtr: pointer to the 'LZ4_stream_t' structure
* @safeBuffer: buffer to save dictionary to, must be already allocated
* @dictSize: size of 'safeBuffer'
*
* If previously compressed data block is not guaranteed
* to remain available at its memory location,
* save it into a safer place (char *safeBuffer).
* Note : you don't need to call LZ4_loadDict() afterwards,
* dictionary is immediately usable, you can therefore call
* LZ4_compress_fast_continue().
*
* Return : saved dictionary size in bytes (necessarily <= dictSize),
* or 0 if error.
*/
int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize);
/**
* LZ4_compress_fast_continue() - Compress 'src' using data from previously
* compressed blocks as a dictionary
* @streamPtr: Pointer to the previous 'LZ4_stream_t' structure
* @src: source address of the original data
* @dst: output buffer address of the compressed data,
* which must be already allocated
* @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
* @maxDstSize: full or partial size of buffer 'dest'
* which must be already allocated
* @acceleration: acceleration factor
*
* Compress buffer content 'src', using data from previously compressed blocks
* as dictionary to improve compression ratio.
* Important : Previous data blocks are assumed to still
* be present and unmodified !
* If maxDstSize >= LZ4_compressBound(srcSize),
* compression is guaranteed to succeed, and runs faster.
*
* Return: Number of bytes written into buffer 'dst' or 0 if compression fails
*/
static int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src,
char *dst, int srcSize, int maxDstSize, int acceleration);
/**
* LZ4_setStreamDecode() - Instruct where to find dictionary
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @dictionary: dictionary to use
* @dictSize: size of dictionary
*
* Use this function to instruct where to find the dictionary.
* Setting a size of 0 is allowed (same effect as reset).
*
* Return: 1 if OK, 0 if error
*/
static int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize);
/**
* LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
* which must be already allocated
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
*
* This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
* In the case of a ring buffers, decoding buffer must be either :
* - Exactly same size as encoding buffer, with same update rule
* (block boundaries at same positions) In which case,
* the decoding & encoding ring buffer can have any size,
* including very small ones ( < 64 KB).
* - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
* maxBlockSize is implementation dependent.
* It's the maximum size you intend to compress into a single block.
* In which case, encoding and decoding buffers do not need
* to be synchronized, and encoding ring buffer can have any size,
* including small ones ( < 64 KB).
* - _At least_ 64 KB + 8 bytes + maxBlockSize.
* In which case, encoding and decoding buffers do not need to be
* synchronized, and encoding ring buffer can have any size,
* including larger than decoding buffer.
* Whenever these conditions are not possible, save the last 64KB of decoded
* data into a safe buffer, and indicate where it is saved
* using LZ4_setStreamDecode()
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
* or a negative result in case of error
*/
static int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int compressedSize,
int maxDecompressedSize);
/**
* LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
* @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
* which must be already allocated with 'originalSize' bytes
* @originalSize: is the original and therefore uncompressed size
*
* This decoding function allows decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks *must* remain available at the memory position
* where they were decoded (up to 64 KB)
* In the case of a ring buffers, decoding buffer must be either :
* - Exactly same size as encoding buffer, with same update rule
* (block boundaries at same positions) In which case,
* the decoding & encoding ring buffer can have any size,
* including very small ones ( < 64 KB).
* - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
* maxBlockSize is implementation dependent.
* It's the maximum size you intend to compress into a single block.
* In which case, encoding and decoding buffers do not need
* to be synchronized, and encoding ring buffer can have any size,
* including small ones ( < 64 KB).
* - _At least_ 64 KB + 8 bytes + maxBlockSize.
* In which case, encoding and decoding buffers do not need to be
* synchronized, and encoding ring buffer can have any size,
* including larger than decoding buffer.
* Whenever these conditions are not possible, save the last 64KB of decoded
* data into a safe buffer, and indicate where it is saved
* using LZ4_setStreamDecode()
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
* or a negative result in case of error
*/
static int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int originalSize);
/**
* LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode()
* followed by LZ4_decompress_safe_continue()
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
* which must be already allocated
* @compressedSize: is the precise full size of the compressed block
* @maxDecompressedSize: is the size of 'dest' buffer
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
* This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_safe_continue()
* It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
* or a negative result in case of error
*/
static int LZ4_decompress_safe_usingDict(const char *source, char *dest,
int compressedSize, int maxDecompressedSize, const char *dictStart,
int dictSize);
/**
* LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode()
* followed by LZ4_decompress_fast_continue()
* @source: source address of the compressed data
* @dest: output buffer address of the uncompressed data
* which must be already allocated with 'originalSize' bytes
* @originalSize: is the original and therefore uncompressed size
* @dictStart: pointer to the start of the dictionary in memory
* @dictSize: size of dictionary
*
* This decoding function works the same as
* a combination of LZ4_setStreamDecode() followed by
* LZ4_decompress_fast_continue()
* It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
*
* Return: number of bytes decompressed into destination buffer
* (necessarily <= maxDecompressedSize)
* or a negative result in case of error
*/
static int LZ4_decompress_fast_usingDict(const char *source, char *dest,
int originalSize, const char *dictStart, int dictSize);
#endif

View File

@ -189,6 +189,9 @@ static FORCE_INLINE int LZ4_compress_generic(
const BYTE *base;
const BYTE *lowLimit;
const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
const BYTE * const dictionary = dictPtr->dictionary;
const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
const size_t dictDelta = dictEnd - (const BYTE *)source;
const BYTE *anchor = (const BYTE *) source;
const BYTE * const iend = ip + inputSize;
const BYTE * const mflimit = iend - MFLIMIT;
@ -216,6 +219,10 @@ static FORCE_INLINE int LZ4_compress_generic(
base = (const BYTE *)source - dictPtr->currentOffset;
lowLimit = (const BYTE *)source - dictPtr->dictSize;
break;
case usingExtDict:
base = (const BYTE *)source - dictPtr->currentOffset;
lowLimit = (const BYTE *)source;
break;
}
if ((tableType == byU16)
@ -259,6 +266,15 @@ static FORCE_INLINE int LZ4_compress_generic(
dictPtr->hashTable,
tableType, base);
if (dict == usingExtDict) {
if (match < (const BYTE *)source) {
refDelta = dictDelta;
lowLimit = dictionary;
} else {
refDelta = 0;
lowLimit = (const BYTE *)source;
} }
forwardH = LZ4_hashPosition(forwardIp,
tableType);
@ -319,9 +335,34 @@ _next_match:
{
unsigned int matchCode;
if ((dict == usingExtDict)
&& (lowLimit == dictionary)) {
const BYTE *limit;
match += refDelta;
limit = ip + (dictEnd - match);
if (limit > matchlimit)
limit = matchlimit;
matchCode = LZ4_count(ip + MINMATCH,
match + MINMATCH, limit);
ip += MINMATCH + matchCode;
if (ip == limit) {
unsigned const int more = LZ4_count(ip,
(const BYTE *)source,
matchlimit);
matchCode += more;
ip += more;
}
} else {
matchCode = LZ4_count(ip + MINMATCH,
match + MINMATCH, matchlimit);
ip += MINMATCH + matchCode;
}
if (outputLimited &&
/* Check output buffer overflow */
@ -360,6 +401,16 @@ _next_match:
match = LZ4_getPosition(ip, dictPtr->hashTable,
tableType, base);
if (dict == usingExtDict) {
if (match < (const BYTE *)source) {
refDelta = dictDelta;
lowLimit = dictionary;
} else {
refDelta = 0;
lowLimit = (const BYTE *)source;
}
}
LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
@ -419,7 +470,7 @@ static int LZ4_compress_fast_extState(
const tableType_t tableType = byPtr;
#endif
memset(state, 0, sizeof(LZ4_stream_t));
LZ4_resetStream((LZ4_stream_t *)state);
if (acceleration < 1)
acceleration = LZ4_ACCELERATION_DEFAULT;
@ -464,5 +515,423 @@ int LZ4_compress_default(const char *source, char *dest, int inputSize,
}
EXPORT_SYMBOL(LZ4_compress_default);
/*-******************************
* *_destSize() variant
********************************/
/*
 * LZ4_compress_destSize_generic() - compress as much of 'src' as fits
 * into a 'dst' buffer of fixed size 'targetDstSize'
 * @ctx: initialized internal stream state (hash table is used/updated)
 * @src: source buffer
 * @dst: destination buffer, already allocated
 * @srcSizePtr: in: available input bytes; out: bytes actually consumed
 * @targetDstSize: exact capacity of @dst
 * @tableType: hash-table addressing mode (byU16 for <64K inputs)
 *
 * Unlike the regular compressors, the output size is the hard limit and
 * the input is truncated to fit. The oMaxLit/oMaxMatch/oMaxSeq bounds
 * below reserve room so a sequence already started can always be
 * finished within @dst.
 *
 * Return: number of bytes written to @dst (<= targetDstSize),
 * or 0 on unsupported input.
 */
static int LZ4_compress_destSize_generic(
LZ4_stream_t_internal * const ctx,
const char * const src,
char * const dst,
int * const srcSizePtr,
const int targetDstSize,
const tableType_t tableType)
{
const BYTE *ip = (const BYTE *) src;
const BYTE *base = (const BYTE *) src;
const BYTE *lowLimit = (const BYTE *) src;
const BYTE *anchor = ip;
const BYTE * const iend = ip + *srcSizePtr;
const BYTE * const mflimit = iend - MFLIMIT;
const BYTE * const matchlimit = iend - LASTLITERALS;
BYTE *op = (BYTE *) dst;
BYTE * const oend = op + targetDstSize;
/* last output position where a literal run may still start */
BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
- 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
/* last output position where a match length may still be emitted */
BYTE * const oMaxMatch = op + targetDstSize
- (LASTLITERALS + 1 /* token */);
BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
U32 forwardH;
/* Init conditions */
/* Impossible to store anything */
if (targetDstSize < 1)
return 0;
/* Unsupported input size, too large (or negative) */
if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
return 0;
/* Size too large (not within 64K limit) */
if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
return 0;
/* Input too small, no compression (all literals) */
if (*srcSizePtr < LZ4_minLength)
goto _last_literals;
/* First Byte */
*srcSizePtr = 0;
LZ4_putPosition(ip, ctx->hashTable, tableType, base);
ip++; forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
for ( ; ; ) {
const BYTE *match;
BYTE *token;
/* Find a match */
{
const BYTE *forwardIp = ip;
unsigned int step = 1;
/* step grows as misses accumulate, skipping
 * incompressible regions faster
 */
unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
do {
U32 h = forwardH;
ip = forwardIp;
forwardIp += step;
step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
if (unlikely(forwardIp > mflimit))
goto _last_literals;
match = LZ4_getPositionOnHash(h, ctx->hashTable,
tableType, base);
forwardH = LZ4_hashPosition(forwardIp,
tableType);
LZ4_putPositionOnHash(ip, h,
ctx->hashTable, tableType,
base);
} while (((tableType == byU16)
? 0
: (match + MAX_DISTANCE < ip))
|| (LZ4_read32(match) != LZ4_read32(ip)));
}
/* Catch up: extend the match backwards over equal bytes */
while ((ip > anchor)
&& (match > lowLimit)
&& (unlikely(ip[-1] == match[-1]))) {
ip--;
match--;
}
/* Encode Literal length */
{
unsigned int litLength = (unsigned int)(ip - anchor);
token = op++;
if (op + ((litLength + 240) / 255)
+ litLength > oMaxLit) {
/* Not enough space for a last match */
op--;
goto _last_literals;
}
if (litLength >= RUN_MASK) {
unsigned int len = litLength - RUN_MASK;
*token = (RUN_MASK<<ML_BITS);
for (; len >= 255; len -= 255)
*op++ = 255;
*op++ = (BYTE)len;
} else
*token = (BYTE)(litLength << ML_BITS);
/* Copy Literals */
LZ4_wildCopy(op, anchor, op + litLength);
op += litLength;
}
_next_match:
/* Encode Offset */
LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
/* Encode MatchLength */
{
size_t matchLength = LZ4_count(ip + MINMATCH,
match + MINMATCH, matchlimit);
if (op + ((matchLength + 240)/255) > oMaxMatch) {
/* Match description too long : reduce it */
matchLength = (15 - 1) + (oMaxMatch - op) * 255;
}
ip += MINMATCH + matchLength;
if (matchLength >= ML_MASK) {
*token += ML_MASK;
matchLength -= ML_MASK;
while (matchLength >= 255) {
matchLength -= 255;
*op++ = 255;
}
*op++ = (BYTE)matchLength;
} else
*token += (BYTE)(matchLength);
}
anchor = ip;
/* Test end of block */
if (ip > mflimit)
break;
if (op > oMaxSeq)
break;
/* Fill table */
LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
/* Test next position: immediate match lets us skip the
 * literal-run machinery entirely (token = 0 literals)
 */
match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
LZ4_putPosition(ip, ctx->hashTable, tableType, base);
if ((match + MAX_DISTANCE >= ip)
&& (LZ4_read32(match) == LZ4_read32(ip))) {
token = op++; *token = 0;
goto _next_match;
}
/* Prepare next loop */
forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
{
size_t lastRunSize = (size_t)(iend - anchor);
if (op + 1 /* token */
+ ((lastRunSize + 240) / 255) /* litLength */
+ lastRunSize /* literals */ > oend) {
/* adapt lastRunSize to fill 'dst' */
lastRunSize = (oend - op) - 1;
lastRunSize -= (lastRunSize + 240) / 255;
}
ip = anchor + lastRunSize;
if (lastRunSize >= RUN_MASK) {
size_t accumulator = lastRunSize - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
for (; accumulator >= 255; accumulator -= 255)
*op++ = 255;
*op++ = (BYTE) accumulator;
} else {
*op++ = (BYTE)(lastRunSize<<ML_BITS);
}
memcpy(op, anchor, lastRunSize);
op += lastRunSize;
}
/* End: report both consumed input and produced output */
*srcSizePtr = (int) (((const char *)ip) - src);
return (int) (((char *)op) - dst);
}
/*
 * LZ4_compress_destSize_extState() - destSize compression with
 * caller-supplied state
 * @state: working state, reset here before use
 * @src: source buffer
 * @dst: destination buffer, already allocated
 * @srcSizePtr: in: available input; out: input actually consumed
 * @targetDstSize: exact capacity of @dst
 *
 * Return: bytes written to @dst, or 0 on failure.
 */
static int LZ4_compress_destSize_extState(
LZ4_stream_t *state,
const char *src,
char *dst,
int *srcSizePtr,
int targetDstSize)
{
#if LZ4_ARCH64
const tableType_t tableType = byU32;
#else
const tableType_t tableType = byPtr;
#endif
LZ4_resetStream(state);
if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
/*
 * Destination can hold the worst case: regular fast
 * compression is guaranteed to succeed and is faster.
 */
return LZ4_compress_fast_extState(
state, src, dst, *srcSizePtr,
targetDstSize, 1);
}
/* small inputs fit the 16-bit hash table variant */
if (*srcSizePtr < LZ4_64Klimit)
return LZ4_compress_destSize_generic(
&state->internal_donotuse,
src, dst, srcSizePtr,
targetDstSize, byU16);
return LZ4_compress_destSize_generic(
&state->internal_donotuse,
src, dst, srcSizePtr,
targetDstSize, tableType);
}
static int LZ4_compress_destSize(
const char *src,
char *dst,
int *srcSizePtr,
int targetDstSize,
void *wrkmem)
{
return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
targetDstSize);
}
/*-******************************
* Streaming functions
********************************/
/*
 * LZ4_resetStream() - (re)initialize a compression stream state
 * @LZ4_stream: state to clear; zeroing resets hash table and flags
 */
static FORCE_INLINE void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
{
memset(LZ4_stream, 0, sizeof(*LZ4_stream));
}
/*
 * LZ4_loadDict() - preload dictionary data into a compression stream
 * @LZ4_dict: stream state to prime (reset first if uninitialized)
 * @dictionary: bytes to use as initial history
 * @dictSize: size of @dictionary in bytes
 *
 * Only the last 64 KB of @dictionary can ever be referenced by matches,
 * so anything older is skipped. Return: number of dictionary bytes
 * retained (<= 64 KB), or 0 when the dictionary is too small to index.
 */
int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
const char *dictionary, int dictSize)
{
LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
const BYTE *p = (const BYTE *)dictionary;
const BYTE * const dictEnd = p + dictSize;
const BYTE *base;
if ((dict->initCheck)
|| (dict->currentOffset > 1 * GB)) {
/* Uninitialized structure, or reuse overflow */
LZ4_resetStream(LZ4_dict);
}
if (dictSize < (int)HASH_UNIT) {
/* too short to produce even one hash entry */
dict->dictionary = NULL;
dict->dictSize = 0;
return 0;
}
/* matches can only reach back 64 KB; drop older bytes */
if ((dictEnd - p) > 64 * KB)
p = dictEnd - 64 * KB;
/* NOTE(review): the extra 64 KB added to currentOffset mirrors the
 * offset bookkeeping used by LZ4_renormDictT() - confirm before
 * changing either side.
 */
dict->currentOffset += 64 * KB;
base = p - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
dict->currentOffset += dict->dictSize;
/* index dictionary positions, stepping by 3 bytes */
while (p <= dictEnd - HASH_UNIT) {
LZ4_putPosition(p, dict->hashTable, byU32, base);
p += 3;
}
return dict->dictSize;
}
EXPORT_SYMBOL(LZ4_loadDict);
/*
 * LZ4_renormDictT() - rescale hash-table offsets to prevent overflow
 * @LZ4_dict: internal stream state whose table may need renormalizing
 * @src: lowest source address about to be compressed
 *
 * When currentOffset nears wrap-around, or exceeds the numeric address
 * of @src, every stored entry is shifted down by a common delta so
 * later offset arithmetic stays in range; entries older than the delta
 * are invalidated (set to 0). The tracked dictionary window is clamped
 * to 64 KB accordingly.
 */
static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
const BYTE *src)
{
if ((LZ4_dict->currentOffset > 0x80000000) ||
((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
/* address space overflow */
/* rescale hash table */
U32 const delta = LZ4_dict->currentOffset - 64 * KB;
const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
int i;
for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
/* entries below delta point at dropped history */
if (LZ4_dict->hashTable[i] < delta)
LZ4_dict->hashTable[i] = 0;
else
LZ4_dict->hashTable[i] -= delta;
}
LZ4_dict->currentOffset = 64 * KB;
if (LZ4_dict->dictSize > 64 * KB)
LZ4_dict->dictSize = 64 * KB;
LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
}
}
/*
 * LZ4_saveDict() - copy the live dictionary into a safe buffer
 * @LZ4_dict: stream whose recent history should be preserved
 * @safeBuffer: destination buffer, already allocated
 * @dictSize: capacity of @safeBuffer in bytes
 *
 * After this call the stream references @safeBuffer, so the previously
 * compressed data no longer needs to stay resident.
 *
 * Return: number of dictionary bytes saved (<= dictSize).
 */
int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
{
LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
/* useless to define a dictionary > 64 * KB */
if ((U32)dictSize > 64 * KB)
dictSize = 64 * KB;
/* cannot save more history than is currently tracked */
if ((U32)dictSize > dict->dictSize)
dictSize = dict->dictSize;
/* memmove: safeBuffer may overlap the live dictionary window */
memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
dict->dictionary = (const BYTE *)safeBuffer;
dict->dictSize = (U32)dictSize;
return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDict);
/*
 * LZ4_compress_fast_continue() - compress a block using prior blocks
 * as dictionary
 * @LZ4_stream: stream state carrying the history of previous blocks
 * @source: source buffer of this block
 * @dest: destination buffer, already allocated
 * @inputSize: size of this block in bytes
 * @maxOutputSize: capacity of @dest
 * @acceleration: speed/ratio trade-off; values < 1 are replaced by
 * LZ4_ACCELERATION_DEFAULT
 *
 * Dispatches to one of two compression modes: prefix mode when the new
 * data directly follows the dictionary in memory, external-dictionary
 * mode otherwise. Return: bytes written to @dest, or 0 on failure
 * (including an uninitialized stream).
 */
static int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
char *dest, int inputSize, int maxOutputSize, int acceleration)
{
LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
const BYTE * const dictEnd = streamPtr->dictionary
+ streamPtr->dictSize;
const BYTE *smallest = (const BYTE *) source;
if (streamPtr->initCheck) {
/* Uninitialized structure detected */
return 0;
}
/* renormalize against the lowest address we will touch */
if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
smallest = dictEnd;
LZ4_renormDictT(streamPtr, smallest);
if (acceleration < 1)
acceleration = LZ4_ACCELERATION_DEFAULT;
/* Check overlapping input/dictionary space */
{
const BYTE *sourceEnd = (const BYTE *) source + inputSize;
if ((sourceEnd > streamPtr->dictionary)
&& (sourceEnd < dictEnd)) {
/* shrink the dictionary window so it no longer
 * overlaps the input about to be overwritten
 */
streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
if (streamPtr->dictSize > 64 * KB)
streamPtr->dictSize = 64 * KB;
if (streamPtr->dictSize < 4)
streamPtr->dictSize = 0;
streamPtr->dictionary = dictEnd - streamPtr->dictSize;
}
}
/* prefix mode : source data follows dictionary */
if (dictEnd == (const BYTE *)source) {
int result;
/* dictSmall guards offset underflow on small windows */
if ((streamPtr->dictSize < 64 * KB) &&
(streamPtr->dictSize < streamPtr->currentOffset)) {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
withPrefix64k, dictSmall, acceleration);
} else {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
withPrefix64k, noDictIssue, acceleration);
}
/* the new block extends the prefix history */
streamPtr->dictSize += (U32)inputSize;
streamPtr->currentOffset += (U32)inputSize;
return result;
}
/* external dictionary mode */
{
int result;
if ((streamPtr->dictSize < 64 * KB) &&
(streamPtr->dictSize < streamPtr->currentOffset)) {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
usingExtDict, dictSmall, acceleration);
} else {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
usingExtDict, noDictIssue, acceleration);
}
/* this block becomes the dictionary for the next call */
streamPtr->dictionary = (const BYTE *)source;
streamPtr->dictSize = (U32)inputSize;
streamPtr->currentOffset += (U32)inputSize;
return result;
}
}
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");

View File

@ -70,7 +70,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
endCondition_directive endOnInput,
/* full, partial */
earlyEnd_directive partialDecoding,
/* noDict, withPrefix64k */
/* noDict, withPrefix64k, usingExtDict */
dict_directive dict,
/* always <= dst, == dst when no prefix */
const BYTE * const lowPrefix,
@ -87,6 +87,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
BYTE * const oend = op + outputSize;
BYTE *cpy;
const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
@ -328,6 +329,48 @@ _copy_match:
length += MINMATCH;
/* match starting within external dictionary */
if ((dict == usingExtDict) && (match < lowPrefix)) {
if (unlikely(op + length > oend - LASTLITERALS)) {
/* doesn't respect parsing restriction */
if (!partialDecoding)
goto _output_error;
length = min(length, (size_t)(oend - op));
}
if (length <= (size_t)(lowPrefix - match)) {
/*
* match fits entirely within external
* dictionary : just copy
*/
memmove(op, dictEnd - (lowPrefix - match),
length);
op += length;
} else {
/*
* match stretches into both external
* dictionary and current block
*/
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
BYTE * const endOfMatch = op + restSize;
const BYTE *copyFrom = lowPrefix;
while (op < endOfMatch)
*op++ = *copyFrom++;
} else {
memcpy(op, lowPrefix, restSize);
op += restSize;
}
}
continue;
}
/* copy match within block */
cpy = op + length;
@ -419,6 +462,15 @@ int LZ4_decompress_safe(const char *source, char *dest,
noDict, (BYTE *)dest, NULL, 0);
}
/*
 * LZ4_decompress_safe_partial() - decode only the beginning of a block.
 * @src: compressed input
 * @dst: output buffer
 * @compressedSize: exact size of the compressed block
 * @targetOutputSize: number of decoded bytes the caller wants
 * @dstCapacity: full size of @dst
 *
 * Stops once the tighter of @targetOutputSize / @dstCapacity bytes have
 * been produced. Returns the number of bytes decoded, or a negative
 * value on malformed input.
 */
static int LZ4_decompress_safe_partial(const char *src, char *dst,
	int compressedSize, int targetOutputSize, int dstCapacity)
{
	/* Never decode past the caller's buffer, whichever bound is tighter. */
	int const outLimit = min(targetOutputSize, dstCapacity);

	return LZ4_decompress_generic(src, dst, compressedSize, outLimit,
				      endOnInputSize, partial_decode,
				      noDict, (BYTE *)dst, NULL, 0);
}
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
return LZ4_decompress_generic(source, dest, 0, originalSize,
@ -427,6 +479,228 @@ int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
(BYTE *)dest - 64 * KB, NULL, 0);
}
/* ===== Instantiate a few more decoding cases, used more than once. ===== */
static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxOutputSize,
endOnInputSize, decode_full_block,
withPrefix64k,
(BYTE *)dest - 64 * KB, NULL, 0);
}
/*
 * Safe decode with a prefix dictionary smaller than 64 KB located
 * immediately before 'dest' (prefixSize bytes of history).
 */
static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
	int compressedSize, int maxOutputSize, size_t prefixSize)
{
	BYTE * const prefixStart = (BYTE *)dest - prefixSize;

	return LZ4_decompress_generic(source, dest, compressedSize,
				      maxOutputSize, endOnInputSize,
				      decode_full_block, noDict,
				      prefixStart, NULL, 0);
}
/*
 * Safe decode where the whole dictionary lives in a separate buffer
 * (dictStart/dictSize) rather than contiguously before 'dest'.
 */
static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
	int compressedSize, int maxOutputSize,
	const void *dictStart, size_t dictSize)
{
	const BYTE * const dict = (const BYTE *)dictStart;

	return LZ4_decompress_generic(source, dest, compressedSize,
				      maxOutputSize, endOnInputSize,
				      decode_full_block, usingExtDict,
				      (BYTE *)dest, dict, dictSize);
}
/*
 * Fast (trusted-input) decode with an external dictionary buffer.
 * Bounds on output size only; input is not validated.
 */
static int LZ4_decompress_fast_extDict(const char *source, char *dest,
	int originalSize, const void *dictStart, size_t dictSize)
{
	const BYTE * const dict = (const BYTE *)dictStart;

	return LZ4_decompress_generic(source, dest, 0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, (BYTE *)dest,
				      dict, dictSize);
}
/*
* The "double dictionary" mode, for use with e.g. ring buffers: the first part
* of the dictionary is passed as prefix, and the second via dictStart + dictSize.
* These routines are used only once, in LZ4_decompress_*_continue().
*/
/*
 * Safe decode in "double dictionary" mode: the first part of the history
 * is the prefix just before 'dest', the remainder is an external buffer.
 */
static FORCE_INLINE
int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
				   int compressedSize, int maxOutputSize,
				   size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	BYTE * const prefixStart = (BYTE *)dest - prefixSize;

	return LZ4_decompress_generic(source, dest, compressedSize,
				      maxOutputSize, endOnInputSize,
				      decode_full_block, usingExtDict,
				      prefixStart, (const BYTE *)dictStart,
				      dictSize);
}
/*
 * Fast (trusted-input) decode in "double dictionary" mode: prefix before
 * 'dest' plus an external buffer for the older history.
 */
static FORCE_INLINE
int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
				   int originalSize, size_t prefixSize,
				   const void *dictStart, size_t dictSize)
{
	BYTE * const prefixStart = (BYTE *)dest - prefixSize;

	return LZ4_decompress_generic(source, dest, 0, originalSize,
				      endOnOutputSize, decode_full_block,
				      usingExtDict, prefixStart,
				      (const BYTE *)dictStart, dictSize);
}
/* ===== streaming decompression functions ===== */
/*
 * LZ4_setStreamDecode() - (re)initialize a streaming decode context.
 * @LZ4_streamDecode: context to initialize
 * @dictionary: optional dictionary ending just before the first block
 * @dictSize: size of @dictionary in bytes
 *
 * Records the dictionary as the initial prefix and clears any external
 * dictionary state. Always returns 1.
 */
static int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *dictionary, int dictSize)
{
	LZ4_streamDecode_t_internal * const sd =
		&LZ4_streamDecode->internal_donotuse;

	sd->prefixSize = (size_t)dictSize;
	sd->prefixEnd = (const BYTE *)dictionary + dictSize;
	sd->externalDict = NULL;
	sd->extDictSize = 0;
	return 1;
}
/*
 * *_continue():
 * These decoding functions allow decompression of multiple blocks
 * in "streaming" mode.
 * Previously decoded blocks must remain available at the memory position
 * where they were decoded.
 * If that is not possible, save the relevant part of the decoded data
 * into a safe buffer and register its location with LZ4_setStreamDecode().
 */
/*
 * LZ4_decompress_safe_continue() - decompress one block of a stream.
 * @LZ4_streamDecode: streaming context tracking previously decoded data
 * @source: compressed input for this block
 * @dest: output position for this block
 * @compressedSize: exact size of the compressed block
 * @maxOutputSize: capacity available at @dest
 *
 * Dispatches to the cheapest decoder variant based on where the already
 * decoded history (prefix and/or external dictionary) sits relative to
 * @dest, then updates the context so the next block can use this block's
 * output as history. Returns the decoded size, or <= 0 on error (the
 * context is left unchanged on error because it is only updated after a
 * successful decode).
 */
static int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *source, char *dest, int compressedSize, int maxOutputSize)
{
	LZ4_streamDecode_t_internal *lz4sd =
		&LZ4_streamDecode->internal_donotuse;
	int result;

	if (lz4sd->prefixSize == 0) {
		/* The first call, no dictionary yet. */
		assert(lz4sd->extDictSize == 0);
		result = LZ4_decompress_safe(source, dest,
			compressedSize, maxOutputSize);
		if (result <= 0)
			return result;
		/* This block becomes the prefix for the next one. */
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd = (BYTE *)dest + result;
	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
		/* They're rolling the current segment. */
		if (lz4sd->prefixSize >= 64 * KB - 1)
			/* Full-size history: the 64K-prefix fast path. */
			result = LZ4_decompress_safe_withPrefix64k(source, dest,
				compressedSize, maxOutputSize);
		else if (lz4sd->extDictSize == 0)
			/* Short contiguous history only. */
			result = LZ4_decompress_safe_withSmallPrefix(source,
				dest, compressedSize, maxOutputSize,
				lz4sd->prefixSize);
		else
			/* Short prefix plus older external history. */
			result = LZ4_decompress_safe_doubleDict(source, dest,
				compressedSize, maxOutputSize,
				lz4sd->prefixSize,
				lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		/* Extend the contiguous prefix over this block. */
		lz4sd->prefixSize += result;
		lz4sd->prefixEnd += result;
	} else {
		/*
		 * The buffer wraps around, or they're
		 * switching to another buffer.
		 */
		/* The old prefix becomes the external dictionary. */
		lz4sd->extDictSize = lz4sd->prefixSize;
		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
		result = LZ4_decompress_safe_forceExtDict(source, dest,
			compressedSize, maxOutputSize,
			lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		/* Start a fresh prefix at the new output location. */
		lz4sd->prefixSize = result;
		lz4sd->prefixEnd = (BYTE *)dest + result;
	}

	return result;
}
/*
 * LZ4_decompress_fast_continue() - fast-path streaming decode of one block.
 * @LZ4_streamDecode: streaming context tracking previously decoded data
 * @source: compressed input (trusted — size is not validated)
 * @dest: output position for this block
 * @originalSize: known decoded size of this block
 *
 * Same dispatch structure as LZ4_decompress_safe_continue(), but for the
 * "fast" decoders that bound on output size only. Note the context is
 * advanced by @originalSize (the known decoded size), not by the decoder's
 * return value, which for the fast variants is the input bytes consumed.
 * Returns the decoder result; <= 0 on error, in which case the context is
 * not updated.
 */
static int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
	const char *source, char *dest, int originalSize)
{
	LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
	int result;

	if (lz4sd->prefixSize == 0) {
		/* First call: no history yet. */
		assert(lz4sd->extDictSize == 0);
		result = LZ4_decompress_fast(source, dest, originalSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = originalSize;
		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
	} else if (lz4sd->prefixEnd == (BYTE *)dest) {
		/* Continuing contiguously after the previous block. */
		if (lz4sd->prefixSize >= 64 * KB - 1 ||
		    lz4sd->extDictSize == 0)
			result = LZ4_decompress_fast(source, dest,
				originalSize);
		else
			/* Short prefix plus older external history. */
			result = LZ4_decompress_fast_doubleDict(source, dest,
				originalSize, lz4sd->prefixSize,
				lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		/* Grow the prefix by the known decoded size. */
		lz4sd->prefixSize += originalSize;
		lz4sd->prefixEnd += originalSize;
	} else {
		/* Output moved: demote the old prefix to an external dict. */
		lz4sd->extDictSize = lz4sd->prefixSize;
		lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
		result = LZ4_decompress_fast_extDict(source, dest,
			originalSize, lz4sd->externalDict, lz4sd->extDictSize);
		if (result <= 0)
			return result;
		lz4sd->prefixSize = originalSize;
		lz4sd->prefixEnd = (BYTE *)dest + originalSize;
	}

	return result;
}
/*
 * LZ4_decompress_safe_usingDict() - one-shot safe decode with a dictionary.
 * Picks the cheapest decoder variant based on where the dictionary sits
 * relative to the output buffer.
 */
static int LZ4_decompress_safe_usingDict(const char *source, char *dest,
	int compressedSize, int maxOutputSize,
	const char *dictStart, int dictSize)
{
	/* No history at all: plain one-shot decode. */
	if (dictSize == 0)
		return LZ4_decompress_safe(source, dest, compressedSize,
					   maxOutputSize);

	/* Dictionary sits contiguously right before the output buffer. */
	if (dictStart + dictSize == dest) {
		if (dictSize >= 64 * KB - 1)
			return LZ4_decompress_safe_withPrefix64k(source, dest,
				compressedSize, maxOutputSize);
		return LZ4_decompress_safe_withSmallPrefix(source, dest,
			compressedSize, maxOutputSize, dictSize);
	}

	/* Dictionary lives elsewhere: external-dictionary path. */
	return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize,
		maxOutputSize, dictStart, dictSize);
}
/*
 * LZ4_decompress_fast_usingDict() - one-shot fast decode with a dictionary.
 * Trusted input only; dispatches to the external-dictionary variant when
 * the dictionary is not contiguous with the output.
 */
static int LZ4_decompress_fast_usingDict(const char *source, char *dest,
	int originalSize, const char *dictStart, int dictSize)
{
	if (dictSize > 0 && dictStart + dictSize != dest)
		return LZ4_decompress_fast_extDict(source, dest, originalSize,
						   dictStart, dictSize);

	/* Empty or contiguous dictionary: the prefix decoder handles it. */
	return LZ4_decompress_fast(source, dest, originalSize);
}
#ifndef STATIC
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 decompressor");

View File

@ -234,7 +234,7 @@ static FORCE_INLINE unsigned int LZ4_count(
typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;
typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;