Bug Summary

File: /var/lib/jenkins/workspace/firefox-scan-build/mfbt/lz4/lz4frame.c
Warning: line 1579, column 9
Null pointer passed to 2nd parameter expecting 'nonnull'

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name lz4frame.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mfbt -fcoverage-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mfbt -resource-dir /usr/lib/llvm-18/lib/clang/18 -include /var/lib/jenkins/workspace/firefox-scan-build/config/gcc_hidden.h -include /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozilla-config.h -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D DEBUG=1 -D IMPL_MFBT -D LZ4LIB_VISIBILITY= -D MOZ_SUPPORT_LEAKCHECKING -I /var/lib/jenkins/workspace/firefox-scan-build/mfbt -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mfbt -I /var/lib/jenkins/workspace/firefox-scan-build/mfbt/double-conversion -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -internal-isystem /usr/lib/llvm-18/lib/clang/18/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=tautological-type-limit-compare -Wno-range-loop-analysis -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-unknown-warning-option -std=gnu99 -ferror-limit 19 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2024-07-27-022226-2793976-1 -x c /var/lib/jenkins/workspace/firefox-scan-build/mfbt/lz4/lz4frame.c
1/*
2 * LZ4 auto-framing library
3 * Copyright (C) 2011-2016, Yann Collet.
4 *
5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
9 * met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
16 * distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * You can contact the author at :
31 * - LZ4 homepage : http://www.lz4.org
32 * - LZ4 source repository : https://github.com/lz4/lz4
33 */
34
35/* LZ4F is a stand-alone API to create LZ4-compressed Frames
 36 * in full conformance with specification v1.6.1.
 37 * This library relies upon memory management capabilities (malloc, free)
38 * provided either by <stdlib.h>,
39 * or redirected towards another library of user's choice
40 * (see Memory Routines below).
41 */
42
43
44/*-************************************
45* Compiler Options
46**************************************/
47#include <limits.h>
48#ifdef _MSC_VER /* Visual Studio */
49# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
50#endif
51
52
53/*-************************************
54* Tuning parameters
55**************************************/
56/*
57 * LZ4F_HEAPMODE :
58 * Control how LZ4F_compressFrame allocates the Compression State,
59 * either on stack (0:default, fastest), or in memory heap (1:requires malloc()).
60 */
 61#ifndef LZ4F_HEAPMODE
 62# define LZ4F_HEAPMODE 0
63#endif
64
65
66/*-************************************
67* Library declarations
68**************************************/
69#define LZ4F_STATIC_LINKING_ONLY
70#include "lz4frame.h"
71#define LZ4_STATIC_LINKING_ONLY
72#include "lz4.h"
73#define LZ4_HC_STATIC_LINKING_ONLY
74#include "lz4hc.h"
75#define XXH_STATIC_LINKING_ONLY
76#include "xxhash.h"
77
78
79/*-************************************
80* Memory routines
81**************************************/
82/*
83 * User may redirect invocations of
84 * malloc(), calloc() and free()
85 * towards another library or solution of their choice
86 * by modifying below section.
87**/
88
89#include <string.h> /* memset, memcpy, memmove */
90#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
 91# define MEM_INIT(p,v,s)   memset((p),(v),(s))
92#endif
93
94#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
95# include <stdlib.h> /* malloc, calloc, free */
 96# define ALLOC(s)          malloc(s)
 97# define ALLOC_AND_ZERO(s) calloc(1,(s))
 98# define FREEMEM(p)        free(p)
99#endif
100
101static void* LZ4F_calloc(size_t s, LZ4F_CustomMem cmem)
102{
103 /* custom calloc defined : use it */
104 if (cmem.customCalloc != NULL((void*)0)) {
105 return cmem.customCalloc(cmem.opaqueState, s);
106 }
107 /* nothing defined : use default <stdlib.h>'s calloc() */
108 if (cmem.customAlloc == NULL((void*)0)) {
 109 return ALLOC_AND_ZERO(s);
110 }
111 /* only custom alloc defined : use it, and combine it with memset() */
112 { void* const p = cmem.customAlloc(cmem.opaqueState, s);
 113 if (p != NULL) MEM_INIT(p, 0, s);
114 return p;
115} }
116
117static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
118{
119 /* custom malloc defined : use it */
120 if (cmem.customAlloc != NULL((void*)0)) {
121 return cmem.customAlloc(cmem.opaqueState, s);
122 }
123 /* nothing defined : use default <stdlib.h>'s malloc() */
 124 return ALLOC(s);
125}
126
127static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
128{
129 if (p == NULL((void*)0)) return;
130 if (cmem.customFree != NULL((void*)0)) {
131 /* custom allocation defined : use it */
132 cmem.customFree(cmem.opaqueState, p);
133 return;
134 }
135 /* nothing defined : use default <stdlib.h>'s free() */
 136 FREEMEM(p);
137}
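/* Illustrative sketch (not part of lz4frame.c): wiring the custom-allocation hooks that
 * LZ4F_calloc() / LZ4F_malloc() / LZ4F_free() above consume. Field names follow their
 * usage above; the counting logic and function names are invented for the example, and
 * LZ4F_STATIC_LINKING_ONLY is assumed so LZ4F_CustomMem is declared by lz4frame.h. */
#define LZ4F_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <string.h>
#include "lz4frame.h"

static size_t g_liveAllocs = 0;   /* example-only bookkeeping */

static void* counting_alloc(void* opaqueState, size_t size)
{
    (void)opaqueState;
    g_liveAllocs++;
    return malloc(size);
}

static void counting_free(void* opaqueState, void* address)
{
    (void)opaqueState;
    if (address != NULL) g_liveAllocs--;
    free(address);
}

static LZ4F_cctx* create_tracked_cctx(void)
{
    LZ4F_CustomMem cmem;
    memset(&cmem, 0, sizeof(cmem));
    cmem.customAlloc  = counting_alloc;
    cmem.customCalloc = NULL;            /* NULL => LZ4F_calloc() falls back to customAlloc + memset */
    cmem.customFree   = counting_free;
    cmem.opaqueState  = NULL;
    /* every internal allocation of this cctx now flows through the callbacks above */
    return LZ4F_createCompressionContext_advanced(cmem, LZ4F_getVersion());
}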
138
139
140/*-************************************
141* Debug
142**************************************/
143#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
144# include <assert.h>
145#else
146# ifndef assert
 147# define assert(condition) ((void)0)
148# endif
149#endif
150
 151#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
152
153#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
154# include <stdio.h>
155static int g_debuglog_enable = 1;
 156# define DEBUGLOG(l, ...) { \
 157 if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
 158 fprintf(stderr, __FILE__ " (%i): ", __LINE__ ); \
 159 fprintf(stderr, __VA_ARGS__); \
 160 fprintf(stderr, " \n"); \
 161 } }
 162#else
 163# define DEBUGLOG(l, ...) {} /* disabled */
164#endif
165
166
167/*-************************************
168* Basic Types
169**************************************/
 170#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
171# include <stdint.h>
172 typedef uint8_t BYTE;
173 typedef uint16_t U16;
174 typedef uint32_t U32;
175 typedef int32_t S32;
176 typedef uint64_t U64;
177#else
178 typedef unsigned char BYTE;
179 typedef unsigned short U16;
180 typedef unsigned int U32;
181 typedef signed int S32;
182 typedef unsigned long long U64;
183#endif
184
185
186/* unoptimized version; solves endianness & alignment issues */
187static U32 LZ4F_readLE32 (const void* src)
188{
189 const BYTE* const srcPtr = (const BYTE*)src;
190 U32 value32 = srcPtr[0];
191 value32 |= ((U32)srcPtr[1])<< 8;
192 value32 |= ((U32)srcPtr[2])<<16;
193 value32 |= ((U32)srcPtr[3])<<24;
194 return value32;
195}
196
197static void LZ4F_writeLE32 (void* dst, U32 value32)
198{
199 BYTE* const dstPtr = (BYTE*)dst;
200 dstPtr[0] = (BYTE)value32;
201 dstPtr[1] = (BYTE)(value32 >> 8);
202 dstPtr[2] = (BYTE)(value32 >> 16);
203 dstPtr[3] = (BYTE)(value32 >> 24);
204}
205
206static U64 LZ4F_readLE64 (const void* src)
207{
208 const BYTE* const srcPtr = (const BYTE*)src;
209 U64 value64 = srcPtr[0];
210 value64 |= ((U64)srcPtr[1]<<8);
211 value64 |= ((U64)srcPtr[2]<<16);
212 value64 |= ((U64)srcPtr[3]<<24);
213 value64 |= ((U64)srcPtr[4]<<32);
214 value64 |= ((U64)srcPtr[5]<<40);
215 value64 |= ((U64)srcPtr[6]<<48);
216 value64 |= ((U64)srcPtr[7]<<56);
217 return value64;
218}
219
220static void LZ4F_writeLE64 (void* dst, U64 value64)
221{
222 BYTE* const dstPtr = (BYTE*)dst;
223 dstPtr[0] = (BYTE)value64;
224 dstPtr[1] = (BYTE)(value64 >> 8);
225 dstPtr[2] = (BYTE)(value64 >> 16);
226 dstPtr[3] = (BYTE)(value64 >> 24);
227 dstPtr[4] = (BYTE)(value64 >> 32);
228 dstPtr[5] = (BYTE)(value64 >> 40);
229 dstPtr[6] = (BYTE)(value64 >> 48);
230 dstPtr[7] = (BYTE)(value64 >> 56);
231}
232
233
234/*-************************************
235* Constants
236**************************************/
237#ifndef LZ4_SRC_INCLUDED /* avoid double definition */
 238# define KB *(1<<10)
 239# define MB *(1<<20)
 240# define GB *(1<<30)
241#endif
242
 243#define _1BIT  0x01
 244#define _2BITS 0x03
 245#define _3BITS 0x07
 246#define _4BITS 0x0F
 247#define _8BITS 0xFF
 248
 249#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
 250#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
 251
 252static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
 253static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
 254static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
 255static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
256
257
258/*-************************************
259* Structures and local types
260**************************************/
261
262typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e;
263typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e;
264
265typedef struct LZ4F_cctx_s
266{
267 LZ4F_CustomMem cmem;
268 LZ4F_preferences_t prefs;
269 U32 version;
270 U32 cStage; /* 0 : compression uninitialized ; 1 : initialized, can compress */
271 const LZ4F_CDict* cdict;
272 size_t maxBlockSize;
273 size_t maxBufferSize;
274 BYTE* tmpBuff; /* internal buffer, for streaming */
275 BYTE* tmpIn; /* starting position of data compress within internal buffer (>= tmpBuff) */
276 size_t tmpInSize; /* amount of data to compress after tmpIn */
277 U64 totalInSize;
278 XXH32_state_t xxh;
279 void* lz4CtxPtr;
280 U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
281 U16 lz4CtxType; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
282 LZ4F_BlockCompressMode_e blockCompressMode;
283} LZ4F_cctx_t;
284
285
286/*-************************************
287* Error management
288**************************************/
 289#define LZ4F_GENERATE_STRING(STRING) #STRING,
 290static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
291
292
293unsigned LZ4F_isError(LZ4F_errorCode_t code)
294{
295 return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
296}
297
298const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
299{
300 static const char* codeError = "Unspecified error code";
301 if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
302 return codeError;
303}
304
305LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
306{
307 if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
308 return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
309}
310
311static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
312{
313 /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
 314 LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
315 return (LZ4F_errorCode_t)-(ptrdiff_t)code;
316}
317
 318#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
 319
 320#define RETURN_ERROR_IF(c,e) do { \
 321 if (c) { \
 322 DEBUGLOG(3, "Error: " #c); \
 323 RETURN_ERROR(e); \
 324 } \
 325 } while (0)
 326
 327#define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0)
328
 329unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
 330
 331int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
332
333size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID)
334{
 335 static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
 336
 337 if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
 338 if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
 339 RETURN_ERROR(maxBlockSize_invalid);
340 { int const blockSizeIdx = (int)blockSizeID - (int)LZ4F_max64KB;
341 return blockSizes[blockSizeIdx];
342} }
343
344/*-************************************
345* Private functions
346**************************************/
 347#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
348
349static BYTE LZ4F_headerChecksum (const void* header, size_t length)
350{
351 U32 const xxh = XXH32(header, length, 0);
352 return (BYTE)(xxh >> 8);
353}
354
355
356/*-************************************
357* Simple-pass compression functions
358**************************************/
359static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
360 const size_t srcSize)
361{
362 LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
363 size_t maxBlockSize = 64 KB*(1<<10);
364 while (requestedBSID > proposedBSID) {
365 if (srcSize <= maxBlockSize)
366 return proposedBSID;
367 proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
368 maxBlockSize <<= 2;
369 }
370 return requestedBSID;
371}
372
373/*! LZ4F_compressBound_internal() :
374 * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
375 * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
376 * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
377 * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
378 */
379static size_t LZ4F_compressBound_internal(size_t srcSize,
380 const LZ4F_preferences_t* preferencesPtr,
381 size_t alreadyBuffered)
382{
 383 LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
 384 prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
 385 prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
 386 { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
 387 U32 const flush = prefsPtr->autoFlush | (srcSize==0);
 388 LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
 389 size_t const blockSize = LZ4F_getBlockSize(blockID);
 390 size_t const maxBuffered = blockSize - 1;
 391 size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
392 size_t const maxSrcSize = srcSize + bufferedSize;
393 unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
394 size_t const partialBlockSize = maxSrcSize & (blockSize-1);
395 size_t const lastBlockSize = flush ? partialBlockSize : 0;
396 unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
397
398 size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
399 size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);
400
401 return ((BHSize + blockCRCSize) * nbBlocks) +
402 (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
403 }
404}
405
406size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
407{
408 LZ4F_preferences_t prefs;
409 size_t const headerSize = maxFHSize; /* max header size, including optional fields */
410
 411 if (preferencesPtr!=NULL) prefs = *preferencesPtr;
 412 else MEM_INIT(&prefs, 0, sizeof(prefs));
413 prefs.autoFlush = 1;
414
 415 return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);
416}
417
418
419/*! LZ4F_compressFrame_usingCDict() :
420 * Compress srcBuffer using a dictionary, in a single step.
421 * cdict can be NULL, in which case, no dictionary is used.
422 * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
423 * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
424 * however, it's the only way to provide a dictID, so it's not recommended.
425 * @return : number of bytes written into dstBuffer,
426 * or an error code if it fails (can be tested using LZ4F_isError())
427 */
428size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
429 void* dstBuffer, size_t dstCapacity,
430 const void* srcBuffer, size_t srcSize,
431 const LZ4F_CDict* cdict,
432 const LZ4F_preferences_t* preferencesPtr)
433{
434 LZ4F_preferences_t prefs;
435 LZ4F_compressOptions_t options;
436 BYTE* const dstStart = (BYTE*) dstBuffer;
437 BYTE* dstPtr = dstStart;
438 BYTE* const dstEnd = dstStart + dstCapacity;
439
 440 DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
 441 if (preferencesPtr!=NULL)
 442 prefs = *preferencesPtr;
 443 else
 444 MEM_INIT(&prefs, 0, sizeof(prefs));
 445 if (prefs.frameInfo.contentSize != 0)
 446 prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */
 447
 448 prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
 449 prefs.autoFlush = 1;
 450 if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
 451 prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */
 452
 453 MEM_INIT(&options, 0, sizeof(options));
 454 options.stableSrc = 1;
 455
 456 RETURN_ERROR_IF(dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs), dstMaxSize_tooSmall);
 457
 458 { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */
 459 FORWARD_IF_ERROR(headerSize);
 460 dstPtr += headerSize; /* header size */ }
 461
 462 assert(dstEnd >= dstPtr);
 463 { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
 464 FORWARD_IF_ERROR(cSize);
 465 dstPtr += cSize; }
 466
 467 assert(dstEnd >= dstPtr);
 468 { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */
 469 FORWARD_IF_ERROR(tailSize);
 470 dstPtr += tailSize; }
 471
 472 assert(dstEnd >= dstStart);
473 return (size_t)(dstPtr - dstStart);
474}
475
476
477/*! LZ4F_compressFrame() :
478 * Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
479 * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
480 * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
481 * @return : number of bytes written into dstBuffer.
482 * or an error code if it fails (can be tested using LZ4F_isError())
483 */
484size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
485 const void* srcBuffer, size_t srcSize,
486 const LZ4F_preferences_t* preferencesPtr)
487{
488 size_t result;
 489#if (LZ4F_HEAPMODE)
 490 LZ4F_cctx_t* cctxPtr;
 491 result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
 492 FORWARD_IF_ERROR(result);
 493#else
 494 LZ4F_cctx_t cctx;
 495 LZ4_stream_t lz4ctx;
 496 LZ4F_cctx_t* const cctxPtr = &cctx;
 497
 498 MEM_INIT(&cctx, 0, sizeof(cctx));
 499 cctx.version = LZ4F_VERSION;
 500 cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
 501 if ( preferencesPtr == NULL
 502 || preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN ) {
 503 LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
 504 cctxPtr->lz4CtxPtr = &lz4ctx;
 505 cctxPtr->lz4CtxAlloc = 1;
 506 cctxPtr->lz4CtxType = ctxFast;
 507 }
 508#endif
 509 DEBUGLOG(4, "LZ4F_compressFrame");
 510
 511 result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
 512 srcBuffer, srcSize,
 513 NULL, preferencesPtr);
 514
 515#if (LZ4F_HEAPMODE)
 516 LZ4F_freeCompressionContext(cctxPtr);
 517#else
 518 if ( preferencesPtr != NULL
 519 && preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN ) {
520 LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
521 }
522#endif
523 return result;
524}
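/* Illustrative sketch (not part of lz4frame.c): one-shot compression of an in-memory
 * buffer with the functions above, sizing the destination with LZ4F_compressFrameBound()
 * and checking the result via LZ4F_isError()/LZ4F_getErrorName(). The wrapper name and
 * ownership convention are assumptions of the example. */
#include <stdio.h>
#include <stdlib.h>
#include "lz4frame.h"

/* On success, *dstOut receives a malloc'ed buffer holding the frame; returns its size.
 * Returns 0 on failure (allocation or compression error). */
static size_t compress_frame_once(const void* src, size_t srcSize, void** dstOut)
{
    size_t const dstCapacity = LZ4F_compressFrameBound(srcSize, NULL /* worst-case prefs */);
    void* const dst = malloc(dstCapacity);
    size_t cSize;
    if (dst == NULL) return 0;
    cSize = LZ4F_compressFrame(dst, dstCapacity, src, srcSize, NULL /* default preferences */);
    if (LZ4F_isError(cSize)) {
        fprintf(stderr, "LZ4F_compressFrame failed: %s\n", LZ4F_getErrorName(cSize));
        free(dst);
        return 0;
    }
    *dstOut = dst;
    return cSize;   /* number of bytes actually written into *dstOut */
}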
525
526
527/*-***************************************************
528* Dictionary compression
529*****************************************************/
530
531struct LZ4F_CDict_s {
532 LZ4F_CustomMem cmem;
533 void* dictContent;
534 LZ4_stream_t* fastCtx;
535 LZ4_streamHC_t* HCCtx;
536}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */
537
538LZ4F_CDict*
539LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t dictSize)
540{
541 const char* dictStart = (const char*)dictBuffer;
542 LZ4F_CDict* const cdict = (LZ4F_CDict*)LZ4F_malloc(sizeof(*cdict), cmem);
543 DEBUGLOG(4, "LZ4F_createCDict_advanced"){};
544 if (!cdict) return NULL((void*)0);
545 cdict->cmem = cmem;
546 if (dictSize > 64 KB*(1<<10)) {
547 dictStart += dictSize - 64 KB*(1<<10);
548 dictSize = 64 KB*(1<<10);
549 }
550 cdict->dictContent = LZ4F_malloc(dictSize, cmem);
551 /* note: using @cmem to allocate => can't use default create */
552 cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
553 cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
554 if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
555 LZ4F_freeCDict(cdict);
556 return NULL((void*)0);
557 }
558 memcpy(cdict->dictContent, dictStart, dictSize);
559 LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
560 LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
561 LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
562 /* note: we don't know at this point which compression level is going to be used
563 * as a consequence, HCCtx is created for the more common HC mode */
564 LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT9);
565 LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
566 return cdict;
567}
568
569/*! LZ4F_createCDict() :
570 * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
571 * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
572 * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
573 * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within CDict
574 * @return : digested dictionary for compression, or NULL if failed */
575LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
576{
577 DEBUGLOG(4, "LZ4F_createCDict"){};
578 return LZ4F_createCDict_advanced(LZ4F_defaultCMem, dictBuffer, dictSize);
579}
580
581void LZ4F_freeCDict(LZ4F_CDict* cdict)
582{
583 if (cdict==NULL((void*)0)) return; /* support free on NULL */
584 LZ4F_free(cdict->dictContent, cdict->cmem);
585 LZ4F_free(cdict->fastCtx, cdict->cmem);
586 LZ4F_free(cdict->HCCtx, cdict->cmem);
587 LZ4F_free(cdict, cdict->cmem);
588}
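/* Illustrative sketch (not part of lz4frame.c): the "digest once, reuse many times" pattern
 * described in the LZ4F_createCDict() comment above. The global handle and wrapper names are
 * invented for the example; the cctx comes from LZ4F_createCompressionContext() further below,
 * and the result must be tested with LZ4F_isError(). */
#define LZ4F_STATIC_LINKING_ONLY   /* assumed: exposes the CDict declarations */
#include <stddef.h>
#include "lz4frame.h"

static LZ4F_CDict* g_sharedCDict = NULL;   /* built once, read-only afterwards */

static int init_shared_cdict(const void* dictBuffer, size_t dictSize)
{
    g_sharedCDict = LZ4F_createCDict(dictBuffer, dictSize);  /* dictBuffer may be freed afterwards */
    return (g_sharedCDict != NULL) ? 0 : -1;
}

static size_t compress_with_shared_cdict(LZ4F_cctx* cctx,
                                         void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize)
{
    /* returns the compressed frame size, or an LZ4F error code */
    return LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                         src, srcSize,
                                         g_sharedCDict, NULL /* default preferences */);
}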
589
590
591/*-*********************************
592* Advanced compression functions
593***********************************/
594
595LZ4F_cctx*
596LZ4F_createCompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
597{
598 LZ4F_cctx* const cctxPtr =
599 (LZ4F_cctx*)LZ4F_calloc(sizeof(LZ4F_cctx), customMem);
600 if (cctxPtr==NULL((void*)0)) return NULL((void*)0);
601
602 cctxPtr->cmem = customMem;
603 cctxPtr->version = version;
604 cctxPtr->cStage = 0; /* Uninitialized. Next stage : init cctx */
605
606 return cctxPtr;
607}
608
609/*! LZ4F_createCompressionContext() :
610 * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
611 * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
612 * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
613 * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
614 * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
615 * Object can release its memory using LZ4F_freeCompressionContext();
616**/
617LZ4F_errorCode_t
618LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
619{
 620 assert(LZ4F_compressionContextPtr != NULL); /* considered a violation of narrow contract */
 621 /* in case it nonetheless happens in production */
 622 RETURN_ERROR_IF(LZ4F_compressionContextPtr == NULL, parameter_null);
 623
 624 *LZ4F_compressionContextPtr = LZ4F_createCompressionContext_advanced(LZ4F_defaultCMem, version);
 625 RETURN_ERROR_IF(*LZ4F_compressionContextPtr==NULL, allocation_failed);
626 return LZ4F_OK_NoError;
627}
628
629LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
630{
631 if (cctxPtr != NULL((void*)0)) { /* support free on NULL */
632 LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
633 LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
634 LZ4F_free(cctxPtr, cctxPtr->cmem);
635 }
636 return LZ4F_OK_NoError;
637}
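/* Illustrative sketch (not part of lz4frame.c): the create/free lifecycle described in the
 * LZ4F_createCompressionContext() comment above. The wrapper name is an assumption of the
 * example; LZ4F_VERSION guards against mismatched binaries. */
#include <stddef.h>
#include "lz4frame.h"

static LZ4F_cctx* open_cctx(void)
{
    LZ4F_cctx* cctx = NULL;
    LZ4F_errorCode_t const err = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
    if (LZ4F_isError(err)) return NULL;    /* allocation failed */
    return cctx;   /* release with LZ4F_freeCompressionContext() when done */
}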
638
639
640/**
641 * This function prepares the internal LZ4(HC) stream for a new compression,
642 * resetting the context and attaching the dictionary, if there is one.
643 *
644 * It needs to be called at the beginning of each independent compression
645 * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
646 * beginning of each block in blockIndependent mode).
647 */
648static void LZ4F_initStream(void* ctx,
649 const LZ4F_CDict* cdict,
650 int level,
651 LZ4F_blockMode_t blockMode) {
652 if (level < LZ4HC_CLEVEL_MIN2) {
653 if (cdict || blockMode == LZ4F_blockLinked) {
654 /* In these cases, we will call LZ4_compress_fast_continue(),
655 * which needs an already reset context. Otherwise, we'll call a
656 * one-shot API. The non-continued APIs internally perform their own
657 * resets at the beginning of their calls, where they know what
658 * tableType they need the context to be in. So in that case this
659 * would be misguided / wasted work. */
660 LZ4_resetStream_fast((LZ4_stream_t*)ctx);
661 if (cdict)
662 LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
663 }
664 /* In these cases, we'll call a one-shot API.
665 * The non-continued APIs internally perform their own resets
666 * at the beginning of their calls, where they know
667 * which tableType they need the context to be in.
668 * Therefore, a reset here would be wasted work. */
669 } else {
670 LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
671 if (cdict)
672 LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
673 }
674}
675
676static int ctxTypeID_to_size(int ctxTypeID) {
677 switch(ctxTypeID) {
678 case 1:
679 return LZ4_sizeofState();
680 case 2:
681 return LZ4_sizeofStateHC();
682 default:
683 return 0;
684 }
685}
686
687/* LZ4F_compressBegin_internal()
688 * Note: only accepts @cdict _or_ @dictBuffer as non NULL.
689 */
690size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
691 void* dstBuffer, size_t dstCapacity,
692 const void* dictBuffer, size_t dictSize,
693 const LZ4F_CDict* cdict,
694 const LZ4F_preferences_t* preferencesPtr)
695{
 696 LZ4F_preferences_t const prefNull = LZ4F_INIT_PREFERENCES;
 697 BYTE* const dstStart = (BYTE*)dstBuffer;
 698 BYTE* dstPtr = dstStart;
 699
 700 RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
 701 if (preferencesPtr == NULL) preferencesPtr = &prefNull;
702 cctx->prefs = *preferencesPtr;
703
704 /* cctx Management */
705 { U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN2) ? 1 : 2;
706 int requiredSize = ctxTypeID_to_size(ctxTypeID);
707 int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
708 if (allocatedSize < requiredSize) {
709 /* not enough space allocated */
710 LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
711 if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN2) {
712 /* must take ownership of memory allocation,
713 * in order to respect custom allocator contract */
714 cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
715 if (cctx->lz4CtxPtr)
716 LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
717 } else {
718 cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
719 if (cctx->lz4CtxPtr)
720 LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
721 }
 722 RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
723 cctx->lz4CtxAlloc = ctxTypeID;
724 cctx->lz4CtxType = ctxTypeID;
725 } else if (cctx->lz4CtxType != ctxTypeID) {
726 /* otherwise, a sufficient buffer is already allocated,
727 * but we need to reset it to the correct context type */
728 if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN2) {
729 LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
730 } else {
731 LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
732 LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
733 }
734 cctx->lz4CtxType = ctxTypeID;
735 } }
736
737 /* Buffer Management */
738 if (cctx->prefs.frameInfo.blockSizeID == 0)
739 cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULTLZ4F_max64KB;
740 cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);
741
742 { size_t const requiredBuffSize = preferencesPtr->autoFlush ?
 743 ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
 744 cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
745
746 if (cctx->maxBufferSize < requiredBuffSize) {
747 cctx->maxBufferSize = 0;
748 LZ4F_free(cctx->tmpBuff, cctx->cmem);
749 cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
 750 RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
751 cctx->maxBufferSize = requiredBuffSize;
752 } }
753 cctx->tmpIn = cctx->tmpBuff;
754 cctx->tmpInSize = 0;
755 (void)XXH32_reset(&(cctx->xxh), 0);
756
757 /* context init */
758 cctx->cdict = cdict;
759 if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
760 /* frame init only for blockLinked : blockIndependent will be init at each block */
761 LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
762 }
763 if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN2) {
764 LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
765 }
766 if (dictBuffer) {
 767 assert(cdict == NULL);
 768 RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
769 if (cctx->lz4CtxType == ctxFast) {
770 /* lz4 fast*/
771 LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
772 } else {
773 /* lz4hc */
774 assert(cctx->lz4CtxType == ctxHC)((void)0);
775 LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
776 }
777 }
778
779 /* Stage 2 : Write Frame Header */
780
781 /* Magic Number */
 782 LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
783 dstPtr += 4;
784 { BYTE* const headerStart = dstPtr;
785
786 /* FLG Byte */
 787 *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
 788 + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
 789 + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
 790 + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
 791 + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
 792 + (cctx->prefs.frameInfo.dictID > 0) );
 793 /* BD Byte */
 794 *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
795 /* Optional Frame content size field */
796 if (cctx->prefs.frameInfo.contentSize) {
797 LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
798 dstPtr += 8;
799 cctx->totalInSize = 0;
800 }
801 /* Optional dictionary ID field */
802 if (cctx->prefs.frameInfo.dictID) {
803 LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
804 dstPtr += 4;
805 }
806 /* Header CRC Byte */
807 *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
808 dstPtr++;
809 }
810
811 cctx->cStage = 1; /* header written, now request input data block */
812 return (size_t)(dstPtr - dstStart);
813}
814
815size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
816 void* dstBuffer, size_t dstCapacity,
817 const LZ4F_preferences_t* preferencesPtr)
818{
819 return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
820 NULL((void*)0), 0,
821 NULL((void*)0), preferencesPtr);
822}
823
824/* LZ4F_compressBegin_usingDictOnce:
825 * Hidden implementation,
826 * employed for multi-threaded compression
827 * when frame defines linked blocks */
828size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx,
829 void* dstBuffer, size_t dstCapacity,
830 const void* dict, size_t dictSize,
831 const LZ4F_preferences_t* preferencesPtr)
832{
833 return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
834 dict, dictSize,
835 NULL((void*)0), preferencesPtr);
836}
837
838size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
839 void* dstBuffer, size_t dstCapacity,
840 const void* dict, size_t dictSize,
841 const LZ4F_preferences_t* preferencesPtr)
842{
843 /* note : incorrect implementation :
844 * this will only use the dictionary once,
 845 * instead of once *per* block when the frame defines independent blocks */
846 return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity,
847 dict, dictSize,
848 preferencesPtr);
849}
850
851size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
852 void* dstBuffer, size_t dstCapacity,
853 const LZ4F_CDict* cdict,
854 const LZ4F_preferences_t* preferencesPtr)
855{
856 return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
857 NULL((void*)0), 0,
858 cdict, preferencesPtr);
859}
860
861
862/* LZ4F_compressBound() :
863 * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
864 * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
865 * This function cannot fail.
866 */
867size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
868{
869 if (preferencesPtr && preferencesPtr->autoFlush) {
870 return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
871 }
872 return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
873}
874
875
876typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);
877
878
879/*! LZ4F_makeBlock():
880 * compress a single block, add header and optional checksum.
881 * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
882 */
883static size_t LZ4F_makeBlock(void* dst,
884 const void* src, size_t srcSize,
885 compressFunc_t compress, void* lz4ctx, int level,
886 const LZ4F_CDict* cdict,
887 LZ4F_blockChecksum_t crcFlag)
888{
889 BYTE* const cSizePtr = (BYTE*)dst;
890 U32 cSize;
891 assert(compress != NULL)((void)0);
892 cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
893 (int)(srcSize), (int)(srcSize-1),
894 level, cdict);
895
896 if (cSize == 0 || cSize >= srcSize) {
897 cSize = (U32)srcSize;
 898 LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
899 memcpy(cSizePtr+BHSize, src, srcSize);
900 } else {
901 LZ4F_writeLE32(cSizePtr, cSize);
902 }
903 if (crcFlag) {
904 U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */
905 LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
906 }
907 return BHSize + cSize + ((U32)crcFlag)*BFSize;
908}
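/* Illustrative sketch (not part of lz4frame.c): reading back the block header that
 * LZ4F_makeBlock() above writes, i.e. a 4-byte little-endian word whose top bit
 * (LZ4F_BLOCKUNCOMPRESSED_FLAG) marks a stored block, followed by the payload and an
 * optional 4-byte XXH32 of that payload. The struct and function names are example-only. */
typedef struct {
    unsigned dataSize;       /* payload size in bytes */
    int      uncompressed;   /* non-zero when the payload was stored without compression */
} ExampleBlockHeader;

static ExampleBlockHeader example_parseBlockHeader(const unsigned char* p)
{
    /* same byte order as LZ4F_readLE32() above */
    unsigned const raw = (unsigned)p[0]
                       | ((unsigned)p[1] << 8)
                       | ((unsigned)p[2] << 16)
                       | ((unsigned)p[3] << 24);
    ExampleBlockHeader h;
    h.uncompressed = (raw & 0x80000000U) != 0;   /* LZ4F_BLOCKUNCOMPRESSED_FLAG */
    h.dataSize     = raw & 0x7FFFFFFFU;
    return h;
}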
909
910
911static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
912{
913 int const acceleration = (level < 0) ? -level + 1 : 1;
914 DEBUGLOG(5, "LZ4F_compressBlock (srcSize=%i)", srcSize){};
915 LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
916 if (cdict) {
917 return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
918 } else {
919 return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
920 }
921}
922
923static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
924{
925 int const acceleration = (level < 0) ? -level + 1 : 1;
926 (void)cdict; /* init once at beginning of frame */
927 DEBUGLOG(5, "LZ4F_compressBlock_continue (srcSize=%i)", srcSize){};
928 return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
929}
930
931static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
932{
933 LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
934 if (cdict) {
935 return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
936 }
937 return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
938}
939
940static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
941{
942 (void)level; (void)cdict; /* init once at beginning of frame */
943 return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
944}
945
946static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
947{
948 (void)ctx; (void)src; (void)dst; (void)srcSize; (void)dstCapacity; (void)level; (void)cdict;
949 return 0;
950}
951
952static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode)
953{
954 if (compressMode == LZ4B_UNCOMPRESSED)
955 return LZ4F_doNotCompressBlock;
956 if (level < LZ4HC_CLEVEL_MIN2) {
957 if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
958 return LZ4F_compressBlock_continue;
959 }
960 if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
961 return LZ4F_compressBlockHC_continue;
962}
963
964/* Save history (up to 64KB) into @tmpBuff */
965static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
966{
967 if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN2)
968 return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB*(1<<10));
969 return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB*(1<<10));
970}
971
972typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
973
974static const LZ4F_compressOptions_t k_cOptionsNull = { 0, { 0, 0, 0 } };
975
976
977 /*! LZ4F_compressUpdateImpl() :
978 * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
979 * When successful, the function always entirely consumes @srcBuffer.
980 * src data is either buffered or compressed into @dstBuffer.
981 * If the block compression does not match the compression of the previous block, the old data is flushed
982 * and operations continue with the new compression mode.
983 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr) when block compression is turned on.
984 * @compressOptionsPtr is optional : provide NULL to mean "default".
985 * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
986 * or an error code if it fails (which can be tested using LZ4F_isError())
987 * After an error, the state is left in a UB state, and must be re-initialized.
988 */
989static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
990 void* dstBuffer, size_t dstCapacity,
991 const void* srcBuffer, size_t srcSize,
992 const LZ4F_compressOptions_t* compressOptionsPtr,
993 LZ4F_BlockCompressMode_e blockCompression)
994 {
995 size_t const blockSize = cctxPtr->maxBlockSize;
996 const BYTE* srcPtr = (const BYTE*)srcBuffer;
997 const BYTE* const srcEnd = srcPtr + srcSize;
998 BYTE* const dstStart = (BYTE*)dstBuffer;
999 BYTE* dstPtr = dstStart;
1000 LZ4F_lastBlockStatus lastBlockCompressed = notDone;
1001 compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, blockCompression);
1002 size_t bytesWritten;
1003 DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize){};
1004
 1005 RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized); /* state must be initialized and waiting for next block */
 1006 if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
 1007 RETURN_ERROR(dstMaxSize_tooSmall);
 1008
 1009 if (blockCompression == LZ4B_UNCOMPRESSED && dstCapacity < srcSize)
 1010 RETURN_ERROR(dstMaxSize_tooSmall);
1011
1012 /* flush currently written block, to continue with new block compression */
1013 if (cctxPtr->blockCompressMode != blockCompression) {
1014 bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
1015 dstPtr += bytesWritten;
1016 cctxPtr->blockCompressMode = blockCompression;
1017 }
1018
1019 if (compressOptionsPtr == NULL((void*)0)) compressOptionsPtr = &k_cOptionsNull;
1020
1021 /* complete tmp buffer */
1022 if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
1023 size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
1024 assert(blockSize > cctxPtr->tmpInSize)((void)0);
1025 if (sizeToCopy > srcSize) {
1026 /* add src to tmpIn buffer */
1027 memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
1028 srcPtr = srcEnd;
1029 cctxPtr->tmpInSize += srcSize;
1030 /* still needs some CRC */
1031 } else {
1032 /* complete tmpIn block and then compress it */
1033 lastBlockCompressed = fromTmpBuffer;
1034 memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
1035 srcPtr += sizeToCopy;
1036
1037 dstPtr += LZ4F_makeBlock(dstPtr,
1038 cctxPtr->tmpIn, blockSize,
1039 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
1040 cctxPtr->cdict,
1041 cctxPtr->prefs.frameInfo.blockChecksumFlag);
1042 if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
1043 cctxPtr->tmpInSize = 0;
1044 } }
1045
1046 while ((size_t)(srcEnd - srcPtr) >= blockSize) {
1047 /* compress full blocks */
1048 lastBlockCompressed = fromSrcBuffer;
1049 dstPtr += LZ4F_makeBlock(dstPtr,
1050 srcPtr, blockSize,
1051 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
1052 cctxPtr->cdict,
1053 cctxPtr->prefs.frameInfo.blockChecksumFlag);
1054 srcPtr += blockSize;
1055 }
1056
1057 if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
1058 /* autoFlush : remaining input (< blockSize) is compressed */
1059 lastBlockCompressed = fromSrcBuffer;
1060 dstPtr += LZ4F_makeBlock(dstPtr,
1061 srcPtr, (size_t)(srcEnd - srcPtr),
1062 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
1063 cctxPtr->cdict,
1064 cctxPtr->prefs.frameInfo.blockChecksumFlag);
1065 srcPtr = srcEnd;
1066 }
1067
1068 /* preserve dictionary within @tmpBuff whenever necessary */
1069 if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
1070 /* linked blocks are only supported in compressed mode, see LZ4F_uncompressedUpdate */
1071 assert(blockCompression == LZ4B_COMPRESSED)((void)0);
1072 if (compressOptionsPtr->stableSrc) {
1073 cctxPtr->tmpIn = cctxPtr->tmpBuff; /* src is stable : dictionary remains in src across invocations */
1074 } else {
1075 int const realDictSize = LZ4F_localSaveDict(cctxPtr);
1076 assert(0 <= realDictSize && realDictSize <= 64 KB)((void)0);
1077 cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
1078 }
1079 }
1080
1081 /* keep tmpIn within limits */
1082 if (!(cctxPtr->prefs.autoFlush) /* no autoflush : there may be some data left within internal buffer */
1083 && (cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) ) /* not enough room to store next block */
1084 {
1085 /* only preserve 64KB within internal buffer. Ensures there is enough room for next block.
1086 * note: this situation necessarily implies lastBlockCompressed==fromTmpBuffer */
1087 int const realDictSize = LZ4F_localSaveDict(cctxPtr);
1088 cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
1089 assert((cctxPtr->tmpIn + blockSize) <= (cctxPtr->tmpBuff + cctxPtr->maxBufferSize))((void)0);
1090 }
1091
1092 /* some input data left, necessarily < blockSize */
1093 if (srcPtr < srcEnd) {
1094 /* fill tmp buffer */
1095 size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
1096 memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
1097 cctxPtr->tmpInSize = sizeToCopy;
1098 }
1099
1100 if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
1101 (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
1102
1103 cctxPtr->totalInSize += srcSize;
1104 return (size_t)(dstPtr - dstStart);
1105}
1106
1107/*! LZ4F_compressUpdate() :
1108 * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
1109 * When successful, the function always entirely consumes @srcBuffer.
1110 * src data is either buffered or compressed into @dstBuffer.
1111 * If previously an uncompressed block was written, buffered data is flushed
1112 * before appending compressed data is continued.
1113 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
1114 * @compressOptionsPtr is optional : provide NULL to mean "default".
1115 * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
1116 * or an error code if it fails (which can be tested using LZ4F_isError())
1117 * After an error, the state is left in a UB state, and must be re-initialized.
1118 */
1119size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
1120 void* dstBuffer, size_t dstCapacity,
1121 const void* srcBuffer, size_t srcSize,
1122 const LZ4F_compressOptions_t* compressOptionsPtr)
1123{
1124 return LZ4F_compressUpdateImpl(cctxPtr,
1125 dstBuffer, dstCapacity,
1126 srcBuffer, srcSize,
1127 compressOptionsPtr, LZ4B_COMPRESSED);
1128}
1129
1130/*! LZ4F_uncompressedUpdate() :
1131 * Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed.
1132 * This symbol is only supported when LZ4F_blockIndependent is used
1133 * @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
1134 * @compressOptionsPtr is optional : provide NULL to mean "default".
1135 * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
1136 * or an error code if it fails (which can be tested using LZ4F_isError())
1137 * After an error, the state is left in a UB state, and must be re-initialized.
1138 */
1139size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
1140 void* dstBuffer, size_t dstCapacity,
1141 const void* srcBuffer, size_t srcSize,
1142 const LZ4F_compressOptions_t* compressOptionsPtr)
1143{
1144 return LZ4F_compressUpdateImpl(cctxPtr,
1145 dstBuffer, dstCapacity,
1146 srcBuffer, srcSize,
1147 compressOptionsPtr, LZ4B_UNCOMPRESSED);
1148}
1149
1150
1151/*! LZ4F_flush() :
1152 * When compressed data must be sent immediately, without waiting for a block to be filled,
 1153 * invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
1154 * The result of the function is the number of bytes written into dstBuffer.
1155 * It can be zero, this means there was no data left within LZ4F_cctx.
1156 * The function outputs an error code if it fails (can be tested using LZ4F_isError())
1157 * LZ4F_compressOptions_t* is optional. NULL is a valid argument.
1158 */
1159size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
1160 void* dstBuffer, size_t dstCapacity,
1161 const LZ4F_compressOptions_t* compressOptionsPtr)
1162{
1163 BYTE* const dstStart = (BYTE*)dstBuffer;
1164 BYTE* dstPtr = dstStart;
1165 compressFunc_t compress;
1166
1167 if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
 1168 RETURN_ERROR_IF(cctxPtr->cStage != 1, compressionState_uninitialized);
 1169 RETURN_ERROR_IF(dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize), dstMaxSize_tooSmall);
1170 (void)compressOptionsPtr; /* not useful (yet) */
1171
1172 /* select compression function */
1173 compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);
1174
1175 /* compress tmp buffer */
1176 dstPtr += LZ4F_makeBlock(dstPtr,
1177 cctxPtr->tmpIn, cctxPtr->tmpInSize,
1178 compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
1179 cctxPtr->cdict,
1180 cctxPtr->prefs.frameInfo.blockChecksumFlag);
1181 assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity))((void)0);
1182
1183 if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
1184 cctxPtr->tmpIn += cctxPtr->tmpInSize;
1185 cctxPtr->tmpInSize = 0;
1186
1187 /* keep tmpIn within limits */
1188 if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */
1189 int const realDictSize = LZ4F_localSaveDict(cctxPtr);
1190 cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
1191 }
1192
1193 return (size_t)(dstPtr - dstStart);
1194}
1195
1196
1197/*! LZ4F_compressEnd() :
1198 * When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
 1199 * It will flush whatever data remained within compressionContext (like LZ4F_flush())
1200 * but also properly finalize the frame, with an endMark and an (optional) checksum.
1201 * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
1202 * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
1203 * or an error code if it fails (can be tested using LZ4F_isError())
1204 * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
1205 */
1206size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
1207 void* dstBuffer, size_t dstCapacity,
1208 const LZ4F_compressOptions_t* compressOptionsPtr)
1209{
1210 BYTE* const dstStart = (BYTE*)dstBuffer;
1211 BYTE* dstPtr = dstStart;
1212
1213 size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
 1214 DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
 1215 FORWARD_IF_ERROR(flushSize);
1216 dstPtr += flushSize;
1217
1218 assert(flushSize <= dstCapacity)((void)0);
1219 dstCapacity -= flushSize;
1220
 1221 RETURN_ERROR_IF(dstCapacity < 4, dstMaxSize_tooSmall);
1222 LZ4F_writeLE32(dstPtr, 0);
1223 dstPtr += 4; /* endMark */
1224
1225 if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
1226 U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
 1227 RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
 1228 DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
1229 LZ4F_writeLE32(dstPtr, xxh);
1230 dstPtr+=4; /* content Checksum */
1231 }
1232
1233 cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
1234
1235 if (cctxPtr->prefs.frameInfo.contentSize) {
1236 if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
1237 RETURN_ERROR(frameSize_wrong)return LZ4F_returnErrorCode(LZ4F_ERROR_frameSize_wrong);
1238 }
1239
1240 return (size_t)(dstPtr - dstStart);
1241}
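For context, here is a minimal sketch of the full streaming-compression sequence that ends with LZ4F_compressEnd(), assuming only the public lz4frame.h declarations (LZ4F_createCompressionContext, LZ4F_compressBegin, LZ4F_compressUpdate, LZ4F_compressBound, LZ4F_freeCompressionContext). Buffer sizing and error handling are simplified, and compress_one_frame is an illustrative helper, not part of the library.

#include <stdlib.h>
#include "lz4frame.h"

/* Illustrative helper: compress `src` into a malloc'd buffer and return its size.
 * Returns 0 on any failure. Capacity sizing is deliberately generous (sketch). */
static size_t compress_one_frame(const void* src, size_t srcSize, void** dstOut)
{
    LZ4F_cctx* cctx = NULL;
    size_t const dstCapacity = LZ4F_HEADER_SIZE_MAX + LZ4F_compressBound(srcSize, NULL);
    char* const dst = (char*)malloc(dstCapacity);
    size_t pos = 0, r;

    if (dst == NULL) return 0;
    if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) { free(dst); return 0; }

    r = LZ4F_compressBegin(cctx, dst + pos, dstCapacity - pos, NULL);    /* frame header */
    if (LZ4F_isError(r)) goto _fail;
    pos += r;

    r = LZ4F_compressUpdate(cctx, dst + pos, dstCapacity - pos, src, srcSize, NULL);  /* payload */
    if (LZ4F_isError(r)) goto _fail;
    pos += r;

    r = LZ4F_compressEnd(cctx, dst + pos, dstCapacity - pos, NULL);      /* endMark (+ optional checksum) */
    if (LZ4F_isError(r)) goto _fail;
    pos += r;

    LZ4F_freeCompressionContext(cctx);
    *dstOut = dst;
    return pos;

_fail:
    LZ4F_freeCompressionContext(cctx);
    free(dst);
    return 0;
}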
1242
1243
1244/*-***************************************************
1245* Frame Decompression
1246*****************************************************/
1247
1248typedef enum {
1249 dstage_getFrameHeader=0, dstage_storeFrameHeader,
1250 dstage_init,
1251 dstage_getBlockHeader, dstage_storeBlockHeader,
1252 dstage_copyDirect, dstage_getBlockChecksum,
1253 dstage_getCBlock, dstage_storeCBlock,
1254 dstage_flushOut,
1255 dstage_getSuffix, dstage_storeSuffix,
1256 dstage_getSFrameSize, dstage_storeSFrameSize,
1257 dstage_skipSkippable
1258} dStage_t;
1259
1260struct LZ4F_dctx_s {
1261 LZ4F_CustomMem cmem;
1262 LZ4F_frameInfo_t frameInfo;
1263 U32 version;
1264 dStage_t dStage;
1265 U64 frameRemainingSize;
1266 size_t maxBlockSize;
1267 size_t maxBufferSize;
1268 BYTE* tmpIn;
1269 size_t tmpInSize;
1270 size_t tmpInTarget;
1271 BYTE* tmpOutBuffer;
1272 const BYTE* dict;
1273 size_t dictSize;
1274 BYTE* tmpOut;
1275 size_t tmpOutSize;
1276 size_t tmpOutStart;
1277 XXH32_state_t xxh;
1278 XXH32_state_t blockChecksum;
1279 int skipChecksum;
1280 BYTE header[LZ4F_HEADER_SIZE_MAX];
1281}; /* typedef'd to LZ4F_dctx in lz4frame.h */
1282
1283
1284LZ4F_dctx* LZ4F_createDecompressionContext_advanced(LZ4F_CustomMem customMem, unsigned version)
1285{
1286 LZ4F_dctx* const dctx = (LZ4F_dctx*)LZ4F_calloc(sizeof(LZ4F_dctx), customMem);
1287 if (dctx == NULL((void*)0)) return NULL((void*)0);
1288
1289 dctx->cmem = customMem;
1290 dctx->version = version;
1291 return dctx;
1292}
1293
1294/*! LZ4F_createDecompressionContext() :
1295 * Create a decompressionContext object, which will track all decompression operations.
1296 * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
1297 * Object can later be released using LZ4F_freeDecompressionContext().
1298 * @return : if != 0, there was an error during context creation.
1299 */
1300LZ4F_errorCode_t
1301LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
1302{
1303 assert(LZ4F_decompressionContextPtr != NULL)((void)0); /* violation of narrow contract */
1304 RETURN_ERROR_IF(LZ4F_decompressionContextPtr == NULL, parameter_null); /* in case it nonetheless happens in production */
1305
1306 *LZ4F_decompressionContextPtr = LZ4F_createDecompressionContext_advanced(LZ4F_defaultCMem, versionNumber);
1307 if (*LZ4F_decompressionContextPtr == NULL((void*)0)) { /* failed allocation */
1308 RETURN_ERROR(allocation_failed)return LZ4F_returnErrorCode(LZ4F_ERROR_allocation_failed);
1309 }
1310 return LZ4F_OK_NoError;
1311}
1312
1313LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
1314{
1315 LZ4F_errorCode_t result = LZ4F_OK_NoError;
1316 if (dctx != NULL((void*)0)) { /* can accept NULL input, like free() */
1317 result = (LZ4F_errorCode_t)dctx->dStage;
1318 LZ4F_free(dctx->tmpIn, dctx->cmem);
1319 LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
1320 LZ4F_free(dctx, dctx->cmem);
1321 }
1322 return result;
1323}
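A minimal sketch of the context lifecycle implied by the two functions above, assuming the public lz4frame.h declarations (LZ4F_VERSION, LZ4F_isError); dctx_lifecycle_demo is an illustrative helper, not part of the library.

#include "lz4frame.h"

/* Illustrative helper showing the create / use / free cycle. */
static void dctx_lifecycle_demo(void)
{
    LZ4F_dctx* dctx = NULL;
    LZ4F_errorCode_t const err = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
    if (LZ4F_isError(err)) return;         /* allocation failed: dctx stays NULL */
    /* ... drive LZ4F_getFrameInfo() / LZ4F_decompress() with dctx ... */
    LZ4F_freeDecompressionContext(dctx);   /* NULL would also be accepted, like free() */
}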
1324
1325
1326/*==--- Streaming Decompression operations ---==*/
1327void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
1328{
1329 DEBUGLOG(5, "LZ4F_resetDecompressionContext"){};
1330 dctx->dStage = dstage_getFrameHeader;
1331 dctx->dict = NULL((void*)0);
1332 dctx->dictSize = 0;
1333 dctx->skipChecksum = 0;
1334 dctx->frameRemainingSize = 0;
1335}
1336
1337
1338/*! LZ4F_decodeHeader() :
1339 * input : `src` points at the **beginning of the frame**
1340 * output : set internal values of dctx, such as
1341 * dctx->frameInfo and dctx->dStage.
1342 * Also allocates internal buffers.
1343 * @return : nb Bytes read from src (necessarily <= srcSize)
1344 * or an error code (testable with LZ4F_isError())
1345 */
1346static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
1347{
1348 unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
1349 size_t frameHeaderSize;
1350 const BYTE* srcPtr = (const BYTE*)src;
1351
1352 DEBUGLOG(5, "LZ4F_decodeHeader"){};
1353 /* need to decode header to get frameInfo */
1354 RETURN_ERROR_IF(srcSize < minFHSize, frameHeader_incomplete); /* minimal frame header size */
1355 MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
1356
1357 /* special case : skippable frames */
1358 if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START0x184D2A50U) {
1359 dctx->frameInfo.frameType = LZ4F_skippableFrame;
1360 if (src == (void*)(dctx->header)) {
1361 dctx->tmpInSize = srcSize;
1362 dctx->tmpInTarget = 8;
1363 dctx->dStage = dstage_storeSFrameSize;
1364 return srcSize;
1365 } else {
1366 dctx->dStage = dstage_getSFrameSize;
1367 return 4;
1368 } }
1369
1370 /* control magic number */
1371#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1372 if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER0x184D2204U) {
1373 DEBUGLOG(4, "frame header error : unknown magic number"){};
1374 RETURN_ERROR(frameType_unknown)return LZ4F_returnErrorCode(LZ4F_ERROR_frameType_unknown);
1375 }
1376#endif
1377 dctx->frameInfo.frameType = LZ4F_frame;
1378
1379 /* Flags */
1380 { U32 const FLG = srcPtr[4];
1381 U32 const version = (FLG>>6) & _2BITS0x03;
1382 blockChecksumFlag = (FLG>>4) & _1BIT0x01;
1383 blockMode = (FLG>>5) & _1BIT0x01;
1384 contentSizeFlag = (FLG>>3) & _1BIT0x01;
1385 contentChecksumFlag = (FLG>>2) & _1BIT0x01;
1386 dictIDFlag = FLG & _1BIT0x01;
1387 /* validate */
1388 if (((FLG>>1)&_1BIT0x01) != 0) RETURN_ERROR(reservedFlag_set)return LZ4F_returnErrorCode(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
1389 if (version != 1) RETURN_ERROR(headerVersion_wrong)return LZ4F_returnErrorCode(LZ4F_ERROR_headerVersion_wrong); /* Version Number, only supported value */
1390 }
1391 DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag){};
1392
1393 /* Frame Header Size */
1394 frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
1395
1396 if (srcSize < frameHeaderSize) {
1397 /* not enough input to fully decode frame header */
1398 if (srcPtr != dctx->header)
1399 memcpy(dctx->header, srcPtr, srcSize);
1400 dctx->tmpInSize = srcSize;
1401 dctx->tmpInTarget = frameHeaderSize;
1402 dctx->dStage = dstage_storeFrameHeader;
1403 return srcSize;
1404 }
1405
1406 { U32 const BD = srcPtr[5];
1407 blockSizeID = (BD>>4) & _3BITS0x07;
1408 /* validate */
1409 if (((BD>>7)&_1BIT0x01) != 0) RETURN_ERROR(reservedFlag_set)return LZ4F_returnErrorCode(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
1410 if (blockSizeID < 4) RETURN_ERROR(maxBlockSize_invalid)return LZ4F_returnErrorCode(LZ4F_ERROR_maxBlockSize_invalid); /* 4-7 only supported values for the time being */
1411 if (((BD>>0)&_4BITS0x0F) != 0) RETURN_ERROR(reservedFlag_set)return LZ4F_returnErrorCode(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */
1412 }
1413
1414 /* check header */
1415 assert(frameHeaderSize > 5)((void)0);
1416#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1417 { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
1418 RETURN_ERROR_IF(HC != srcPtr[frameHeaderSize-1], headerChecksum_invalid);
1419 }
1420#endif
1421
1422 /* save */
1423 dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
1424 dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
1425 dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
1426 dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
1427 dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
1428 if (contentSizeFlag) {
1429 dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
1430 }
1431 if (dictIDFlag)
1432 dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
1433
1434 dctx->dStage = dstage_init;
1435
1436 return frameHeaderSize;
1437}
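For orientation, this is the header layout the function above parses, as defined by the LZ4 frame format specification (sizes in bytes); this summary is a reading aid, not part of the source file.

/* LZ4 frame header, general frames:
 *   magic number    : 4   (0x184D2204, little-endian)
 *   FLG             : 1   (version, blockMode, blockChecksum, contentSize, contentChecksum, dictID flags)
 *   BD              : 1   (blockSizeID in bits 4-6; other bits reserved)
 *   content size    : 0|8 (present iff contentSizeFlag)
 *   dictionary ID   : 0|4 (present iff dictIDFlag)
 *   header checksum : 1   (second byte of XXH32 over the descriptor, i.e. srcPtr+4 .. end-2)
 * Minimum size minFHSize = 7, maximum = LZ4F_HEADER_SIZE_MAX = 19; skippable frames use 8 bytes.
 */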
1438
1439
1440/*! LZ4F_headerSize() :
1441 * @return : size of frame header
1442 * or an error code, which can be tested using LZ4F_isError()
1443 */
1444size_t LZ4F_headerSize(const void* src, size_t srcSize)
1445{
1446 RETURN_ERROR_IF(src == NULL, srcPtr_wrong);
1447
1448 /* minimal srcSize to determine header size */
1449 if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
1450 RETURN_ERROR(frameHeader_incomplete);
1451
1452 /* special case : skippable frames */
1453 if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START0x184D2A50U)
1454 return 8;
1455
1456 /* control magic number */
1457#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1458 if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER0x184D2204U)
1459 RETURN_ERROR(frameType_unknown)return LZ4F_returnErrorCode(LZ4F_ERROR_frameType_unknown);
1460#endif
1461
1462 /* Frame Header Size */
1463 { BYTE const FLG = ((const BYTE*)src)[4];
1464 U32 const contentSizeFlag = (FLG>>3) & _1BIT0x01;
1465 U32 const dictIDFlag = FLG & _1BIT0x01;
1466 return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
1467 }
1468}
1469
1470/*! LZ4F_getFrameInfo() :
1471 * This function extracts frame parameters (max blockSize, frame checksum, etc.).
1472 * Usage is optional. Objective is to provide relevant information for allocation purposes.
1473 * This function works in 2 situations :
1474 * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
1475 * Amount of input data provided must be large enough to successfully decode the frame header.
1476 * The header size is variable but guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
1477 * - After decoding has started, in which case no input is read; frame parameters are extracted from dctx.
1478 * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
1479 * Decompression must resume from (srcBuffer + *srcSizePtr).
1480 * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for the next call,
1481 * or an error code which can be tested using LZ4F_isError()
1482 * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
1483 * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
1484 */
1485LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
1486 LZ4F_frameInfo_t* frameInfoPtr,
1487 const void* srcBuffer, size_t* srcSizePtr)
1488{
1489 LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
1490 if (dctx->dStage > dstage_storeFrameHeader) {
1491 /* frameInfo already decoded */
1492 size_t o=0, i=0;
1493 *srcSizePtr = 0;
1494 *frameInfoPtr = dctx->frameInfo;
1495 /* returns : recommended nb of bytes for LZ4F_decompress() */
1496 return LZ4F_decompress(dctx, NULL((void*)0), &o, NULL((void*)0), &i, NULL((void*)0));
1497 } else {
1498 if (dctx->dStage == dstage_storeFrameHeader) {
1499 /* frame decoding already started, in the middle of header => automatic fail */
1500 *srcSizePtr = 0;
1501 RETURN_ERROR(frameDecoding_alreadyStarted);
1502 } else {
1503 size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
1504 if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
1505 if (*srcSizePtr < hSize) {
1506 *srcSizePtr=0;
1507 RETURN_ERROR(frameHeader_incomplete);
1508 }
1509
1510 { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
1511 if (LZ4F_isError(decodeResult)) {
1512 *srcSizePtr = 0;
1513 } else {
1514 *srcSizePtr = decodeResult;
1515 decodeResult = BHSize; /* block header size */
1516 }
1517 *frameInfoPtr = dctx->frameInfo;
1518 return decodeResult;
1519 } } }
1520}
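A minimal sketch of the call pattern this contract describes, assuming the public lz4frame.h declarations; get_frame_params is an illustrative helper, not part of the library.

#include "lz4frame.h"

/* Illustrative helper: probe frame parameters from an in-memory prefix of the frame. */
static int get_frame_params(LZ4F_dctx* dctx,
                            const void* srcBuffer, size_t srcAvailable,
                            LZ4F_frameInfo_t* info, size_t* consumed)
{
    *consumed = srcAvailable;                     /* in: bytes available, out: bytes actually read */
    {   size_t const hint = LZ4F_getFrameInfo(dctx, info, srcBuffer, consumed);
        if (LZ4F_isError(hint)) return -1;        /* e.g. frameHeader_incomplete: supply at least
                                                   * LZ4F_headerSize(srcBuffer, srcAvailable) bytes */
    }
    /* info->blockSizeID and info->contentSize can now size output buffers;
     * decompression must resume at srcBuffer + *consumed. */
    return 0;
}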
1521
1522
1523/* LZ4F_updateDict() :
1524 * only used for LZ4F_blockLinked mode
1525 * Condition : @dstPtr != NULL
1526 */
1527static void LZ4F_updateDict(LZ4F_dctx* dctx,
1528 const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
1529 unsigned withinTmp)
1530{
1531 assert(dstPtr != NULL)((void)0);
1532 if (dctx->dictSize==0) dctx->dict = (const BYTE*)dstPtr; /* will lead to prefix mode */
32
Assuming field 'dictSize' is not equal to 0
33
Taking false branch
1533 assert(dctx->dict != NULL)((void)0);
1534
1535 if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
34
Taking false branch
1536 dctx->dictSize += dstSize;
1537 return;
1538 }
1539
1540 assert(dstPtr >= dstBufferStart)((void)0);
1541 if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB*(1<<10)) { /* history in dstBuffer becomes large enough to become dictionary */
35
Assuming the condition is false
36
Taking false branch
1542 dctx->dict = (const BYTE*)dstBufferStart;
1543 dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
1544 return;
1545 }
1546
1547 assert(dstSize < 64 KB)((void)0); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
1548
1549 /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
1550 assert(dctx->tmpOutBuffer != NULL)((void)0);
1551
1552 if (withinTmp
36.1
'withinTmp' is 0
&& (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
1553 /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
1554 assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart)((void)0);
1555 dctx->dictSize += dstSize;
1556 return;
1557 }
1558
1559 if (withinTmp
36.2
'withinTmp' is 0
) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
37
Taking false branch
1560 size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
1561 size_t copySize = 64 KB*(1<<10) - dctx->tmpOutSize;
1562 const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
1563 if (dctx->tmpOutSize > 64 KB*(1<<10)) copySize = 0;
1564 if (copySize > preserveSize) copySize = preserveSize;
1565
1566 memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
1567
1568 dctx->dict = dctx->tmpOutBuffer;
1569 dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
1570 return;
1571 }
1572
1573 if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */
38
Assuming field 'dict' is equal to field 'tmpOutBuffer'
39
Taking true branch
1574 if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */
40
Assuming the condition is false
41
Taking false branch
1575 size_t const preserveSize = 64 KB*(1<<10) - dstSize;
1576 memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
1577 dctx->dictSize = preserveSize;
1578 }
1579 memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
42
Null pointer passed to 2nd parameter expecting 'nonnull'
1580 dctx->dictSize += dstSize;
1581 return;
1582 }
1583
1584 /* join dict & dest into tmp */
1585 { size_t preserveSize = 64 KB*(1<<10) - dstSize;
1586 if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
1587 memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
1588 memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
1589 dctx->dict = dctx->tmpOutBuffer;
1590 dctx->dictSize = preserveSize + dstSize;
1591 }
1592}
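The report above (step 42, line 1579) follows a path where LZ4F_decompress() is entered with dstBuffer == NULL (steps 1-4; the API allows this when *dstSizePtr == 0, cf. the assert at line 1632), so dstPtr is NULL when the size check at line 1886 is assumed to succeed (step 16), and NULL then reaches the memcpy() at line 1579 through the call at line 1913 (steps 30-31). At runtime that size check can only pass with a NULL dstPtr if dctx->maxBlockSize is 0, while LZ4F_decodeHeader() rejects blockSizeID < 4, so maxBlockSize is at least 64 KB once a frame header has been decoded; the asserts at lines 1531 and 1895 record the same expectation, but assert() expands to ((void)0) in this configuration, so the analyzer cannot rely on them. Still, passing NULL to memcpy() is undefined behaviour under its nonnull attribute even for a zero-length copy. A hedged sketch of one way to make the precondition visible to the analyzer without affecting release builds (illustrative only, e.g. at the top of LZ4F_updateDict(); not the upstream patch):

/* Illustrative sketch only: an analyzer-only guard restating the
 * "@dstPtr != NULL" precondition, since assert() is compiled out here. */
#ifdef __clang_analyzer__
    if (dstPtr == NULL) return;   /* precondition: callers never pass NULL with a non-zero dstSize */
#endif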
1593
1594
1595/*! LZ4F_decompress() :
1596 * Call this function repeatedly to regenerate the data compressed within srcBuffer.
1597 * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
1598 * into dstBuffer of capacity *dstSizePtr.
1599 *
1600 * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
1601 *
1602 * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
1603 * If the number of bytes read is < the number of bytes provided, then the decompression operation is not complete.
1604 * Remaining data will have to be presented again in a subsequent invocation.
1605 *
1606 * The function result is a hint of the best srcSize to use for the next call to LZ4F_decompress.
1607 * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
1608 * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
1609 * Note that this is just a hint; it's always possible to provide any srcSize value.
1610 * When a frame is fully decoded, @return will be 0.
1611 * If decompression failed, @return is an error code which can be tested using LZ4F_isError().
1612 */
1613size_t LZ4F_decompress(LZ4F_dctx* dctx,
1614 void* dstBuffer, size_t* dstSizePtr,
1615 const void* srcBuffer, size_t* srcSizePtr,
1616 const LZ4F_decompressOptions_t* decompressOptionsPtr)
1617{
1618 LZ4F_decompressOptions_t optionsNull;
1619 const BYTE* const srcStart = (const BYTE*)srcBuffer;
1620 const BYTE* const srcEnd = srcStart + *srcSizePtr;
1621 const BYTE* srcPtr = srcStart;
1622 BYTE* const dstStart = (BYTE*)dstBuffer;
1
'dstStart' initialized here
1623 BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL((void*)0);
2
Assuming 'dstStart' is null
3
'?' condition is false
1624 BYTE* dstPtr = dstStart;
4
'dstPtr' initialized to a null pointer value
1625 const BYTE* selectedIn = NULL((void*)0);
1626 unsigned doAnotherStage = 1;
1627 size_t nextSrcSizeHint = 1;
1628
1629
1630 DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)",
1631 srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
1632 if (dstBuffer
4.1
'dstBuffer' is equal to NULL
== NULL((void*)0)) assert(*dstSizePtr == 0)((void)0);
5
Taking true branch
1633 MEM_INIT(&optionsNull, 0, sizeof(optionsNull))memset((&optionsNull),(0),(sizeof(optionsNull)));
1634 if (decompressOptionsPtr==NULL((void*)0)) decompressOptionsPtr = &optionsNull;
6
Assuming 'decompressOptionsPtr' is not equal to NULL
7
Taking false branch
1635 *srcSizePtr = 0;
1636 *dstSizePtr = 0;
1637 assert(dctx != NULL)((void)0);
1638 dctx->skipChecksum |= (decompressOptionsPtr->skipChecksums != 0); /* once set, disable for the remainder of the frame */
8
Assuming field 'skipChecksums' is equal to 0
1639
1640 /* behaves as a state machine */
1641
1642 while (doAnotherStage) {
9
Loop condition is true. Entering loop body
1643
1644 switch(dctx->dStage)
10
Control jumps to 'case dstage_storeCBlock:' at line 1851
1645 {
1646
1647 case dstage_getFrameHeader:
1648 DEBUGLOG(6, "dstage_getFrameHeader"){};
1649 if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
1650 size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
1651 FORWARD_IF_ERROR(hSize)do { if (LZ4F_isError(hSize)) return (hSize); } while (0);
1652 srcPtr += hSize;
1653 break;
1654 }
1655 dctx->tmpInSize = 0;
1656 if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */
1657 dctx->tmpInTarget = minFHSize; /* minimum size to decode header */
1658 dctx->dStage = dstage_storeFrameHeader;
1659 /* fall-through */
1660
1661 case dstage_storeFrameHeader:
1662 DEBUGLOG(6, "dstage_storeFrameHeader"){};
1663 { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
1664 memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
1665 dctx->tmpInSize += sizeToCopy;
1666 srcPtr += sizeToCopy;
1667 }
1668 if (dctx->tmpInSize < dctx->tmpInTarget) {
1669 nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */
1670 doAnotherStage = 0; /* not enough src data, ask for some more */
1671 break;
1672 }
1673 FORWARD_IF_ERROR( LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget) ); /* will update dStage appropriately */
1674 break;
1675
1676 case dstage_init:
1677 DEBUGLOG(6, "dstage_init"){};
1678 if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
1679 /* internal buffers allocation */
1680 { size_t const bufferNeeded = dctx->maxBlockSize
1681 + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB*(1<<10) : 0);
1682 if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */
1683 dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/
1684 LZ4F_free(dctx->tmpIn, dctx->cmem);
1685 dctx->tmpIn = (BYTE*)LZ4F_malloc(dctx->maxBlockSize + BFSize /* block checksum */, dctx->cmem);
1686 RETURN_ERROR_IF(dctx->tmpIn == NULL, allocation_failed);
1687 LZ4F_free(dctx->tmpOutBuffer, dctx->cmem);
1688 dctx->tmpOutBuffer= (BYTE*)LZ4F_malloc(bufferNeeded, dctx->cmem);
1689 RETURN_ERROR_IF(dctx->tmpOutBuffer == NULL, allocation_failed);
1690 dctx->maxBufferSize = bufferNeeded;
1691 } }
1692 dctx->tmpInSize = 0;
1693 dctx->tmpInTarget = 0;
1694 dctx->tmpOut = dctx->tmpOutBuffer;
1695 dctx->tmpOutStart = 0;
1696 dctx->tmpOutSize = 0;
1697
1698 dctx->dStage = dstage_getBlockHeader;
1699 /* fall-through */
1700
1701 case dstage_getBlockHeader:
1702 if ((size_t)(srcEnd - srcPtr) >= BHSize) {
1703 selectedIn = srcPtr;
1704 srcPtr += BHSize;
1705 } else {
1706 /* not enough input to read cBlockSize field */
1707 dctx->tmpInSize = 0;
1708 dctx->dStage = dstage_storeBlockHeader;
1709 }
1710
1711 if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */
1712 case dstage_storeBlockHeader:
1713 { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
1714 size_t const wantedData = BHSize - dctx->tmpInSize;
1715 size_t const sizeToCopy = MIN(wantedData, remainingInput);
1716 memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
1717 srcPtr += sizeToCopy;
1718 dctx->tmpInSize += sizeToCopy;
1719
1720 if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */
1721 nextSrcSizeHint = BHSize - dctx->tmpInSize;
1722 doAnotherStage = 0;
1723 break;
1724 }
1725 selectedIn = dctx->tmpIn;
1726 } /* if (dctx->dStage == dstage_storeBlockHeader) */
1727
1728 /* decode block header */
1729 { U32 const blockHeader = LZ4F_readLE32(selectedIn);
1730 size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
1731 size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
1732 if (blockHeader==0) { /* frameEnd signal, no more block */
1733 DEBUGLOG(5, "end of frame"){};
1734 dctx->dStage = dstage_getSuffix;
1735 break;
1736 }
1737 if (nextCBlockSize > dctx->maxBlockSize) {
1738 RETURN_ERROR(maxBlockSize_invalid)return LZ4F_returnErrorCode(LZ4F_ERROR_maxBlockSize_invalid);
1739 }
1740 if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG0x80000000U) {
1741 /* next block is uncompressed */
1742 dctx->tmpInTarget = nextCBlockSize;
1743 DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize){};
1744 if (dctx->frameInfo.blockChecksumFlag) {
1745 (void)XXH32_reset(&dctx->blockChecksum, 0);
1746 }
1747 dctx->dStage = dstage_copyDirect;
1748 break;
1749 }
1750 /* next block is a compressed block */
1751 dctx->tmpInTarget = nextCBlockSize + crcSize;
1752 dctx->dStage = dstage_getCBlock;
1753 if (dstPtr==dstEnd || srcPtr==srcEnd) {
1754 nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
1755 doAnotherStage = 0;
1756 }
1757 break;
1758 }
1759
1760 case dstage_copyDirect: /* uncompressed block */
1761 DEBUGLOG(6, "dstage_copyDirect"){};
1762 { size_t sizeToCopy;
1763 if (dstPtr == NULL((void*)0)) {
1764 sizeToCopy = 0;
1765 } else {
1766 size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
1767 sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
1768 memcpy(dstPtr, srcPtr, sizeToCopy);
1769 if (!dctx->skipChecksum) {
1770 if (dctx->frameInfo.blockChecksumFlag) {
1771 (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
1772 }
1773 if (dctx->frameInfo.contentChecksumFlag)
1774 (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
1775 }
1776 if (dctx->frameInfo.contentSize)
1777 dctx->frameRemainingSize -= sizeToCopy;
1778
1779 /* history management (linked blocks only)*/
1780 if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
1781 LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
1782 }
1783 srcPtr += sizeToCopy;
1784 dstPtr += sizeToCopy;
1785 }
1786 if (sizeToCopy == dctx->tmpInTarget) { /* all done */
1787 if (dctx->frameInfo.blockChecksumFlag) {
1788 dctx->tmpInSize = 0;
1789 dctx->dStage = dstage_getBlockChecksum;
1790 } else
1791 dctx->dStage = dstage_getBlockHeader; /* new block */
1792 break;
1793 }
1794 dctx->tmpInTarget -= sizeToCopy; /* need to copy more */
1795 }
1796 nextSrcSizeHint = dctx->tmpInTarget +
1797 +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
1798 + BHSize /* next header size */;
1799 doAnotherStage = 0;
1800 break;
1801
1802 /* check block checksum for recently transferred uncompressed block */
1803 case dstage_getBlockChecksum:
1804 DEBUGLOG(6, "dstage_getBlockChecksum"){};
1805 { const void* crcSrc;
1806 if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
1807 crcSrc = srcPtr;
1808 srcPtr += 4;
1809 } else {
1810 size_t const stillToCopy = 4 - dctx->tmpInSize;
1811 size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr));
1812 memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
1813 dctx->tmpInSize += sizeToCopy;
1814 srcPtr += sizeToCopy;
1815 if (dctx->tmpInSize < 4) { /* all input consumed */
1816 doAnotherStage = 0;
1817 break;
1818 }
1819 crcSrc = dctx->header;
1820 }
1821 if (!dctx->skipChecksum) {
1822 U32 const readCRC = LZ4F_readLE32(crcSrc);
1823 U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
1824#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1825 DEBUGLOG(6, "compare block checksum"){};
1826 if (readCRC != calcCRC) {
1827 DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
1828 readCRC, calcCRC);
1829 RETURN_ERROR(blockChecksum_invalid)return LZ4F_returnErrorCode(LZ4F_ERROR_blockChecksum_invalid);
1830 }
1831#else
1832 (void)readCRC;
1833 (void)calcCRC;
1834#endif
1835 } }
1836 dctx->dStage = dstage_getBlockHeader; /* new block */
1837 break;
1838
1839 case dstage_getCBlock:
1840 DEBUGLOG(6, "dstage_getCBlock"){};
1841 if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
1842 dctx->tmpInSize = 0;
1843 dctx->dStage = dstage_storeCBlock;
1844 break;
1845 }
1846 /* input large enough to read full block directly */
1847 selectedIn = srcPtr;
1848 srcPtr += dctx->tmpInTarget;
1849
1850 if (0) /* always jump over next block */
1851 case dstage_storeCBlock:
1852 { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
1853 size_t const inputLeft = (size_t)(srcEnd-srcPtr);
1854 size_t const sizeToCopy = MIN(wantedData, inputLeft)( (wantedData) < (inputLeft) ? (wantedData) : (inputLeft) );
11
Assuming 'wantedData' is >= 'inputLeft'
12
'?' condition is false
1855 memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
1856 dctx->tmpInSize += sizeToCopy;
1857 srcPtr += sizeToCopy;
1858 if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
13
Assuming field 'tmpInSize' is >= field 'tmpInTarget'
14
Taking false branch
1859 nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
1860 + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
1861 + BHSize /* next header size */;
1862 doAnotherStage = 0;
1863 break;
1864 }
1865 selectedIn = dctx->tmpIn;
1866 }
1867
1868 /* At this stage, input is large enough to decode a block */
1869
1870 /* First, decode and control block checksum if it exists */
1871 if (dctx->frameInfo.blockChecksumFlag) {
15
Assuming field 'blockChecksumFlag' is 0
1872 assert(dctx->tmpInTarget >= 4)((void)0);
1873 dctx->tmpInTarget -= 4;
1874 assert(selectedIn != NULL)((void)0); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
1875 { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
1876 U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
1877#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
1878 RETURN_ERROR_IF(readBlockCrc != calcBlockCrc, blockChecksum_invalid);
1879#else
1880 (void)readBlockCrc;
1881 (void)calcBlockCrc;
1882#endif
1883 } }
1884
1885 /* decode directly into destination buffer if there is enough room */
1886 if ( ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize)
16
Assuming the condition is true
19
Taking true branch
1887 /* unless the dictionary is stored in tmpOut:
1888 * in which case it's faster to decode within tmpOut
1889 * to benefit from prefix speedup */
1890 && !(dctx->dict!= NULL((void*)0) && (const BYTE*)dctx->dict + dctx->dictSize == dctx->tmpOut) )
17
Assuming field 'dict' is not equal to NULL
18
Assuming the condition is true
1891 {
1892 const char* dict = (const char*)dctx->dict;
1893 size_t dictSize = dctx->dictSize;
1894 int decodedSize;
1895 assert(dstPtr != NULL)((void)0);
1896 if (dict
19.1
'dict' is non-null
&& dictSize > 1 GB*(1<<30)) {
20
Assuming the condition is false
21
Taking false branch
1897 /* overflow control : dctx->dictSize is an int, avoid truncation / sign issues */
1898 dict += dictSize - 64 KB*(1<<10);
1899 dictSize = 64 KB*(1<<10);
1900 }
1901 decodedSize = LZ4_decompress_safe_usingDict(
1902 (const char*)selectedIn, (char*)dstPtr,
1903 (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
1904 dict, (int)dictSize);
1905 RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
22
Assuming 'decodedSize' is >= 0
23
Taking false branch
1906 if ((dctx->frameInfo.contentChecksumFlag) && (!dctx->skipChecksum))
24
Loop condition is false. Exiting loop
25
Assuming field 'contentChecksumFlag' is 0
1907 XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
1908 if (dctx->frameInfo.contentSize)
26
Assuming field 'contentSize' is 0
27
Taking false branch
1909 dctx->frameRemainingSize -= (size_t)decodedSize;
1910
1911 /* dictionary management */
1912 if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
28
Assuming field 'blockMode' is equal to LZ4F_blockLinked
29
Taking true branch
1913 LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
30
Passing null pointer value via 2nd parameter 'dstPtr'
31
Calling 'LZ4F_updateDict'
1914 }
1915
1916 dstPtr += decodedSize;
1917 dctx->dStage = dstage_getBlockHeader; /* end of block, let's get another one */
1918 break;
1919 }
1920
1921 /* not enough place into dst : decode into tmpOut */
1922
1923 /* manage dictionary */
1924 if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
1925 if (dctx->dict == dctx->tmpOutBuffer) {
1926 /* truncate dictionary to 64 KB if too big */
1927 if (dctx->dictSize > 128 KB*(1<<10)) {
1928 memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB*(1<<10), 64 KB*(1<<10));
1929 dctx->dictSize = 64 KB*(1<<10);
1930 }
1931 dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
1932 } else { /* dict not within tmpOut */
1933 size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
1934 dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
1935 } }
1936
1937 /* Decode block into tmpOut */
1938 { const char* dict = (const char*)dctx->dict;
1939 size_t dictSize = dctx->dictSize;
1940 int decodedSize;
1941 if (dict && dictSize > 1 GB*(1<<30)) {
1942 /* the dictSize param is an int, avoid truncation / sign issues */
1943 dict += dictSize - 64 KB*(1<<10);
1944 dictSize = 64 KB*(1<<10);
1945 }
1946 decodedSize = LZ4_decompress_safe_usingDict(
1947 (const char*)selectedIn, (char*)dctx->tmpOut,
1948 (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
1949 dict, (int)dictSize);
1950 RETURN_ERROR_IF(decodedSize < 0, decompressionFailed);
1951 if (dctx->frameInfo.contentChecksumFlag && !dctx->skipChecksum)
1952 XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
1953 if (dctx->frameInfo.contentSize)
1954 dctx->frameRemainingSize -= (size_t)decodedSize;
1955 dctx->tmpOutSize = (size_t)decodedSize;
1956 dctx->tmpOutStart = 0;
1957 dctx->dStage = dstage_flushOut;
1958 }
1959 /* fall-through */
1960
1961 case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
1962 DEBUGLOG(6, "dstage_flushOut"){};
1963 if (dstPtr != NULL((void*)0)) {
1964 size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
1965 memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
1966
1967 /* dictionary management */
1968 if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
1969 LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);
1970
1971 dctx->tmpOutStart += sizeToCopy;
1972 dstPtr += sizeToCopy;
1973 }
1974 if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
1975 dctx->dStage = dstage_getBlockHeader; /* get next block */
1976 break;
1977 }
1978 /* could not flush everything : stop there, just request a block header */
1979 doAnotherStage = 0;
1980 nextSrcSizeHint = BHSize;
1981 break;
1982
1983 case dstage_getSuffix:
1984 RETURN_ERROR_IF(dctx->frameRemainingSize, frameSize_wrong); /* incorrect frame size decoded */
1985 if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */
1986 nextSrcSizeHint = 0;
1987 LZ4F_resetDecompressionContext(dctx);
1988 doAnotherStage = 0;
1989 break;
1990 }
1991 if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */
1992 dctx->tmpInSize = 0;
1993 dctx->dStage = dstage_storeSuffix;
1994 } else {
1995 selectedIn = srcPtr;
1996 srcPtr += 4;
1997 }
1998
1999 if (dctx->dStage == dstage_storeSuffix) /* can be skipped */
2000 case dstage_storeSuffix:
2001 { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
2002 size_t const wantedData = 4 - dctx->tmpInSize;
2003 size_t const sizeToCopy = MIN(wantedData, remainingInput);
2004 memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
2005 srcPtr += sizeToCopy;
2006 dctx->tmpInSize += sizeToCopy;
2007 if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */
2008 nextSrcSizeHint = 4 - dctx->tmpInSize;
2009 doAnotherStage=0;
2010 break;
2011 }
2012 selectedIn = dctx->tmpIn;
2013 } /* if (dctx->dStage == dstage_storeSuffix) */
2014
2015 /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
2016 if (!dctx->skipChecksum) {
2017 U32 const readCRC = LZ4F_readLE32(selectedIn);
2018 U32 const resultCRC = XXH32_digest(&(dctx->xxh));
2019 DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC){};
2020#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
2021 RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
2022#else
2023 (void)readCRC;
2024 (void)resultCRC;
2025#endif
2026 }
2027 nextSrcSizeHint = 0;
2028 LZ4F_resetDecompressionContext(dctx);
2029 doAnotherStage = 0;
2030 break;
2031
2032 case dstage_getSFrameSize:
2033 if ((srcEnd - srcPtr) >= 4) {
2034 selectedIn = srcPtr;
2035 srcPtr += 4;
2036 } else {
2037 /* not enough input to read cBlockSize field */
2038 dctx->tmpInSize = 4;
2039 dctx->tmpInTarget = 8;
2040 dctx->dStage = dstage_storeSFrameSize;
2041 }
2042
2043 if (dctx->dStage == dstage_storeSFrameSize)
2044 case dstage_storeSFrameSize:
2045 { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
2046 (size_t)(srcEnd - srcPtr) );
2047 memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
2048 srcPtr += sizeToCopy;
2049 dctx->tmpInSize += sizeToCopy;
2050 if (dctx->tmpInSize < dctx->tmpInTarget) {
2051 /* not enough input to get full sBlockSize; wait for more */
2052 nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
2053 doAnotherStage = 0;
2054 break;
2055 }
2056 selectedIn = dctx->header + 4;
2057 } /* if (dctx->dStage == dstage_storeSFrameSize) */
2058
2059 /* case dstage_decodeSFrameSize: */ /* no direct entry */
2060 { size_t const SFrameSize = LZ4F_readLE32(selectedIn);
2061 dctx->frameInfo.contentSize = SFrameSize;
2062 dctx->tmpInTarget = SFrameSize;
2063 dctx->dStage = dstage_skipSkippable;
2064 break;
2065 }
2066
2067 case dstage_skipSkippable:
2068 { size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
2069 srcPtr += skipSize;
2070 dctx->tmpInTarget -= skipSize;
2071 doAnotherStage = 0;
2072 nextSrcSizeHint = dctx->tmpInTarget;
2073 if (nextSrcSizeHint) break; /* still more to skip */
2074 /* frame fully skipped : prepare context for a new frame */
2075 LZ4F_resetDecompressionContext(dctx);
2076 break;
2077 }
2078 } /* switch (dctx->dStage) */
2079 } /* while (doAnotherStage) */
2080
2081 /* preserve history within tmpOut whenever necessary */
2082 LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
2083 if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
2084 && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */
2085 && (dctx->dict != NULL((void*)0)) /* dictionary exists */
2086 && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */
2087 && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */
2088 {
2089 if (dctx->dStage == dstage_flushOut) {
2090 size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
2091 size_t copySize = 64 KB*(1<<10) - dctx->tmpOutSize;
2092 const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
2093 if (dctx->tmpOutSize > 64 KB*(1<<10)) copySize = 0;
2094 if (copySize > preserveSize) copySize = preserveSize;
2095 assert(dctx->tmpOutBuffer != NULL)((void)0);
2096
2097 memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
2098
2099 dctx->dict = dctx->tmpOutBuffer;
2100 dctx->dictSize = preserveSize + dctx->tmpOutStart;
2101 } else {
2102 const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
2103 size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
2104
2105 memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
2106
2107 dctx->dict = dctx->tmpOutBuffer;
2108 dctx->dictSize = newDictSize;
2109 dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
2110 }
2111 }
2112
2113 *srcSizePtr = (size_t)(srcPtr - srcStart);
2114 *dstSizePtr = (size_t)(dstPtr - dstStart);
2115 return nextSrcSizeHint;
2116}
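A minimal sketch of the driving loop this contract implies, assuming the public lz4frame.h declarations, that the whole frame is available in memory, and that dst is large enough for the decoded content; decompress_frame is an illustrative helper, not part of the library, and the srcSize hint is only used to decide whether to keep looping.

#include "lz4frame.h"

/* Illustrative helper: decode one frame held entirely in (src, srcSize) into dst.
 * Returns the number of bytes produced, or an LZ4F error code (test with LZ4F_isError()). */
static size_t decompress_frame(LZ4F_dctx* dctx,
                               void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    const char* srcPtr = (const char*)src;
    const char* const srcEnd = srcPtr + srcSize;
    char* dstPtr = (char*)dst;
    size_t produced = 0;
    size_t hint = 1;                                  /* non-zero until the frame is fully decoded */

    while (hint != 0 && srcPtr < srcEnd) {
        size_t dstChunk = dstCapacity - produced;     /* in: room left, out: bytes written */
        size_t srcChunk = (size_t)(srcEnd - srcPtr);  /* in: bytes offered, out: bytes consumed */
        hint = LZ4F_decompress(dctx, dstPtr, &dstChunk, srcPtr, &srcChunk, NULL);
        if (LZ4F_isError(hint)) { LZ4F_resetDecompressionContext(dctx); return hint; }
        if (srcChunk == 0 && dstChunk == 0) break;    /* no progress: dst too small for this frame */
        srcPtr += srcChunk;
        dstPtr += dstChunk;
        produced += dstChunk;
    }
    return produced;                                  /* hint == 0 means the frame ended cleanly */
}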
2117
2118/*! LZ4F_decompress_usingDict() :
2119 * Same as LZ4F_decompress(), using a predefined dictionary.
2120 * Dictionary is used "in place", without any preprocessing.
2121 * It must remain accessible throughout the entire frame decoding.
2122 */
2123size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx,
2124 void* dstBuffer, size_t* dstSizePtr,
2125 const void* srcBuffer, size_t* srcSizePtr,
2126 const void* dict, size_t dictSize,
2127 const LZ4F_decompressOptions_t* decompressOptionsPtr)
2128{
2129 if (dctx->dStage <= dstage_init) {
2130 dctx->dict = (const BYTE*)dict;
2131 dctx->dictSize = dictSize;
2132 }
2133 return LZ4F_decompress(dctx, dstBuffer, dstSizePtr,
2134 srcBuffer, srcSizePtr,
2135 decompressOptionsPtr);
2136}
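A short sketch of the same call with an external dictionary, under the constraint stated above that the dictionary stays accessible and unmodified for the whole frame; one_shot_with_dict is an illustrative helper, not part of the library.

#include "lz4frame.h"

/* Illustrative helper: single call with a predefined dictionary.
 * Returns LZ4F_decompress()'s hint or error code; dstSize / srcConsumed report progress. */
static size_t one_shot_with_dict(LZ4F_dctx* dctx,
                                 void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const void* dictBuf, size_t dictBufSize)
{
    size_t dstSize = dstCapacity;      /* in: capacity, out: bytes produced */
    size_t srcConsumed = srcSize;      /* in: bytes available, out: bytes consumed */
    return LZ4F_decompress_usingDict(dctx, dst, &dstSize,
                                     src, &srcConsumed,
                                     dictBuf, dictBufSize, NULL);
}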