Bug Summary

File: /var/lib/jenkins/workspace/firefox-scan-build/mozglue/static/lz4/lz4hc.c
Warning: line 1753, column 9
Null pointer passed as 1st argument to memory copy function
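The warning means the analyzer found a path on which the first (destination) argument of a memcpy-family call can be NULL. Passing NULL there is undefined behavior even when the copy length is zero. A minimal sketch of the flagged shape and its usual fix (illustrative only, not the actual lz4hc.c code path):

#include <string.h>
#include <stddef.h>

/* Flagged shape: on some path dst can be NULL when this call is reached. */
void emit(char* dst, const char* src, size_t n)
{
    if (dst == NULL) return;   /* explicit guard; the analyzer can now prove dst != NULL */
    memcpy(dst, src, n);
}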

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name lz4hc.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=all -relaxed-aliasing -ffp-contract=off -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fdebug-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozglue/static -fcoverage-compilation-dir=/var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozglue/static -resource-dir /usr/lib/llvm-19/lib/clang/19 -include /var/lib/jenkins/workspace/firefox-scan-build/config/gcc_hidden.h -include /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozilla-config.h -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/system_wrappers -U _FORTIFY_SOURCE -D _FORTIFY_SOURCE=2 -D DEBUG=1 -D LZ4LIB_VISIBILITY= -D LZ4FLIB_VISIBILITY= -D EXPORT_JS_API -D MOZ_HAS_MOZGLUE -D MOZ_SUPPORT_LEAKCHECKING -I /var/lib/jenkins/workspace/firefox-scan-build/mozglue/static -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/mozglue/static -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nspr -I /var/lib/jenkins/workspace/firefox-scan-build/obj-x86_64-pc-linux-gnu/dist/include/nss -D MOZILLA_CLIENT -internal-isystem /usr/lib/llvm-19/lib/clang/19/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/14/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-error=tautological-type-limit-compare -Wno-range-loop-analysis -Wno-error=deprecated-declarations -Wno-error=array-bounds -Wno-error=free-nonheap-object -Wno-error=atomic-alignment -Wno-error=deprecated-builtins -Wno-psabi -Wno-error=builtin-macro-redefined -Wno-unknown-warning-option -std=gnu99 -ferror-limit 19 -fstrict-flex-arrays=1 -stack-protector 2 -fstack-clash-protection -ftrivial-auto-var-init=pattern -fgnuc-version=4.2.1 -fskip-odr-check-in-gmf -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2024-09-22-115206-3586786-1 -x c /var/lib/jenkins/workspace/firefox-scan-build/mozglue/static/lz4/lz4hc.c
1 /*
2 LZ4 HC - High Compression Mode of LZ4
3 Copyright (C) 2011-2020, Yann Collet.
4
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10
11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the
16 distribution.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 You can contact the author at :
31 - LZ4 source repository : https://github.com/lz4/lz4
32 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
33 */
34 /* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
35
36
37 /* *************************************
38 *  Tuning Parameter
39 ***************************************/
40
41 /*! HEAPMODE :
42  *  Select how stateless HC compression functions like `LZ4_compress_HC()`
43  *  allocate memory for their workspace:
44  *  in stack (0:fastest), or in heap (1:default, requires malloc()).
45  *  Since workspace is rather large, heap mode is recommended.
46 **/
47 #ifndef LZ4HC_HEAPMODE
48 #  define LZ4HC_HEAPMODE 1
49 #endif
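For context, a minimal sketch (assumed usage, not part of the scanned file) of how such a heap-mode toggle is typically consumed. `LZ4_streamHC_t` and `LZ4_compress_HC_extStateHC()` are the real lz4hc.h workspace type and entry point; `compressHC_once` is a hypothetical wrapper:

#include <stdlib.h>
#include "lz4hc.h"

static int compressHC_once(const char* src, char* dst, int srcSize, int dstCap, int level)
{
#if LZ4HC_HEAPMODE == 1
    LZ4_streamHC_t* const state = (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t));
    if (state == NULL) return 0;        /* heap mode can fail allocation */
#else
    LZ4_streamHC_t stackState;          /* roughly a quarter MB: large for a stack frame */
    LZ4_streamHC_t* const state = &stackState;
#endif
    {   int const r = LZ4_compress_HC_extStateHC(state, src, dst, srcSize, dstCap, level);
#if LZ4HC_HEAPMODE == 1
        free(state);
#endif
        return r;
    }
}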
50
51
52 /*=== Dependency ===*/
53 #define LZ4_HC_STATIC_LINKING_ONLY
54 #include "lz4hc.h"
55 #include <limits.h>
56
57
58 /*=== Shared lz4.c code ===*/
59 #ifndef LZ4_SRC_INCLUDED
60 #  if defined(__GNUC__)
61 #    pragma GCC diagnostic ignored "-Wunused-function"
62 #  endif
63 #  if defined (__clang__)
64 #    pragma clang diagnostic ignored "-Wunused-function"
65 #  endif
66 #  define LZ4_COMMONDEFS_ONLY
67 #  include "lz4.c"   /* LZ4_count, constants, mem */
68 #endif
69
70
71 /*=== Enums ===*/
72 typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
73
74
75 /*=== Constants ===*/
76 #define OPTIMAL_ML   (int)((ML_MASK-1)+MINMATCH)
77 #define LZ4_OPT_NUM  (1<<12)
78
79
80 /*=== Macros ===*/
81 #define MIN(a,b)   ( (a) < (b) ? (a) : (b) )
82 #define MAX(a,b)   ( (a) > (b) ? (a) : (b) )
83
84
85 /*=== Levels definition ===*/
86 typedef enum { lz4mid, lz4hc, lz4opt } lz4hc_strat_e;
87 typedef struct {
88     lz4hc_strat_e strat;
89     int nbSearches;
90     U32 targetLength;
91 } cParams_t;
92 static const cParams_t k_clTable[LZ4HC_CLEVEL_MAX+1] = {
93 { lz4mid, 2, 16 }, /* 0, unused */
94 { lz4mid, 2, 16 }, /* 1, unused */
95 { lz4mid, 2, 16 }, /* 2 */
96 { lz4hc, 4, 16 }, /* 3 */
97 { lz4hc, 8, 16 }, /* 4 */
98 { lz4hc, 16, 16 }, /* 5 */
99 { lz4hc, 32, 16 }, /* 6 */
100 { lz4hc, 64, 16 }, /* 7 */
101 { lz4hc, 128, 16 }, /* 8 */
102 { lz4hc, 256, 16 }, /* 9 */
103 { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
104 { lz4opt, 512,128 }, /*11 */
105     { lz4opt,16384, LZ4_OPT_NUM },  /* 12==LZ4HC_CLEVEL_MAX */
106 };
107
108 static cParams_t LZ4HC_getCLevelParams(int cLevel)
109 {
110     /* note : clevel convention is a bit different from lz4frame,
111      * possibly something worth revisiting for consistency */
112     if (cLevel < 1)
113         cLevel = LZ4HC_CLEVEL_DEFAULT;
114     cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
115     return k_clTable[cLevel];
116 }
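Usage sketch, following the table above (levels below 1 fall back to the default level 9; levels above 12 are clamped):

cParams_t const p7 = LZ4HC_getCLevelParams(7);   /* { lz4hc, 64, 16 } */
cParams_t const p0 = LZ4HC_getCLevelParams(0);   /* default 9: { lz4hc, 256, 16 } */
cParams_t const pX = LZ4HC_getCLevelParams(99);  /* clamped to 12: { lz4opt, 16384, LZ4_OPT_NUM } */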
117
118
119 /*=== Hashing ===*/
120 #define LZ4HC_HASHSIZE 4
121 #define HASH_FUNCTION(i)  (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
122 static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
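Worked example of the hash, with the constants as expanded above (MINMATCH == 4, LZ4HC_HASH_LOG == 15): the 4 input bytes are multiplied by the Knuth constant 2654435761 and the top 15 bits of the product select one of 32768 buckets.

U32 const v = LZ4_read32("abcd");                /* 0x64636261 on a little-endian target */
U32 const h = (v * 2654435761U) >> ((4*8) - 15); /* bucket in [0, 32767] */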
123
124 #if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
125 /* lie to the compiler about data alignment; use with caution */
126 static U64 LZ4_read64(const void* memPtr) { return *(const U64*) memPtr; }
127
128 #elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
129 /* __pack instructions are safer, but compiler specific */
130 LZ4_PACK(typedef struct { U64 u64; }) LZ4_unalign64;
131 static U64 LZ4_read64(const void* ptr) { return ((const LZ4_unalign64*)ptr)->u64; }
132
133 #else  /* safe and portable access using memcpy() */
134 static U64 LZ4_read64(const void* memPtr)
135 {
136     U64 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
137 }
138
139 #endif  /* LZ4_FORCE_MEMORY_ACCESS */
140
141 #define LZ4MID_HASHSIZE 8
142 #define LZ4MID_HASHLOG (LZ4HC_HASH_LOG-1)
143 #define LZ4MID_HASHTABLESIZE (1 << LZ4MID_HASHLOG)
144
145 static U32 LZ4MID_hash4(U32 v) { return (v * 2654435761U) >> (32-LZ4MID_HASHLOG); }
146 static U32 LZ4MID_hash4Ptr(const void* ptr) { return LZ4MID_hash4(LZ4_read32(ptr)); }
147 /* note: hash7 hashes the lower 56-bits.
148  * It presumes input was read using little endian. */
149 static U32 LZ4MID_hash7(U64 v) { return (U32)(((v << (64-56)) * 58295818150454627ULL) >> (64-LZ4MID_HASHLOG)) ; }
150 static U64 LZ4_readLE64(const void* memPtr);
151 static U32 LZ4MID_hash8Ptr(const void* ptr) { return LZ4MID_hash7(LZ4_readLE64(ptr)); }
152
153 static U64 LZ4_readLE64(const void* memPtr)
154 {
155 if (LZ4_isLittleEndian()) {
156 return LZ4_read64(memPtr);
157 } else {
158 const BYTE* p = (const BYTE*)memPtr;
159 /* note: relies on the compiler to simplify this expression */
160 return (U64)p[0] | ((U64)p[1]<<8) | ((U64)p[2]<<16) | ((U64)p[3]<<24)
161 | ((U64)p[4]<<32) | ((U64)p[5]<<40) | ((U64)p[6]<<48) | ((U64)p[7]<<56);
162 }
163 }
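A note on LZ4MID_hash7 above: shifting the 64-bit value left by 8 first discards the most significant byte of the little-endian read (ip[7]), so only the low 56 bits, i.e. bytes ip[0..6], decide the bucket, even though hash8Ptr reads 8 bytes. Sketch, given some `const void* ptr` (LZ4MID_HASHLOG expands to 14 here):

U64 const v  = LZ4_readLE64(ptr);
U32 const h7 = (U32)(((v << 8) * 58295818150454627ULL) >> (64 - 14));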
164
165
166 /*=== Count match length ===*/
167 LZ4_FORCE_INLINE
168 unsigned LZ4HC_NbCommonBytes32(U32 val)
169 {
170     assert(val != 0);
171 if (LZ4_isLittleEndian()) {
172# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
173 unsigned long r;
174 _BitScanReverse(&r, val);
175 return (unsigned)((31 - r) >> 3);
176 # elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
177     ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
178     !defined(LZ4_FORCE_SW_BITCOUNT)
179 return (unsigned)__builtin_clz(val) >> 3;
180# else
181 val >>= 8;
182 val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
183 (val + 0x00FF0000)) >> 24;
184 return (unsigned)val ^ 3;
185# endif
186 } else {
187# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
188 unsigned long r;
189 _BitScanForward(&r, val);
190 return (unsigned)(r >> 3);
191 # elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
192     ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
193     !defined(LZ4_FORCE_SW_BITCOUNT)
194 return (unsigned)__builtin_ctz(val) >> 3;
195# else
196 const U32 m = 0x01010101;
197 return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
198# endif
199 }
200 }
201
202 /** LZ4HC_countBack() :
203  * @return : negative value, nb of common bytes before ip/match */
204 LZ4_FORCE_INLINE
205 int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
206                     const BYTE* const iMin, const BYTE* const mMin)
207 {
208     int back = 0;
209     int const min = (int)MAX(iMin - ip, mMin - match);
210     assert(min <= 0);
211     assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
212     assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
213
214 while ((back - min) > 3) {
215 U32 const v = LZ4_read32(ip + back - 4) ^ LZ4_read32(match + back - 4);
216 if (v) {
217 return (back - (int)LZ4HC_NbCommonBytes32(v));
218 } else back -= 4; /* 4-byte step */
219 }
220 /* check remainder if any */
221 while ( (back > min)
222 && (ip[back-1] == match[back-1]) )
223 back--;
224 return back;
225 }
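Usage sketch: the negative return widens a match backward. Assuming the function as defined above:

const BYTE buf[9] = "abcdabcd";
int const back = LZ4HC_countBack(buf+5, buf+1, buf, buf);
/* back == -1 : buf[4]=='a' equals buf[0]=='a', then mMin stops the scan.
 * Callers then apply ip += back; matchLength -= back; to extend the match. */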
226
227 /*=== Chain table updates ===*/
228 #define DELTANEXTU16(table, pos)  table[(U16)(pos)]  /* faster */
229 /* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
230 #define UPDATABLE(ip, op, anchor)  &ip, &op, &anchor
231
232
233 /**************************************
234 *  Init
235 **************************************/
236 static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
237 {
238     MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
239     MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
240 }
241
242 static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
243 {
244     size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
245     size_t newStartingOffset = bufferSize + hc4->dictLimit;
246     DEBUGLOG(5, "LZ4HC_init_internal");
247     assert(newStartingOffset >= bufferSize);  /* check overflow */
248     if (newStartingOffset > 1 GB) {
249         LZ4HC_clearTables(hc4);
250         newStartingOffset = 0;
251     }
252     newStartingOffset += 64 KB;
253 hc4->nextToUpdate = (U32)newStartingOffset;
254 hc4->prefixStart = start;
255 hc4->end = start;
256 hc4->dictStart = start;
257 hc4->dictLimit = (U32)newStartingOffset;
258 hc4->lowLimit = (U32)newStartingOffset;
259 }
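A worked note on the 64 KB bump above (an inference from this code, not a quoted comment): with a starting offset of at least 64 KB, the first real position has index >= 65536, so `ipIndex - LZ4_DISTANCE_MAX` stays above 0 and a freshly zeroed hashTable entry (index 0) always fails the distance check instead of being taken for a live match:

U32 const firstIndex = 64 KB;               /* 65536, per the init above */
assert(firstIndex - LZ4_DISTANCE_MAX > 0);  /* 65536 - 65535 == 1 */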
260
261
262 /**************************************
263 *  Encode
264 **************************************/
265 /* LZ4HC_encodeSequence() :
266  * @return : 0 if ok,
267  *           1 if buffer issue detected */
268 LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
269 const BYTE** _ip,
270 BYTE** _op,
271 const BYTE** _anchor,
272 int matchLength,
273 int offset,
274 limitedOutput_directive limit,
275 BYTE* oend)
276 {
277 #define ip      (*_ip)
278 #define op      (*_op)
279 #define anchor  (*_anchor)
280
281 size_t length;
282 BYTE* const token = op++;
283
284 #if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
285     static const BYTE* start = NULL;
286     static U32 totalCost = 0;
287     U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
288     U32 const ll = (U32)(ip - anchor);
289     U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
290     U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
291     U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
292     if (start==NULL) start = anchor;  /* only works for single segment */
293     /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
294     DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5i, cost:%4u + %5u",
295                 pos,
296                 (U32)(ip - anchor), matchLength, offset,
297                 cost, totalCost);
298     totalCost += cost;
299 #endif
300
301 /* Encode Literal length */
302 length = (size_t)(ip - anchor);
303     LZ4_STATIC_ASSERT(notLimited == 0);
304     /* Check output limit */
305     if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
306         DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
307                 (int)length, (int)(oend - op));
308         return 1;
309     }
310     if (length >= RUN_MASK) {
311         size_t len = length - RUN_MASK;
312         *token = (RUN_MASK << ML_BITS);
313         for(; len >= 255 ; len -= 255) *op++ = 255;
314         *op++ = (BYTE)len;
315     } else {
316         *token = (BYTE)(length << ML_BITS);
317     }
318
319 /* Copy Literals */
320 LZ4_wildCopy8(op, anchor, op + length);
321 op += length;
322
323 /* Encode Offset */
324     assert(offset <= LZ4_DISTANCE_MAX);
325     assert(offset > 0);
326 LZ4_writeLE16(op, (U16)(offset)); op += 2;
327
328 /* Encode MatchLength */
329     assert(matchLength >= MINMATCH);
330     length = (size_t)matchLength - MINMATCH;
331     if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
332         DEBUGLOG(6, "Not enough room to write match length");
333         return 1;  /* Check output limit */
334     }
335     if (length >= ML_MASK) {
336         *token += ML_MASK;
337         length -= ML_MASK;
338 for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
339 if (length >= 255) { length -= 255; *op++ = 255; }
340 *op++ = (BYTE)length;
341 } else {
342 *token += (BYTE)(length);
343 }
344
345 /* Prepare next loop */
346 ip += matchLength;
347 anchor = ip;
348
349 return 0;
350
351 #undef ip
352 #undef op
353 #undef anchor
354 }
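Worked example of the byte layout this function emits, with ML_BITS == 4 and MINMATCH == 4 as expanded above. Five literals followed by a 6-byte match at offset 32 become:

BYTE const token = (BYTE)((5 << 4) | (6 - 4));   /* 0x52 */
/* stream: 0x52, the 5 literal bytes, then offset 32 as LE16 (0x20 0x00).
 * A field value of 15 in either nibble would spill into extra 255-bytes,
 * which is exactly what the RUN_MASK / ML_MASK branches above handle. */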
355
356
357 typedef struct {
358     int off;
359     int len;
360     int back;  /* negative value */
361 } LZ4HC_match_t;
362
363 LZ4HC_match_t LZ4HC_searchExtDict(const BYTE* ip, U32 ipIndex,
364                 const BYTE* const iLowLimit, const BYTE* const iHighLimit,
365                 const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex,
366                 int currentBestML, int nbAttempts)
367 {
368 size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
369 U32 lDictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
370 U32 matchIndex = lDictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
371 int offset = 0, sBack = 0;
372     assert(lDictEndIndex <= 1 GB);
373     if (lDictMatchIndex>0)
374         DEBUGLOG(7, "lDictEndIndex = %zu, lDictMatchIndex = %u", lDictEndIndex, lDictMatchIndex);
375     while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
376 const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + lDictMatchIndex;
377
378 if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
379 int mlt;
380 int back = 0;
381 const BYTE* vLimit = ip + (lDictEndIndex - lDictMatchIndex);
382 if (vLimit > iHighLimit) vLimit = iHighLimit;
383             mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
384 back = (ip > iLowLimit) ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
385 mlt -= back;
386 if (mlt > currentBestML) {
387 currentBestML = mlt;
388 offset = (int)(ipIndex - matchIndex);
389 sBack = back;
390                 DEBUGLOG(7, "found match of length %i within extDictCtx", currentBestML);
391 } }
392
393         { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, lDictMatchIndex);
394 lDictMatchIndex -= nextOffset;
395 matchIndex -= nextOffset;
396 } }
397
398 { LZ4HC_match_t md;
399 md.len = currentBestML;
400 md.off = offset;
401 md.back = sBack;
402 return md;
403 }
404 }
405
406 typedef LZ4HC_match_t (*LZ4MID_searchIntoDict_f)(const BYTE* ip, U32 ipIndex,
407 const BYTE* const iHighLimit,
408 const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex);
409
410 static LZ4HC_match_t LZ4MID_searchHCDict(const BYTE* ip, U32 ipIndex,
411                 const BYTE* const iHighLimit,
412                 const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
413 {
414     return LZ4HC_searchExtDict(ip,ipIndex,
415                 ip, iHighLimit,
416                 dictCtx, gDictEndIndex,
417                 MINMATCH-1, 2);
418 }
419
420 static LZ4HC_match_t LZ4MID_searchExtDict(const BYTE* ip, U32 ipIndex,
421                 const BYTE* const iHighLimit,
422                 const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
423 {
424 size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
425 const U32* const hash4Table = dictCtx->hashTable;
426     const U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
427     DEBUGLOG(7, "LZ4MID_searchExtDict (ipIdx=%u)", ipIndex);
428
429 /* search long match first */
430 { U32 l8DictMatchIndex = hash8Table[LZ4MID_hash8Ptr(ip)];
431 U32 m8Index = l8DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
432         assert(lDictEndIndex <= 1 GB);
433         if (ipIndex - m8Index <= LZ4_DISTANCE_MAX) {
434             const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l8DictMatchIndex;
435             const size_t safeLen = MIN(lDictEndIndex - l8DictMatchIndex, (size_t)(iHighLimit - ip));
436             int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
437             if (mlt >= MINMATCH) {
438                 LZ4HC_match_t md;
439                 DEBUGLOG(7, "Found long ExtDict match of len=%u", mlt);
440 md.len = mlt;
441 md.off = (int)(ipIndex - m8Index);
442 md.back = 0;
443 return md;
444 }
445 }
446 }
447
448 /* search for short match second */
449 { U32 l4DictMatchIndex = hash4Table[LZ4MID_hash4Ptr(ip)];
450 U32 m4Index = l4DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
451         if (ipIndex - m4Index <= LZ4_DISTANCE_MAX) {
452             const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l4DictMatchIndex;
453             const size_t safeLen = MIN(lDictEndIndex - l4DictMatchIndex, (size_t)(iHighLimit - ip));
454             int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
455             if (mlt >= MINMATCH) {
456                 LZ4HC_match_t md;
457                 DEBUGLOG(7, "Found short ExtDict match of len=%u", mlt);
458 md.len = mlt;
459 md.off = (int)(ipIndex - m4Index);
460 md.back = 0;
461 return md;
462 }
463 }
464 }
465
466 /* nothing found */
467 { LZ4HC_match_t const md = {0, 0, 0 };
468 return md;
469 }
470 }
471
472 /**************************************
473 *  Mid Compression (level 2)
474 **************************************/
475
476 LZ4_FORCE_INLINE void
477 LZ4MID_addPosition(U32* hTable, U32 hValue, U32 index)
478 {
479     hTable[hValue] = index;
480 }
481
482 #define ADDPOS8(_p, _idx)  LZ4MID_addPosition(hash8Table, LZ4MID_hash8Ptr(_p), _idx)
483 #define ADDPOS4(_p, _idx)  LZ4MID_addPosition(hash4Table, LZ4MID_hash4Ptr(_p), _idx)
484
485 /* Fill hash tables with references into dictionary.
486  * The resulting table is only exploitable by LZ4MID (level 2) */
487 static void
488 LZ4MID_fillHTable (LZ4HC_CCtx_internal* cctx, const void* dict, size_t size)
489 {
490     U32* const hash4Table = cctx->hashTable;
491     U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
492 const BYTE* const prefixPtr = (const BYTE*)dict;
493 U32 const prefixIdx = cctx->dictLimit;
494     U32 const target = prefixIdx + (U32)size - LZ4MID_HASHSIZE;
495     U32 idx = cctx->nextToUpdate;
496     assert(dict == cctx->prefixStart);
497     DEBUGLOG(4, "LZ4MID_fillHTable (size:%zu)", size);
498     if (size <= LZ4MID_HASHSIZE)
499 return;
500
501     for (; idx < target; idx += 3) {
502         ADDPOS4(prefixPtr+idx-prefixIdx, idx);
503         ADDPOS8(prefixPtr+idx+1-prefixIdx, idx+1);
504     }
505
506     idx = (size > 32 KB + LZ4MID_HASHSIZE) ? target - 32 KB : cctx->nextToUpdate;
507     for (; idx < target; idx += 1) {
508         ADDPOS8(prefixPtr+idx-prefixIdx, idx);
509     }
510
511 cctx->nextToUpdate = target;
512 }
513
514 static LZ4MID_searchIntoDict_f select_searchDict_function(const LZ4HC_CCtx_internal* dictCtx)
515 {
516     if (dictCtx == NULL) return NULL;
517 if (LZ4HC_getCLevelParams(dictCtx->compressionLevel).strat == lz4mid)
518 return LZ4MID_searchExtDict;
519 return LZ4MID_searchHCDict;
520 }
521
522 static int LZ4MID_compress (
523     LZ4HC_CCtx_internal* const ctx,
524     const char* const src,
525     char* const dst,
526     int* srcSizePtr,
527     int const maxOutputSize,
528     const limitedOutput_directive limit,
529     const dictCtx_directive dict
530     )
531 {
532 U32* const hash4Table = ctx->hashTable;
533 U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE(1 << (15 -1));
534 const BYTE* ip = (const BYTE*)src;
535 const BYTE* anchor = ip;
536 const BYTE* const iend = ip + *srcSizePtr;
537     const BYTE* const mflimit = iend - MFLIMIT;
538     const BYTE* const matchlimit = (iend - LASTLITERALS);
539     const BYTE* const ilimit = (iend - LZ4MID_HASHSIZE);
540 BYTE* op = (BYTE*)dst;
541 BYTE* oend = op + maxOutputSize;
542
543 const BYTE* const prefixPtr = ctx->prefixStart;
544 const U32 prefixIdx = ctx->dictLimit;
545 const U32 ilimitIdx = (U32)(ilimit - prefixPtr) + prefixIdx;
546 const BYTE* const dictStart = ctx->dictStart;
547 const U32 dictIdx = ctx->lowLimit;
548 const U32 gDictEndIndex = ctx->lowLimit;
549     const LZ4MID_searchIntoDict_f searchIntoDict = (dict == usingDictCtxHc) ? select_searchDict_function(ctx->dictCtx) : NULL;
550 unsigned matchLength;
551 unsigned matchDistance;
552
553 /* input sanitization */
554     DEBUGLOG(5, "LZ4MID_compress (%i bytes)", *srcSizePtr);
555     if (dict == usingDictCtxHc) DEBUGLOG(5, "usingDictCtxHc");
556     assert(*srcSizePtr >= 0);
557     if (*srcSizePtr) assert(src != NULL);
558     if (maxOutputSize) assert(dst != NULL);
559     if (*srcSizePtr < 0) return 0;  /* invalid */
560     if (maxOutputSize < 0) return 0;  /* invalid */
561     if (*srcSizePtr > LZ4_MAX_INPUT_SIZE) {
562 /* forbidden: no input is allowed to be that large */
563 return 0;
564 }
565     if (limit == fillOutput) oend -= LASTLITERALS;  /* Hack to support the LZ4 format restriction */
566 if (*srcSizePtr < LZ4_minLength)
567 goto _lz4mid_last_literals; /* Input too small, no compression (all literals) */
568
569 /* main loop */
570 while (ip <= mflimit) {
571 const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
572 /* search long match */
573 { U32 const h8 = LZ4MID_hash8Ptr(ip);
574 U32 const pos8 = hash8Table[h8];
575             assert(h8 < LZ4MID_HASHTABLESIZE);
576             assert(pos8 < ipIndex);
577             LZ4MID_addPosition(hash8Table, h8, ipIndex);
578             if (ipIndex - pos8 <= LZ4_DISTANCE_MAX) {
579                 /* match candidate found */
580                 if (pos8 >= prefixIdx) {
581                     const BYTE* const matchPtr = prefixPtr + pos8 - prefixIdx;
582                     assert(matchPtr < ip);
583                     matchLength = LZ4_count(ip, matchPtr, matchlimit);
584                     if (matchLength >= MINMATCH) {
585                         DEBUGLOG(7, "found long match at pos %u (len=%u)", pos8, matchLength);
586 matchDistance = ipIndex - pos8;
587 goto _lz4mid_encode_sequence;
588 }
589 } else {
590 if (pos8 >= dictIdx) {
591 /* extDict match candidate */
592 const BYTE* const matchPtr = dictStart + (pos8 - dictIdx);
593                     const size_t safeLen = MIN(prefixIdx - pos8, (size_t)(matchlimit - ip));
594                     matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
595                     if (matchLength >= MINMATCH) {
596                         DEBUGLOG(7, "found long match at ExtDict pos %u (len=%u)", pos8, matchLength);
597 matchDistance = ipIndex - pos8;
598 goto _lz4mid_encode_sequence;
599 }
600 }
601 }
602 } }
603 /* search short match */
604 { U32 const h4 = LZ4MID_hash4Ptr(ip);
605 U32 const pos4 = hash4Table[h4];
606             assert(h4 < LZ4MID_HASHTABLESIZE);
607             assert(pos4 < ipIndex);
608             LZ4MID_addPosition(hash4Table, h4, ipIndex);
609             if (ipIndex - pos4 <= LZ4_DISTANCE_MAX) {
610 /* match candidate found */
611 if (pos4 >= prefixIdx) {
612 /* only search within prefix */
613 const BYTE* const matchPtr = prefixPtr + (pos4 - prefixIdx);
614                     assert(matchPtr < ip);
615                     assert(matchPtr >= prefixPtr);
616                     matchLength = LZ4_count(ip, matchPtr, matchlimit);
617                     if (matchLength >= MINMATCH) {
618 /* short match found, let's just check ip+1 for longer */
619 U32 const h8 = LZ4MID_hash8Ptr(ip+1);
620 U32 const pos8 = hash8Table[h8];
621 U32 const m2Distance = ipIndex + 1 - pos8;
622 matchDistance = ipIndex - pos4;
623                         if ( m2Distance <= LZ4_DISTANCE_MAX
624                           && pos8 >= prefixIdx  /* only search within prefix */
625                           && likely(ip < mflimit)
626                            ) {
627 const BYTE* const m2Ptr = prefixPtr + (pos8 - prefixIdx);
628 unsigned ml2 = LZ4_count(ip+1, m2Ptr, matchlimit);
629 if (ml2 > matchLength) {
630 LZ4MID_addPosition(hash8Table, h8, ipIndex+1);
631 ip++;
632 matchLength = ml2;
633 matchDistance = m2Distance;
634 } }
635 goto _lz4mid_encode_sequence;
636 }
637 } else {
638 if (pos4 >= dictIdx) {
639 /* extDict match candidate */
640 const BYTE* const matchPtr = dictStart + (pos4 - dictIdx);
641                     const size_t safeLen = MIN(prefixIdx - pos4, (size_t)(matchlimit - ip));
642                     matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
643                     if (matchLength >= MINMATCH) {
644                         DEBUGLOG(7, "found match at ExtDict pos %u (len=%u)", pos4, matchLength);
645 matchDistance = ipIndex - pos4;
646 goto _lz4mid_encode_sequence;
647 }
648 }
649 }
650 } }
651 /* no match found in prefix */
652 if ( (dict == usingDictCtxHc)
653           && (ipIndex - gDictEndIndex < LZ4_DISTANCE_MAX - 8) ) {
654 /* search a match into external dictionary */
655 LZ4HC_match_t dMatch = searchIntoDict(ip, ipIndex,
656 matchlimit,
657 ctx->dictCtx, gDictEndIndex);
658             if (dMatch.len >= MINMATCH) {
659                 DEBUGLOG(7, "found Dictionary match (offset=%i)", dMatch.off);
660                 assert(dMatch.back == 0);
661 matchLength = (unsigned)dMatch.len;
662 matchDistance = (unsigned)dMatch.off;
663 goto _lz4mid_encode_sequence;
664 }
665 }
666 /* no match found */
667 ip += 1 + ((ip-anchor) >> 9); /* skip faster over incompressible data */
668 continue;
669
670 _lz4mid_encode_sequence:
671 /* catch back */
672         while (((ip > anchor) & ((U32)(ip-prefixPtr) > matchDistance)) && (unlikely(ip[-1] == ip[-(int)matchDistance-1]))) {
673             ip--; matchLength++;
674         };
675
676 /* fill table with beginning of match */
677         ADDPOS8(ip+1, ipIndex+1);
678         ADDPOS8(ip+2, ipIndex+2);
679         ADDPOS4(ip+1, ipIndex+1);
680
681 /* encode */
682 { BYTE* const saved_op = op;
683 /* LZ4HC_encodeSequence always updates @op; on success, it updates @ip and @anchor */
684             if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
685                     (int)matchLength, (int)matchDistance,
686                     limit, oend) ) {
687 op = saved_op; /* restore @op value before failed LZ4HC_encodeSequence */
688 goto _lz4mid_dest_overflow;
689 }
690 }
691
692 /* fill table with end of match */
693 { U32 endMatchIdx = (U32)(ip-prefixPtr) + prefixIdx;
694 U32 pos_m2 = endMatchIdx - 2;
695 if (pos_m2 < ilimitIdx) {
696             if (likely(ip - prefixPtr > 5)) {
697                 ADDPOS8(ip-5, endMatchIdx - 5);
698             }
699             ADDPOS8(ip-3, endMatchIdx - 3);
700             ADDPOS8(ip-2, endMatchIdx - 2);
701             ADDPOS4(ip-2, endMatchIdx - 2);
702             ADDPOS4(ip-1, endMatchIdx - 1);
703         }
704 }
705 }
706
707 _lz4mid_last_literals:
708 /* Encode Last Literals */
709 { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
710         size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
711         size_t const totalSize = 1 + llAdd + lastRunSize;
712         if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
713         if (limit && (op + totalSize > oend)) {
714             if (limit == limitedOutput) return 0;  /* not enough space in @dst */
715             /* adapt lastRunSize to fill 'dest' */
716             lastRunSize = (size_t)(oend - op) - 1 /*token*/;
717             llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
718             lastRunSize -= llAdd;
719         }
720         DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
721         ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */
722
723         if (lastRunSize >= RUN_MASK) {
724             size_t accumulator = lastRunSize - RUN_MASK;
725             *op++ = (RUN_MASK << ML_BITS);
726             for(; accumulator >= 255 ; accumulator -= 255)
727                 *op++ = 255;
728             *op++ = (BYTE) accumulator;
729         } else {
730             *op++ = (BYTE)(lastRunSize << ML_BITS);
731         }
732         assert(lastRunSize <= (size_t)(oend - op));
733         LZ4_memcpy(op, anchor, lastRunSize);
734 op += lastRunSize;
735 }
736
737 /* End */
738     DEBUGLOG(5, "compressed %i bytes into %i bytes", *srcSizePtr, (int)((char*)op - dst));
739     assert(ip >= (const BYTE*)src);
740     assert(ip <= iend);
741     *srcSizePtr = (int)(ip - (const BYTE*)src);
742     assert((char*)op >= dst);
743     assert(op <= oend);
744     assert((char*)op - dst < INT_MAX);
745 return (int)((char*)op - dst);
746
747 _lz4mid_dest_overflow:
748 if (limit == fillOutput) {
749 /* Assumption : @ip, @anchor, @optr and @matchLength must be set correctly */
750 size_t const ll = (size_t)(ip - anchor);
751 size_t const ll_addbytes = (ll + 240) / 255;
752 size_t const ll_totalCost = 1 + ll_addbytes + ll;
753 BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
754         DEBUGLOG(6, "Last sequence is overflowing : %u literals, %u remaining space",
755                 (unsigned)ll, (unsigned)(oend-op));
756         if (op + ll_totalCost <= maxLitPos) {
757             /* ll validated; now adjust match length */
758             size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
759             size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
760             assert(maxMlSize < INT_MAX);
761             if ((size_t)matchLength > maxMlSize) matchLength = (unsigned)maxMlSize;
762             if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + matchLength >= MFLIMIT) {
763                 DEBUGLOG(6, "Let's encode a last sequence (ll=%u, ml=%u)", (unsigned)ll, matchLength);
764                 LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
765                         (int)matchLength, (int)matchDistance,
766                         notLimited, oend);
767         }   }
768         DEBUGLOG(6, "Let's finish with a run of literals (%u bytes left)", (unsigned)(oend-op));
769 goto _lz4mid_last_literals;
770 }
771 /* compression failed */
772 return 0;
773 }
774
775
776 /**************************************
777 *  HC Compression - Search
778 **************************************/
779
780 /* Update chains up to ip (excluded) */
781 LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
782 {
783 U16* const chainTable = hc4->chainTable;
784 U32* const hashTable = hc4->hashTable;
785 const BYTE* const prefixPtr = hc4->prefixStart;
786 U32 const prefixIdx = hc4->dictLimit;
787 U32 const target = (U32)(ip - prefixPtr) + prefixIdx;
788 U32 idx = hc4->nextToUpdate;
789     assert(ip >= prefixPtr);
790     assert(target >= prefixIdx);
791
792 while (idx < target) {
793 U32 const h = LZ4HC_hashPtr(prefixPtr+idx-prefixIdx);
794 size_t delta = idx - hashTable[h];
795         if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
796         DELTANEXTU16(chainTable, idx) = (U16)delta;
797 hashTable[h] = idx;
798 idx++;
799 }
800
801 hc4->nextToUpdate = target;
802 }
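Worked example of the two tables maintained here: suppose positions 100 and 228 hash to the same bucket h. After LZ4HC_Insert() has passed both, `hashTable[h] == 228` (the newest position) and `chainTable[(U16)228] == 128` (the delta back to the previous occurrence). Deltas larger than LZ4_DISTANCE_MAX (65535) are clamped, which effectively ends the chain:

/* given some bucket h into hashTable: */
U32 matchIndex = hashTable[h];                       /* 228: newest entry */
matchIndex -= DELTANEXTU16(chainTable, matchIndex);  /* 228 - 128 == 100 */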
803
804 #if defined(_MSC_VER)
805 #  define LZ4HC_rotl32(x,r)  _rotl(x,r)
806 #else
807 #  define LZ4HC_rotl32(x,r)  ((x << r) | (x >> (32 - r)))
808 #endif
809
810
811 static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
812 {
813     size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
814     if (bitsToRotate == 0) return pattern;
815     return LZ4HC_rotl32(pattern, (int)bitsToRotate);
816 }
817
818 /* LZ4HC_countPattern() :
819  * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
820 static unsigned
821 LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
822 {
823 const BYTE* const iStart = ip;
824 reg_t const pattern = (sizeof(pattern)==8) ?
825 (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
826
827     while (likely(ip < iEnd-(sizeof(pattern)-1))) {
828 reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
829 if (!diff) { ip+=sizeof(pattern); continue; }
830 ip += LZ4_NbCommonBytes(diff);
831 return (unsigned)(ip - iStart);
832 }
833
834 if (LZ4_isLittleEndian()) {
835 reg_t patternByte = pattern;
836 while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
837 ip++; patternByte >>= 8;
838 }
839 } else { /* big endian */
840 U32 bitOffset = (sizeof(pattern)*8) - 8;
841 while (ip < iEnd) {
842 BYTE const byte = (BYTE)(pattern >> bitOffset);
843 if (*ip != byte) break;
844 ip ++; bitOffset -= 8;
845 } }
846
847 return (unsigned)(ip - iStart);
848 }
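Usage sketch, assuming the function as defined above (a period-1 pattern):

const BYTE seq[] = "aaaaaab";
unsigned const n = LZ4HC_countPattern(seq, seq + 7, 0x61616161U /* "aaaa" */);
/* n == 6 : six 'a' bytes continue the replicated pattern, 'b' stops it. */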
849
850 /* LZ4HC_reverseCountPattern() :
851  * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
852  * read using natural platform endianness */
853 static unsigned
854 LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
855 {
856 const BYTE* const iStart = ip;
857
858     while (likely(ip >= iLow+4)) {
859 if (LZ4_read32(ip-4) != pattern) break;
860 ip -= 4;
861 }
862 { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianness */
863         while (likely(ip>iLow)) {
864 if (ip[-1] != *bytePtr) break;
865 ip--; bytePtr--;
866 } }
867 return (unsigned)(iStart - ip);
868 }
869
870 /* LZ4HC_protectDictEnd() :
871  * Checks if the match is in the last 3 bytes of the dictionary, so that
872  * reading the 4-byte MINMATCH would run past the end.
873  * @returns true if the match index is okay.
874  */
875 static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
876 {
877     return ((U32)((dictLimit - 1) - matchIndex) >= 3);
878 }
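Worked example with dictLimit == 100:

/* LZ4HC_protectDictEnd(100, 96)  -> (99-96) == 3 : ok, a 4-byte read fits
 * LZ4HC_protectDictEnd(100, 97)  -> (99-97) == 2 : rejected, the read
 *                                   would cross the dictionary end
 * LZ4HC_protectDictEnd(100, 100) -> unsigned wrap-around : ok (prefix index) */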
879
880 typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
881 typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
882
883
884 LZ4_FORCE_INLINE LZ4HC_match_t
885 LZ4HC_InsertAndGetWiderMatch (
886 LZ4HC_CCtx_internal* const hc4,
887 const BYTE* const ip,
888 const BYTE* const iLowLimit, const BYTE* const iHighLimit,
889 int longest,
890 const int maxNbAttempts,
891 const int patternAnalysis, const int chainSwap,
892 const dictCtx_directive dict,
893 const HCfavor_e favorDecSpeed)
894{
895 U16* const chainTable = hc4->chainTable;
896 U32* const hashTable = hc4->hashTable;
897 const LZ4HC_CCtx_internal* const dictCtx = hc4->dictCtx;
898 const BYTE* const prefixPtr = hc4->prefixStart;
899 const U32 prefixIdx = hc4->dictLimit;
900 const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
901     const int withinStartDistance = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex);
902     const U32 lowestMatchIndex = (withinStartDistance) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
903 const BYTE* const dictStart = hc4->dictStart;
904 const U32 dictIdx = hc4->lowLimit;
905 const BYTE* const dictEnd = dictStart + prefixIdx - dictIdx;
906 int const lookBackLength = (int)(ip-iLowLimit);
907 int nbAttempts = maxNbAttempts;
908 U32 matchChainPos = 0;
909 U32 const pattern = LZ4_read32(ip);
910 U32 matchIndex;
911 repeat_state_e repeat = rep_untested;
912 size_t srcPatternLength = 0;
913 int offset = 0, sBack = 0;
914
915     DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
916 /* First Match */
917 LZ4HC_Insert(hc4, ip); /* insert all prior positions up to ip (excluded) */
918 matchIndex = hashTable[LZ4HC_hashPtr(ip)];
919     DEBUGLOG(7, "First candidate match for pos %u found at index %u / %u (lowestMatchIndex)",
920             ipIndex, matchIndex, lowestMatchIndex);
921
922 while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
923 int matchLength=0;
924 nbAttempts--;
925         assert(matchIndex < ipIndex);
926 if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
927 /* do nothing:
928 * favorDecSpeed intentionally skips matches with offset < 8 */
929 } else if (matchIndex >= prefixIdx) { /* within current Prefix */
930 const BYTE* const matchPtr = prefixPtr + (matchIndex - prefixIdx);
931             assert(matchPtr < ip);
932             assert(longest >= 1);
933 if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
934 if (LZ4_read32(matchPtr) == pattern) {
935 int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, prefixPtr) : 0;
936                     matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
937 matchLength -= back;
938 if (matchLength > longest) {
939 longest = matchLength;
940 offset = (int)(ipIndex - matchIndex);
941 sBack = back;
942                         DEBUGLOG(7, "Found match of len=%i within prefix, offset=%i, back=%i", longest, offset, -back);
943 } } }
944 } else { /* lowestMatchIndex <= matchIndex < dictLimit : within Ext Dict */
945 const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
946             assert(matchIndex >= dictIdx);
947             if ( likely(matchIndex <= prefixIdx - 4)
948               && (LZ4_read32(matchPtr) == pattern) ) {
949 int back = 0;
950 const BYTE* vLimit = ip + (prefixIdx - matchIndex);
951 if (vLimit > iHighLimit) vLimit = iHighLimit;
952                 matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
953 if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
954 matchLength += LZ4_count(ip+matchLength, prefixPtr, iHighLimit);
955 back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
956 matchLength -= back;
957 if (matchLength > longest) {
958 longest = matchLength;
959 offset = (int)(ipIndex - matchIndex);
960 sBack = back;
961                     DEBUGLOG(7, "Found match of len=%i within dict, offset=%i, back=%i", longest, offset, -back);
962 } } }
963
964 if (chainSwap && matchLength==longest) { /* better match => select a better chain */
965             assert(lookBackLength==0);  /* search forward only */
966 if (matchIndex + (U32)longest <= ipIndex) {
967 int const kTrigger = 4;
968 U32 distanceToNextMatch = 1;
969                 int const end = longest - MINMATCH + 1;
970 int step = 1;
971 int accel = 1 << kTrigger;
972 int pos;
973 for (pos = 0; pos < end; pos += step) {
974                     U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
975 step = (accel++ >> kTrigger);
976 if (candidateDist > distanceToNextMatch) {
977 distanceToNextMatch = candidateDist;
978 matchChainPos = (U32)pos;
979 accel = 1 << kTrigger;
980 } }
981 if (distanceToNextMatch > 1) {
982 if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
983 matchIndex -= distanceToNextMatch;
984 continue;
985 } } }
986
987         { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
988 if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
989 U32 const matchCandidateIdx = matchIndex-1;
990 /* may be a repeated pattern */
991 if (repeat == rep_untested) {
992 if ( ((pattern & 0xFFFF) == (pattern >> 16))
993 & ((pattern & 0xFF) == (pattern >> 24)) ) {
994                         DEBUGLOG(7, "Repeat pattern detected, char %02X", pattern >> 24);
995 repeat = rep_confirmed;
996 srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
997 } else {
998 repeat = rep_not;
999 } }
1000 if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
1001 && LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
1002 const int extDict = matchCandidateIdx < prefixIdx;
1003 const BYTE* const matchPtr = extDict ? dictStart + (matchCandidateIdx - dictIdx) : prefixPtr + (matchCandidateIdx - prefixIdx);
1004 if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
1005 const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
1006 size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
1007 if (extDict && matchPtr + forwardPatternLength == iLimit) {
1008 U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
1009 forwardPatternLength += LZ4HC_countPattern(prefixPtr, iHighLimit, rotatedPattern);
1010 }
1011 { const BYTE* const lowestMatchPtr = extDict ? dictStart : prefixPtr;
1012 size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
1013 size_t currentSegmentLength;
1014 if (!extDict
1015 && matchPtr - backLength == prefixPtr
1016 && dictIdx < prefixIdx) {
1017 U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
1018 backLength += LZ4HC_reverseCountPattern(dictEnd, dictStart, rotatedPattern);
1019 }
1020                     /* Limit backLength to not go further than lowestMatchIndex */
1021                     backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
1022                     assert(matchCandidateIdx - backLength >= lowestMatchIndex);
1023 currentSegmentLength = backLength + forwardPatternLength;
1024 /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
1025 if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
1026 && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
1027 U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
1028 if (LZ4HC_protectDictEnd(prefixIdx, newMatchIndex))
1029 matchIndex = newMatchIndex;
1030 else {
1031 /* Can only happen if started in the prefix */
1032                             assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
1033 matchIndex = prefixIdx;
1034 }
1035 } else {
1036 U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
1037 if (!LZ4HC_protectDictEnd(prefixIdx, newMatchIndex)) {
1038                             assert(newMatchIndex >= prefixIdx - 3 && newMatchIndex < prefixIdx && !extDict);
1039 matchIndex = prefixIdx;
1040 } else {
1041 matchIndex = newMatchIndex;
1042 if (lookBackLength==0) { /* no back possible */
1043                                 size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
1044 if ((size_t)longest < maxML) {
1045                                     assert(prefixPtr - prefixIdx + matchIndex != ip);
1046                                     if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break;
1047                                     assert(maxML < 2 GB);
1048 longest = (int)maxML;
1049 offset = (int)(ipIndex - matchIndex);
1050                                     assert(sBack == 0);
1051                                     DEBUGLOG(7, "Found repeat pattern match of len=%i, offset=%i", longest, offset);
1052 }
1053                                 { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
1054 if (distToNextPattern > matchIndex) break; /* avoid overflow */
1055 matchIndex -= distToNextPattern;
1056 } } } } }
1057 continue;
1058 } }
1059 } } /* PA optimization */
1060
1061 /* follow current chain */
1062         matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);
1063
1064 } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
1065
1066 if ( dict == usingDictCtxHc
1067 && nbAttempts > 0
1068 && withinStartDistance) {
1069 size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
1070 U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
1071         assert(dictEndOffset <= 1 GB);
1072         matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
1073         if (dictMatchIndex>0) DEBUGLOG(7, "dictEndOffset = %zu, dictMatchIndex = %u => relative matchIndex = %i", dictEndOffset, dictMatchIndex, (int)dictMatchIndex - (int)dictEndOffset);
1074         while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
1075 const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;
1076
1077 if (LZ4_read32(matchPtr) == pattern) {
1078 int mlt;
1079 int back = 0;
1080 const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
1081 if (vLimit > iHighLimit) vLimit = iHighLimit;
1082                 mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
1083 back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
1084 mlt -= back;
1085 if (mlt > longest) {
1086 longest = mlt;
1087 offset = (int)(ipIndex - matchIndex);
1088 sBack = back;
1089                     DEBUGLOG(7, "found match of length %i within extDictCtx", longest);
1090 } }
1091
1092             { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
1093 dictMatchIndex -= nextOffset;
1094 matchIndex -= nextOffset;
1095 } } }
1096
1097 { LZ4HC_match_t md;
1098         assert(longest >= 0);
1099 md.len = longest;
1100 md.off = offset;
1101 md.back = sBack;
1102 return md;
1103 }
1104 }
1105
1106 LZ4_FORCE_INLINE LZ4HC_match_t
1107 LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4,  /* Index table will be updated */
1108 const BYTE* const ip, const BYTE* const iLimit,
1109 const int maxNbAttempts,
1110 const int patternAnalysis,
1111 const dictCtx_directive dict)
1112 {
1113     DEBUGLOG(7, "LZ4HC_InsertAndFindBestMatch");
1114 /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
1115 * but this won't be the case here, as we define iLowLimit==ip,
1116 * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
1117     return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
1118 }
1119
1120
1121 LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
1122 LZ4HC_CCtx_internal* const ctx,
1123 const char* const source,
1124 char* const dest,
1125 int* srcSizePtr,
1126 int const maxOutputSize,
1127 int maxNbAttempts,
1128 const limitedOutput_directive limit,
1129 const dictCtx_directive dict
1130 )
1131 {
1132 const int inputSize = *srcSizePtr;
1133 const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
1134
1135 const BYTE* ip = (const BYTE*) source;
1136 const BYTE* anchor = ip;
1137 const BYTE* const iend = ip + inputSize;
1138     const BYTE* const mflimit = iend - MFLIMIT;
1139     const BYTE* const matchlimit = (iend - LASTLITERALS);
1140
1141 BYTE* optr = (BYTE*) dest;
1142 BYTE* op = (BYTE*) dest;
1143 BYTE* oend = op + maxOutputSize;
1144
1145 const BYTE* start0;
1146     const BYTE* start2 = NULL;
1147     const BYTE* start3 = NULL;
1148 LZ4HC_match_t m0, m1, m2, m3;
1149 const LZ4HC_match_t nomatch = {0, 0, 0};
1150
1151 /* init */
1152     DEBUGLOG(5, "LZ4HC_compress_hashChain (dict?=>%i)", dict);
1153 *srcSizePtr = 0;
1154     if (limit == fillOutput) oend -= LASTLITERALS;  /* Hack to support the LZ4 format restriction */
1155     if (inputSize < LZ4_minLength) goto _last_literals;  /* Input too small, no compression (all literals) */
1156
1157 /* Main Loop */
1158 while (ip <= mflimit) {
1159 m1 = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, maxNbAttempts, patternAnalysis, dict);
1160         if (m1.len<MINMATCH) { ip++; continue; }
1161
1162 /* saved, in case we would skip too much */
1163 start0 = ip; m0 = m1;
1164
1165 _Search2:
1166         DEBUGLOG(7, "_Search2 (currently found match of size %i)", m1.len);
1167 if (ip+m1.len <= mflimit) {
1168 start2 = ip + m1.len - 2;
1169 m2 = LZ4HC_InsertAndGetWiderMatch(ctx,
1170 start2, ip + 0, matchlimit, m1.len,
1171 maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
1172 start2 += m2.back;
1173 } else {
1174 m2 = nomatch; /* do not search further */
1175 }
1176
1177 if (m2.len <= m1.len) { /* No better match => encode ML1 immediately */
1178 optr = op;
1179             if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
1180                     m1.len, m1.off,
1181                     limit, oend) )
1182 goto _dest_overflow;
1183 continue;
1184 }
1185
1186 if (start0 < ip) { /* first match was skipped at least once */
1187 if (start2 < ip + m0.len) { /* squeezing ML1 between ML0(original ML1) and ML2 */
1188 ip = start0; m1 = m0; /* restore initial Match1 */
1189 } }
1190
1191 /* Here, start0==ip */
1192 if ((start2 - ip) < 3) { /* First Match too small : removed */
1193 ip = start2;
1194 m1 = m2;
1195 goto _Search2;
1196 }
1197
1198 _Search3:
1199         if ((start2 - ip) < OPTIMAL_ML) {
1200             int correction;
1201             int new_ml = m1.len;
1202             if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
1203             if (ip+new_ml > start2 + m2.len - MINMATCH)
1204                 new_ml = (int)(start2 - ip) + m2.len - MINMATCH;
1205 correction = new_ml - (int)(start2 - ip);
1206 if (correction > 0) {
1207 start2 += correction;
1208 m2.len -= correction;
1209 }
1210 }
1211
1212 if (start2 + m2.len <= mflimit) {
1213 start3 = start2 + m2.len - 3;
1214 m3 = LZ4HC_InsertAndGetWiderMatch(ctx,
1215 start3, start2, matchlimit, m2.len,
1216 maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
1217 start3 += m3.back;
1218 } else {
1219 m3 = nomatch; /* do not search further */
1220 }
1221
1222 if (m3.len <= m2.len) { /* No better match => encode ML1 and ML2 */
1223 /* ip & ref are known; Now for ml */
1224 if (start2 < ip+m1.len) m1.len = (int)(start2 - ip);
1225 /* Now, encode 2 sequences */
1226 optr = op;
1227             if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
1228                     m1.len, m1.off,
1229                     limit, oend) )
1230 goto _dest_overflow;
1231 ip = start2;
1232 optr = op;
1233             if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
1234                     m2.len, m2.off,
1235                     limit, oend) ) {
1236 m1 = m2;
1237 goto _dest_overflow;
1238 }
1239 continue;
1240 }
1241
1242 if (start3 < ip+m1.len+3) { /* Not enough space for match 2 : remove it */
1243 if (start3 >= (ip+m1.len)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
1244 if (start2 < ip+m1.len) {
1245 int correction = (int)(ip+m1.len - start2);
1246 start2 += correction;
1247 m2.len -= correction;
1248                     if (m2.len < MINMATCH) {
1249 start2 = start3;
1250 m2 = m3;
1251 }
1252 }
1253
1254 optr = op;
1255                 if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
1256                         m1.len, m1.off,
1257                         limit, oend) )
1258 goto _dest_overflow;
1259 ip = start3;
1260 m1 = m3;
1261
1262 start0 = start2;
1263 m0 = m2;
1264 goto _Search2;
1265 }
1266
1267 start2 = start3;
1268 m2 = m3;
1269 goto _Search3;
1270 }
1271
1272 /*
1273 * OK, now we have 3 ascending matches;
1274 * let's write the first one ML1.
1275 * ip & ref are known; Now decide ml.
1276 */
1277 if (start2 < ip+m1.len) {
1278             if ((start2 - ip) < OPTIMAL_ML) {
1279                 int correction;
1280                 if (m1.len > OPTIMAL_ML) m1.len = OPTIMAL_ML;
1281                 if (ip + m1.len > start2 + m2.len - MINMATCH)
1282                     m1.len = (int)(start2 - ip) + m2.len - MINMATCH;
1283 correction = m1.len - (int)(start2 - ip);
1284 if (correction > 0) {
1285 start2 += correction;
1286 m2.len -= correction;
1287 }
1288 } else {
1289 m1.len = (int)(start2 - ip);
1290 }
1291 }
1292 optr = op;
1293         if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
1294                 m1.len, m1.off,
1295                 limit, oend) )
1296 goto _dest_overflow;
1297
1298 /* ML2 becomes ML1 */
1299 ip = start2; m1 = m2;
1300
1301 /* ML3 becomes ML2 */
1302 start2 = start3; m2 = m3;
1303
1304 /* let's find a new ML3 */
1305 goto _Search3;
1306 }
1307
1308 _last_literals:
1309 /* Encode Last Literals */
1310 { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
1311         size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
1312         size_t const totalSize = 1 + llAdd + lastRunSize;
1313         if (limit == fillOutput) oend += LASTLITERALS;  /* restore correct value */
1314         if (limit && (op + totalSize > oend)) {
1315             if (limit == limitedOutput) return 0;
1316             /* adapt lastRunSize to fill 'dest' */
1317             lastRunSize = (size_t)(oend - op) - 1 /*token*/;
1318             llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
1319             lastRunSize -= llAdd;
1320         }
1321         DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
1322         ip = anchor + lastRunSize;  /* can be != iend if limit==fillOutput */
1323
1324         if (lastRunSize >= RUN_MASK) {
1325             size_t accumulator = lastRunSize - RUN_MASK;
1326             *op++ = (RUN_MASK << ML_BITS);
1327             for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
1328             *op++ = (BYTE) accumulator;
1329         } else {
1330             *op++ = (BYTE)(lastRunSize << ML_BITS);
1331         }
1332         LZ4_memcpy(op, anchor, lastRunSize);
1333 op += lastRunSize;
1334 }
1335
1336 /* End */
1337 *srcSizePtr = (int) (((const char*)ip) - source);
1338 return (int) (((char*)op)-dest);
1339
1340_dest_overflow:
1341 if (limit == fillOutput) {
1342 /* Assumption : @ip, @anchor, @optr and @m1 must be set correctly */
1343 size_t const ll = (size_t)(ip - anchor);
1344 size_t const ll_addbytes = (ll + 240) / 255;
1345 size_t const ll_totalCost = 1 + ll_addbytes + ll;
1346 BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
1347 DEBUGLOG(6, "Last sequence overflowing");
1348 op = optr; /* restore correct out pointer */
1349 if (op + ll_totalCost <= maxLitPos) {
1350 /* ll validated; now adjust match length */
1351 size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
1352 size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
1353 assert(maxMlSize < INT_MAX); assert(m1.len >= 0);
1354 if ((size_t)m1.len > maxMlSize) m1.len = (int)maxMlSize;
1355 if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + m1.len >= MFLIMIT) {
1356 LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, notLimited, oend);
1357 } }
1358 goto _last_literals;
1359 }
1360 /* compression failed */
1361 return 0;
1362}
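
As a spot check of the last-literals arithmetic above (lines 1324-1331), the following standalone sketch reproduces the run-length header encoding; the helper name and test values are illustrative only and not part of lz4hc.c:

#include <assert.h>
#include <stddef.h>

/* Illustrative re-implementation of the literal-run header emitted above:
 * RUN_MASK (15) fills the token's high nibble, then 255-bytes, then the
 * remainder, mirroring the loop at lines 1324-1331. */
static size_t emit_literal_run_header(unsigned char* op, size_t lastRunSize)
{
    size_t const run_mask = 15;          /* (1U << (8-4)) - 1 */
    int const ml_bits = 4;
    unsigned char* const start = op;
    if (lastRunSize >= run_mask) {
        size_t accumulator = lastRunSize - run_mask;
        *op++ = (unsigned char)(run_mask << ml_bits);
        for (; accumulator >= 255; accumulator -= 255) *op++ = 255;
        *op++ = (unsigned char)accumulator;
    } else {
        *op++ = (unsigned char)(lastRunSize << ml_bits);
    }
    return (size_t)(op - start);
}

int main(void)
{
    unsigned char buf[8];
    /* 300 literals: token 0xF0, then 255, then 30 => 3 header bytes */
    assert(emit_literal_run_header(buf, 300) == 3);
    assert(buf[0] == 0xF0 && buf[1] == 255 && buf[2] == 30);
    /* 10 literals fit entirely in the token nibble */
    assert(emit_literal_run_header(buf, 10) == 1 && buf[0] == (10 << 4));
    return 0;
}

This agrees with the llAdd sizing formula at line 1311: (300 + 255 - 15) / 255 = 2 extra length bytes beyond the token.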
1363
1364
1365static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
1366 const char* const source, char* dst,
1367 int* srcSizePtr, int dstCapacity,
1368 int const nbSearches, size_t sufficient_len,
1369 const limitedOutput_directive limit, int const fullUpdate,
1370 const dictCtx_directive dict,
1371 const HCfavor_e favorDecSpeed);
1372
1373LZ4_FORCE_INLINE int
1374LZ4HC_compress_generic_internal (
1375 LZ4HC_CCtx_internal* const ctx,
1376 const char* const src,
1377 char* const dst,
1378 int* const srcSizePtr,
1379 int const dstCapacity,
1380 int cLevel,
1381 const limitedOutput_directive limit,
1382 const dictCtx_directive dict
1383 )
1384{
1385 DEBUGLOG(5, "LZ4HC_compress_generic_internal(src=%p, srcSize=%d)",
1386 src, *srcSizePtr);
1387
1388 if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
1389 if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
1390
1391 ctx->end += *srcSizePtr;
1392 { cParams_t const cParam = LZ4HC_getCLevelParams(cLevel);
1393 HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
1394 int result;
1395
1396 if (cParam.strat == lz4mid) {
1397 result = LZ4MID_compress(ctx,
1398 src, dst, srcSizePtr, dstCapacity,
1399 limit, dict);
1400 } else if (cParam.strat == lz4hc) {
1401 result = LZ4HC_compress_hashChain(ctx,
1402 src, dst, srcSizePtr, dstCapacity,
1403 cParam.nbSearches, limit, dict);
1404 } else {
1405 assert(cParam.strat == lz4opt);
1406 result = LZ4HC_compress_optimal(ctx,
1407 src, dst, srcSizePtr, dstCapacity,
1408 cParam.nbSearches, cParam.targetLength, limit,
1409 cLevel >= LZ4HC_CLEVEL_MAX, /* ultra mode */
1410 dict, favor);
1411 }
1412 if (result <= 0) ctx->dirty = 1;
1413 return result;
1414 }
1415}
1416
1417static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
1418
1419static int
1420LZ4HC_compress_generic_noDictCtx (
1421 LZ4HC_CCtx_internal* const ctx,
1422 const char* const src,
1423 char* const dst,
1424 int* const srcSizePtr,
1425 int const dstCapacity,
1426 int cLevel,
1427 limitedOutput_directive limit
1428 )
1429{
1430 assert(ctx->dictCtx == NULL);
1431 return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
1432}
1433
1434static int isStateCompatible(const LZ4HC_CCtx_internal* ctx1, const LZ4HC_CCtx_internal* ctx2)
1435{
1436 int const isMid1 = LZ4HC_getCLevelParams(ctx1->compressionLevel).strat == lz4mid;
1437 int const isMid2 = LZ4HC_getCLevelParams(ctx2->compressionLevel).strat == lz4mid;
1438 return !(isMid1 ^ isMid2);
1439}
1440
1441static int
1442LZ4HC_compress_generic_dictCtx (
1443 LZ4HC_CCtx_internal* const ctx,
1444 const char* const src,
1445 char* const dst,
1446 int* const srcSizePtr,
1447 int const dstCapacity,
1448 int cLevel,
1449 limitedOutput_directive limit
1450 )
1451{
1452 const size_t position = (size_t)(ctx->end - ctx->prefixStart) + (ctx->dictLimit - ctx->lowLimit);
1453 assert(ctx->dictCtx != NULL);
1454 if (position >= 64 KB) {
1455 ctx->dictCtx = NULL;
1456 return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
1457 } else if (position == 0 && *srcSizePtr > 4 KB && isStateCompatible(ctx, ctx->dictCtx)) {
1458 LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
1459 LZ4HC_setExternalDict(ctx, (const BYTE *)src);
1460 ctx->compressionLevel = (short)cLevel;
1461 return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
1462 } else {
1463 return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
1464 }
1465}
1466
1467static int
1468LZ4HC_compress_generic (
1469 LZ4HC_CCtx_internal* const ctx,
1470 const char* const src,
1471 char* const dst,
1472 int* const srcSizePtr,
1473 int const dstCapacity,
1474 int cLevel,
1475 limitedOutput_directive limit
1476 )
1477{
1478 if (ctx->dictCtx == NULL) {
1479 return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
1480 } else {
1481 return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
1482 }
1483}
1484
1485
1486int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
1487
1488static size_t LZ4_streamHC_t_alignment(void)
1489{
1490#if LZ4_ALIGN_TEST
1491 typedef struct { char c; LZ4_streamHC_t t; } t_a;
1492 return sizeof(t_a) - sizeof(LZ4_streamHC_t);
1493#else
1494 return 1; /* effectively disabled */
1495#endif
1496}
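
The struct trick above measures the platform's alignment requirement for LZ4_streamHC_t without relying on C11 _Alignof. A minimal sketch of the same idiom applied to another type (names illustrative):

#include <stdio.h>
#include <stddef.h>

/* The padding a compiler inserts after a leading char equals the
 * alignment requirement of the following member. */
typedef struct { char c; double t; } align_probe_double;

int main(void)
{
    size_t const alignment = sizeof(align_probe_double) - sizeof(double);
    printf("alignment of double: %zu\n", alignment);   /* typically 8 */
    return 0;
}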
1497
1498/* state is presumed correctly initialized,
1499 * in which case its size and alignment have already been validated */
1500int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
1501{
1502 LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
1503 if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
1504 LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
1505 LZ4HC_init_internal (ctx, (const BYTE*)src);
1506 if (dstCapacity < LZ4_compressBound(srcSize))
1507 return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
1508 else
1509 return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
1510}
1511
1512int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
1513{
1514 LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
1515 if (ctx==NULL) return 0; /* init failure */
1516 return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
1517}
1518
1519int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
1520{
1521 int cSize;
1522#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1523 LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
1524 if (statePtr==NULL) return 0;
1525#else
1526 LZ4_streamHC_t state;
1527 LZ4_streamHC_t* const statePtr = &state;
1528#endif
1529 DEBUGLOG(5, "LZ4_compress_HC")
1530 cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
1531#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1532 FREEMEM(statePtr);
1533#endif
1534 return cSize;
1535}
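
For context, a minimal caller of the one-shot entry point above might look like this (buffer handling is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include "lz4hc.h"

int main(void)
{
    const char src[] = "yada yada yada yada yada yada yada yada";
    int const srcSize = (int)sizeof(src);
    int const bound = LZ4_compressBound(srcSize);   /* worst-case output size */
    char* const dst = (char*)malloc((size_t)bound);
    if (dst == NULL) return 1;
    /* levels < 1 fall back to LZ4HC_CLEVEL_DEFAULT; levels above
     * LZ4HC_CLEVEL_MAX are clamped (see LZ4_setCompressionLevel below) */
    int const cSize = LZ4_compress_HC(src, dst, srcSize, bound, LZ4HC_CLEVEL_MAX);
    printf("compressed %d -> %d bytes\n", srcSize, cSize);  /* 0 means failure */
    free(dst);
    return (cSize > 0) ? 0 : 1;
}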
1536
1537/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
1538int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
1539{
1540 LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
1541 if (ctx==NULL) return 0; /* init failure */
1542 LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
1543 LZ4_setCompressionLevel(ctx, cLevel);
1544 return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
1545}
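
Note that the fillOutput path above reverses the usual contract: the output budget is fixed and *sourceSizePtr is updated to the number of input bytes actually consumed. A hedged usage sketch (buffer sizes illustrative; malloc provides sufficient alignment for the state):

#include <stdio.h>
#include <stdlib.h>
#include "lz4hc.h"

int main(void)
{
    char src[4096];
    char dst[256];                      /* hard output budget */
    int srcSize = (int)sizeof(src);
    for (int i = 0; i < srcSize; i++) src[i] = (char)(i & 15);
    void* const state = malloc((size_t)LZ4_sizeofStateHC());
    if (state == NULL) return 1;
    int const cSize = LZ4_compress_HC_destSize(state, src, dst, &srcSize,
                                               (int)sizeof(dst), 9);
    /* srcSize now holds how much input fit into the 256-byte output */
    printf("consumed %d input bytes into %d output bytes\n", srcSize, cSize);
    free(state);
    return 0;
}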
1546
1547
1548
1549/**************************************
1550* Streaming Functions
1551**************************************/
1552/* allocation */
1553#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1554LZ4_streamHC_t* LZ4_createStreamHC(void)
1555{
1556 LZ4_streamHC_t* const state =
1557 (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
1558 if (state == NULL) return NULL;
1559 LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
1560 return state;
1561}
1562
1563int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
1564{
1565 DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
1566 if (!LZ4_streamHCPtr) return 0; /* support free on NULL */
1567 FREEMEM(LZ4_streamHCPtr);
1568 return 0;
1569}
1570#endif
1571
1572
1573LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
1574{
1575 LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
1576 DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
1577 /* check conditions */
1578 if (buffer == NULL) return NULL;
1579 if (size < sizeof(LZ4_streamHC_t)) return NULL;
1580 if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
1581 /* init */
1582 { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
1583 MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
1584 LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
1585 return LZ4_streamHCPtr;
1586}
1587
1588/* just a stub */
1589void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
1590{
1591 LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
1592 LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
1593}
1594
1595void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
1596{
1597 LZ4HC_CCtx_internal* const s = &LZ4_streamHCPtr->internal_donotuse;
1598 DEBUGLOG(5, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
1599 if (s->dirty) {
1600 LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
1601 } else {
1602 assert(s->end >= s->prefixStart);
1603 s->dictLimit += (U32)(s->end - s->prefixStart);
1604 s->prefixStart = NULL;
1605 s->end = NULL;
1606 s->dictCtx = NULL;
1607 }
1608 LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
1609}
1610
1611void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
1612{
1613 DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
1614 if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
1615 if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
1616 LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
1617}
1618
1619void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
1620{
1621 LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
1622}
1623
1624/* LZ4_loadDictHC() :
1625 * LZ4_streamHCPtr is presumed properly initialized */
1626int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
1627 const char* dictionary, int dictSize)
1628{
1629 LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
1630 cParams_t cp;
1631 DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d, clevel=%d)", LZ4_streamHCPtr, dictionary, dictSize, ctxPtr->compressionLevel);
1632 assert(dictSize >= 0);
1633 assert(LZ4_streamHCPtr != NULL);
1634 if (dictSize > 64 KB) {
1635 dictionary += (size_t)dictSize - 64 KB;
1636 dictSize = 64 KB;
1637 }
1638 /* need a full initialization, there are bad side-effects when using resetFast() */
1639 { int const cLevel = ctxPtr->compressionLevel;
1640 LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
1641 LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
1642 cp = LZ4HC_getCLevelParams(cLevel);
1643 }
1644 LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
1645 ctxPtr->end = (const BYTE*)dictionary + dictSize;
1646 if (cp.strat == lz4mid) {
1647 LZ4MID_fillHTable (ctxPtr, dictionary, (size_t)dictSize);
1648 } else {
1649 if (dictSize >= LZ4HC_HASHSIZE) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
1650 }
1651 return dictSize;
1652}
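
LZ4_loadDictHC keeps only the trailing 64 KB of the dictionary and performs a full stream re-initialization first. A typical call sequence (the wrapper function is illustrative):

#include "lz4hc.h"

int compress_with_dict(const char* dict, int dictSize,
                       const char* src, int srcSize,
                       char* dst, int dstCapacity)
{
    LZ4_streamHC_t stream;              /* the union type carries its own alignment */
    if (LZ4_initStreamHC(&stream, sizeof(stream)) == NULL) return 0;
    LZ4_setCompressionLevel(&stream, 9);
    LZ4_loadDictHC(&stream, dict, dictSize);   /* truncated to its last 64 KB */
    /* subsequent blocks may reference the dictionary content */
    return LZ4_compress_HC_continue(&stream, src, dst, srcSize, dstCapacity);
}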
1653
1654void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
1655 working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
1656}
1657
1658/* compression */
1659
1660static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
1661{
1662 DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
1663 if ( (ctxPtr->end >= ctxPtr->prefixStart + 4)
1664 && (LZ4HC_getCLevelParams(ctxPtr->compressionLevel).strat != lz4mid) ) {
1665 LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
1666 }
1667
1668 /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
1669 ctxPtr->lowLimit = ctxPtr->dictLimit;
1670 ctxPtr->dictStart = ctxPtr->prefixStart;
1671 ctxPtr->dictLimit += (U32)(ctxPtr->end - ctxPtr->prefixStart);
1672 ctxPtr->prefixStart = newBlock;
1673 ctxPtr->end = newBlock;
1674 ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
1675
1676 /* cannot reference an extDict and a dictCtx at the same time */
1677 ctxPtr->dictCtx = NULL;
1678}
1679
1680static int
1681LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
1682 const char* src, char* dst,
1683 int* srcSizePtr, int dstCapacity,
1684 limitedOutput_directive limit)
1685{
1686 LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
1687 DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
1688 LZ4_streamHCPtr, src, *srcSizePtr, limit);
1689 assert(ctxPtr != NULL);
1690 /* auto-init if forgotten */
1691 if (ctxPtr->prefixStart == NULL)
1692 LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
1693
1694 /* Check overflow */
1695 if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
1696 size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->prefixStart);
1697 if (dictSize > 64 KB) dictSize = 64 KB;
1698 LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
1699 }
1700
1701 /* Check if blocks follow each other */
1702 if ((const BYTE*)src != ctxPtr->end)
1703 LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);
1704
1705 /* Check overlapping input/dictionary space */
1706 { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
1707 const BYTE* const dictBegin = ctxPtr->dictStart;
1708 const BYTE* const dictEnd = ctxPtr->dictStart + (ctxPtr->dictLimit - ctxPtr->lowLimit);
1709 if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
1710 if (sourceEnd > dictEnd) sourceEnd = dictEnd;
1711 ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart);
1712 ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
1713 /* invalidate dictionary if it's too small */
1714 if (ctxPtr->dictLimit - ctxPtr->lowLimit < LZ4HC_HASHSIZE) {
1715 ctxPtr->lowLimit = ctxPtr->dictLimit;
1716 ctxPtr->dictStart = ctxPtr->prefixStart;
1717 } } }
1718
1719 return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
1720}
1721
1722int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
1723{
1724 DEBUGLOG(5, "LZ4_compress_HC_continue");
1725 if (dstCapacity < LZ4_compressBound(srcSize))
1726 return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
1727 else
1728 return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
1729}
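
Chained block compression with the functions above typically alternates LZ4_compress_HC_continue with LZ4_saveDictHC, so that history survives input-buffer reuse. A condensed sketch (error handling and framing omitted; names illustrative):

#include <stdio.h>
#include "lz4hc.h"

/* Compress successive blocks of one logical stream; each block may
 * reference up to 64 KB of prior history. */
static void stream_blocks(LZ4_streamHC_t* s, FILE* in, FILE* out)
{
    static char inBuf[64 * 1024];
    static char dictBuf[64 * 1024];
    static char outBuf[LZ4_COMPRESSBOUND(64 * 1024)];
    size_t n;
    while ((n = fread(inBuf, 1, sizeof(inBuf), in)) > 0) {
        int const cSize = LZ4_compress_HC_continue(s, inBuf, outBuf,
                                                   (int)n, (int)sizeof(outBuf));
        if (cSize <= 0) break;                  /* compression error */
        fwrite(outBuf, 1, (size_t)cSize, out);
        /* move history to a stable buffer before inBuf is overwritten */
        LZ4_saveDictHC(s, dictBuf, (int)sizeof(dictBuf));
    }
}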
1730
1731int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
1732{
1733 return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
1734}
1735
1736
1737/* LZ4_saveDictHC :
1738 * save history content
1739 * into a user-provided buffer
1740 * which is then used to continue compression
1741 */
1742int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
1743{
1744 LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
1745 int const prefixSize = (int)(streamPtr->end - streamPtr->prefixStart);
1746 DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
1747 assert(prefixSize >= 0);
1748 if (dictSize > 64 KB) dictSize = 64 KB;
        [1] Assuming the condition is false   [2] Taking false branch
1749 if (dictSize < 4) dictSize = 0;
        [3] Assuming 'dictSize' is >= 4   [4] Taking false branch
1750 if (dictSize > prefixSize) dictSize = prefixSize;
        [5] Assuming 'dictSize' is <= 'prefixSize'   [6] Taking false branch
1751 if (safeBuffer == NULL) assert(dictSize == 0);
        [7] Assuming 'safeBuffer' is equal to NULL   [8] Taking true branch
1752 if (dictSize > 0)
        [8.1] 'dictSize' is > 0   [9] Taking true branch
1753 LZ4_memmove(safeBuffer, streamPtr->end - dictSize, (size_t)dictSize);
        [10] Null pointer passed as 1st argument to memory copy function
1754 { U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
1755 streamPtr->end = (safeBuffer == NULL) ? NULL : (const BYTE*)safeBuffer + dictSize;
1756 streamPtr->prefixStart = (const BYTE*)safeBuffer;
1757 streamPtr->dictLimit = endIndex - (U32)dictSize;
1758 streamPtr->lowLimit = endIndex - (U32)dictSize;
1759 streamPtr->dictStart = streamPtr->prefixStart;
1760 if (streamPtr->nextToUpdate < streamPtr->dictLimit)
1761 streamPtr->nextToUpdate = streamPtr->dictLimit;
1762 }
1763 return dictSize;
1764}
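
On the analyzer's path, the assert at line 1751 compiles away in this build (its expansion is ((void)0)), so nothing forces dictSize to 0 when safeBuffer is NULL, and the LZ4_memmove at line 1753 can receive a null destination. One defensive rewrite of lines 1751-1753, offered only as a sketch and not as the upstream fix:

/* Hypothetical hardening: make the debug-only invariant
 * (safeBuffer != NULL or dictSize == 0) hold in release builds too. */
if (safeBuffer == NULL) dictSize = 0;   /* nothing to save into */
if (dictSize > 0)
    LZ4_memmove(safeBuffer, streamPtr->end - dictSize, (size_t)dictSize);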
1765
1766
1767/* ================================================
1768 * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
1769 * ===============================================*/
1770typedef struct {
1771 int price;
1772 int off;
1773 int mlen;
1774 int litlen;
1775} LZ4HC_optimal_t;
1776
1777/* price in bytes */
1778LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
1779{
1780 int price = litlen;
1781 assert(litlen >= 0);
1782 if (litlen >= (int)RUN_MASK)
1783 price += 1 + ((litlen-(int)RUN_MASK) / 255);
1784 return price;
1785}
1786
1787/* requires mlen >= MINMATCH */
1788LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
1789{
1790 int price = 1 + 2 ; /* token + 16-bit offset */
1791 assert(litlen >= 0);
1792 assert(mlen >= MINMATCH);
1793
1794 price += LZ4HC_literalsPrice(litlen);
1795
1796 if (mlen >= (int)(ML_MASK+MINMATCH))
1797 price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);
1798
1799 return price;
1800}
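
The two price functions above count output bytes exactly: 1 token byte, 2 offset bytes, plus one extra length byte per 255 once litlen reaches RUN_MASK (15) or mlen reaches ML_MASK+MINMATCH (19). A few spot checks against a standalone re-implementation (illustrative, with the constants inlined):

#include <assert.h>

static int literalsPrice(int litlen)            /* mirrors LZ4HC_literalsPrice */
{
    int price = litlen;
    if (litlen >= 15) price += 1 + (litlen - 15) / 255;
    return price;
}

static int sequencePrice(int litlen, int mlen)  /* mirrors LZ4HC_sequencePrice */
{
    int price = 1 + 2;                          /* token + 16-bit offset */
    price += literalsPrice(litlen);
    if (mlen >= 19) price += 1 + (mlen - 19) / 255;
    return price;
}

int main(void)
{
    assert(literalsPrice(10) == 10);            /* fits in the token nibble */
    assert(literalsPrice(20) == 21);            /* one extra length byte */
    assert(sequencePrice(0, 18) == 3);          /* token + offset only */
    assert(sequencePrice(5, 100) == 9);         /* 3 + 5 literals + 1 length byte */
    return 0;
}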
1801
1802LZ4_FORCE_INLINE LZ4HC_match_t
1803LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
1804 const BYTE* ip, const BYTE* const iHighLimit,
1805 int minLen, int nbSearches,
1806 const dictCtx_directive dict,
1807 const HCfavor_e favorDecSpeed)
1808{
1809 LZ4HC_match_t const match0 = { 0 , 0, 0 };
1810 /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
1811 * but this won't be the case here, as we define iLowLimit==ip,
1812 ** so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
1813 LZ4HC_match_t md = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
1814 assert(md.back == 0);
1815 if (md.len <= minLen) return match0;
1816 if (favorDecSpeed) {
1817 if ((md.len>18) & (md.len<=36)) md.len=18; /* favor dec.speed (shortcut) */
1818 }
1819 return md;
1820}
1821
1822
1823static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
1824 const char* const source,
1825 char* dst,
1826 int* srcSizePtr,
1827 int dstCapacity,
1828 int const nbSearches,
1829 size_t sufficient_len,
1830 const limitedOutput_directive limit,
1831 int const fullUpdate,
1832 const dictCtx_directive dict,
1833 const HCfavor_e favorDecSpeed)
1834{
1835 int retval = 0;
1836#define TRAILING_LITERALS 3
1837#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1838 LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
1839#else
1840 LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
1841#endif
1842
1843 const BYTE* ip = (const BYTE*) source;
1844 const BYTE* anchor = ip;
1845 const BYTE* const iend = ip + *srcSizePtr;
1846 const BYTE* const mflimit = iend - MFLIMIT;
1847 const BYTE* const matchlimit = iend - LASTLITERALS;
1848 BYTE* op = (BYTE*) dst;
1849 BYTE* opSaved = (BYTE*) dst;
1850 BYTE* oend = op + dstCapacity;
1851 int ovml = MINMATCH; /* overflow - last sequence */
1852 int ovoff = 0;
1853
1854 /* init */
1855#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
1856 if (opt == NULL) goto _return_label;
1857#endif
1858 DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
1859 *srcSizePtr = 0;
1860 if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
1861 if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
1862
1863 /* Main Loop */
1864 while (ip <= mflimit) {
1865 int const llen = (int)(ip - anchor);
1866 int best_mlen, best_off;
1867 int cur, last_match_pos = 0;
1868
1869 LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
1870 if (firstMatch.len==0) { ip++; continue; }
1871
1872 if ((size_t)firstMatch.len > sufficient_len) {
1873 /* good enough solution : immediate encoding */
1874 int const firstML = firstMatch.len;
1875 opSaved = op;
1876 if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, firstMatch.off, limit, oend) ) { /* updates ip, op and anchor */
1877 ovml = firstML;
1878 ovoff = firstMatch.off;
1879 goto _dest_overflow;
1880 }
1881 continue;
1882 }
1883
1884 /* set prices for first positions (literals) */
1885 { int rPos;
1886 for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
1887 int const cost = LZ4HC_literalsPrice(llen + rPos);
1888 opt[rPos].mlen = 1;
1889 opt[rPos].off = 0;
1890 opt[rPos].litlen = llen + rPos;
1891 opt[rPos].price = cost;
1892 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
1893 rPos, cost, opt[rPos].litlen);
1894 } }
1895 /* set prices using initial match */
1896 { int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
1897 int const offset = firstMatch.off;
1898 int mlen;
1899 assert(matchML < LZ4_OPT_NUM);
1900 for (mlen = MINMATCH ; mlen <= matchML ; mlen++) {
1901 int const cost = LZ4HC_sequencePrice(llen, mlen);
1902 opt[mlen].mlen = mlen;
1903 opt[mlen].off = offset;
1904 opt[mlen].litlen = llen;
1905 opt[mlen].price = cost;
1906 DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
1907 mlen, cost, mlen);
1908 } }
1909 last_match_pos = firstMatch.len;
1910 { int addLit;
1911 for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
1912 opt[last_match_pos+addLit].mlen = 1; /* literal */
1913 opt[last_match_pos+addLit].off = 0;
1914 opt[last_match_pos+addLit].litlen = addLit;
1915 opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
1916 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
1917 last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
1918 } }
1919
1920 /* check further positions */
1921 for (cur = 1; cur < last_match_pos; cur++) {
1922 const BYTE* const curPtr = ip + cur;
1923 LZ4HC_match_t newMatch;
1924
1925 if (curPtr > mflimit) break;
1926 DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
1927 cur, opt[cur].price, opt[cur+1].price, cur+1);
1928 if (fullUpdate) {
1929 /* not useful to search here if next position has same (or lower) cost */
1930 if ( (opt[cur+1].price <= opt[cur].price)
1931 /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
1932 && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
1933 continue;
1934 } else {
1935 /* not useful to search here if next position has same (or lower) cost */
1936 if (opt[cur+1].price <= opt[cur].price) continue;
1937 }
1938
1939 DEBUGLOG(7, "search at rPos:%u", cur);
1940 if (fullUpdate)
1941 newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
1942 else
1943 /* only test matches of minimum length; slightly faster, but misses a few bytes */
1944 newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
1945 if (!newMatch.len) continue;
1946
1947 if ( ((size_t)newMatch.len > sufficient_len)
1948 || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
1949 /* immediate encoding */
1950 best_mlen = newMatch.len;
1951 best_off = newMatch.off;
1952 last_match_pos = cur + 1;
1953 goto encode;
1954 }
1955
1956 /* before match : set price with literals at beginning */
1957 { int const baseLitlen = opt[cur].litlen;
1958 int litlen;
1959 for (litlen = 1; litlen < MINMATCH; litlen++) {
1960 int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
1961 int const pos = cur + litlen;
1962 if (price < opt[pos].price) {
1963 opt[pos].mlen = 1; /* literal */
1964 opt[pos].off = 0;
1965 opt[pos].litlen = baseLitlen+litlen;
1966 opt[pos].price = price;
1967 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
1968 pos, price, opt[pos].litlen);
1969 } } }
1970
1971 /* set prices using match at position = cur */
1972 { int const matchML = newMatch.len;
1973 int ml = MINMATCH;
1974
1975 assert(cur + newMatch.len < LZ4_OPT_NUM);
1976 for ( ; ml <= matchML ; ml++) {
1977 int const pos = cur + ml;
1978 int const offset = newMatch.off;
1979 int price;
1980 int ll;
1981 DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
1982 pos, last_match_pos);
1983 if (opt[cur].mlen == 1) {
1984 ll = opt[cur].litlen;
1985 price = ((cur > ll) ? opt[cur - ll].price : 0)
1986 + LZ4HC_sequencePrice(ll, ml);
1987 } else {
1988 ll = 0;
1989 price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
1990 }
1991
1992 assert((U32)favorDecSpeed <= 1);
1993 if (pos > last_match_pos+TRAILING_LITERALS
1994 || price <= opt[pos].price - (int)favorDecSpeed) {
1995 DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
1996 pos, price, ml);
1997 assert(pos < LZ4_OPT_NUM);
1998 if ( (ml == matchML) /* last pos of last match */
1999 && (last_match_pos < pos) )
2000 last_match_pos = pos;
2001 opt[pos].mlen = ml;
2002 opt[pos].off = offset;
2003 opt[pos].litlen = ll;
2004 opt[pos].price = price;
2005 } } }
2006 /* complete following positions with literals */
2007 { int addLit;
2008 for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
2009 opt[last_match_pos+addLit].mlen = 1; /* literal */
2010 opt[last_match_pos+addLit].off = 0;
2011 opt[last_match_pos+addLit].litlen = addLit;
2012 opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
2013 DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
2014 } }
2015 } /* for (cur = 1; cur <= last_match_pos; cur++) */
2016
2017 assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
2018 best_mlen = opt[last_match_pos].mlen;
2019 best_off = opt[last_match_pos].off;
2020 cur = last_match_pos - best_mlen;
2021
2022encode: /* cur, last_match_pos, best_mlen, best_off must be set */
2023 assert(cur < LZ4_OPT_NUM);
2024 assert(last_match_pos >= 1); /* == 1 when only one candidate */
2025 DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
2026 { int candidate_pos = cur;
2027 int selected_matchLength = best_mlen;
2028 int selected_offset = best_off;
2029 while (1) { /* from end to beginning */
2030 int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */
2031 int const next_offset = opt[candidate_pos].off;
2032 DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
2033 opt[candidate_pos].mlen = selected_matchLength;
2034 opt[candidate_pos].off = selected_offset;
2035 selected_matchLength = next_matchLength;
2036 selected_offset = next_offset;
2037 if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */
2038 assert(next_matchLength > 0); /* can be 1, means literal */
2039 candidate_pos -= next_matchLength;
2040 } }
2041
2042 /* encode all recorded sequences in order */
2043 { int rPos = 0; /* relative position (to ip) */
2044 while (rPos < last_match_pos) {
2045 int const ml = opt[rPos].mlen;
2046 int const offset = opt[rPos].off;
2047 if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */
2048 rPos += ml;
2049 assert(ml >= MINMATCH);
2050 assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
2051 opSaved = op;
2052 if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, offset, limit, oend) ) { /* updates ip, op and anchor */
2053 ovml = ml;
2054 ovoff = offset;
2055 goto _dest_overflow;
2056 } } }
2057 } /* while (ip <= mflimit) */
2058
2059_last_literals:
2060 /* Encode Last Literals */
2061 { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
2062 size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
2063 size_t const totalSize = 1 + llAdd + lastRunSize;
2064 if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
2065 if (limit && (op + totalSize > oend)) {
2066 if (limit == limitedOutput) { /* Check output limit */
2067 retval = 0;
2068 goto _return_label;
2069 }
2070 /* adapt lastRunSize to fill 'dst' */
2071 lastRunSize = (size_t)(oend - op) - 1 /*token*/;
2072 llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
2073 lastRunSize -= llAdd;
2074 }
2075 DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
2076 ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
2077
2078 if (lastRunSize >= RUN_MASK) {
2079 size_t accumulator = lastRunSize - RUN_MASK;
2080 *op++ = (RUN_MASK << ML_BITS);
2081 for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
2082 *op++ = (BYTE) accumulator;
2083 } else {
2084 *op++ = (BYTE)(lastRunSize << ML_BITS);
2085 }
2086 LZ4_memcpy(op, anchor, lastRunSize);
2087 op += lastRunSize;
2088 }
2089
2090 /* End */
2091 *srcSizePtr = (int) (((const char*)ip) - source);
2092 retval = (int) ((char*)op-dst);
2093 goto _return_label;
2094
2095_dest_overflow:
2096if (limit == fillOutput) {
2097 /* Assumption : ip, anchor, ovml and ovref must be set correctly */
2098 size_t const ll = (size_t)(ip - anchor);
2099 size_t const ll_addbytes = (ll + 240) / 255;
2100 size_t const ll_totalCost = 1 + ll_addbytes + ll;
2101 BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
2102 DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
2103 op = opSaved; /* restore correct out pointer */
2104 if (op + ll_totalCost <= maxLitPos) {
2105 /* ll validated; now adjust match length */
2106 size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
2107 size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
2108 assert(maxMlSize < INT_MAX); assert(ovml >= 0);
2109 if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
2110 if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
2111 DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
2112 DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
2113 LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovoff, notLimited, oend);
2114 DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
2115 } }
2116 goto _last_literals;
2117}
2118_return_label:
2119#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
2120 if (opt) FREEMEM(opt);
2121#endif
2122 return retval;
2123}
2124
2125
2126/***************************************************
2127* Deprecated Functions
2128***************************************************/
2129
2130/* These functions currently generate deprecation warnings */
2131
2132/* Wrappers for deprecated compression functions */
2133int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
2134int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
2135int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
2136int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
2137int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
2138int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
2139int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
2140int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
2141int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
2142int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
2143
2144
2145/* Deprecated streaming functions */
2146int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); }
2147
2148/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
2149 * @return : 0 on success, !=0 if error */
2150int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
2151{
2152 LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
2153 if (hc4 == NULL) return 1; /* init failed */
2154 LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
2155 return 0;
2156}
2157
2158#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
2159void* LZ4_createHC (const char* inputBuffer)
2160{
2161 LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
2162 if (hc4 == NULL) return NULL; /* not enough memory */
2163 LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
2164 return hc4;
2165}
2166
2167int LZ4_freeHC (void* LZ4HC_Data)
2168{
2169 if (!LZ4HC_Data) return 0; /* support free on NULL */
2170 FREEMEM(LZ4HC_Data);
2171 return 0;
2172}
2173#endif
2174
2175int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
2176{
2177 return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
2178}
2179
2180int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
2181{
2182 return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
2183}
2184
2185char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
2186{
2187 LZ4HC_CCtx_internal* const s = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
2188 const BYTE* const bufferStart = s->prefixStart - s->dictLimit + s->lowLimit;
2189 LZ4_resetStreamHC_fast((LZ4_streamHC_t*)LZ4HC_Data, s->compressionLevel);
2190 /* ugly conversion trick, required to evade (const char*) -> (char*) cast-qual warning :( */
2191 return (char*)(uptrval)bufferStart;
2192}