UDocumentation UE5.7 10.02.2026 (Source)
API documentation for Unreal Engine 5.7
lz4.cpp
Go to the documentation of this file.
1/*
2 LZ4 - Fast LZ compression algorithm
3 Copyright (C) 2011-2020, Yann Collet.
4
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10
11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the
16 distribution.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 You can contact the author at :
31 - LZ4 homepage : http://www.lz4.org
32 - LZ4 source repository : https://github.com/lz4/lz4
33*/
34
35/*-************************************
36* Tuning parameters
37**************************************/
38/*
39 * LZ4_HEAPMODE :
40 * Select how default compression functions will allocate memory for their hash table,
41 * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
42 */
43#ifndef LZ4_HEAPMODE
44# define LZ4_HEAPMODE 0
45#endif
46
47/*
48 * LZ4_ACCELERATION_DEFAULT :
49 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
50 */
51#define LZ4_ACCELERATION_DEFAULT 1
52/*
53 * LZ4_ACCELERATION_MAX :
54 * Any "acceleration" value higher than this threshold
55 * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
56 */
57#define LZ4_ACCELERATION_MAX 65537
58
59
60/*-************************************
61* CPU Feature Detection
62**************************************/
63/* LZ4_FORCE_MEMORY_ACCESS
64 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
65 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
66 * The below switch allow to select different access method for improved performance.
67 * Method 0 (default) : use `memcpy()`. Safe and portable.
68 * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
69 * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
70 * Method 2 : direct access. This method is portable but violate C standard.
71 * It can generate buggy code on targets which assembly generation depends on alignment.
72 * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
73 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
74 * Prefer these methods in priority order (0 > 1 > 2)
75 */
76#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
77# if defined(__GNUC__) && \
78 ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
79 || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
80# define LZ4_FORCE_MEMORY_ACCESS 2
81# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
82# define LZ4_FORCE_MEMORY_ACCESS 1
83# endif
84#endif
85
86/*
87 * LZ4_FORCE_SW_BITCOUNT
88 * Define this parameter if your target system or compiler does not support hardware bit count
89 */
90#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
91# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
92# define LZ4_FORCE_SW_BITCOUNT
93#endif
94
95
96
97/*-************************************
98* Dependency
99**************************************/
100/*
101 * LZ4_SRC_INCLUDED:
102 * Amalgamation flag, whether lz4.c is included
103 */
104#ifndef LZ4_SRC_INCLUDED
105# define LZ4_SRC_INCLUDED 1
106#endif
107
108#ifndef LZ4_STATIC_LINKING_ONLY
109#define LZ4_STATIC_LINKING_ONLY
110#endif
111
112#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
113#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
114#endif
115
116#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
117#include "Compression/lz4.h"
118/* see also "memory routines" below */
119
120
121/*-************************************
122* Compiler Options
123**************************************/
124#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
125# include <intrin.h> /* only present in VS2005+ */
126# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
127# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */
128#endif /* _MSC_VER */
129
130#ifndef LZ4_FORCE_INLINE
131# ifdef _MSC_VER /* Visual Studio */
132# define LZ4_FORCE_INLINE static __forceinline
133# else
134# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
135# ifdef __GNUC__
136# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
137# else
138# define LZ4_FORCE_INLINE static inline
139# endif
140# else
141# define LZ4_FORCE_INLINE static
142# endif /* __STDC_VERSION__ */
143# endif /* _MSC_VER */
144#endif /* LZ4_FORCE_INLINE */
145
146/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
147 * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
148 * together with a simple 8-byte copy loop as a fall-back path.
149 * However, this optimization hurts the decompression speed by >30%,
150 * because the execution does not go to the optimized loop
151 * for typical compressible data, and all of the preamble checks
152 * before going to the fall-back path become useless overhead.
153 * This optimization happens only with the -O3 flag, and -O2 generates
154 * a simple 8-byte copy loop.
155 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
156 * functions are annotated with __attribute__((optimize("O2"))),
157 * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
158 * of LZ4_wildCopy8 does not affect the compression speed.
159 */
160#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
161# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
162# undef LZ4_FORCE_INLINE
163# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
164#else
165# define LZ4_FORCE_O2
166#endif
167
168#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
169# define expect(expr,value) (__builtin_expect ((expr),(value)) )
170#else
171# define expect(expr,value) (expr)
172#endif
173
174#ifndef likely
175#define likely(expr) expect(int(expr) != 0, 1)
176#endif
177#ifndef unlikely
178#define unlikely(expr) expect(int(expr) != 0, 0)
179#endif
180
181/* Should the alignment test prove unreliable, for some reason,
182 * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
183#ifndef LZ4_ALIGN_TEST /* can be externally provided */
184# define LZ4_ALIGN_TEST 1
185#endif
186
187
188/*-************************************
189* Memory routines
190**************************************/
191
207#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
208# define ALLOC(s) lz4_error_memory_allocation_is_disabled
209# define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
210# define FREEMEM(p) lz4_error_memory_allocation_is_disabled
211#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
212/* memory management functions can be customized by user project.
213 * Below functions must exist somewhere in the Project
214 * and be available at link time */
215void* LZ4_malloc(size_t s);
216void* LZ4_calloc(size_t n, size_t s);
217void LZ4_free(void* p);
218# define ALLOC(s) LZ4_malloc(s)
219# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
220# define FREEMEM(p) LZ4_free(p)
221#else
222# define ALLOC(s) malloc(s)
223# define ALLOC_AND_ZERO(s) calloc(1,s)
224# define FREEMEM(p) free(p)
225#endif
226
227#define MEM_INIT(p,v,s) FPlatformMemory::Memset((p),(v),(s))
228
229
230/*-************************************
231* Common Constants
232**************************************/
233#define MINMATCH 4
234
235#define WILDCOPYLENGTH 8
236#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
237#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
238#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
239#define FASTLOOP_SAFE_DISTANCE 64
240static const int LZ4_minLength = (MFLIMIT+1);
241
242#define KB *(1 <<10)
243#define MB *(1 <<20)
244#define GB *(1U<<30)
245
246#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
247#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
248# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
249#endif
250
251#define ML_BITS 4
252#define ML_MASK ((1U<<ML_BITS)-1)
253#define RUN_BITS (8-ML_BITS)
254#define RUN_MASK ((1U<<RUN_BITS)-1)
255
256
257/*-************************************
258* Error detection
259**************************************/
260#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
261# ifndef assert
262# define assert(condition) check(condition)
263# endif
264#else
265# ifndef assert
266# define assert(condition) ((void)0)
267# endif
268#endif
269
270#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
271
272#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
273 static int g_debuglog_enable = 1;
274# define DEBUGLOG(l, ...) { \
275 if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
276 fprintf(stderr, __FILE__ ": "); \
277 fprintf(stderr, __VA_ARGS__); \
278 fprintf(stderr, " \n"); \
279 } }
280#else
281# define DEBUGLOG(l, ...) {} /* disabled */
282#endif
283
/* Check whether `ptr` sits on a multiple of `alignment`.
 * `alignment` must be a power of 2 (the low-bit mask trick relies on it).
 * Returns 1 when aligned, 0 otherwise. */
static int LZ4_isAligned(const void* ptr, size_t alignment)
{
    size_t const address = (size_t)ptr;
    size_t const mask = alignment - 1;  /* power-of-2: low bits hold the remainder */
    return (address & mask) == 0;
}
288
289
290/*-************************************
291* Types
292**************************************/
293#include <limits.h>
294#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
295 typedef uint8 BYTE;
296 typedef uint16 U16;
297 typedef uint32 U32;
298 typedef int32 S32;
299 typedef uint64 U64;
300 typedef UPTRINT uptrval;
301#else
302# if UINT_MAX != 4294967295UL
303# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
304# endif
305 typedef unsigned char BYTE;
306 typedef unsigned short U16;
307 typedef unsigned int U32;
308 typedef signed int S32;
309 typedef unsigned long long U64;
310 typedef SIZE_T uptrval; /* generally true, except OpenVMS-64 */
311#endif
312
313#if defined(__x86_64__)
314 typedef U64 reg_t; /* 64-bits in x32 mode */
315#else
316 typedef size_t reg_t; /* 32-bits in x32 mode */
317#endif
318
324
325
326/*-************************************
327* Reading and writing into memory
328**************************************/
329
338#if !defined(LZ4_memcpy)
339# if defined(__GNUC__) && (__GNUC__ >= 4)
340# define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
341# else
342# define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
343# endif
344#endif
345
346#if !defined(LZ4_memmove)
347# if defined(__GNUC__) && (__GNUC__ >= 4)
348# define LZ4_memmove __builtin_memmove
349# else
350# define LZ4_memmove memmove
351# endif
352#endif
353
354static unsigned LZ4_isLittleEndian(void)
355{
356 const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
357 return one.c[0];
358}
359
360
/* Unaligned memory access layer.
 * One of three implementations is selected by LZ4_FORCE_MEMORY_ACCESS
 * (see "CPU Feature Detection" tuning section above):
 *   2 : direct pointer casts -- fastest on some targets, but violates strict aliasing
 *   1 : packed-union access  -- gcc/icc compiler extension
 *   default : memcpy()       -- safe and portable */
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */

static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign;

static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; }

static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; }

#else /* safe and portable access using memcpy() */

static U16 LZ4_read16(const void* memPtr)
{
 U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
 U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static reg_t LZ4_read_ARCH(const void* memPtr)
{
 reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
 LZ4_memcpy(memPtr, &value, sizeof(value));
}

static void LZ4_write32(void* memPtr, U32 value)
{
 LZ4_memcpy(memPtr, &value, sizeof(value));
}

#endif /* LZ4_FORCE_MEMORY_ACCESS */
412
413
414static U16 LZ4_readLE16(const void* memPtr)
415{
416 if (LZ4_isLittleEndian()) {
417 return LZ4_read16(memPtr);
418 } else {
419 const BYTE* p = (const BYTE*)memPtr;
420 return (U16)((U16)p[0] + (p[1]<<8));
421 }
422}
423
424static void LZ4_writeLE16(void* memPtr, U16 value)
425{
426 if (LZ4_isLittleEndian()) {
427 LZ4_write16(memPtr, value);
428 } else {
429 BYTE* p = (BYTE*)memPtr;
430 p[0] = (BYTE) value;
431 p[1] = (BYTE)(value>>8);
432 }
433}
434
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
 BYTE* d = (BYTE*)dstPtr;
 const BYTE* s = (const BYTE*)srcPtr;
 BYTE* const e = (BYTE*)dstEnd;

 /* unconditional 8-byte rounds; the final round may run past dstEnd (see header comment),
  * which the caller must make safe by reserving slack space after the output */
 do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
}
445
/* Lookup tables used by LZ4_memcpy_using_offset_base() to re-position srcPtr
 * while replicating a repeating pattern when the match offset is < 8:
 * inc32table advances srcPtr after the first 4 copied bytes,
 * dec64table rewinds it before copying continues. */
static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
448
449
450#ifndef LZ4_FAST_DEC_LOOP
451# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
452# define LZ4_FAST_DEC_LOOP 1
453# elif defined(__aarch64__) && defined(__APPLE__)
454# define LZ4_FAST_DEC_LOOP 1
455# elif defined(__aarch64__) && !defined(__clang__)
456 /* On non-Apple aarch64, we disable this optimization for clang because
457 * on certain mobile chipsets, performance is reduced with clang. For
458 * more information refer to https://github.com/lz4/lz4/pull/707 */
459# define LZ4_FAST_DEC_LOOP 1
460# else
461# define LZ4_FAST_DEC_LOOP 0
462# endif
463#endif
464
465#if LZ4_FAST_DEC_LOOP
466
/* Copy a match whose source overlaps its destination by `offset` bytes
 * (srcPtr + offset == dstPtr). For offsets < 8, the first 8 output bytes are
 * built byte-by-byte and srcPtr is re-positioned via inc32table/dec64table so
 * that copying can continue in full words.
 * NOTE(review): the return-type/attribute line of this definition appears to
 * have been lost in extraction -- verify against upstream lz4.c. */
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
 assert(srcPtr + offset == dstPtr);
 if (offset < 8) {
 LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
 dstPtr[0] = srcPtr[0];
 dstPtr[1] = srcPtr[1];
 dstPtr[2] = srcPtr[2];
 dstPtr[3] = srcPtr[3];
 srcPtr += inc32table[offset];
 LZ4_memcpy(dstPtr+4, srcPtr, 4);
 srcPtr -= dec64table[offset];
 dstPtr += 8;
 } else {
 /* NOTE(review): an 8-byte copy (LZ4_memcpy(dstPtr, srcPtr, 8);) appears to be
  * missing from this excerpt before the pointer advances -- verify upstream */
 dstPtr += 8;
 srcPtr += 8;
 }

 /* NOTE(review): the trailing bulk copy (LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);)
  * also appears to have been dropped by extraction -- verify upstream */
}
489
/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
 * this version copies two times 16 bytes (instead of one time 32 bytes)
 * because it must be compatible with offsets >= 16. */
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
{
 BYTE* d = (BYTE*)dstPtr;
 const BYTE* s = (const BYTE*)srcPtr;
 BYTE* const e = (BYTE*)dstEnd;

 /* unconditional 2x16-byte rounds; may run past dstEnd (caller reserves slack) */
 do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
}
502
/* LZ4_memcpy_using_offset() presumes :
 * - dstEnd >= dstPtr + MINMATCH
 * - there is at least 8 bytes available to write after dstEnd */
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
 /* v holds one 8-byte repetition of the pattern for offsets 1/2/4,
  * so the overlapping copy degenerates into plain 8-byte stores */
 BYTE v[8];


 switch(offset) {
 case 1:
 MEM_INIT(v, *srcPtr, 8);
 break;
 case 2:
 LZ4_memcpy(v, srcPtr, 2);
 LZ4_memcpy(&v[2], srcPtr, 2);
#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
# pragma warning(push)
# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
#endif
 CA_SUPPRESS(6385);
 LZ4_memcpy(&v[4], v, 4);
#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
# pragma warning(pop)
#endif
 break;
 case 4:
 LZ4_memcpy(v, srcPtr, 4);
 LZ4_memcpy(&v[4], srcPtr, 4);
 break;
 default:
 /* NOTE(review): the fallback call
  * (LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);)
  * appears to be missing from this excerpt before the return -- verify upstream */
 return;
 }

 /* stamp the prepared 8-byte pattern until dstEnd is covered
  * (may write up to 8 bytes past dstEnd, per the contract above) */
 LZ4_memcpy(dstPtr, v, 8);
 dstPtr += 8;
 while (dstPtr < dstEnd) {
 LZ4_memcpy(dstPtr, v, 8);
 dstPtr += 8;
 }
}
546#endif
547
548
549/*-************************************
550* Common functions
551**************************************/
/* LZ4_NbCommonBytes() :
 * `val` is the XOR of two machine words and must be non-zero.
 * Returns the number of bytes (0 .. sizeof(reg_t)-1) that are identical before
 * the first difference, counting from the low-address side: trailing zero bytes
 * on little-endian hosts, leading zero bytes on big-endian hosts.
 * Uses hardware bit-scan intrinsics when available, and multiplicative /
 * lookup-table tricks as software fallbacks (LZ4_FORCE_SW_BITCOUNT). */
static unsigned LZ4_NbCommonBytes (reg_t val)
{
 assert(val != 0);
 if (LZ4_isLittleEndian()) {
 CA_SUPPRESS(6239);
 if (sizeof(val) == 8) {
# if defined(_MSC_VER) && (_MSC_VER >= 1800) && (defined(_M_AMD64) && !defined(_M_ARM64EC)) && !defined(LZ4_FORCE_SW_BITCOUNT)
/*-*************************************************************************************************
* ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications on ARM64 Windows 11.
* The ARM64EC ABI does not support AVX/AVX2/AVX512 instructions, nor their relevant intrinsics
* including _tzcnt_u64. Therefore, we need to neuter the _tzcnt_u64 code path for ARM64EC.
****************************************************************************************************/
# if defined(__clang__) && (__clang_major__ < 10)
 /* Avoid undefined clang-cl intrinsics issue.
 * See https://github.com/lz4/lz4/pull/1017 for details. */
 return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;
# else
 /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
 return (unsigned)_tzcnt_u64(val) >> 3;
# endif
# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
 unsigned long r = 0;
 _BitScanForward64(&r, (U64)val);
 return (unsigned)r >> 3;
# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
 ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
 !defined(LZ4_FORCE_SW_BITCOUNT)
 return (unsigned)__builtin_ctzll((U64)val) >> 3;
# else
 /* software fallback: isolate trailing zeros, then spread the count into the top byte */
 const U64 m = 0x0101010101010101ULL;
 val ^= val - 1;
 return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
# endif
 } else /* 32 bits */ {
# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
 unsigned long r;
 _BitScanForward(&r, (U32)val);
 return (unsigned)r >> 3;
# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
 ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
 !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
 return (unsigned)__builtin_ctz((U32)val) >> 3;
# else
 const U32 m = 0x01010101;
 return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
# endif
 }
 } else /* Big Endian CPU */ {
 CA_SUPPRESS(6239);
 if (sizeof(val)==8) {
# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
 ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
 !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
 return (unsigned)__builtin_clzll((U64)val) >> 3;
# else
#if 1
 /* this method is probably faster,
 * but adds a 128 bytes lookup table */
 static const unsigned char ctz7_tab[128] = {
 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
 };
 U64 const mask = 0x0101010101010101ULL;
 U64 const t = (((val >> 8) - mask) | val) & mask;
 return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
#else
 /* this method doesn't consume memory space like the previous one,
 * but it contains several branches,
 * that may end up slowing execution */
 static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
 Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
 Note that this code path is never triggered in 32-bits mode. */
 unsigned r;
 if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
 r += (!val);
 return r;
#endif
# endif
 } else /* 32 bits */ {
# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
 ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
 !defined(LZ4_FORCE_SW_BITCOUNT)
 return (unsigned)__builtin_clz((U32)val) >> 3;
# else
 /* branchless fallback: maps the position of the highest non-zero byte to 0..3 */
 val >>= 8;
 val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
 (val + 0x00FF0000)) >> 24;
 return (unsigned)val ^ 3;
# endif
 }
 }
}
651
652
#define STEPSIZE sizeof(reg_t)
/* LZ4_count() :
 * counts the number of bytes that are identical at pIn vs pMatch, reading no
 * further than pInLimit. Compares one machine word (STEPSIZE bytes) at a time,
 * then narrows down with 4-, 2- and 1-byte tails. */
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
 const BYTE* const pStart = pIn;

 if (likely(pIn < pInLimit-(STEPSIZE-1))) {
 reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
 if (!diff) {
 /* NOTE(review): the word-equal advance (pIn+=STEPSIZE; pMatch+=STEPSIZE;)
  * appears to be missing from this excerpt -- verify against upstream lz4.c */
 } else {
 /* first word already differs: the byte offset of the difference is the match length */
 return LZ4_NbCommonBytes(diff);
 } }

 while (likely(pIn < pInLimit-(STEPSIZE-1))) {
 reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
 if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
 pIn += LZ4_NbCommonBytes(diff);
 return (unsigned)(pIn - pStart);
 }

 /* tail: fewer than STEPSIZE bytes remain before pInLimit */
 CA_SUPPRESS(6239);
 if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
 if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
 if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
 return (unsigned)(pIn - pStart);
}
680
681
682#ifndef LZ4_COMMONDEFS_ONLY
683/*-************************************
684* Local Constants
685**************************************/
686static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
687static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
688
689
690/*-************************************
691* Local Structures and types
692**************************************/
693typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
694
720
721
722/*-************************************
723* Local Utils
724**************************************/
/* Library version as a string; value comes from LZ4_VERSION_STRING in the lz4 header */
const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
/* Size, in bytes, of an LZ4_stream_t compression state */
int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }
729
730
731/*-****************************************
732* Internal Definitions, used only in Tests
733*******************************************/
734#if defined (__cplusplus)
735extern "C" {
736#endif
737
738int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
739
740int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
742 const void* dictStart, size_t dictSize);
745 const void* dictStart, size_t dictSize);
746#if defined (__cplusplus)
747}
748#endif
749
750/*-******************************
751* Compression functions
752********************************/
754{
755 if (tableType == byU16)
756 return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
757 else
758 return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
759}
760
762{
763 const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
764 if (LZ4_isLittleEndian()) {
765 const U64 prime5bytes = 889523592379ULL;
766 return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
767 } else {
768 const U64 prime8bytes = 11400714785074694791ULL;
769 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
770 }
771}
772
773LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
774{
775 CA_SUPPRESS(6239);
776 if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
777 return LZ4_hash4(LZ4_read32(p), tableType);
778}
779
781{
782 switch (tableType)
783 {
784 default: /* fallthrough */
785 case clearedTable: { /* illegal! */ assert(0); return; }
786 case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
787 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
788 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
789 }
790}
791
793{
794 switch (tableType)
795 {
796 default: /* fallthrough */
797 case clearedTable: /* fallthrough */
798 case byPtr: { /* illegal! */ assert(0); return; }
799 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
800 case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
801 }
802}
803
 void* tableBase, tableType_t const tableType,
 const BYTE* srcBase)
{
 /* Record position p in hash slot h, encoded per table layout:
  * raw pointer for byPtr, 32-/16-bit offset from srcBase for byU32/byU16. */
 switch (tableType)
 {
 case clearedTable: { /* illegal! */ assert(0); return; }
 case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
 }
}
816
817LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
818{
819 U32 const h = LZ4_hashPosition(p, tableType);
820 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
821}
822
/* LZ4_getIndexOnHash() :
 * Index of match position registered in hash table.
 * hash position must be calculated by using base+index, or dictBase+index.
 * Assumption 1 : only valid if tableType == byU32 or byU16.
 * Assumption 2 : h is presumed valid (within limits of hash table)
 */
{
 if (tableType == byU32) {
 const U32* const hashTable = (const U32*) tableBase;
 /* byU32 table holds 2^(LZ4_MEMORY_USAGE-2) 4-byte slots */
 assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
 return hashTable[h];
 }
 if (tableType == byU16) {
 const U16* const hashTable = (const U16*) tableBase;
 /* byU16 table holds 2^(LZ4_MEMORY_USAGE-1) 2-byte slots */
 assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
 return hashTable[h];
 }
 assert(0); return 0; /* forbidden case */
}
844
845static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
846{
847 if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
848 if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
849 { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
850}
851
 const void* tableBase, tableType_t tableType,
 const BYTE* srcBase)
{
 /* Convenience wrapper: hash position p, then fetch the stored candidate match. */
 U32 const h = LZ4_hashPosition(p, tableType);
 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
860
 const int inputSize,
 const tableType_t tableType) {
 /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
 * therefore safe to use no matter what mode we're in. Otherwise, we figure
 * out if it's safe to leave as is or whether it needs to be reset.
 */
 if ((tableType_t)cctx->tableType != clearedTable) {
 assert(inputSize >= 0);
 /* Reset when the layout changes, when indexes could overflow their
  * representation (byU16 near 64K, byU32 past 1 GB), for pointer tables,
  * or when the input is large enough that a fresh table pays for itself. */
 if ((tableType_t)cctx->tableType != tableType
 || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
 || ((tableType == byU32) && cctx->currentOffset > 1 GB)
 || tableType == byPtr
 || inputSize >= 4 KB)
 {
 DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
 MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
 cctx->currentOffset = 0;
 cctx->tableType = (U32)clearedTable;
 } else {
 DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
 }
 }

 /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
 * is faster than compressing without a gap.
 * However, compressing with currentOffset == 0 is faster still,
 * so we preserve that case.
 */
 if (cctx->currentOffset != 0 && tableType == byU32) {
 DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
 cctx->currentOffset += 64 KB;
 }

 /* Finally, clear history */
 cctx->dictCtx = NULL;
 cctx->dictionary = NULL;
 cctx->dictSize = 0;
}
901
910 const char* const source,
911 char* const dest,
912 const int inputSize,
913 int* inputConsumed, /* only written when outputDirective == fillOutput */
914 const int maxOutputSize,
916 const tableType_t tableType,
919 const int acceleration)
920{
921 int result;
922 const BYTE* ip = (const BYTE*) source;
923
924 U32 const startIndex = cctx->currentOffset;
925 const BYTE* base = (const BYTE*) source - startIndex;
926 const BYTE* lowLimit;
927
928 const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
929 const BYTE* const dictionary =
930 dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
931 const U32 dictSize =
932 dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
933 const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
934
936 U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
937 const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
938 const BYTE* anchor = (const BYTE*) source;
939 const BYTE* const iend = ip + inputSize;
940 const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
941 const BYTE* const matchlimit = iend - LASTLITERALS;
942
943 /* the dictCtx currentOffset is indexed on the start of the dictionary,
944 * while a dictionary in the current context precedes the currentOffset */
945 const BYTE* dictBase = (dictionary == NULL) ? NULL :
947 dictionary + dictSize - dictCtx->currentOffset :
948 dictionary + dictSize - startIndex;
949
950 BYTE* op = (BYTE*) dest;
951 BYTE* const olimit = op + maxOutputSize;
952
953 U32 offset = 0;
955
956 DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
957 assert(ip != NULL);
958 /* If init conditions are not met, we don't have to mark stream
959 * as having dirty context, since no action was taken yet */
960 if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
961 if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
962 if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
963 assert(acceleration >= 1);
964
965 lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
966
967 /* Update context state */
969 /* Subsequent linked blocks can't use the dictionary. */
970 /* Instead, they use the block we just compressed. */
971 cctx->dictCtx = NULL;
972 cctx->dictSize = (U32)inputSize;
973 } else {
974 cctx->dictSize += (U32)inputSize;
975 }
976 cctx->currentOffset += (U32)inputSize;
977 cctx->tableType = (U32)tableType;
978
979 if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
980
981 /* First Byte */
982 LZ4_putPosition(ip, cctx->hashTable, tableType, base);
983 ip++; forwardH = LZ4_hashPosition(ip, tableType);
984
985 /* Main Loop */
986 for ( ; ; ) {
987 const BYTE* match;
988 BYTE* token;
989 const BYTE* filledIp;
990
991 /* Find a match */
992 if (tableType == byPtr) {
993 const BYTE* forwardIp = ip;
994 int step = 1;
995 int searchMatchNb = acceleration << LZ4_skipTrigger;
996 do {
997 U32 const h = forwardH;
998 ip = forwardIp;
999 forwardIp += step;
1000 step = (searchMatchNb++ >> LZ4_skipTrigger);
1001
1004
1005 match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
1006 forwardH = LZ4_hashPosition(forwardIp, tableType);
1007 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
1008
1009 } while ( (match+LZ4_DISTANCE_MAX < ip)
1010 || (LZ4_read32(match) != LZ4_read32(ip)) );
1011
1012 } else { /* byU32, byU16 */
1013
1014 const BYTE* forwardIp = ip;
1015 int step = 1;
1016 int searchMatchNb = acceleration << LZ4_skipTrigger;
1017 do {
1018 U32 const h = forwardH;
1019 U32 const current = (U32)(forwardIp - base);
1020 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1021 assert(matchIndex <= current);
1022 assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
1023 ip = forwardIp;
1024 forwardIp += step;
1025 step = (searchMatchNb++ >> LZ4_skipTrigger);
1026
1029
1030 if (dictDirective == usingDictCtx) {
1031 if (matchIndex < startIndex) {
1032 /* there was no match, try the dictionary */
1033 assert(tableType == byU32);
1036 matchIndex += dictDelta; /* make dictCtx index comparable with current context */
1037 lowLimit = dictionary;
1038 } else {
1039 match = base + matchIndex;
1040 lowLimit = (const BYTE*)source;
1041 }
1042 } else if (dictDirective == usingExtDict) {
1043 if (matchIndex < startIndex) {
1044 DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
1048 lowLimit = dictionary;
1049 } else {
1050 match = base + matchIndex;
1051 lowLimit = (const BYTE*)source;
1052 }
1053 } else { /* single continuous memory segment */
1054 match = base + matchIndex;
1055 }
1056 forwardH = LZ4_hashPosition(forwardIp, tableType);
1057 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1058
1059 DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
1060 if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
1061 assert(matchIndex < current);
1062 if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
1063 && (matchIndex+LZ4_DISTANCE_MAX < current)) {
1064 continue;
1065 } /* too far */
1066 assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
1067
1068 if (LZ4_read32(match) == LZ4_read32(ip)) {
1069 if (maybe_extMem) offset = current - matchIndex;
1070 break; /* match found */
1071 }
1072
1073 } while(1);
1074 }
1075
1076 /* Catch up */
1077 filledIp = ip;
1078 while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
1079
1080 /* Encode Literals */
1081 { unsigned const litLength = (unsigned)(ip - anchor);
1082 token = op++;
1083 if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
1084 (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
1085 return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1086 }
1087 if ((outputDirective == fillOutput) &&
1088 (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
1089 op--;
1090 goto _last_literals;
1091 }
1092 if (litLength >= RUN_MASK) {
1093 int len = (int)(litLength - RUN_MASK);
1094 *token = (RUN_MASK<<ML_BITS);
1095 for(; len >= 255 ; len-=255) *op++ = 255;
1096 *op++ = (BYTE)len;
1097 }
1098 else *token = (BYTE)(litLength<<ML_BITS);
1099
1100 /* Copy Literals */
1102 op+=litLength;
1103 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1104 (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
1105 }
1106
1108 /* at this stage, the following variables must be correctly set :
1109 * - ip : at start of LZ operation
1110 * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
1111 * - offset : if maybe_ext_memSegment==1 (constant)
1112 * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
1113 * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
1114 */
1115
1116 if ((outputDirective == fillOutput) &&
1117 (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
1118 /* the match was too close to the end, rewind and go to last literals */
1119 op = token;
1120 goto _last_literals;
1121 }
1122
1123 /* Encode Offset */
1124 if (maybe_extMem) { /* static test */
1125 DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
1127 LZ4_writeLE16(op, (U16)offset); op+=2;
1128 } else {
1129 DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
1131 LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
1132 }
1133
1134 /* Encode MatchLength */
1135 { unsigned matchCode;
1136
1138 && (lowLimit==dictionary) /* match within extDict */ ) {
1139 const BYTE* limit = ip + (dictEnd-match);
1140 assert(dictEnd > match);
1144 if (ip==limit) {
1145 unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
1146 matchCode += more;
1147 ip += more;
1148 }
1149 DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
1150 } else {
1153 DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
1154 }
1155
1156 if ((outputDirective) && /* Check output buffer overflow */
1157 (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
1158 if (outputDirective == fillOutput) {
1159 /* Match description too long : reduce it */
1160 U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
1164 if (unlikely(ip <= filledIp)) {
1165 /* We have already filled up to filledIp so if ip ends up less than filledIp
1166 * we have positions in the hash table beyond the current position. This is
1167 * a problem if we reuse the hash table. So we have to remove these positions
1168 * from the hash table.
1169 */
1170 const BYTE* ptr;
1171 DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
1172 for (ptr = ip; ptr <= filledIp; ++ptr) {
1173 U32 const h = LZ4_hashPosition(ptr, tableType);
1174 LZ4_clearHash(h, cctx->hashTable, tableType);
1175 }
1176 }
1177 } else {
1179 return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1180 }
1181 }
1182 if (matchCode >= ML_MASK) {
1183 *token += ML_MASK;
1184 matchCode -= ML_MASK;
1185 LZ4_write32(op, 0xFFFFFFFF);
1186 while (matchCode >= 4*255) {
1187 op+=4;
1188 LZ4_write32(op, 0xFFFFFFFF);
1189 matchCode -= 4*255;
1190 }
1191 op += matchCode / 255;
1192 *op++ = (BYTE)(matchCode % 255);
1193 } else
1194 *token += (BYTE)(matchCode);
1195 }
1196 /* Ensure we have enough space for the last literals. */
1198
1199 anchor = ip;
1200
1201 /* Test end of chunk */
1202 if (ip >= mflimitPlusOne) break;
1203
1204 /* Fill table */
1205 LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
1206
1207 /* Test next position */
1208 if (tableType == byPtr) {
1209
1210 match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
1211 LZ4_putPosition(ip, cctx->hashTable, tableType, base);
1212 if ( (match+LZ4_DISTANCE_MAX >= ip)
1213 && (LZ4_read32(match) == LZ4_read32(ip)) )
1214 { token=op++; *token=0; goto _next_match; }
1215
1216 } else { /* byU32, byU16 */
1217
1218 U32 const h = LZ4_hashPosition(ip, tableType);
1219 U32 const current = (U32)(ip-base);
1220 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1221 assert(matchIndex < current);
1222 if (dictDirective == usingDictCtx) {
1223 if (matchIndex < startIndex) {
1224 /* there was no match, try the dictionary */
1227 lowLimit = dictionary; /* required for match length counter */
1229 } else {
1230 match = base + matchIndex;
1231 lowLimit = (const BYTE*)source; /* required for match length counter */
1232 }
1233 } else if (dictDirective==usingExtDict) {
1234 if (matchIndex < startIndex) {
1237 lowLimit = dictionary; /* required for match length counter */
1238 } else {
1239 match = base + matchIndex;
1240 lowLimit = (const BYTE*)source; /* required for match length counter */
1241 }
1242 } else { /* single memory segment */
1243 match = base + matchIndex;
1244 }
1245 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1246 assert(matchIndex < current);
1247 if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
1248 && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
1249 && (LZ4_read32(match) == LZ4_read32(ip)) ) {
1250 token=op++;
1251 *token=0;
1252 if (maybe_extMem) offset = current - matchIndex;
1253 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1254 (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
1255 goto _next_match;
1256 }
1257 }
1258
1259 /* Prepare next loop */
1260 forwardH = LZ4_hashPosition(++ip, tableType);
1261
1262 }
1263
1265 /* Encode Last Literals */
1266 { size_t lastRun = (size_t)(iend - anchor);
1267 if ( (outputDirective) && /* Check output buffer overflow */
1268 (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
1269 if (outputDirective == fillOutput) {
1270 /* adapt lastRun to fill 'dst' */
1271 assert(olimit >= op);
1272 lastRun = (size_t)(olimit-op) - 1/*token*/;
1273 lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
1274 } else {
1276 return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1277 }
1278 }
1279 DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
1280 if (lastRun >= RUN_MASK) {
1281 size_t accumulator = lastRun - RUN_MASK;
1282 *op++ = RUN_MASK << ML_BITS;
1283 for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
1284 *op++ = (BYTE) accumulator;
1285 } else {
1286 *op++ = (BYTE)(lastRun<<ML_BITS);
1287 }
1289 ip = anchor + lastRun;
1290 op += lastRun;
1291 }
1292
1293 if (outputDirective == fillOutput) {
1294 *inputConsumed = (int) (((const char*)ip)-source);
1295 }
1296 result = (int)(((char*)op) - dest);
1297 assert(result > 0);
1298 DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
1299 return result;
1300}
1301
1308 const char* const src,
1309 char* const dst,
1310 const int srcSize,
1311 int *inputConsumed, /* only written when outputDirective == fillOutput */
1312 const int dstCapacity,
1314 const tableType_t tableType,
1317 const int acceleration)
1318{
1319 DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
1321
1322 if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
1323 if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
1324 if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
1325 DEBUGLOG(5, "Generating an empty block");
1327 assert(dst != NULL);
1328 dst[0] = 0;
1329 if (outputDirective == fillOutput) {
1331 *inputConsumed = 0;
1332 }
1333 return 1;
1334 }
1335 assert(src != NULL);
1336
1338 inputConsumed, /* only written into if outputDirective == fillOutput */
1340 tableType, dictDirective, dictIssue, acceleration);
1341}
1342
1343
1344int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1345{
1346 LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
1347 assert(ctx != NULL);
1351 if (inputSize < LZ4_64Klimit) {
1353 } else {
1354 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1356 }
1357 } else {
1358 if (inputSize < LZ4_64Klimit) {
1360 } else {
1361 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1363 }
1364 }
1365}
1366
1376int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
1377{
1378 LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
1381
1383 if (srcSize < LZ4_64Klimit) {
1384 const tableType_t tableType = byU16;
1385 LZ4_prepareTable(ctx, srcSize, tableType);
1386 if (ctx->currentOffset) {
1387 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
1388 } else {
1389 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1390 }
1391 } else {
1392 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1393 LZ4_prepareTable(ctx, srcSize, tableType);
1394 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1395 }
1396 } else {
1397 if (srcSize < LZ4_64Klimit) {
1398 const tableType_t tableType = byU16;
1399 LZ4_prepareTable(ctx, srcSize, tableType);
1400 if (ctx->currentOffset) {
1402 } else {
1404 }
1405 } else {
1406 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1407 LZ4_prepareTable(ctx, srcSize, tableType);
1409 }
1410 }
1411}
1412
1413
1414int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1415{
1416 int result;
1417#if (LZ4_HEAPMODE)
1418 LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
1419 if (ctxPtr == NULL) return 0;
1420#else
1421 LZ4_stream_t ctx;
1422 LZ4_stream_t* const ctxPtr = &ctx;
1423#endif
1425
1426#if (LZ4_HEAPMODE)
1427 FREEMEM(ctxPtr);
1428#endif
1429 return result;
1430}
1431
1432
/* Default compression entry point : identical to LZ4_compress_fast()
 * with a neutral acceleration factor of 1. */
int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
{
    int const written = LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
    return written;
}
1437
1438
1439/* Note!: This function leaves the stream in an unclean/broken state!
1440 * It is not safe to subsequently use the same state with a _fastReset() or
1441 * _continue() call without resetting it. */
1442static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1443{
1444 void* const s = LZ4_initStream(state, sizeof (*state));
1445 assert(s != NULL); (void)s;
1446
1447 if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
1448 return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
1449 } else {
1450 if (*srcSizePtr < LZ4_64Klimit) {
1452 } else {
1453 tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1455 } }
1456}
1457
1458
1459int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1460{
1461#if (LZ4_HEAPMODE)
1462 LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
1463 if (ctx == NULL) return 0;
1464#else
1466 LZ4_stream_t* ctx = &ctxBody;
1467#endif
1468
1469 int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
1470
1471#if (LZ4_HEAPMODE)
1472 FREEMEM(ctx);
1473#endif
1474 return result;
1475}
1476
1477
1478
1479/*-******************************
1480* Streaming functions
1481********************************/
1482
1483#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1485{
1486 LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
1488 DEBUGLOG(4, "LZ4_createStream %p", lz4s);
1489 if (lz4s == NULL) return NULL;
1490 LZ4_initStream(lz4s, sizeof(*lz4s));
1491 return lz4s;
1492}
1493#endif
1494
1495static size_t LZ4_stream_t_alignment(void)
1496{
1497#if LZ4_ALIGN_TEST
1498 typedef struct { char c; LZ4_stream_t t; } t_a;
1499 return sizeof(t_a) - sizeof(LZ4_stream_t);
1500#else
1501 return 1; /* effectively disabled */
1502#endif
1503}
1504
1505LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
1506{
1507 DEBUGLOG(5, "LZ4_initStream");
1508 if (buffer == NULL) { return NULL; }
1509 if (size < sizeof(LZ4_stream_t)) { return NULL; }
1510 if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
1511 MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
1512 return (LZ4_stream_t*)buffer;
1513}
1514
1515/* resetStream is now deprecated,
1516 * prefer initStream() which is more general */
1518{
1519 DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
1521}
1522
1526
1527#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
1529{
1530 if (!LZ4_stream) return 0; /* support free on NULL */
1531 DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
1533 return (0);
1534}
1535#endif
1536
1537
1538#define HASH_UNIT sizeof(reg_t)
1539int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
1540{
1541 LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
1542 const tableType_t tableType = byU32;
1543 const BYTE* p = (const BYTE*)dictionary;
1544 const BYTE* const dictEnd = p + dictSize;
1545 const BYTE* base;
1546
1547 DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
1548
1549 /* It's necessary to reset the context,
1550 * and not just continue it with prepareTable()
1551 * to avoid any risk of generating overflowing matchIndex
1552 * when compressing using this dictionary */
1554
1555 /* We always increment the offset by 64 KB, since, if the dict is longer,
1556 * we truncate it to the last 64k, and if it's shorter, we still want to
1557 * advance by a whole window length so we can provide the guarantee that
1558 * there are only valid offsets in the window, which allows an optimization
1559 * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
1560 * dictionary isn't a full 64k. */
1561 dict->currentOffset += 64 KB;
1562
1563 if (dictSize < (int)HASH_UNIT) {
1564 return 0;
1565 }
1566
1567 if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
1568 base = dictEnd - dict->currentOffset;
1569 dict->dictionary = p;
1570 dict->dictSize = (U32)(dictEnd - p);
1571 dict->tableType = (U32)tableType;
1572
1573 while (p <= dictEnd-HASH_UNIT) {
1574 LZ4_putPosition(p, dict->hashTable, tableType, base);
1575 p+=3;
1576 }
1577
1578 return (int)dict->dictSize;
1579}
1580
1582{
1583 const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
1584 &(dictionaryStream->internal_donotuse);
1585
1586 DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
1588 dictCtx != NULL ? dictCtx->dictSize : 0);
1589
1590 if (dictCtx != NULL) {
1591 /* If the current offset is zero, we will never look in the
1592 * external dictionary context, since there is no value a table
1593 * entry can take that indicate a miss. In that case, we need
1594 * to bump the offset to something non-zero.
1595 */
1596 if (workingStream->internal_donotuse.currentOffset == 0) {
1597 workingStream->internal_donotuse.currentOffset = 64 KB;
1598 }
1599
1600 /* Don't actually attach an empty dictionary.
1601 */
1602 if (dictCtx->dictSize == 0) {
1603 dictCtx = NULL;
1604 }
1605 }
1606 workingStream->internal_donotuse.dictCtx = dictCtx;
1607}
1608
1609
1610static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
1611{
1612 assert(nextSize >= 0);
1613 if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
1614 /* rescale hash table */
1615 U32 const delta = LZ4_dict->currentOffset - 64 KB;
1616 const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
1617 int i;
1618 DEBUGLOG(4, "LZ4_renormDictT");
1619 for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
1620 if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
1621 else LZ4_dict->hashTable[i] -= delta;
1622 }
1623 LZ4_dict->currentOffset = 64 KB;
1624 if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
1625 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
1626 }
1627}
1628
1629
1631 const char* source, char* dest,
1632 int inputSize, int maxOutputSize,
1633 int acceleration)
1634{
1635 const tableType_t tableType = byU32;
1636 LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
1637 const char* dictEnd = streamPtr->dictSize ? (const char*)streamPtr->dictionary + streamPtr->dictSize : NULL;
1638
1639 DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)", inputSize, streamPtr->dictSize);
1640
1641 LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */
1644
1645 /* invalidate tiny dictionaries */
1646 if ( (streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */
1647 && (dictEnd != source) /* prefix mode */
1648 && (inputSize > 0) /* tolerance : don't lose history, in case next invocation would use prefix mode */
1649 && (streamPtr->dictCtx == NULL) /* usingDictCtx */
1650 ) {
1651 DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
1652 /* remove dictionary existence from history, to employ faster prefix mode */
1653 streamPtr->dictSize = 0;
1654 streamPtr->dictionary = (const BYTE*)source;
1655 dictEnd = source;
1656 }
1657
1658 /* Check overlapping input/dictionary space */
1659 { const char* const sourceEnd = source + inputSize;
1660 if ((sourceEnd > (const char*)streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1661 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1662 if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1663 if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1664 streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
1665 }
1666 }
1667
1668 /* prefix mode : source data follows dictionary */
1669 if (dictEnd == source) {
1670 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1672 else
1674 }
1675
1676 /* external dictionary mode */
1677 { int result;
1678 if (streamPtr->dictCtx) {
1679 /* We depend here on the fact that dictCtx'es (produced by
1680 * LZ4_loadDict) guarantee that their tables contain no references
1681 * to offsets between dictCtx->currentOffset - 64 KB and
1682 * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
1683 * to use noDictIssue even when the dict isn't a full 64 KB.
1684 */
1685 if (inputSize > 4 KB) {
1686 /* For compressing large blobs, it is faster to pay the setup
1687 * cost to copy the dictionary's tables into the active context,
1688 * so that the compression loop is only looking into one table.
1689 */
1690 LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
1692 } else {
1694 }
1695 } else { /* small data <= 4 KB */
1696 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1698 } else {
1700 }
1701 }
1702 streamPtr->dictionary = (const BYTE*)source;
1703 streamPtr->dictSize = (U32)inputSize;
1704 return result;
1705 }
1706}
1707
1708
1709/* Hidden debug function, to force-test external dictionary mode */
1711{
1712 LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
1713 int result;
1714
1715 LZ4_renormDictT(streamPtr, srcSize);
1716
1717 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1719 } else {
1721 }
1722
1723 streamPtr->dictionary = (const BYTE*)source;
1724 streamPtr->dictSize = (U32)srcSize;
1725
1726 return result;
1727}
1728
1729
1738{
1739 LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1740
1741 DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize, safeBuffer);
1742
1743 if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
1744 if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
1745
1746 if (safeBuffer == NULL) assert(dictSize == 0);
1747 if (dictSize > 0) {
1748 const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
1749 assert(dict->dictionary);
1750 LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
1751 }
1752
1753 dict->dictionary = (const BYTE*)safeBuffer;
1754 dict->dictSize = (U32)dictSize;
1755
1756 return dictSize;
1757}
1758
1759
1760
1761/*-*******************************
1762 * Decompression functions
1763 ********************************/
1764
1766
1767#undef MIN
1768#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
1769
1770
1771/* variant for decompress_unsafe()
1772 * does not know end of input
1773 * presumes input is well formed
1774 * note : will consume at least one byte */
1776{
1777 size_t b, l = 0;
1778 do { b = **pp; (*pp)++; l += b; } while (b==255);
1779 DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes", l, l/255 + 1)
1780 return l;
1781}
1782
1783/* core decoder variant for LZ4_decompress_fast*()
1784 * for legacy support only : these entry points are deprecated.
1785 * - Presumes input is correctly formed (no defense vs malformed inputs)
1786 * - Does not know input size (presume input buffer is "large enough")
1787 * - Decompress a full block (only)
1788 * @return : nb of bytes read from input.
1789 * Note : this variant is not optimized for speed, just for maintenance.
1790 * the goal is to remove support of decompress_fast*() variants by v2.0
1791**/
1794 const BYTE* const istart,
1795 BYTE* const ostart,
1796 int decompressedSize,
1797
1798 size_t prefixSize,
1799 const BYTE* const dictStart, /* only if dict==usingExtDict */
1800 const size_t dictSize /* note: =0 if dictStart==NULL */
1801 )
1802{
1803 const BYTE* ip = istart;
1804 BYTE* op = (BYTE*)ostart;
1805 BYTE* const oend = ostart + decompressedSize;
1806 const BYTE* const prefixStart = ostart - prefixSize;
1807
1808 DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
1809 if (dictStart == NULL) assert(dictSize == 0);
1810
1811 while (1) {
1812 /* start new sequence */
1813 unsigned token = *ip++;
1814
1815 /* literals */
1816 { size_t ll = token >> ML_BITS;
1817 if (ll==15) {
1818 /* long literal length */
1820 }
1821 if ((size_t)(oend-op) < ll) return -1; /* output buffer overflow */
1822 LZ4_memmove(op, ip, ll); /* support in-place decompression */
1823 op += ll;
1824 ip += ll;
1825 if ((size_t)(oend-op) < MFLIMIT) {
1826 if (op==oend) break; /* end of block */
1827 DEBUGLOG(5, "invalid: literals end at distance %zi from end of block", oend-op);
1828 /* incorrect end of block :
1829 * last match must start at least MFLIMIT==12 bytes before end of output block */
1830 return -1;
1831 } }
1832
1833 /* match */
1834 { size_t ml = token & 15;
1835 size_t const offset = LZ4_readLE16(ip);
1836 ip+=2;
1837
1838 if (ml==15) {
1839 /* long literal length */
1841 }
1842 ml += MINMATCH;
1843
1844 if ((size_t)(oend-op) < ml) return -1; /* output buffer overflow */
1845
1846 { const BYTE* match = op - offset;
1847
1848 /* out of range */
1849 if (offset > (size_t)(op - prefixStart) + dictSize) {
1850 DEBUGLOG(6, "offset out of range");
1851 return -1;
1852 }
1853
1854 /* check special case : extDict */
1855 if (offset > (size_t)(op - prefixStart)) {
1856 /* extDict scenario */
1857 const BYTE* const dictEnd = dictStart + dictSize;
1858 const BYTE* extMatch = dictEnd - (offset - (size_t)(op-prefixStart));
1859 size_t const extml = (size_t)(dictEnd - extMatch);
1860 if (extml > ml) {
1861 /* match entirely within extDict */
1863 op += ml;
1864 ml = 0;
1865 } else {
1866 /* match split between extDict & prefix */
1868 op += extml;
1869 ml -= extml;
1870 }
1871 match = prefixStart;
1872 }
1873
1874 /* match copy - slow variant, supporting overlap copy */
1875 { size_t u;
1876 for (u=0; u<ml; u++) {
1877 op[u] = match[u];
1878 } } }
1879 op += ml;
1880 if ((size_t)(oend-op) < LASTLITERALS) {
1881 DEBUGLOG(5, "invalid: match ends at distance %zi from end of block", oend-op);
1882 /* incorrect end of block :
1883 * last match must stop at least LASTLITERALS==5 bytes before end of output block */
1884 return -1;
1885 }
1886 } /* match */
1887 } /* main loop */
1888 return (int)(ip - istart);
1889}
1890
1891
1892/* Read the variable-length literal or match length.
1893 *
1894 * @ip : input pointer
1895 * @ilimit : position after which if length is not decoded, the input is necessarily corrupted.
1896 * @initial_check - check ip >= ipmax before start of loop. Returns initial_error if so.
1897 * @error (output) - error code. Must be set to 0 before call.
1898**/
1899typedef size_t Rvl_t;
1900static const Rvl_t rvl_error = (Rvl_t)(-1);
1903 int initial_check)
1904{
1905 Rvl_t s, length = 0;
1906 assert(ip != NULL);
1907 assert(*ip != NULL);
1908 assert(ilimit != NULL);
1909 if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */
1910 return rvl_error;
1911 }
1912 do {
1913 s = **ip;
1914 (*ip)++;
1915 length += s;
1916 if (unlikely((*ip) > ilimit)) { /* read limit reached */
1917 return rvl_error;
1918 }
1919 /* accumulator overflow detection (32-bit mode only) */
1920 if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
1921 return rvl_error;
1922 }
1923 } while (s==255);
1924
1925 return length;
1926}
1927
/* NOTE(review): core LZ4 block decoder, LZ4_decompress_generic(). This is a
 * doxygen source listing: the opening signature line (listing line 1935,
 * "LZ4_FORCE_INLINE int LZ4_decompress_generic(") and a number of interior
 * lines are absent from this extract, visible as gaps in the embedded line
 * numbering. In particular the labels targeted by the gotos below
 * (safe_decode, safe_literal_copy, safe_match_copy, _output_error, and
 * _copy_match) do not appear in this extract. Code lines are kept
 * byte-identical; only comments are added. */
1936 const char* const src,
1937 char* const dst,
1938 int srcSize,
1939 int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
1940
1941 earlyEnd_directive partialDecoding, /* full, partial */
1942 dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
1943 const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
1944 const BYTE* const dictStart, /* only if dict==usingExtDict */
1945 const size_t dictSize /* note : = 0 if noDict */
1946 )
1947{
 /* Guard: NULL source or negative capacity is an immediate error (-1). */
1948 if ((src == NULL) || (outputSize < 0)) { return -1; }
1949
1950 { const BYTE* ip = (const BYTE*) src;
1951 const BYTE* const iend = ip + srcSize;
1952
1953 BYTE* op = (BYTE*) dst;
1954 BYTE* const oend = op + outputSize;
1955 BYTE* cpy;
1956
1957 const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
1958
 /* Offsets only need validation while fewer than 64 KB of history exists. */
1959 const int checkOffset = (dictSize < (int)(64 KB));
1960
1961
1962 /* Set up the "end" pointers for the shortcut. */
1963 const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;
1964 const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;
1965
1966 const BYTE* match;
1967 size_t offset;
1968 unsigned token;
1969 size_t length;
1970
1971
1972 DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
1973
1974 /* Special cases */
1975 assert(lowPrefix <= op);
1976 if (unlikely(outputSize==0)) {
1977 /* Empty output buffer */
1978 if (partialDecoding) return 0;
 /* A valid empty block is exactly one zero byte. */
1979 return ((srcSize==1) && (*ip==0)) ? 0 : -1;
1980 }
1981 if (unlikely(srcSize==0)) { return -1; }
1982
1983 /* LZ4_FAST_DEC_LOOP:
1984 * designed for modern OoO performance cpus,
1985 * where copying reliably 32-bytes is preferable to an unpredictable branch.
1986 * note : fast loop may show a regression for some client arm chips. */
1987#if LZ4_FAST_DEC_LOOP
1988 if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
1989 DEBUGLOG(6, "skip fast decode loop");
 /* NOTE(review): the "safe_decode:" label this jumps to is not visible in
  * this extract (it sits in one of the missing listing lines). */
1990 goto safe_decode;
1991 }
1992
1993 /* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
1994 while (1) {
1995 /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
 /* NOTE(review): listing line 1996 (the assertion itself) is missing here. */
1997 assert(ip < iend);
1998 token = *ip++;
1999 length = token >> ML_BITS; /* literal length */
2000
2001 /* decode literal length */
2002 if (length == RUN_MASK) {
2003 size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
2004 if (addl == rvl_error) { goto _output_error; }
2005 length += addl;
2006 if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
2007 if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
2008
2009 /* copy literals */
2010 cpy = op+length;
 /* NOTE(review): listing lines 2011 and 2013 (static assert and the
  * 32-byte wildcopy of the literals) are absent from this extract. */
2012 if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
2014 ip += length; op = cpy;
2015 } else {
2016 cpy = op+length;
2017 DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
2018 /* We don't need to check oend, since we check it once for each loop below */
2019 if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
2020 /* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
2021 LZ4_memcpy(op, ip, 16);
2022 ip += length; op = cpy;
2023 }
2024
2025 /* get offset */
2026 offset = LZ4_readLE16(ip); ip+=2;
2027 match = op - offset;
2028 assert(match <= op); /* overflow check */
2029
2030 /* get matchlength */
2031 length = token & ML_MASK;
2032
2033 if (length == ML_MASK) {
2034 size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
2035 if (addl == rvl_error) { goto _output_error; }
2036 length += addl;
2037 length += MINMATCH;
2038 if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
2039 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
2040 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
2041 goto safe_match_copy;
2042 }
2043 } else {
2044 length += MINMATCH;
2045 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
2046 goto safe_match_copy;
2047 }
2048
2049 /* Fastpath check: skip LZ4_wildCopy32 when true */
2050 if ((dict == withPrefix64k) || (match >= lowPrefix)) {
2051 if (offset >= 8) {
 /* NOTE(review): listing line 2052 is missing here (numbering gap). */
2053 assert(match <= op);
2054 assert(op + 18 <= oend);
2055
 /* Unconditional 18-byte copy: max short match (14+MINMATCH) is 18. */
2056 LZ4_memcpy(op, match, 8);
2057 LZ4_memcpy(op+8, match+8, 8);
2058 LZ4_memcpy(op+16, match+16, 2);
2059 op += length;
2060 continue;
2061 } } }
2062
2063 if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
2064 /* match starting within external dictionary */
2065 if ((dict==usingExtDict) && (match < lowPrefix)) {
2066 assert(dictEnd != NULL);
2067 if (unlikely(op+length > oend-LASTLITERALS)) {
2068 if (partialDecoding) {
2069 DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
2070 length = MIN(length, (size_t)(oend-op));
2071 } else {
2072 goto _output_error; /* end-of-block condition violated */
2073 } }
2074
2075 if (length <= (size_t)(lowPrefix-match)) {
2076 /* match fits entirely within external dictionary : just copy */
2077 LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
2078 op += length;
2079 } else {
2080 /* match stretches into both external dictionary and current block */
2081 size_t const copySize = (size_t)(lowPrefix - match);
2082 size_t const restSize = length - copySize;
 /* NOTE(review): listing line 2083 (the copy of the dictionary part)
  * is absent from this extract. */
2084 op += copySize;
2085 if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
2086 BYTE* const endOfMatch = op + restSize;
2087 const BYTE* copyFrom = lowPrefix;
2088 while (op < endOfMatch) { *op++ = *copyFrom++; }
2089 } else {
 /* NOTE(review): listing line 2090 (the non-overlap memcpy) is absent. */
2091 op += restSize;
2092 } }
2093 continue;
2094 }
2095
2096 /* copy match within block */
2097 cpy = op + length;
2098
2099 assert((op <= oend) && (oend-op >= 32));
2100 if (unlikely(offset<16)) {
 /* NOTE(review): listing lines 2101 and 2103 (the actual copy calls for
  * this if/else — small-offset copier and 32-byte wildcopy) are absent
  * from this extract, so both branches appear empty here. */
2102 } else {
2104 }
2105
2106 op = cpy; /* wildcopy correction */
2107 }
 /* NOTE(review): listing line 2108 (presumably the "safe_decode:" label)
  * is absent from this extract. */
2109#endif
2110
2111 /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
2112 while (1) {
2113 assert(ip < iend);
2114 token = *ip++;
2115 length = token >> ML_BITS; /* literal length */
2116
2117 /* A two-stage shortcut for the most common case:
2118 * 1) If the literal length is 0..14, and there is enough space,
2119 * enter the shortcut and copy 16 bytes on behalf of the literals
2120 * (in the fast mode, only 8 bytes can be safely copied this way).
2121 * 2) Further if the match length is 4..18, copy 18 bytes in a similar
2122 * manner; but we ensure that there's enough space in the output for
2123 * those 18 bytes earlier, upon entering the shortcut (in other words,
2124 * there is a combined check for both stages).
2125 */
2126 if ( (length != RUN_MASK)
2127 /* strictly "less than" on input, to re-enter the loop with at least one byte */
2128 && likely((ip < shortiend) & (op <= shortoend)) ) {
2129 /* Copy the literals */
2130 LZ4_memcpy(op, ip, 16);
2131 op += length; ip += length;
2132
2133 /* The second stage: prepare for match copying, decode full info.
2134 * If it doesn't work out, the info won't be wasted. */
2135 length = token & ML_MASK; /* match length */
2136 offset = LZ4_readLE16(ip); ip += 2;
2137 match = op - offset;
2138 assert(match <= op); /* check overflow */
2139
2140 /* Do not deal with overlapping matches. */
2141 if ( (length != ML_MASK)
2142 && (offset >= 8)
2143 && (dict==withPrefix64k || match >= lowPrefix) ) {
2144 /* Copy the match. */
2145 LZ4_memcpy(op + 0, match + 0, 8);
2146 LZ4_memcpy(op + 8, match + 8, 8);
2147 LZ4_memcpy(op +16, match +16, 2);
2148 op += length + MINMATCH;
2149 /* Both stages worked, load the next token. */
2150 continue;
2151 }
2152
2153 /* The second stage didn't work out, but the info is ready.
2154 * Propel it right to the point of match copying. */
2155 goto _copy_match;
2156 }
2157
2158 /* decode literal length */
2159 if (length == RUN_MASK) {
2160 size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
2161 if (addl == rvl_error) { goto _output_error; }
2162 length += addl;
2163 if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
2164 if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
2165 }
2166
2167 /* copy literals */
2168 cpy = op+length;
2169#if LZ4_FAST_DEC_LOOP
 /* NOTE(review): listing lines 2170/2172 (the "safe_literal_copy:" label
  * targeted by the fast-loop gotos above) are absent from this extract. */
2171#endif
2173 if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
2174 /* We've either hit the input parsing restriction or the output parsing restriction.
2175 * In the normal scenario, decoding a full block, it must be the last sequence,
2176 * otherwise it's an error (invalid input or dimensions).
2177 * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
2178 */
2179 if (partialDecoding) {
2180 /* Since we are partial decoding we may be in this block because of the output parsing
2181 * restriction, which is not valid since the output buffer is allowed to be undersized.
2182 */
2183 DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
2184 DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
2185 DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
2186 DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
2187 /* Finishing in the middle of a literals segment,
2188 * due to lack of input.
2189 */
2190 if (ip+length > iend) {
2191 length = (size_t)(iend-ip);
2192 cpy = op + length;
2193 }
2194 /* Finishing in the middle of a literals segment,
2195 * due to lack of output space.
2196 */
2197 if (cpy > oend) {
2198 cpy = oend;
2199 assert(op<=oend);
2200 length = (size_t)(oend-op);
2201 }
2202 } else {
2203 /* We must be on the last sequence (or invalid) because of the parsing limitations
2204 * so check that we exactly consume the input and don't overrun the output buffer.
2205 */
2206 if ((ip+length != iend) || (cpy > oend)) {
2207 DEBUGLOG(6, "should have been last run of literals")
2208 DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
2209 DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
2210 goto _output_error;
2211 }
2212 }
2213 LZ4_memmove(op, ip, length); /* supports overlapping memory regions, for in-place decompression scenarios */
2214 ip += length;
2215 op += length;
2216 /* Necessarily EOF when !partialDecoding.
2217 * When partialDecoding, it is EOF if we've either
2218 * filled the output buffer or
2219 * can't proceed with reading an offset for following match.
2220 */
2221 if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
2222 break;
2223 }
2224 } else {
2225 LZ4_wildCopy8(op, ip, cpy); /* can overwrite up to 8 bytes beyond cpy */
2226 ip += length; op = cpy;
2227 }
2228
2229 /* get offset */
2230 offset = LZ4_readLE16(ip); ip+=2;
2231 match = op - offset;
2232
2233 /* get matchlength */
2234 length = token & ML_MASK;
2235
 /* NOTE(review): listing line 2236 (the "_copy_match:" label targeted by
  * the shortcut goto above) is absent from this extract. */
2237 if (length == ML_MASK) {
2238 size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
2239 if (addl == rvl_error) { goto _output_error; }
2240 length += addl;
2241 if (unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
2242 }
2243 length += MINMATCH;
2244
2245#if LZ4_FAST_DEC_LOOP
 /* NOTE(review): listing line 2246 (the "safe_match_copy:" label targeted
  * by the fast-loop gotos above) is absent from this extract. */
2247#endif
2248 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
2249 /* match starting within external dictionary */
2250 if ((dict==usingExtDict) && (match < lowPrefix)) {
2251 assert(dictEnd != NULL);
2252 if (unlikely(op+length > oend-LASTLITERALS)) {
2253 if (partialDecoding) length = MIN(length, (size_t)(oend-op));
2254 else goto _output_error; /* doesn't respect parsing restriction */
2255 }
2256
2257 if (length <= (size_t)(lowPrefix-match)) {
2258 /* match fits entirely within external dictionary : just copy */
2259 LZ4_memmove(op, dictEnd - (lowPrefix-match), length);
2260 op += length;
2261 } else {
2262 /* match stretches into both external dictionary and current block */
2263 size_t const copySize = (size_t)(lowPrefix - match);
2264 size_t const restSize = length - copySize;
 /* NOTE(review): listing line 2265 (the copy of the dictionary part)
  * is absent from this extract. */
2266 op += copySize;
2267 if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
2268 BYTE* const endOfMatch = op + restSize;
2269 const BYTE* copyFrom = lowPrefix;
2270 while (op < endOfMatch) *op++ = *copyFrom++;
2271 } else {
 /* NOTE(review): listing line 2272 (the non-overlap memcpy) is absent. */
2273 op += restSize;
2274 } }
2275 continue;
2276 }
 /* NOTE(review): listing line 2277 is missing here (numbering gap). */
2278
2279 /* copy match within block */
2280 cpy = op + length;
2281
2282 /* partialDecoding : may end anywhere within the block */
2283 assert(op<=oend);
 /* NOTE(review): listing line 2284 (the guard opening this braced scope,
  * presumably a partialDecoding near-end-of-buffer check) is absent. */
2285 size_t const mlen = MIN(length, (size_t)(oend-op));
2286 const BYTE* const matchEnd = match + mlen;
2287 BYTE* const copyEnd = op + mlen;
2288 if (matchEnd > op) { /* overlap copy */
2289 while (op < copyEnd) { *op++ = *match++; }
2290 } else {
2291 LZ4_memcpy(op, match, mlen);
2292 }
2293 op = copyEnd;
2294 if (op == oend) { break; }
2295 continue;
2296 }
2297
2298 if (unlikely(offset<8)) {
2299 LZ4_write32(op, 0); /* silence msan warning when offset==0 */
2300 op[0] = match[0];
2301 op[1] = match[1];
2302 op[2] = match[2];
2303 op[3] = match[3];
2304 match += inc32table[offset];
2305 LZ4_memcpy(op+4, match, 4);
2306 match -= dec64table[offset];
2307 } else {
2308 LZ4_memcpy(op, match, 8);
2309 match += 8;
2310 }
2311 op += 8;
2312
 /* NOTE(review): listing line 2313 (the branch guard that opens this
  * near-end-of-output path) is absent from this extract. */
2314 BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
2315 if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
2316 if (op < oCopyLimit) {
 /* NOTE(review): listing line 2317 (the wildcopy up to oCopyLimit)
  * is absent from this extract. */
2318 match += oCopyLimit - op;
2319 op = oCopyLimit;
2320 }
2321 while (op < cpy) { *op++ = *match++; }
2322 } else {
2323 LZ4_memcpy(op, match, 8);
2324 if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
2325 }
2326 op = cpy; /* wildcopy correction */
2327 }
2328
2329 /* end of decoding */
2330 DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
2331 return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
2332
2333 /* Overflow error detected */
 /* NOTE(review): the "_output_error:" label (listing line 2334), target of
  * every error goto above, is absent from this extract. The negative
  * return encodes the input position where decoding failed. */
2335 return (int) (-(((const char*)ip)-src))-1;
2336 }
2337}
2338
2339
2340/*===== Instantiate the API decoding functions. =====*/
2341
2349
2358
/* NOTE(review): obsolete API — decodes using the decompressed size as input.
 * Listing line 2363 (the "return LZ4_decompress_unsafe_generic(" call line)
 * is absent from this extract; only the argument lines remain. */
2360int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
2361{
2362 DEBUGLOG(5, "LZ4_decompress_fast");
2364 (const BYTE*)source, (BYTE*)dest, originalSize,
2365 0, NULL, 0);
2366}
2367
2368/*===== Instantiate a few more decoding cases, used more than once. =====*/
2369
2370LZ4_FORCE_O2 /* Exported, an obsolete API function. */
2377
/* NOTE(review): partial decode with a full 64 KB prefix immediately before
 * dest. Listing lines 2381-2383 (the LZ4_decompress_generic call and its
 * first arguments) are absent from this extract. */
2379static int LZ4_decompress_safe_partial_withPrefix64k(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity)
2380{
2384 (BYTE*)dest - 64 KB, NULL, 0);
2385}
2386
2387/* Another obsolete API function, paired with the previous one. */
/* NOTE(review): the signature line (listing line 2388) is absent from this
 * extract — presumably LZ4_decompress_fast_withPrefix64k; verify against the
 * original lz4.cpp. Line 2390 (the call line) is also missing. */
2389{
2391 (const BYTE*)source, (BYTE*)dest, originalSize,
2392 64 KB, NULL, 0);
2393}
2394
/* NOTE(review): decode with a prefix dictionary smaller than 64 KB located
 * immediately before dest. Listing lines 2399-2400 (the call line and its
 * leading arguments) are absent from this extract. */
2396static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
2397 size_t prefixSize)
2398{
2401 (BYTE*)dest-prefixSize, NULL, 0);
2402}
2403
/* NOTE(review): partial-decode variant of the small-prefix path. Listing
 * lines 2408-2410 (the call line and leading arguments) are absent from
 * this extract. */
2405static int LZ4_decompress_safe_partial_withSmallPrefix(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity,
2406 size_t prefixSize)
2407{
2411 (BYTE*)dest-prefixSize, NULL, 0);
2412}
2413
/* NOTE(review): the index at the end of this listing places
 * LZ4_decompress_safe_forceExtDict at listing line 2415; its leading
 * signature lines (2414-2416) and the call line (2419-2420) are absent
 * from this extract — only the tail of the parameter list remains. */
2417 const void* dictStart, size_t dictSize)
2418{
2421 (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2422}
2423
/* NOTE(review): the index at the end of this listing places
 * LZ4_decompress_safe_partial_forceExtDict at listing line 2425; its
 * leading signature lines (2424-2426) and call lines (2429-2431) are
 * absent from this extract. */
2427 const void* dictStart, size_t dictSize)
2428{
2432 (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2433}
2434
/* NOTE(review): obsolete fast-decode path with an external dictionary.
 * Listing line 2439 (the "return LZ4_decompress_unsafe_generic(" call line)
 * is absent from this extract. */
2436static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
2437 const void* dictStart, size_t dictSize)
2438{
2440 (const BYTE*)source, (BYTE*)dest, originalSize,
2441 0, (const BYTE*)dictStart, dictSize);
2442}
2443
2444/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
2445 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
2446 * These routines are used only once, in LZ4_decompress_*_continue().
2447 */
/* NOTE(review): "double dictionary" decode — prefix plus external dict, per
 * the comment block above. The index places LZ4_decompress_safe_doubleDict
 * at listing line 2449; its leading signature lines (2448-2449) and call
 * lines (2452-2453) are absent from this extract. */
2450 size_t prefixSize, const void* dictStart, size_t dictSize)
2451{
2454 (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2455}
2456
2457/*===== streaming decompression functions =====*/
2458
2459#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
/* NOTE(review): listing lines 2460-2464 (presumably LZ4_createStreamDecode)
 * are entirely absent from this extract; only a bare gap remains below. */
2465
/* NOTE(review): this body belongs to LZ4_freeStreamDecode — its signature
 * (listing line 2466) and the FREEMEM call (line 2469) are absent here.
 * The visible code tolerates a NULL stream and returns 0. */
2467{
2468 if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
2470 return 0;
2471}
2472#endif
2473
/* Initialize a streaming-decode state with an optional dictionary:
 * the dictionary is recorded as the current "prefix" (prefixEnd points one
 * past its last byte), and any previous external dictionary is cleared.
 * Always returns 1 (success). When dictSize is 0, prefixEnd is simply set
 * to the (possibly NULL) dictionary pointer. */
2480int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
2481{
2482 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2483 lz4sd->prefixSize = (size_t)dictSize;
2484 if (dictSize) {
2485 assert(dictionary != NULL);
2486 lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
2487 } else {
2488 lz4sd->prefixEnd = (const BYTE*) dictionary;
2489 }
2490 lz4sd->externalDict = NULL;
2491 lz4sd->extDictSize = 0;
2492 return 1;
2493}
2494
/* NOTE(review): body of LZ4_decoderRingBufferSize. Its signature (listing
 * line 2506) and the final return (line 2511, presumably the
 * LZ4_DECODER_RING_BUFFER_SIZE macro) are absent from this extract. The
 * visible code rejects negative or oversized block sizes (returning 0) and
 * clamps tiny block sizes up to 16. */
2507{
2508 if (maxBlockSize < 0) return 0;
2509 if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
2510 if (maxBlockSize < 16) maxBlockSize = 16;
2512}
2513
2514/*
2515*_continue() :
2516 These decoding functions allow decompression of multiple blocks in "streaming" mode.
2517 Previously decoded blocks must still be available at the memory position where they were decoded.
2518 If it's not possible, save the relevant part of decoded data into a safe buffer,
2519 and indicate where it stands using LZ4_setStreamDecode()
2520*/
/* NOTE(review): body of LZ4_decompress_safe_continue (signature on listing
 * lines 2521-2522 is absent from this extract, as are the call lines 2530,
 * 2537, 2542 and 2551 inside the three branches). The visible structure:
 * first call → plain decode and establish the prefix; contiguous call →
 * extend the prefix (64 KB / small-prefix / double-dict variants);
 * otherwise → demote the old prefix to an external dictionary. */
2523{
2524 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2525 int result;
2526
2527 if (lz4sd->prefixSize == 0) {
2528 /* The first call, no dictionary yet. */
2529 assert(lz4sd->extDictSize == 0);
2531 if (result <= 0) return result;
2532 lz4sd->prefixSize = (size_t)result;
2533 lz4sd->prefixEnd = (BYTE*)dest + result;
2534 } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2535 /* They're rolling the current segment. */
2536 if (lz4sd->prefixSize >= 64 KB - 1)
2538 else if (lz4sd->extDictSize == 0)
2539 result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
2540 lz4sd->prefixSize);
2541 else
2543 lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2544 if (result <= 0) return result;
2545 lz4sd->prefixSize += (size_t)result;
2546 lz4sd->prefixEnd += result;
2547 } else {
2548 /* The buffer wraps around, or they're switching to another buffer. */
2549 lz4sd->extDictSize = lz4sd->prefixSize;
2550 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2552 lz4sd->externalDict, lz4sd->extDictSize);
2553 if (result <= 0) return result;
2554 lz4sd->prefixSize = (size_t)result;
2555 lz4sd->prefixEnd = (BYTE*)dest + result;
2556 }
2557
2558 return result;
2559}
2560
/* NOTE(review): LZ4_decompress_fast_continue — the line carrying the
 * function name (listing line 2562) and several call lines (2565, 2575,
 * 2581) are absent from this extract. Same three-branch structure as the
 * safe _continue variant, but bookkeeping uses originalSize directly
 * since the fast path is driven by the decompressed size. */
2561LZ4_FORCE_O2 int
2563 const char* source, char* dest, int originalSize)
2564{
2566 (assert(LZ4_streamDecode!=NULL), &LZ4_streamDecode->internal_donotuse);
2567 int result;
2568
2569 DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
2570 assert(originalSize >= 0);
2571
2572 if (lz4sd->prefixSize == 0) {
2573 DEBUGLOG(5, "first invocation : no prefix nor extDict");
2574 assert(lz4sd->extDictSize == 0);
2576 if (result <= 0) return result;
2577 lz4sd->prefixSize = (size_t)originalSize;
2578 lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2579 } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2580 DEBUGLOG(5, "continue using existing prefix");
2582 (const BYTE*)source, (BYTE*)dest, originalSize,
2583 lz4sd->prefixSize,
2584 lz4sd->externalDict, lz4sd->extDictSize);
2585 if (result <= 0) return result;
2586 lz4sd->prefixSize += (size_t)originalSize;
2587 lz4sd->prefixEnd += originalSize;
2588 } else {
2589 DEBUGLOG(5, "prefix becomes extDict");
2590 lz4sd->extDictSize = lz4sd->prefixSize;
2591 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2592 result = LZ4_decompress_fast_extDict(source, dest, originalSize,
2593 lz4sd->externalDict, lz4sd->extDictSize);
2594 if (result <= 0) return result;
2595 lz4sd->prefixSize = (size_t)originalSize;
2596 lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2597 }
2598
2599 return result;
2600}
2601
2602
2603/*
2604Advanced decoding functions :
2605*_usingDict() :
2606 These decoding functions work the same as "_continue" ones,
2607 the dictionary must be explicitly provided within parameters
2608*/
2609
/* Dispatch decode-with-dictionary to the cheapest applicable path:
 * no dict → plain decode; dict adjacent to dest → prefix decode (64 KB or
 * small-prefix variant); otherwise → external-dictionary decode.
 * NOTE(review): listing lines 2613 and 2616 (the return statements of the
 * first two branches) are absent from this extract. */
2610int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
2611{
2612 if (dictSize==0)
2614 if (dictStart+dictSize == dest) {
2615 if (dictSize >= 64 KB - 1) {
2617 }
2618 assert(dictSize >= 0);
2619 return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
2620 }
2621 assert(dictSize >= 0);
2622 return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
2623}
2624
/* Partial-decode counterpart of LZ4_decompress_safe_usingDict, with the
 * same three-way dispatch. NOTE(review): listing lines 2628 (first-branch
 * return) and 2637 (the forceExtDict return) are absent from this extract. */
2625int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest, int compressedSize, int targetOutputSize, int dstCapacity, const char* dictStart, int dictSize)
2626{
2627 if (dictSize==0)
2629 if (dictStart+dictSize == dest) {
2630 if (dictSize >= 64 KB - 1) {
2631 return LZ4_decompress_safe_partial_withPrefix64k(source, dest, compressedSize, targetOutputSize, dstCapacity);
2632 }
2633 assert(dictSize >= 0);
2634 return LZ4_decompress_safe_partial_withSmallPrefix(source, dest, compressedSize, targetOutputSize, dstCapacity, (size_t)dictSize);
2635 }
2636 assert(dictSize >= 0);
2638}
2639
/* Obsolete fast-decode with dictionary: prefix path when the dict abuts
 * dest (or is empty), external-dictionary path otherwise.
 * NOTE(review): listing line 2643 (the "return LZ4_decompress_unsafe_generic("
 * call line of the first branch) is absent from this extract. */
2640int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
2641{
2642 if (dictSize==0 || dictStart+dictSize == dest)
2644 (const BYTE*)source, (BYTE*)dest, originalSize,
2645 (size_t)dictSize, NULL, 0);
2646 assert(dictSize >= 0);
2647 return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
2648}
2649
2650
2651/*=*************************************************
2652* Obsolete Functions
2653***************************************************/
2654/* obsolete compression functions */
/* Obsolete API wrapper. NOTE(review): listing line 2661 (the body — per the
 * surrounding obsolete-wrapper pattern, a return into the modern compress
 * API) is absent from this extract. */
2659int LZ4_compress(const char* src, char* dest, int srcSize)
2660{
2662}
/* Obsolete API: compress using caller-provided state, bounded output.
 * Forwards to LZ4_compress_fast_extState with acceleration fixed at 1. */
2663int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
2664{
2665 return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
2666}
/* Obsolete API wrapper. NOTE(review): listing line 2669 (the body —
 * presumably forwarding to LZ4_compress_fast_extState like its sibling
 * above) is absent from this extract. */
2667int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
2668{
2670}
2679
2680/*
2681These decompression functions are deprecated and should no longer be used.
2682They are only provided here for compatibility with older user programs.
2683- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
2684- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
2685*/
/* Deprecated alias — per the comment block above, equivalent to
 * LZ4_decompress_fast. NOTE(review): listing line 2688 (the body) is
 * absent from this extract. */
2686int LZ4_uncompress (const char* source, char* dest, int outputSize)
2687{
2689}
2694
2695/* Obsolete Streaming functions */
2696
/* Obsolete streaming API: reports the size of an LZ4_stream_t so callers
 * can allocate state themselves. */
2697int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }
2698
/* Obsolete streaming API: reinitialize caller-owned state; returns 0 on
 * success. NOTE(review): listing lines 2701-2702 (the body before the
 * return — presumably the actual reset call) are absent from this extract. */
2699int LZ4_resetStreamState(void* state, char* inputBuffer)
2700{
2703 return 0;
2704}
2705
2706#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
/* NOTE(review): the signature (listing line 2707) and line 2709 are absent
 * from this extract — presumably the obsolete allocator LZ4_create(char*),
 * which now simply forwards to LZ4_createStream(); verify against the
 * original lz4.cpp. */
2708{
2710 return LZ4_createStream();
2711}
2712#endif
2713
/* Obsolete streaming API: returns the state's current dictionary pointer,
 * cast through uptrval to strip constness without a compiler warning. */
2714char* LZ4_slideInputBuffer (void* state)
2715{
2716 /* avoid const char * -> char * conversion warning */
2717 return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
2718}
2719
2720#endif /* LZ4_COMMONDEFS_ONLY */
OODEFFUNC typedef void(OODLE_CALLBACK t_fp_OodleCore_Plugin_Free)(void *ptr)
#define NULL
Definition oodle2base.h:134
RAD_U32 U32
Definition egttypes.h:501
RAD_U64 U64
Definition egttypes.h:511
RAD_U16 U16
Definition egttypes.h:491
#define CA_SUPPRESS(WarningNumber)
Definition CoreMiscDefines.h:125
#define LZ4_HASHTABLESIZE
Definition lz4.h:641
#define LZ4_COMPRESSBOUND(isize)
Definition lz4.h:198
#define LZ4_MEMORY_USAGE
Definition lz4.h:147
#define LZ4_VERSION_STRING
Definition lz4.h:126
union LZ4_stream_u LZ4_stream_t
Definition lz4.h:298
#define LZ4_HASH_SIZE_U32
Definition lz4.h:642
#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize)
Definition lz4.h:430
#define LZ4_MAX_INPUT_SIZE
Definition lz4.h:197
#define LZ4_VERSION_NUMBER
Definition lz4.h:121
#define LZ4_HASHLOG
Definition lz4.h:640
FPlatformTypes::SIZE_T SIZE_T
An unsigned integer the same size as a pointer, the same as UPTRINT.
Definition Platform.h:1150
FPlatformTypes::int32 int32
A 32-bit signed integer.
Definition Platform.h:1125
FPlatformTypes::UPTRINT UPTRINT
An unsigned integer the same size as a pointer.
Definition Platform.h:1146
FPlatformTypes::uint64 uint64
A 64-bit unsigned integer.
Definition Platform.h:1117
UE_FORCEINLINE_HINT TSharedRef< CastToType, Mode > StaticCastSharedRef(TSharedRef< CastFromType, Mode > const &InSharedRef)
Definition SharedPointer.h:127
float swift_float2 __attribute__((__ext_vector_type__(2)))
Definition MarketplaceKitWrapper.h:67
const char * source
Definition lz4.h:711
char int srcSize
Definition lz4.h:709
char int compressedSize
Definition lz4.h:735
char * inputBuffer
Definition lz4.h:731
const char char int inputSize
Definition lz4.h:711
char * dst
Definition lz4.h:735
char int int maxOutputSize
Definition lz4.h:710
char int originalSize
Definition lz4.h:736
char int outputSize
Definition lz4.h:717
char int isize
Definition lz4.h:718
char * dest
Definition lz4.h:709
uint8_t uint8
Definition binka_ue_file_header.h:8
uint16_t uint16
Definition binka_ue_file_header.h:7
uint32_t uint32
Definition binka_ue_file_header.h:6
LZ4_FORCE_INLINE int LZ4_decompress_unsafe_generic(const BYTE *const istart, BYTE *const ostart, int decompressedSize, size_t prefixSize, const BYTE *const dictStart, const size_t dictSize)
Definition lz4.cpp:1793
LZ4_FORCE_INLINE void LZ4_wildCopy8(void *dstPtr, const void *srcPtr, void *dstEnd)
Definition lz4.cpp:437
#define STEPSIZE
Definition lz4.cpp:653
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE *p, U32 h, void *tableBase, tableType_t const tableType, const BYTE *srcBase)
Definition lz4.cpp:804
#define KB
Definition lz4.cpp:242
int LZ4_compress_fast_extState(void *state, const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration)
Definition lz4.cpp:1344
int LZ4_decompress_fast_usingDict(const char *source, char *dest, int originalSize, const char *dictStart, int dictSize)
Definition lz4.cpp:2640
#define LZ4_STATIC_ASSERT(c)
Definition lz4.cpp:270
unsigned long long U64
Definition lz4.cpp:309
earlyEnd_directive
Definition lz4.cpp:1765
@ partial_decode
Definition lz4.cpp:1765
@ decode_full_block
Definition lz4.cpp:1765
size_t reg_t
Definition lz4.cpp:316
int LZ4_compressBound(int isize)
Definition lz4.cpp:727
int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize)
Definition lz4.cpp:1539
LZ4_FORCE_INLINE int LZ4_compress_generic(LZ4_stream_t_internal *const cctx, const char *const src, char *const dst, const int srcSize, int *inputConsumed, const int dstCapacity, const limitedOutput_directive outputDirective, const tableType_t tableType, const dict_directive dictDirective, const dictIssue_directive dictIssue, const int acceleration)
Definition lz4.cpp:1306
LZ4_FORCE_O2 int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest, int compressedSize, int maxOutputSize)
Definition lz4.cpp:2371
#define MINMATCH
Definition lz4.cpp:233
int LZ4_decompress_safe_partial_forceExtDict(const char *source, char *dest, int compressedSize, int targetOutputSize, int dstCapacity, const void *dictStart, size_t dictSize)
Definition lz4.cpp:2425
int LZ4_decompress_safe_usingDict(const char *source, char *dest, int compressedSize, int maxOutputSize, const char *dictStart, int dictSize)
Definition lz4.cpp:2610
#define MIN(a, b)
Definition lz4.cpp:1768
#define GB
Definition lz4.cpp:244
int LZ4_compress_limitedOutput(const char *source, char *dest, int inputSize, int maxOutputSize)
Definition lz4.cpp:2655
int LZ4_decoderRingBufferSize(int maxBlockSize)
Definition lz4.cpp:2506
int LZ4_compress_default(const char *src, char *dst, int srcSize, int maxOutputSize)
Definition lz4.cpp:1433
int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_stream, const char *src, char *dst, int srcSize, int dstCapacity)
Definition lz4.cpp:2671
unsigned char BYTE
Definition lz4.cpp:305
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
Definition lz4.cpp:753
LZ4_FORCE_INLINE Rvl_t read_variable_length(const BYTE **ip, const BYTE *ilimit, int initial_check)
Definition lz4.cpp:1902
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void *tableBase, tableType_t tableType)
Definition lz4.cpp:829
int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr, int targetDstSize)
Definition lz4.cpp:1459
int LZ4_sizeofState(void)
Definition lz4.cpp:728
signed int S32
Definition lz4.cpp:308
LZ4_FORCE_O2 int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int originalSize)
Definition lz4.cpp:2562
int LZ4_compress(const char *src, char *dest, int srcSize)
Definition lz4.cpp:2659
#define LZ4_memcpy(dst, src, size)
Definition lz4.cpp:342
void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
Definition lz4.cpp:1517
#define LZ4_DISTANCE_ABSOLUTE_MAX
Definition lz4.cpp:246
void LZ4_resetStream_fast(LZ4_stream_t *ctx)
Definition lz4.cpp:1523
int LZ4_freeStream(LZ4_stream_t *LZ4_stream)
Definition lz4.cpp:1528
#define MFLIMIT
Definition lz4.cpp:237
const char * LZ4_versionString(void)
Definition lz4.cpp:726
LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase, tableType_t tableType, const BYTE *srcBase)
Definition lz4.cpp:817
int LZ4_compress_fast(const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration)
Definition lz4.cpp:1414
int LZ4_uncompress(const char *source, char *dest, int outputSize)
Definition lz4.cpp:2686
char * LZ4_slideInputBuffer(void *state)
Definition lz4.cpp:2714
int LZ4_compress_continue(LZ4_stream_t *LZ4_stream, const char *source, char *dest, int inputSize)
Definition lz4.cpp:2675
LZ4_stream_t * LZ4_initStream(void *buffer, size_t size)
Definition lz4.cpp:1505
#define LASTLITERALS
Definition lz4.cpp:236
int LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize, int maxOutputSize)
Definition lz4.cpp:2690
#define DEBUGLOG(l,...)
Definition lz4.cpp:281
LZ4_FORCE_INLINE void LZ4_prepareTable(LZ4_stream_t_internal *const cctx, const int inputSize, const tableType_t tableType)
Definition lz4.cpp:862
LZ4_FORCE_INLINE int LZ4_decompress_safe_doubleDict(const char *source, char *dest, int compressedSize, int maxOutputSize, size_t prefixSize, const void *dictStart, size_t dictSize)
Definition lz4.cpp:2449
int LZ4_sizeofStreamState(void)
Definition lz4.cpp:2697
LZ4_FORCE_INLINE unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit)
Definition lz4.cpp:655
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize)
Definition lz4.cpp:2480
#define ALLOC(s)
Definition lz4.cpp:222
#define FREEMEM(p)
Definition lz4.cpp:224
int LZ4_versionNumber(void)
Definition lz4.cpp:725
dictIssue_directive
Definition lz4.cpp:719
@ noDictIssue
Definition lz4.cpp:719
@ dictSmall
Definition lz4.cpp:719
LZ4_stream_t * LZ4_createStream(void)
Definition lz4.cpp:1484
int LZ4_compress_withState(void *state, const char *src, char *dst, int srcSize)
Definition lz4.cpp:2667
LZ4_FORCE_O2 int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int compressedSize, int maxOutputSize)
Definition lz4.cpp:2522
#define likely(expr)
Definition lz4.cpp:175
int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source, char *dest, int srcSize)
Definition lz4.cpp:1710
LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void *tableBase, tableType_t const tableType)
Definition lz4.cpp:792
#define FASTLOOP_SAFE_DISTANCE
Definition lz4.cpp:239
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void *tableBase, tableType_t const tableType)
Definition lz4.cpp:780
#define ML_BITS
Definition lz4.cpp:251
#define ML_MASK
Definition lz4.cpp:252
#define ALLOC_AND_ZERO(s)
Definition lz4.cpp:223
#define MEM_INIT(p, v, s)
Definition lz4.cpp:227
LZ4_FORCE_INLINE const BYTE * LZ4_getPosition(const BYTE *p, const void *tableBase, tableType_t tableType, const BYTE *srcBase)
Definition lz4.cpp:853
#define HASH_UNIT
Definition lz4.cpp:1538
LZ4_FORCE_INLINE int LZ4_compress_generic_validated(LZ4_stream_t_internal *const cctx, const char *const source, char *const dest, const int inputSize, int *inputConsumed, const int maxOutputSize, const limitedOutput_directive outputDirective, const tableType_t tableType, const dict_directive dictDirective, const dictIssue_directive dictIssue, const int acceleration)
Definition lz4.cpp:908
dict_directive
Definition lz4.cpp:718
@ noDict
Definition lz4.cpp:718
@ withPrefix64k
Definition lz4.cpp:718
@ usingExtDict
Definition lz4.cpp:718
@ usingDictCtx
Definition lz4.cpp:718
LZ4_FORCE_O2 int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
Definition lz4.cpp:2360
limitedOutput_directive
Definition lz4.cpp:319
@ limitedOutput
Definition lz4.cpp:321
@ fillOutput
Definition lz4.cpp:322
@ notLimited
Definition lz4.cpp:320
int LZ4_compress_limitedOutput_withState(void *state, const char *src, char *dst, int srcSize, int dstSize)
Definition lz4.cpp:2663
LZ4_FORCE_O2 int LZ4_decompress_safe_partial(const char *src, char *dst, int compressedSize, int targetOutputSize, int dstCapacity)
Definition lz4.cpp:2351
int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
Definition lz4.cpp:1737
size_t Rvl_t
Definition lz4.cpp:1899
#define LZ4_FORCE_INLINE
Definition lz4.cpp:141
int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration)
Definition lz4.cpp:1630
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void *const p, tableType_t const tableType)
Definition lz4.cpp:773
unsigned int U32
Definition lz4.cpp:307
#define LZ4_ACCELERATION_MAX
Definition lz4.cpp:57
LZ4_FORCE_INLINE int LZ4_decompress_generic(const char *const src, char *const dst, int srcSize, int outputSize, earlyEnd_directive partialDecoding, dict_directive dict, const BYTE *const lowPrefix, const BYTE *const dictStart, const size_t dictSize)
Definition lz4.cpp:1935
int LZ4_resetStreamState(void *state, char *inputBuffer)
Definition lz4.cpp:2699
#define MATCH_SAFEGUARD_DISTANCE
Definition lz4.cpp:238
#define assert(condition)
Definition lz4.cpp:266
int LZ4_decompress_safe_partial_usingDict(const char *source, char *dest, int compressedSize, int targetOutputSize, int dstCapacity, const char *dictStart, int dictSize)
Definition lz4.cpp:2625
int LZ4_decompress_safe_forceExtDict(const char *source, char *dest, int compressedSize, int maxOutputSize, const void *dictStart, size_t dictSize)
Definition lz4.cpp:2415
#define unlikely(expr)
Definition lz4.cpp:178
int LZ4_compress_fast_extState_fastReset(void *state, const char *src, char *dst, int srcSize, int dstCapacity, int acceleration)
Definition lz4.cpp:1376
LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
Definition lz4.cpp:761
#define WILDCOPYLENGTH
Definition lz4.cpp:235
#define LZ4_ACCELERATION_DEFAULT
Definition lz4.cpp:51
unsigned short U16
Definition lz4.cpp:306
void LZ4_attach_dictionary(LZ4_stream_t *workingStream, const LZ4_stream_t *dictionaryStream)
Definition lz4.cpp:1581
LZ4_FORCE_O2 int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize)
Definition lz4.cpp:2343
tableType_t
Definition lz4.cpp:693
@ clearedTable
Definition lz4.cpp:693
@ byU16
Definition lz4.cpp:693
@ byPtr
Definition lz4.cpp:693
@ byU32
Definition lz4.cpp:693
#define RUN_MASK
Definition lz4.cpp:254
#define LZ4_FORCE_O2
Definition lz4.cpp:165
int LZ4_decompress_fast_withPrefix64k(const char *source, char *dest, int originalSize)
Definition lz4.cpp:2388
SIZE_T uptrval
Definition lz4.cpp:310
size_t read_long_length_no_check(const BYTE **pp)
Definition lz4.cpp:1775
#define LZ4_memmove
Definition lz4.cpp:350
int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream)
Definition lz4.cpp:2466
LZ4_streamDecode_t * LZ4_createStreamDecode(void)
Definition lz4.cpp:2460
void * LZ4_create(char *inputBuffer)
Definition lz4.cpp:2707
#define op
#define anchor
#define ip
float v
Definition radaudio_mdct.cpp:62
Definition lz4.h:702
SIZE_T prefixSize
Definition lz4.h:706
Definition lz4.h:663
LZ4_u32 currentOffset
Definition lz4.h:667
const LZ4_byte * dictionary
Definition lz4.h:665
const LZ4_stream_t_internal * dictCtx
Definition lz4.h:666
LZ4_u32 dictSize
Definition lz4.h:669
LZ4_u32 hashTable[LZ4_HASH_SIZE_U32]
Definition lz4.h:664
Definition lz4.h:710
Definition lz4.h:674
LZ4_stream_t_internal internal_donotuse
Definition lz4.h:676