UDocumentation UE5.7 10.02.2026 (Source)
API documentation for Unreal Engine 5.7
lz4.c.inl
Go to the documentation of this file.
1/*
2 LZ4 - Fast LZ compression algorithm
3 Copyright (C) 2011-present, Yann Collet.
4
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10
11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the
16 distribution.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 You can contact the author at :
31 - LZ4 homepage : http://www.lz4.org
32 - LZ4 source repository : https://github.com/lz4/lz4
33*/
34
35/*-************************************
36* Tuning parameters
37**************************************/
38/*
39 * LZ4_HEAPMODE :
40 * Select how default compression functions will allocate memory for their hash table,
41 * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
42 */
43#ifndef LZ4_HEAPMODE
44# define LZ4_HEAPMODE 0
45#endif
46
47/*
48 * ACCELERATION_DEFAULT :
49 * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
50 */
51#define ACCELERATION_DEFAULT 1
52
53
54/*-************************************
55* CPU Feature Detection
56**************************************/
57/* LZ4_FORCE_MEMORY_ACCESS
58 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
59 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
60 * The below switch allow to select different access method for improved performance.
61 * Method 0 (default) : use `memcpy()`. Safe and portable.
62 * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
63 * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
64 * Method 2 : direct access. This method is portable but violate C standard.
65 * It can generate buggy code on targets which assembly generation depends on alignment.
66 * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
67 * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
68 * Prefer these methods in priority order (0 > 1 > 2)
69 */
70#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
71# if defined(__GNUC__) && \
72 ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
73 || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
74# define LZ4_FORCE_MEMORY_ACCESS 2
75# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
76# define LZ4_FORCE_MEMORY_ACCESS 1
77# endif
78#endif
79
80/*
81 * LZ4_FORCE_SW_BITCOUNT
82 * Define this parameter if your target system or compiler does not support hardware bit count
83 */
84#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
85# define LZ4_FORCE_SW_BITCOUNT
86#endif
87
88
89
90/*-************************************
91* Dependency
92**************************************/
93/*
94 * LZ4_SRC_INCLUDED:
95 * Amalgamation flag, whether lz4.c is included
96 */
97#ifndef LZ4_SRC_INCLUDED
98# define LZ4_SRC_INCLUDED 1
99#endif
100
101#ifndef LZ4_STATIC_LINKING_ONLY
102#define LZ4_STATIC_LINKING_ONLY
103#endif
104
105#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
106#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
107#endif
108
109#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
110#include "lz4.h"
111/* see also "memory routines" below */
112
113
114/*-************************************
115* Compiler Options
116**************************************/
117#ifdef _MSC_VER /* Visual Studio */
118# include <intrin.h>
119# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
120# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
121#endif /* _MSC_VER */
122
123#ifndef LZ4_FORCE_INLINE
124# ifdef _MSC_VER /* Visual Studio */
125# define LZ4_FORCE_INLINE static __forceinline
126# else
127# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
128# ifdef __GNUC__
129# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
130# else
131# define LZ4_FORCE_INLINE static inline
132# endif
133# else
134# define LZ4_FORCE_INLINE static
135# endif /* __STDC_VERSION__ */
136# endif /* _MSC_VER */
137#endif /* LZ4_FORCE_INLINE */
138
139/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
140 * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
141 * together with a simple 8-byte copy loop as a fall-back path.
142 * However, this optimization hurts the decompression speed by >30%,
143 * because the execution does not go to the optimized loop
144 * for typical compressible data, and all of the preamble checks
145 * before going to the fall-back path become useless overhead.
146 * This optimization happens only with the -O3 flag, and -O2 generates
147 * a simple 8-byte copy loop.
148 * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
149 * functions are annotated with __attribute__((optimize("O2"))),
150 * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
151 * of LZ4_wildCopy8 does not affect the compression speed.
152 */
153#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
154# define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
155# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
156#else
157# define LZ4_FORCE_O2_GCC_PPC64LE
158# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
159#endif
160
161#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
162# define expect(expr,value) (__builtin_expect ((expr),(value)) )
163#else
164# define expect(expr,value) (expr)
165#endif
166
167#ifndef likely
168#define likely(expr) expect(int(expr) != 0, 1)
169#endif
170#ifndef unlikely
171#define unlikely(expr) expect(int(expr) != 0, 0)
172#endif
173
174
175/*-************************************
176* Memory routines
177**************************************/
178#include <stdlib.h> /* malloc, calloc, free */
179#define ALLOC(s) malloc(s)
180#define ALLOC_AND_ZERO(s) calloc(1,s)
181#define FREEMEM(p) free(p)
182#include <string.h> /* memset, memcpy */
183#define MEM_INIT(p,v,s) memset((p),(v),(s))
184
185
186/*-************************************
187* Common Constants
188**************************************/
189#define MINMATCH 4
190
191#define WILDCOPYLENGTH 8
192#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
193#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
194#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
195#define FASTLOOP_SAFE_DISTANCE 64
196static const int LZ4_minLength = (MFLIMIT+1);
197
198#define KB *(1 <<10)
199#define MB *(1 <<20)
200#define GB *(1U<<30)
201
202#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
203#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
204# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
205#endif
206
207#define ML_BITS 4
208#define ML_MASK ((1U<<ML_BITS)-1)
209#define RUN_BITS (8-ML_BITS)
210#define RUN_MASK ((1U<<RUN_BITS)-1)
211
212
213/*-************************************
214* Error detection
215**************************************/
216#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
217# include <assert.h>
218#else
219# ifndef assert
220# define assert(condition) ((void)0)
221# endif
222#endif
223
224#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
225
226#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
227# include <stdio.h>
228static int g_debuglog_enable = 1;
229# define DEBUGLOG(l, ...) { \
230 if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
231 fprintf(stderr, __FILE__ ": "); \
232 fprintf(stderr, __VA_ARGS__); \
233 fprintf(stderr, " \n"); \
234 } }
235#else
236# define DEBUGLOG(l, ...) {} /* disabled */
237#endif
238
239
240/*-************************************
241* Types
242**************************************/
243#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
244# include <stdint.h>
245
246LZ4_BEGIN_NAMESPACE // EPIC MOD : Wrap library in an optional namespace
247
248 typedef uint8_t BYTE;
249 typedef uint16_t U16;
250 typedef uint32_t U32;
251 typedef int32_t S32;
252 typedef uint64_t U64;
253 typedef uintptr_t uptrval;
254#else
255 typedef unsigned char BYTE;
256 typedef unsigned short U16;
257 typedef unsigned int U32;
258 typedef signed int S32;
259 typedef unsigned long long U64;
260 typedef size_t uptrval; /* generally true, except OpenVMS-64 */
261#endif
262
263#if defined(__x86_64__)
264 typedef U64 reg_t; /* 64-bits in x32 mode */
265#else
266 typedef size_t reg_t; /* 32-bits in x32 mode */
267#endif
268
274
275
276/*-************************************
277* Reading and writing into memory
278**************************************/
279static unsigned LZ4_isLittleEndian(void)
280{
281 const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
282 return one.c[0];
283}
284
285
286#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
287/* lie to the compiler about data alignment; use with caution */
288
289static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
290static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
291static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
292
293static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
294static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
295
296#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
297
298/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
299/* currently only defined for gcc and icc */
300typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
301
302static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
303static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
304static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
305
306static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
307static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
308
309#else /* safe and portable access using memcpy() */
310
311static U16 LZ4_read16(const void* memPtr)
312{
313 U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
314}
315
316static U32 LZ4_read32(const void* memPtr)
317{
318 U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
319}
320
321static reg_t LZ4_read_ARCH(const void* memPtr)
322{
323 reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
324}
325
326static void LZ4_write16(void* memPtr, U16 value)
327{
328 memcpy(memPtr, &value, sizeof(value));
329}
330
331static void LZ4_write32(void* memPtr, U32 value)
332{
333 memcpy(memPtr, &value, sizeof(value));
334}
335
336#endif /* LZ4_FORCE_MEMORY_ACCESS */
337
338
339static U16 LZ4_readLE16(const void* memPtr)
340{
341 if (LZ4_isLittleEndian()) {
342 return LZ4_read16(memPtr);
343 } else {
344 const BYTE* p = (const BYTE*)memPtr;
345 return (U16)((U16)p[0] + (p[1]<<8));
346 }
347}
348
349static void LZ4_writeLE16(void* memPtr, U16 value)
350{
351 if (LZ4_isLittleEndian()) {
352 LZ4_write16(memPtr, value);
353 } else {
354 BYTE* p = (BYTE*)memPtr;
355 p[0] = (BYTE) value;
356 p[1] = (BYTE)(value>>8);
357 }
358}
359
360/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
362void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
363{
364 BYTE* d = (BYTE*)dstPtr;
365 const BYTE* s = (const BYTE*)srcPtr;
366 BYTE* const e = (BYTE*)dstEnd;
367
368 do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
369}
370
371static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
372static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
373
374
375#ifndef LZ4_FAST_DEC_LOOP
376# if defined(__i386__) || defined(__x86_64__)
377# define LZ4_FAST_DEC_LOOP 1
378# elif defined(__aarch64__) && !defined(__clang__)
379 /* On aarch64, we disable this optimization for clang because on certain
380 * mobile chipsets and clang, it reduces performance. For more information
381 * refer to https://github.com/lz4/lz4/pull/707. */
382# define LZ4_FAST_DEC_LOOP 1
383# else
384# define LZ4_FAST_DEC_LOOP 0
385# endif
386#endif
387
388#if LZ4_FAST_DEC_LOOP
389
391LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
392{
393 if (offset < 8) {
394 dstPtr[0] = srcPtr[0];
395 dstPtr[1] = srcPtr[1];
396 dstPtr[2] = srcPtr[2];
397 dstPtr[3] = srcPtr[3];
398 srcPtr += inc32table[offset];
399 memcpy(dstPtr+4, srcPtr, 4);
400 srcPtr -= dec64table[offset];
401 dstPtr += 8;
402 } else {
403 memcpy(dstPtr, srcPtr, 8);
404 dstPtr += 8;
405 srcPtr += 8;
406 }
407
409}
410
411/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
412 * this version copies two times 16 bytes (instead of one time 32 bytes)
413 * because it must be compatible with offsets >= 16. */
415LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
416{
417 BYTE* d = (BYTE*)dstPtr;
418 const BYTE* s = (const BYTE*)srcPtr;
419 BYTE* const e = (BYTE*)dstEnd;
420
421 do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
422}
423
424/* LZ4_memcpy_using_offset() presumes :
425 * - dstEnd >= dstPtr + MINMATCH
426 * - there is at least 8 bytes available to write after dstEnd */
428LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
429{
430 BYTE v[8];
431
433 LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
434
435 switch(offset) {
436 case 1:
437 memset(v, *srcPtr, 8);
438 break;
439 case 2:
440 memcpy(v, srcPtr, 2);
441 memcpy(&v[2], srcPtr, 2);
442 memcpy(&v[4], &v[0], 4);
443 break;
444 case 4:
445 memcpy(v, srcPtr, 4);
446 memcpy(&v[4], srcPtr, 4);
447 break;
448 default:
450 return;
451 }
452
453 memcpy(dstPtr, v, 8);
454 dstPtr += 8;
455 while (dstPtr < dstEnd) {
456 memcpy(dstPtr, v, 8);
457 dstPtr += 8;
458 }
459}
460#endif
461
462
463/*-************************************
464* Common functions
465**************************************/
/* LZ4_NbCommonBytes() :
 * `val` is the XOR of two machine words read from two positions being compared
 * (see LZ4_count). Returns the number of bytes, counted in *memory order from
 * the lowest address*, that the two words have in common :
 *  - little-endian : count of trailing zero bytes of val
 *  - big-endian    : count of leading zero bytes of val
 * Uses a hardware bit-scan intrinsic when available; otherwise falls back to a
 * De Bruijn multiply (little-endian) or a branchy narrowing sequence (big-endian).
 * NOTE(review): result is meaningless for val == 0 — callers are presumed to
 * guarantee val != 0 (they test the XOR for zero first) — TODO confirm. */
static unsigned LZ4_NbCommonBytes (reg_t val)
{
    if (LZ4_isLittleEndian()) {
        if (sizeof(val)==8) {
# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            /* MSVC x64 : hardware scan for lowest set bit, /8 gives byte index */
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (int)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            /* gcc/clang : count trailing zero bits, /8 gives byte count */
            return (unsigned)__builtin_ctzll((U64)val) >> 3;
# else
            /* Software fallback : isolate lowest set bit (val & -val), then a
             * De Bruijn multiply maps it to a table slot holding the byte index. */
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
                                                     0, 3, 1, 3, 1, 4, 2, 7,
                                                     0, 2, 3, 6, 1, 5, 3, 5,
                                                     1, 3, 4, 4, 2, 5, 6, 7,
                                                     7, 0, 1, 2, 3, 3, 4, 6,
                                                     2, 6, 5, 5, 3, 4, 5, 6,
                                                     7, 1, 2, 4, 6, 4, 4, 5,
                                                     7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
        } else /* 32 bits */ {
# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward( &r, (U32)val );
            return (int)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_ctz((U32)val) >> 3;
# else
            /* 32-bit De Bruijn variant of the fallback above */
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                                     3, 2, 2, 1, 3, 2, 0, 1,
                                                     3, 3, 1, 2, 2, 2, 2, 0,
                                                     3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
        }
    } else   /* Big Endian CPU */ {
        if (sizeof(val)==8) {   /* 64-bits */
# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            /* MSVC x64 : hardware scan for highest set bit */
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            /* gcc/clang : count leading zero bits, /8 gives byte count */
            return (unsigned)__builtin_clzll((U64)val) >> 3;
# else
            /* Software fallback : halve the window until the leading non-zero
             * byte is located, accumulating the zero-byte count in r. */
            static const U32 by32 = sizeof(val)*4;  /* 32 on 64 bits (goal), 16 on 32 bits.
                Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
                Note that this code path is never triggered in 32-bits mode. */
            unsigned r;
            if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
# endif
        } else /* 32 bits */ {
# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (unsigned)__builtin_clz((U32)val) >> 3;
# else
            /* 32-bit variant of the narrowing fallback above */
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
# endif
        }
    }
}
536
537#define STEPSIZE sizeof(reg_t)
539unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
540{
541 const BYTE* const pStart = pIn;
542
543 if (likely(pIn < pInLimit-(STEPSIZE-1))) {
544 reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
545 if (!diff) {
547 } else {
548 return LZ4_NbCommonBytes(diff);
549 } }
550
551 while (likely(pIn < pInLimit-(STEPSIZE-1))) {
552 reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
553 if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
554 pIn += LZ4_NbCommonBytes(diff);
555 return (unsigned)(pIn - pStart);
556 }
557
558 if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
559 if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
560 if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
561 return (unsigned)(pIn - pStart);
562}
563
564
565#ifndef LZ4_COMMONDEFS_ONLY
566/*-************************************
567* Local Constants
568**************************************/
569static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
570static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
571
572
573/*-************************************
574* Local Structures and types
575**************************************/
576typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
577
603
604
605/*-************************************
606* Local Utils
607**************************************/
609const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
612
613
614/*-************************************
615* Internal Definitions used in Tests
616**************************************/
617LZ4_END_NAMESPACE // EPIC MOD : Wrap library in an optional namespace
618
619#if defined (__cplusplus)
620extern "C" {
621#endif
622
623LZ4_BEGIN_NAMESPACE // EPIC MOD : Wrap library in an optional namespace
624
625int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
626
627int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
629 const void* dictStart, size_t dictSize);
630
631LZ4_END_NAMESPACE // EPIC MOD : Wrap library in an optional namespace
632
633#if defined (__cplusplus)
634}
635#endif
636
637LZ4_BEGIN_NAMESPACE // EPIC MOD : Wrap library in an optional namespace
638
639/*-******************************
640* Compression functions
641********************************/
642static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
643{
644 if (tableType == byU16)
645 return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
646 else
647 return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
648}
649
650static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
651{
652 const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
653 if (LZ4_isLittleEndian()) {
654 const U64 prime5bytes = 889523592379ULL;
655 return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
656 } else {
657 const U64 prime8bytes = 11400714785074694791ULL;
658 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
659 }
660}
661
662LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
663{
664 if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
665 return LZ4_hash4(LZ4_read32(p), tableType);
666}
667
668static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
669{
670 switch (tableType)
671 {
672 default: /* fallthrough */
673 case clearedTable: { /* illegal! */ assert(0); return; }
674 case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
675 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
676 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
677 }
678}
679
680static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
681{
682 switch (tableType)
683 {
684 default: /* fallthrough */
685 case clearedTable: /* fallthrough */
686 case byPtr: { /* illegal! */ assert(0); return; }
687 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
688 case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
689 }
690}
691
692static void LZ4_putPositionOnHash(const BYTE* p, U32 h,
693 void* tableBase, tableType_t const tableType,
694 const BYTE* srcBase)
695{
696 switch (tableType)
697 {
698 case clearedTable: { /* illegal! */ assert(0); return; }
699 case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
700 case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
701 case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
702 }
703}
704
705LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
706{
707 U32 const h = LZ4_hashPosition(p, tableType);
708 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
709}
710
711/* LZ4_getIndexOnHash() :
712 * Index of match position registered in hash table.
713 * hash position must be calculated by using base+index, or dictBase+index.
714 * Assumption 1 : only valid if tableType == byU32 or byU16.
715 * Assumption 2 : h is presumed valid (within limits of hash table)
716 */
717static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
718{
720 if (tableType == byU32) {
721 const U32* const hashTable = (const U32*) tableBase;
722 assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
723 return hashTable[h];
724 }
725 if (tableType == byU16) {
726 const U16* const hashTable = (const U16*) tableBase;
727 assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
728 return hashTable[h];
729 }
730 assert(0); return 0; /* forbidden case */
731}
732
733static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
734{
735 if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
736 if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
737 { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
738}
739
742 const void* tableBase, tableType_t tableType,
743 const BYTE* srcBase)
744{
745 U32 const h = LZ4_hashPosition(p, tableType);
746 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
747}
748
751 const int inputSize,
752 const tableType_t tableType) {
753 /* If compression failed during the previous step, then the context
754 * is marked as dirty, therefore, it has to be fully reset.
755 */
756 if (cctx->dirty) {
757 DEBUGLOG(5, "LZ4_prepareTable: Full reset for %p", cctx);
759 return;
760 }
761
762 /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
763 * therefore safe to use no matter what mode we're in. Otherwise, we figure
764 * out if it's safe to leave as is or whether it needs to be reset.
765 */
766 if (cctx->tableType != clearedTable) {
767 assert(inputSize >= 0);
768 if (cctx->tableType != tableType
769 || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
770 || ((tableType == byU32) && cctx->currentOffset > 1 GB)
771 || tableType == byPtr
772 || inputSize >= 4 KB)
773 {
774 DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
775 MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
776 cctx->currentOffset = 0;
777 cctx->tableType = clearedTable;
778 } else {
779 DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
780 }
781 }
782
783 /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
784 * than compressing without a gap. However, compressing with
785 * currentOffset == 0 is faster still, so we preserve that case.
786 */
787 if (cctx->currentOffset != 0 && tableType == byU32) {
788 DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
789 cctx->currentOffset += 64 KB;
790 }
791
792 /* Finally, clear history */
793 cctx->dictCtx = NULL;
794 cctx->dictionary = NULL;
795 cctx->dictSize = 0;
796}
797
802 const char* const source,
803 char* const dest,
804 const int inputSize,
805 int *inputConsumed, /* only written when outputDirective == fillOutput */
806 const int maxOutputSize,
808 const tableType_t tableType,
811 const int acceleration)
812{
813 int result;
814 const BYTE* ip = (const BYTE*) source;
815
816 U32 const startIndex = cctx->currentOffset;
817 const BYTE* base = (const BYTE*) source - startIndex;
818 const BYTE* lowLimit;
819
820 const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
821 const BYTE* const dictionary =
822 dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
823 const U32 dictSize =
824 dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
825 const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
826
828 U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
829 const BYTE* const dictEnd = dictionary + dictSize;
830 const BYTE* anchor = (const BYTE*) source;
831 const BYTE* const iend = ip + inputSize;
832 const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
833 const BYTE* const matchlimit = iend - LASTLITERALS;
834
835 /* the dictCtx currentOffset is indexed on the start of the dictionary,
836 * while a dictionary in the current context precedes the currentOffset */
838 dictionary + dictSize - dictCtx->currentOffset :
839 dictionary + dictSize - startIndex;
840
841 BYTE* op = (BYTE*) dest;
842 BYTE* const olimit = op + maxOutputSize;
843
844 U32 offset = 0;
846
847 DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
848 /* If init conditions are not met, we don't have to mark stream
849 * as having dirty context, since no action was taken yet */
850 if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
851 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported inputSize, too large (or negative) */
852 if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
853 if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
854 assert(acceleration >= 1);
855
856 lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
857
858 /* Update context state */
860 /* Subsequent linked blocks can't use the dictionary. */
861 /* Instead, they use the block we just compressed. */
862 cctx->dictCtx = NULL;
863 cctx->dictSize = (U32)inputSize;
864 } else {
865 cctx->dictSize += (U32)inputSize;
866 }
867 cctx->currentOffset += (U32)inputSize;
868 cctx->tableType = (U16)tableType;
869
870 if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
871
872 /* First Byte */
873 LZ4_putPosition(ip, cctx->hashTable, tableType, base);
874 ip++; forwardH = LZ4_hashPosition(ip, tableType);
875
876 /* Main Loop */
877 for ( ; ; ) {
878 const BYTE* match;
879 BYTE* token;
880 const BYTE* filledIp;
881
882 /* Find a match */
883 if (tableType == byPtr) {
884 const BYTE* forwardIp = ip;
885 int step = 1;
886 int searchMatchNb = acceleration << LZ4_skipTrigger;
887 do {
888 U32 const h = forwardH;
889 ip = forwardIp;
890 forwardIp += step;
891 step = (searchMatchNb++ >> LZ4_skipTrigger);
892
895
896 match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
897 forwardH = LZ4_hashPosition(forwardIp, tableType);
898 LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
899
900 } while ( (match+LZ4_DISTANCE_MAX < ip)
901 || (LZ4_read32(match) != LZ4_read32(ip)) );
902
903 } else { /* byU32, byU16 */
904
905 const BYTE* forwardIp = ip;
906 int step = 1;
907 int searchMatchNb = acceleration << LZ4_skipTrigger;
908 do {
909 U32 const h = forwardH;
910 U32 const current = (U32)(forwardIp - base);
911 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
912 assert(matchIndex <= current);
913 assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
914 ip = forwardIp;
915 forwardIp += step;
916 step = (searchMatchNb++ >> LZ4_skipTrigger);
917
920
922 if (matchIndex < startIndex) {
923 /* there was no match, try the dictionary */
924 assert(tableType == byU32);
927 matchIndex += dictDelta; /* make dictCtx index comparable with current context */
928 lowLimit = dictionary;
929 } else {
930 match = base + matchIndex;
931 lowLimit = (const BYTE*)source;
932 }
933 } else if (dictDirective==usingExtDict) {
934 if (matchIndex < startIndex) {
935 DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
938 lowLimit = dictionary;
939 } else {
940 match = base + matchIndex;
941 lowLimit = (const BYTE*)source;
942 }
943 } else { /* single continuous memory segment */
944 match = base + matchIndex;
945 }
946 forwardH = LZ4_hashPosition(forwardIp, tableType);
947 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
948
949 DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
950 if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
951 assert(matchIndex < current);
952 if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
953 && (matchIndex+LZ4_DISTANCE_MAX < current)) {
954 continue;
955 } /* too far */
956 assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
957
958 if (LZ4_read32(match) == LZ4_read32(ip)) {
959 if (maybe_extMem) offset = current - matchIndex;
960 break; /* match found */
961 }
962
963 } while(1);
964 }
965
966 /* Catch up */
967 filledIp = ip;
968 while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
969
970 /* Encode Literals */
971 { unsigned const litLength = (unsigned)(ip - anchor);
972 token = op++;
973 if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
974 (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
975 return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
976 }
977 if ((outputDirective == fillOutput) &&
978 (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
979 op--;
980 goto _last_literals;
981 }
982 if (litLength >= RUN_MASK) {
983 int len = (int)(litLength - RUN_MASK);
984 *token = (RUN_MASK<<ML_BITS);
985 for(; len >= 255 ; len-=255) *op++ = 255;
986 *op++ = (BYTE)len;
987 }
988 else *token = (BYTE)(litLength<<ML_BITS);
989
990 /* Copy Literals */
992 op+=litLength;
993 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
994 (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
995 }
996
998 /* at this stage, the following variables must be correctly set :
999 * - ip : at start of LZ operation
1000 * - match : at start of previous pattern occurence; can be within current prefix, or within extDict
1001 * - offset : if maybe_ext_memSegment==1 (constant)
1002 * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
1003 * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
1004 */
1005
1006 if ((outputDirective == fillOutput) &&
1007 (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
1008 /* the match was too close to the end, rewind and go to last literals */
1009 op = token;
1010 goto _last_literals;
1011 }
1012
1013 /* Encode Offset */
1014 if (maybe_extMem) { /* static test */
1015 DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
1017 LZ4_writeLE16(op, (U16)offset); op+=2;
1018 } else {
1019 DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
1021 LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
1022 }
1023
1024 /* Encode MatchLength */
1025 { unsigned matchCode;
1026
1028 && (lowLimit==dictionary) /* match within extDict */ ) {
1029 const BYTE* limit = ip + (dictEnd-match);
1030 assert(dictEnd > match);
1034 if (ip==limit) {
1035 unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
1036 matchCode += more;
1037 ip += more;
1038 }
1039 DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
1040 } else {
1043 DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
1044 }
1045
1046 if ((outputDirective) && /* Check output buffer overflow */
1047 (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
1048 if (outputDirective == fillOutput) {
1049 /* Match description too long : reduce it */
1050 U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
1054 if (unlikely(ip <= filledIp)) {
1055 /* We have already filled up to filledIp so if ip ends up less than filledIp
1056 * we have positions in the hash table beyond the current position. This is
1057 * a problem if we reuse the hash table. So we have to remove these positions
1058 * from the hash table.
1059 */
1060 const BYTE* ptr;
1061 DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
1062 for (ptr = ip; ptr <= filledIp; ++ptr) {
1063 U32 const h = LZ4_hashPosition(ptr, tableType);
1064 LZ4_clearHash(h, cctx->hashTable, tableType);
1065 }
1066 }
1067 } else {
1069 return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1070 }
1071 }
1072 if (matchCode >= ML_MASK) {
1073 *token += ML_MASK;
1074 matchCode -= ML_MASK;
1075 LZ4_write32(op, 0xFFFFFFFF);
1076 while (matchCode >= 4*255) {
1077 op+=4;
1078 LZ4_write32(op, 0xFFFFFFFF);
1079 matchCode -= 4*255;
1080 }
1081 op += matchCode / 255;
1082 *op++ = (BYTE)(matchCode % 255);
1083 } else
1084 *token += (BYTE)(matchCode);
1085 }
1086 /* Ensure we have enough space for the last literals. */
1088
1089 anchor = ip;
1090
1091 /* Test end of chunk */
1092 if (ip >= mflimitPlusOne) break;
1093
1094 /* Fill table */
1095 LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
1096
1097 /* Test next position */
1098 if (tableType == byPtr) {
1099
1100 match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
1101 LZ4_putPosition(ip, cctx->hashTable, tableType, base);
1102 if ( (match+LZ4_DISTANCE_MAX >= ip)
1103 && (LZ4_read32(match) == LZ4_read32(ip)) )
1104 { token=op++; *token=0; goto _next_match; }
1105
1106 } else { /* byU32, byU16 */
1107
1108 U32 const h = LZ4_hashPosition(ip, tableType);
1109 U32 const current = (U32)(ip-base);
1110 U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
1111 assert(matchIndex < current);
1112 if (dictDirective == usingDictCtx) {
1113 if (matchIndex < startIndex) {
1114 /* there was no match, try the dictionary */
1117 lowLimit = dictionary; /* required for match length counter */
1119 } else {
1120 match = base + matchIndex;
1121 lowLimit = (const BYTE*)source; /* required for match length counter */
1122 }
1123 } else if (dictDirective==usingExtDict) {
1124 if (matchIndex < startIndex) {
1126 lowLimit = dictionary; /* required for match length counter */
1127 } else {
1128 match = base + matchIndex;
1129 lowLimit = (const BYTE*)source; /* required for match length counter */
1130 }
1131 } else { /* single memory segment */
1132 match = base + matchIndex;
1133 }
1134 LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
1135 assert(matchIndex < current);
1136 if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
1137 && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
1138 && (LZ4_read32(match) == LZ4_read32(ip)) ) {
1139 token=op++;
1140 *token=0;
1141 if (maybe_extMem) offset = current - matchIndex;
1142 DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
1143 (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
1144 goto _next_match;
1145 }
1146 }
1147
1148 /* Prepare next loop */
1149 forwardH = LZ4_hashPosition(++ip, tableType);
1150
1151 }
1152
1154 /* Encode Last Literals */
1155 { size_t lastRun = (size_t)(iend - anchor);
1156 if ( (outputDirective) && /* Check output buffer overflow */
1157 (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
1158 if (outputDirective == fillOutput) {
1159 /* adapt lastRun to fill 'dst' */
1160 assert(olimit >= op);
1161 lastRun = (size_t)(olimit-op) - 1;
1162 lastRun -= (lastRun+240)/255;
1163 } else {
1165 return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
1166 }
1167 }
1168 if (lastRun >= RUN_MASK) {
1169 size_t accumulator = lastRun - RUN_MASK;
1170 *op++ = RUN_MASK << ML_BITS;
1171 for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
1172 *op++ = (BYTE) accumulator;
1173 } else {
1174 *op++ = (BYTE)(lastRun<<ML_BITS);
1175 }
1177 ip = anchor + lastRun;
1178 op += lastRun;
1179 }
1180
1181 if (outputDirective == fillOutput) {
1182 *inputConsumed = (int) (((const char*)ip)-source);
1183 }
1184 DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest));
1185 result = (int)(((char*)op) - dest);
1186 assert(result > 0);
1187 return result;
1188}
1189
1190
/* LZ4_compress_fast_extState() :
 * One-shot compression using a caller-provided state buffer.
 * The state is (re)initialized via LZ4_initStream(), then a hash-table type
 * is selected from input size (byU16 for inputs < 64 KB) and pointer width.
 * NOTE(review): this listing is a documentation extract; original lines
 * 1195-1196, 1198, 1201, 1205 and 1208 are absent here, including the
 * acceleration clamp and the LZ4_compress_generic() calls that produce the
 * return value -- confirm against upstream lz4.c. */
1191int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1192{
 /* LZ4_initStream() zeroes the provided buffer; internal_donotuse is the working view */
1193 LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
1194 assert(ctx != NULL);
 /* presumably guarded by a maxOutputSize >= LZ4_compressBound(inputSize) test
  * on the missing line 1196 -- TODO confirm */
1197 if (inputSize < LZ4_64Klimit) {
1199 } else {
 /* on 32-bit builds, source addresses beyond LZ4_DISTANCE_MAX cannot be
  * represented as U32 table indexes, hence the byPtr fallback */
1200 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1202 }
1203 } else {
1204 if (inputSize < LZ4_64Klimit) {
1206 } else {
1207 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1209 }
1210 }
1211}
1212
/* LZ4_compress_fast_extState_fastReset() :
 * Like LZ4_compress_fast_extState(), but reuses a previously-initialized
 * state via LZ4_prepareTable() instead of a full reset, which is cheaper
 * when the same state is used repeatedly.
 * NOTE(review): documentation extract -- original lines 1225, 1227, 1246,
 * 1248 and 1253 are missing here (including part of the outer dstCapacity
 * dispatch and several LZ4_compress_generic() calls); confirm against
 * upstream lz4.c. */
1222int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
1223{
1224 LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
1226
1228 if (srcSize < LZ4_64Klimit) {
1229 const tableType_t tableType = byU16;
1230 LZ4_prepareTable(ctx, srcSize, tableType);
 /* a non-zero currentOffset means the table still holds indexes from a
  * previous session; dictSmall tells the compressor to ignore them */
1231 if (ctx->currentOffset) {
1232 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
1233 } else {
1234 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1235 }
1236 } else {
 /* byPtr fallback for 32-bit processes with high source addresses */
1237 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1238 LZ4_prepareTable(ctx, srcSize, tableType);
1239 return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
1240 }
1241 } else {
1242 if (srcSize < LZ4_64Klimit) {
1243 const tableType_t tableType = byU16;
1244 LZ4_prepareTable(ctx, srcSize, tableType);
1245 if (ctx->currentOffset) {
1247 } else {
1249 }
1250 } else {
1251 const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1252 LZ4_prepareTable(ctx, srcSize, tableType);
1254 }
1255 }
1256}
1257
1258
/* LZ4_compress_fast() :
 * Convenience entry point: allocates the compression state either on the
 * heap (LZ4_HEAPMODE) or on the stack, runs one-shot compression, and
 * releases the state.
 * NOTE(review): documentation extract -- original line 1269, the call that
 * assigns `result` (expected: LZ4_compress_fast_extState(...)), is missing
 * here, so as shown `result` would be returned uninitialized; confirm
 * against upstream lz4.c. */
1259int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
1260{
1261 int result;
1262#if (LZ4_HEAPMODE)
1263 LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
1264 if (ctxPtr == NULL) return 0;
1265#else
1266 LZ4_stream_t ctx;
1267 LZ4_stream_t* const ctxPtr = &ctx;
1268#endif
1270
1271#if (LZ4_HEAPMODE)
1272 FREEMEM(ctxPtr);
1273#endif
1274 return result;
1275}
1276
1277
/* LZ4_compress_default() :
 * Convenience wrapper around LZ4_compress_fast() with the default
 * acceleration factor (1, i.e. the strongest compression of the fast path). */
int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
{
    int const defaultAcceleration = 1;
    int const compressedSize = LZ4_compress_fast(src, dst, srcSize, maxOutputSize, defaultAcceleration);
    return compressedSize;
}
1282
1283
1284/* hidden debug function */
1285/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
/* LZ4_compress_fast_force() :
 * Hidden debug entry point (see comment above): compresses with a freshly
 * initialized stack-local stream, dispatching on input size.
 * NOTE(review): documentation extract -- original lines 1292 and 1295 (the
 * LZ4_compress_generic() calls in each branch) are missing here; confirm
 * against upstream lz4.c. */
1286int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
1287{
1288 LZ4_stream_t ctx;
1289 LZ4_initStream(&ctx, sizeof(ctx));
1290
1291 if (srcSize < LZ4_64Klimit) {
1293 } else {
 /* note: unlike other dispatchers, this one selects byPtr purely from
  * pointer width, without checking the source address */
1294 tableType_t const addrMode = (sizeof(void*) > 4) ? byU32 : byPtr;
1296 }
1297}
1298
1299
1300/* Note!: This function leaves the stream in an unclean/broken state!
1301 * It is not safe to subsequently use the same state with a _fastReset() or
1302 * _continue() call without resetting it. */
/* LZ4_compress_destSize_extState() :
 * Compresses as much of *srcSizePtr as fits within targetDstSize bytes of
 * output. When the budget already covers the worst case (LZ4_compressBound),
 * falls through to the plain fast path.
 * See the warning above: the state is left unclean afterwards.
 * NOTE(review): documentation extract -- original lines 1312 and 1315 (the
 * fillOutput-mode LZ4_compress_generic() calls) are missing here; confirm
 * against upstream lz4.c. */
1303static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1304{
1305 void* const s = LZ4_initStream(state, sizeof (*state));
1306 assert(s != NULL); (void)s;
1307
1308 if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
1309 return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
1310 } else {
1311 if (*srcSizePtr < LZ4_64Klimit) {
1313 } else {
1314 tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
1316 } }
1317}
1318
1319
/* LZ4_compress_destSize() :
 * Public wrapper: allocates a state (heap or stack depending on
 * LZ4_HEAPMODE), runs LZ4_compress_destSize_extState(), and frees the state.
 * On return, *srcSizePtr holds the number of source bytes actually consumed.
 * NOTE(review): documentation extract -- original line 1326, the stack
 * declaration `LZ4_stream_t ctxBody;` referenced on the next line, is
 * missing here; confirm against upstream lz4.c. */
1320int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
1321{
1322#if (LZ4_HEAPMODE)
1323 LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
1324 if (ctx == NULL) return 0;
1325#else
1327 LZ4_stream_t* ctx = &ctxBody;
1328#endif
1329
1330 int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
1331
1332#if (LZ4_HEAPMODE)
1333 FREEMEM(ctx);
1334#endif
1335 return result;
1336}
1337
1338
1339
1340/*-******************************
1341* Streaming functions
1342********************************/
1343
1345{
 /* LZ4_createStream() body (the signature on original line 1344,
  * `LZ4_stream_t* LZ4_createStream(void)`, is missing from this extract --
  * confirm against upstream lz4.c).
  * Heap-allocates and initializes a fresh stream; returns NULL on
  * allocation failure. Caller releases with LZ4_freeStream(). */
1346 LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
1347 LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
1348 DEBUGLOG(4, "LZ4_createStream %p", lz4s);
1349 if (lz4s == NULL) return NULL;
1350 LZ4_initStream(lz4s, sizeof(*lz4s));
1351 return lz4s;
1352}
1353
1354#ifndef _MSC_VER /* for some reason, Visual fails the alignment test on 32-bit x86 :
1355 it reports an alignment of 8-bytes,
1356 while actually aligning LZ4_stream_t on 4 bytes. */
1357static size_t LZ4_stream_t_alignment(void)
1358{
1359 struct { char c; LZ4_stream_t t; } t_a;
1360 return sizeof(t_a) - sizeof(t_a.t);
1361}
1362#endif
1363
1364LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
1365{
1366 DEBUGLOG(5, "LZ4_initStream");
1367 if (buffer == NULL) { return NULL; }
1368 if (size < sizeof(LZ4_stream_t)) { return NULL; }
1369#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 :
1370 it reports an aligment of 8-bytes,
1371 while actually aligning LZ4_stream_t on 4 bytes. */
1372 if (((size_t)buffer) & (LZ4_stream_t_alignment() - 1)) { return NULL; } /* alignment check */
1373#endif
1374 MEM_INIT(buffer, 0, sizeof(LZ4_stream_t));
1375 return (LZ4_stream_t*)buffer;
1376}
1377
1378/* resetStream is now deprecated,
1379 * prefer initStream() which is more general */
1381{
 /* LZ4_resetStream() body (the signature on original line 1380,
  * `void LZ4_resetStream (LZ4_stream_t* LZ4_stream)`, is missing from this
  * extract -- confirm against upstream lz4.c).
  * Unconditionally zeroes the whole stream structure; unlike
  * LZ4_initStream() it performs no NULL/size/alignment validation. */
1382 DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
1383 MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
1384}
1385
1389
1391{
 /* LZ4_freeStream() body (the signature on original line 1390 and the
  * FREEMEM(LZ4_stream) call on original line 1394 are missing from this
  * extract -- confirm against upstream lz4.c).
  * Releases a stream allocated by LZ4_createStream(); always returns 0. */
1392 if (!LZ4_stream) return 0; /* support free on NULL */
1393 DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
1395 return (0);
1396}
1397
1398
1399#define HASH_UNIT sizeof(reg_t)
/* LZ4_loadDict() :
 * Preloads a dictionary into LZ4_dict so that subsequent _continue()
 * compressions can reference it for matches. Only the last 64 KB of the
 * dictionary are indexed. Returns the number of dictionary bytes retained
 * (0 when the dictionary is too small to index).
 * NOTE(review): documentation extract -- original line 1414, the full
 * context reset announced by the comment below, is missing here; confirm
 * against upstream lz4.c. */
1400int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
1401{
1402 LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
1403 const tableType_t tableType = byU32;
1404 const BYTE* p = (const BYTE*)dictionary;
1405 const BYTE* const dictEnd = p + dictSize;
1406 const BYTE* base;
1407
1408 DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
1409
1410 /* It's necessary to reset the context,
1411 * and not just continue it with prepareTable()
1412 * to avoid any risk of generating overflowing matchIndex
1413 * when compressing using this dictionary */
1415
1416 /* We always increment the offset by 64 KB, since, if the dict is longer,
1417 * we truncate it to the last 64k, and if it's shorter, we still want to
1418 * advance by a whole window length so we can provide the guarantee that
1419 * there are only valid offsets in the window, which allows an optimization
1420 * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
1421 * dictionary isn't a full 64k. */
1422 dict->currentOffset += 64 KB;
1423
 /* dictionary too small to index even a single hash unit */
1424 if (dictSize < (int)HASH_UNIT) {
1425 return 0;
1426 }
1427
 /* keep only the most recent 64 KB of the dictionary */
1428 if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
1429 base = dictEnd - dict->currentOffset;
1430 dict->dictionary = p;
1431 dict->dictSize = (U32)(dictEnd - p);
1432 dict->tableType = tableType;
1433
 /* index one position every 3 bytes: cheaper table fill, still useful coverage */
1434 while (p <= dictEnd-HASH_UNIT) {
1435 LZ4_putPosition(p, dict->hashTable, tableType, base);
1436 p+=3;
1437 }
1438
1439 return (int)dict->dictSize;
1440}
1441
 /* LZ4_attach_dictionary() body (the signature on original line 1442 is
  * missing from this extract, as are lines 1447 -- part of the DEBUGLOG
  * arguments -- and 1454, the LZ4_resetStream_fast() call announced by the
  * comment below; confirm against upstream lz4.c).
  * Associates a dictionary context (produced by LZ4_loadDict()) with a
  * working stream, or detaches it when the dictionary stream is NULL/empty. */
1443 const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
1444 &(dictionaryStream->internal_donotuse);
1445
1446 DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
1448 dictCtx != NULL ? dictCtx->dictSize : 0);
1449
1450 /* Calling LZ4_resetStream_fast() here makes sure that changes will not be
1451 * erased by subsequent calls to LZ4_resetStream_fast() in case stream was
1452 * marked as having dirty context, e.g. requiring full reset.
1453 */
1455
1456 if (dictCtx != NULL) {
1457 /* If the current offset is zero, we will never look in the
1458 * external dictionary context, since there is no value a table
1459 * entry can take that indicate a miss. In that case, we need
1460 * to bump the offset to something non-zero.
1461 */
1462 if (workingStream->internal_donotuse.currentOffset == 0) {
1463 workingStream->internal_donotuse.currentOffset = 64 KB;
1464 }
1465
1466 /* Don't actually attach an empty dictionary.
1467 */
1468 if (dictCtx->dictSize == 0) {
1469 dictCtx = NULL;
1470 }
1471 }
1472 workingStream->internal_donotuse.dictCtx = dictCtx;
1473}
1474
1475
1476static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
1477{
1478 assert(nextSize >= 0);
1479 if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
1480 /* rescale hash table */
1481 U32 const delta = LZ4_dict->currentOffset - 64 KB;
1482 const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
1483 int i;
1484 DEBUGLOG(4, "LZ4_renormDictT");
1485 for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
1486 if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
1487 else LZ4_dict->hashTable[i] -= delta;
1488 }
1489 LZ4_dict->currentOffset = 64 KB;
1490 if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
1491 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
1492 }
1493}
1494
1495
 /* LZ4_compress_fast_continue() body (the first signature line on original
  * line 1496 is missing from this extract, as are lines 1509, 1533, 1535,
  * 1553, 1555, 1559 and 1561 -- the LZ4_compress_generic() calls that
  * produce the return values; confirm against upstream lz4.c).
  * Streaming compression: compresses one block while allowing matches into
  * previously-compressed data and/or an attached dictionary context. */
1497 const char* source, char* dest,
1498 int inputSize, int maxOutputSize,
1499 int acceleration)
1500{
1501 const tableType_t tableType = byU32;
1502 LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
1503 const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1504
1505 DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
1506
1507 if (streamPtr->dirty) { return 0; } /* Uninitialized structure detected */
1508 LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
1510
1511 /* invalidate tiny dictionaries */
 /* dictSize-1 < 4-1 with unsigned wrap-around also catches dictSize == 0 */
1512 if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */
1513 && (dictEnd != (const BYTE*)source) ) {
1514 DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
1515 streamPtr->dictSize = 0;
1516 streamPtr->dictionary = (const BYTE*)source;
1517 dictEnd = (const BYTE*)source;
1518 }
1519
1520 /* Check overlapping input/dictionary space */
 /* if the new input overwrites part of the dictionary window, shrink the
  * dictionary to the still-valid suffix */
1521 { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
1522 if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1523 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1524 if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1525 if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1526 streamPtr->dictionary = dictEnd - streamPtr->dictSize;
1527 }
1528 }
1529
1530 /* prefix mode : source data follows dictionary */
1531 if (dictEnd == (const BYTE*)source) {
1532 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1534 else
1536 }
1537
1538 /* external dictionary mode */
1539 { int result;
1540 if (streamPtr->dictCtx) {
1541 /* We depend here on the fact that dictCtx'es (produced by
1542 * LZ4_loadDict) guarantee that their tables contain no references
1543 * to offsets between dictCtx->currentOffset - 64 KB and
1544 * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
1545 * to use noDictIssue even when the dict isn't a full 64 KB.
1546 */
1547 if (inputSize > 4 KB) {
1548 /* For compressing large blobs, it is faster to pay the setup
1549 * cost to copy the dictionary's tables into the active context,
1550 * so that the compression loop is only looking into one table.
1551 */
1552 memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t));
1554 } else {
1556 }
1557 } else {
1558 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1560 } else {
1562 }
1563 }
 /* this block becomes the dictionary for the next _continue() call */
1564 streamPtr->dictionary = (const BYTE*)source;
1565 streamPtr->dictSize = (U32)inputSize;
1566 return result;
1567 }
1568}
1569
1570
1571/* Hidden debug function, to force-test external dictionary mode */
1573{
 /* LZ4_compress_forceExtDict() body (the signature on original line 1572
  * and lines 1580/1582 -- the LZ4_compress_generic() calls assigning
  * `result` -- are missing from this extract; confirm against upstream
  * lz4.c). Behaves like _continue() but always exercises the usingExtDict
  * code path. */
1574 LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
1575 int result;
1576
1577 LZ4_renormDictT(streamPtr, srcSize); /* avoid index overflow */
1578
1579 if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
1581 } else {
1583 }
1584
 /* this block becomes the dictionary for the next call */
1585 streamPtr->dictionary = (const BYTE*)source;
1586 streamPtr->dictSize = (U32)srcSize;
1587
1588 return result;
1589}
1590
1591
1600{
 /* LZ4_saveDict() body (the signature on original lines 1592-1599,
  * `int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)`
  * per upstream -- is missing from this extract; confirm against lz4.c).
  * Copies the last `dictSize` bytes of the current dictionary window into
  * a caller-owned buffer (clamped to 64 KB and to the actual dictionary
  * size), then re-points the stream's dictionary at that buffer so the
  * stream survives the original data being overwritten.
  * Returns the number of bytes saved. */
1601 LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1602 const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
1603
1604 if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
1605 if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
1606
 /* memmove: source and destination ranges may overlap */
1607 memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
1608
1609 dict->dictionary = (const BYTE*)safeBuffer;
1610 dict->dictSize = (U32)dictSize;
1611
1612 return dictSize;
1613}
1614
1615
1616
1617/*-*******************************
1618 * Decompression functions
1619 ********************************/
1620
1623
1624#undef MIN
1625#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
1626
1627/* Read the variable-length literal or match length.
1628 *
1629 * ip - pointer to use as input.
1630 * lencheck - end ip. Return an error if ip advances >= lencheck.
1631 * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so.
1632 * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so.
1633 * error (output) - error code. Should be set to 0 before call.
1634 */
1635// BEGIN EPIC MOD : The 'ok' identifier collides with symbols in the MacOS SDK
1637// END EPIC MOD
1638LZ4_FORCE_INLINE unsigned
 /* read_variable_length() body (the parameter-list line on original line
  * 1639 is missing from this extract; per the comment block above, the
  * parameters are the input cursor `ip`, the `lencheck` end pointer, the
  * `loop_check`/`initial_check` flags and the `error` out-parameter --
  * confirm against upstream lz4.c).
  * Decodes LZ4's variable-length length encoding: sums successive bytes,
  * stopping at the first byte != 255, with optional bounds checks that
  * report through *error. */
1640{
1641 unsigned length = 0;
1642 unsigned s;
1643 if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
1644 *error = initial_error;
1645 return length;
1646 }
1647 do {
1648 s = **ip;
1649 (*ip)++;
1650 length += s;
1651 if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
1652 *error = loop_error;
1653 return length;
1654 }
1655 } while (s==255);
1656
1657 return length;
1658}
1659
1668 const char* const src,
1669 char* const dst,
1670 int srcSize,
1671 int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
1672
1673 endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */
1674 earlyEnd_directive partialDecoding, /* full, partial */
1675 dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
1676 const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
1677 const BYTE* const dictStart, /* only if dict==usingExtDict */
1678 const size_t dictSize /* note : = 0 if noDict */
1679 )
1680{
1681 if (src == NULL) { return -1; }
1682
1683 { const BYTE* ip = (const BYTE*) src;
1684 const BYTE* const iend = ip + srcSize;
1685
1686 BYTE* op = (BYTE*) dst;
1687 BYTE* const oend = op + outputSize;
1688 BYTE* cpy;
1689
1690 const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
1691
1692 const int safeDecode = (endOnInput==endOnInputSize);
1693 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
1694
1695
1696 /* Set up the "end" pointers for the shortcut. */
1697 const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
1698 const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
1699
1700 const BYTE* match;
1701 size_t offset;
1702 unsigned token;
1703 size_t length;
1704
1705
1706 DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
1707
1708 /* Special cases */
1709 assert(lowPrefix <= op);
1710 if ((endOnInput) && (unlikely(outputSize==0))) {
1711 /* Empty output buffer */
1712 if (partialDecoding) return 0;
1713 return ((srcSize==1) && (*ip==0)) ? 0 : -1;
1714 }
1715 if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
1716 if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
1717
1718 /* Currently the fast loop shows a regression on qualcomm arm chips. */
1719#if LZ4_FAST_DEC_LOOP
1720 if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
1721 DEBUGLOG(6, "skip fast decode loop");
1722 goto safe_decode;
1723 }
1724
1725 /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
1726 while (1) {
1727 /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
1729 if (endOnInput) { assert(ip < iend); }
1730 token = *ip++;
1731 length = token >> ML_BITS; /* literal length */
1732
1733 assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1734
1735 /* decode literal length */
1736 if (length == RUN_MASK) {
1737// BEGIN EPIC MOD : The 'ok' identifier collides with symbols in the MacOS SDK
1739// END EPIC MOD
1741 if (error == initial_error) { goto _output_error; }
1742 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1743 if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1744
1745 /* copy literals */
1746 cpy = op+length;
1748 if (endOnInput) { /* LZ4_decompress_safe() */
1749 if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
1751 } else { /* LZ4_decompress_fast() */
1752 if (cpy>oend-8) { goto safe_literal_copy; }
1753 LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1754 * it doesn't know input length, and only relies on end-of-block properties */
1755 }
1756 ip += length; op = cpy;
1757 } else {
1758 cpy = op+length;
1759 if (endOnInput) { /* LZ4_decompress_safe() */
1760 DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
1761 /* We don't need to check oend, since we check it once for each loop below */
1762 if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
1763 /* Literals can only be 14, but hope compilers optimize if we copy by a register size */
1764 memcpy(op, ip, 16);
1765 } else { /* LZ4_decompress_fast() */
1766 /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
1767 * it doesn't know input length, and relies on end-of-block properties */
1768 memcpy(op, ip, 8);
1769 if (length > 8) { memcpy(op+8, ip+8, 8); }
1770 }
1771 ip += length; op = cpy;
1772 }
1773
1774 /* get offset */
1775 offset = LZ4_readLE16(ip); ip+=2;
1776 match = op - offset;
1777 assert(match <= op);
1778
1779 /* get matchlength */
1780 length = token & ML_MASK;
1781
1782 if (length == ML_MASK) {
1783// BEGIN EPIC MOD : The 'ok' identifier collides with symbols in the MacOS SDK
1785// END EPIC MOD
1786 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1787 length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
1788// BEGIN EPIC MOD : The 'ok' identifier collides with symbols in the MacOS SDK
1789 if (error != length_ok) { goto _output_error; }
1790// END EPIC MOD
1791 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
1792 length += MINMATCH;
1793 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1794 goto safe_match_copy;
1795 }
1796 } else {
1797 length += MINMATCH;
1798 if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
1799 goto safe_match_copy;
1800 }
1801
1802 /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
1803 if ((dict == withPrefix64k) || (match >= lowPrefix)) {
1804 if (offset >= 8) {
1806 assert(match <= op);
1807 assert(op + 18 <= oend);
1808
1809 memcpy(op, match, 8);
1810 memcpy(op+8, match+8, 8);
1811 memcpy(op+16, match+16, 2);
1812 op += length;
1813 continue;
1814 } } }
1815
1816 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
1817 /* match starting within external dictionary */
1818 if ((dict==usingExtDict) && (match < lowPrefix)) {
1819 if (unlikely(op+length > oend-LASTLITERALS)) {
1820 if (partialDecoding) {
1821 length = MIN(length, (size_t)(oend-op)); /* reach end of buffer */
1822 } else {
1823 goto _output_error; /* end-of-block condition violated */
1824 } }
1825
1826 if (length <= (size_t)(lowPrefix-match)) {
1827 /* match fits entirely within external dictionary : just copy */
1828 memmove(op, dictEnd - (lowPrefix-match), length);
1829 op += length;
1830 } else {
1831 /* match stretches into both external dictionary and current block */
1832 size_t const copySize = (size_t)(lowPrefix - match);
1833 size_t const restSize = length - copySize;
1835 op += copySize;
1836 if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
1837 BYTE* const endOfMatch = op + restSize;
1838 const BYTE* copyFrom = lowPrefix;
1839 while (op < endOfMatch) { *op++ = *copyFrom++; }
1840 } else {
1842 op += restSize;
1843 } }
1844 continue;
1845 }
1846
1847 /* copy match within block */
1848 cpy = op + length;
1849
1850 assert((op <= oend) && (oend-op >= 32));
1851 if (unlikely(offset<16)) {
1853 } else {
1855 }
1856
1857 op = cpy; /* wildcopy correction */
1858 }
1860#endif
1861
1862 /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
1863 while (1) {
1864 token = *ip++;
1865 length = token >> ML_BITS; /* literal length */
1866
1867 assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
1868
1869 /* A two-stage shortcut for the most common case:
1870 * 1) If the literal length is 0..14, and there is enough space,
1871 * enter the shortcut and copy 16 bytes on behalf of the literals
1872 * (in the fast mode, only 8 bytes can be safely copied this way).
1873 * 2) Further if the match length is 4..18, copy 18 bytes in a similar
1874 * manner; but we ensure that there's enough space in the output for
1875 * those 18 bytes earlier, upon entering the shortcut (in other words,
1876 * there is a combined check for both stages).
1877 */
1878 if ( (endOnInput ? length != RUN_MASK : length <= 8)
1879 /* strictly "less than" on input, to re-enter the loop with at least one byte */
1880 && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
1881 /* Copy the literals */
1882 memcpy(op, ip, endOnInput ? 16 : 8);
1883 op += length; ip += length;
1884
1885 /* The second stage: prepare for match copying, decode full info.
1886 * If it doesn't work out, the info won't be wasted. */
1887 length = token & ML_MASK; /* match length */
1888 offset = LZ4_readLE16(ip); ip += 2;
1889 match = op - offset;
1890 assert(match <= op); /* check overflow */
1891
1892 /* Do not deal with overlapping matches. */
1893 if ( (length != ML_MASK)
1894 && (offset >= 8)
1895 && (dict==withPrefix64k || match >= lowPrefix) ) {
1896 /* Copy the match. */
1897 memcpy(op + 0, match + 0, 8);
1898 memcpy(op + 8, match + 8, 8);
1899 memcpy(op +16, match +16, 2);
1900 op += length + MINMATCH;
1901 /* Both stages worked, load the next token. */
1902 continue;
1903 }
1904
1905 /* The second stage didn't work out, but the info is ready.
1906 * Propel it right to the point of match copying. */
1907 goto _copy_match;
1908 }
1909
1910 /* decode literal length */
1911 if (length == RUN_MASK) {
1912// BEGIN EPIC MOD : The 'ok' identifier collides with symbols in the MacOS SDK
1914// END EPIC MOD
1916 if (error == initial_error) { goto _output_error; }
1917 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
1918 if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
1919 }
1920
1921 /* copy literals */
1922 cpy = op+length;
1923#if LZ4_FAST_DEC_LOOP
1925#endif
1927 if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
1928 || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
1929 {
 1930 /* We've either hit the input parsing restriction or the output parsing restriction.
 1931 * If we've hit the input parsing condition then this must be the last sequence.
 1932 * If we've hit the output parsing condition then we are either using
 1933 * partialDecoding, or the input data is malformed.
 1934 */
1935 if (partialDecoding) {
1936 /* Since we are partial decoding we may be in this block because of the output parsing
1937 * restriction, which is not valid since the output buffer is allowed to be undersized.
1938 */
1940 /* If we're in this block because of the input parsing condition, then we must be on the
1941 * last sequence (or invalid), so we must check that we exactly consume the input.
1942 */
1943 if ((ip+length>iend-(2+1+LASTLITERALS)) && (ip+length != iend)) { goto _output_error; }
1944 assert(ip+length <= iend);
1945 /* We are finishing in the middle of a literals segment.
1946 * Break after the copy.
1947 */
1948 if (cpy > oend) {
1949 cpy = oend;
1950 assert(op<=oend);
1951 length = (size_t)(oend-op);
1952 }
1953 assert(ip+length <= iend);
1954 } else {
1955 /* We must be on the last sequence because of the parsing limitations so check
1956 * that we exactly regenerate the original size (must be exact when !endOnInput).
1957 */
1958 if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
1959 /* We must be on the last sequence (or invalid) because of the parsing limitations
1960 * so check that we exactly consume the input and don't overrun the output buffer.
1961 */
1962 if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) { goto _output_error; }
1963 }
1964 memmove(op, ip, length); /* supports overlapping memory regions, which only matters for in-place decompression scenarios */
1965 ip += length;
1966 op += length;
1967 /* Necessarily EOF when !partialDecoding. When partialDecoding
1968 * it is EOF if we've either filled the output buffer or hit
1969 * the input parsing restriction.
1970 */
1971 if (!partialDecoding || (cpy == oend) || (ip == iend)) {
1972 break;
1973 }
1974 } else {
1975 LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
1976 ip += length; op = cpy;
1977 }
1978
1979 /* get offset */
1980 offset = LZ4_readLE16(ip); ip+=2;
1981 match = op - offset;
1982
1983 /* get matchlength */
1984 length = token & ML_MASK;
1985
1987 if (length == ML_MASK) {
1988// BEGIN EPIC MOD : The 'ok' identifier collides with symbols in the MacOS SDK
1990// END EPIC MOD
1991 length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error);
1992// BEGIN EPIC MOD : The 'ok' identifier collides with symbols in the MacOS SDK
1993 if (error != length_ok) goto _output_error;
1994// END EPIC MOD
1995 if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
1996 }
1997 length += MINMATCH;
1998
1999#if LZ4_FAST_DEC_LOOP
2001#endif
2002 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
2003 /* match starting within external dictionary */
2004 if ((dict==usingExtDict) && (match < lowPrefix)) {
2005 if (unlikely(op+length > oend-LASTLITERALS)) {
2006 if (partialDecoding) length = MIN(length, (size_t)(oend-op));
2007 else goto _output_error; /* doesn't respect parsing restriction */
2008 }
2009
2010 if (length <= (size_t)(lowPrefix-match)) {
2011 /* match fits entirely within external dictionary : just copy */
2012 memmove(op, dictEnd - (lowPrefix-match), length);
2013 op += length;
2014 } else {
2015 /* match stretches into both external dictionary and current block */
2016 size_t const copySize = (size_t)(lowPrefix - match);
2017 size_t const restSize = length - copySize;
2019 op += copySize;
2020 if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
2021 BYTE* const endOfMatch = op + restSize;
2022 const BYTE* copyFrom = lowPrefix;
2023 while (op < endOfMatch) *op++ = *copyFrom++;
2024 } else {
2026 op += restSize;
2027 } }
2028 continue;
2029 }
2031
2032 /* copy match within block */
2033 cpy = op + length;
2034
2035 /* partialDecoding : may end anywhere within the block */
2036 assert(op<=oend);
2038 size_t const mlen = MIN(length, (size_t)(oend-op));
2039 const BYTE* const matchEnd = match + mlen;
2040 BYTE* const copyEnd = op + mlen;
2041 if (matchEnd > op) { /* overlap copy */
2042 while (op < copyEnd) { *op++ = *match++; }
2043 } else {
2044 memcpy(op, match, mlen);
2045 }
2046 op = copyEnd;
2047 if (op == oend) { break; }
2048 continue;
2049 }
2050
2051 if (unlikely(offset<8)) {
2052 LZ4_write32(op, 0); /* silence msan warning when offset==0 */
2053 op[0] = match[0];
2054 op[1] = match[1];
2055 op[2] = match[2];
2056 op[3] = match[3];
2057 match += inc32table[offset];
2058 memcpy(op+4, match, 4);
2059 match -= dec64table[offset];
2060 } else {
2061 memcpy(op, match, 8);
2062 match += 8;
2063 }
2064 op += 8;
2065
2067 BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
2068 if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
2069 if (op < oCopyLimit) {
2071 match += oCopyLimit - op;
2072 op = oCopyLimit;
2073 }
2074 while (op < cpy) { *op++ = *match++; }
2075 } else {
2076 memcpy(op, match, 8);
2077 if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
2078 }
2079 op = cpy; /* wildcopy correction */
2080 }
2081
2082 /* end of decoding */
2083 if (endOnInput) {
2084 return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
2085 } else {
2086 return (int) (((const char*)ip)-src); /* Nb of input bytes read */
2087 }
2088
2089 /* Overflow error detected */
2091 return (int) (-(((const char*)ip)-src))-1;
2092 }
2093}
2094
2095
2096/*===== Instantiate the API decoding functions. =====*/
2097
2105
2114
2116int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
2117{
2120 (BYTE*)dest - 64 KB, NULL, 0);
2121}
2122
2123/*===== Instantiate a few more decoding cases, used more than once. =====*/
2124
2125LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */
2132
2133/* Another obsolete API function, paired with the previous one. */
2135{
2136 /* LZ4_decompress_fast doesn't validate match offsets,
2137 * and thus serves well with any prefixed dictionary. */
2139}
2140
2142static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
2143 size_t prefixSize)
2144{
2147 (BYTE*)dest-prefixSize, NULL, 0);
2148}
2149
2153 const void* dictStart, size_t dictSize)
2154{
2157 (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2158}
2159
2161static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
2162 const void* dictStart, size_t dictSize)
2163{
2166 (BYTE*)dest, (const BYTE*)dictStart, dictSize);
2167}
2168
2169/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
2170 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
2171 * These routines are used only once, in LZ4_decompress_*_continue().
2172 */
2175 size_t prefixSize, const void* dictStart, size_t dictSize)
2176{
2179 (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2180}
2181
2184 size_t prefixSize, const void* dictStart, size_t dictSize)
2185{
2188 (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
2189}
2190
2191/*===== streaming decompression functions =====*/
2192
2194{
2196 LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
2197 return lz4s;
2198}
2199
2201{
2202 if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
2204 return 0;
2205}
2206
2213int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
2214{
2215 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2216 lz4sd->prefixSize = (size_t) dictSize;
2217 lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
2218 lz4sd->externalDict = NULL;
2219 lz4sd->extDictSize = 0;
2220 return 1;
2221}
2222
2235{
2236 if (maxBlockSize < 0) return 0;
2237 if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
2238 if (maxBlockSize < 16) maxBlockSize = 16;
2240}
2241
2242/*
2243*_continue() :
2244 These decoding functions allow decompression of multiple blocks in "streaming" mode.
2245 Previously decoded blocks must still be available at the memory position where they were decoded.
2246 If it's not possible, save the relevant part of decoded data into a safe buffer,
2247 and indicate where it stands using LZ4_setStreamDecode()
2248*/
2251{
2252 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2253 int result;
2254
2255 if (lz4sd->prefixSize == 0) {
2256 /* The first call, no dictionary yet. */
2257 assert(lz4sd->extDictSize == 0);
2259 if (result <= 0) return result;
2260 lz4sd->prefixSize = (size_t)result;
2261 lz4sd->prefixEnd = (BYTE*)dest + result;
2262 } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2263 /* They're rolling the current segment. */
2264 if (lz4sd->prefixSize >= 64 KB - 1)
2266 else if (lz4sd->extDictSize == 0)
2267 result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
2268 lz4sd->prefixSize);
2269 else
2271 lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2272 if (result <= 0) return result;
2273 lz4sd->prefixSize += (size_t)result;
2274 lz4sd->prefixEnd += result;
2275 } else {
2276 /* The buffer wraps around, or they're switching to another buffer. */
2277 lz4sd->extDictSize = lz4sd->prefixSize;
2278 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2280 lz4sd->externalDict, lz4sd->extDictSize);
2281 if (result <= 0) return result;
2282 lz4sd->prefixSize = (size_t)result;
2283 lz4sd->prefixEnd = (BYTE*)dest + result;
2284 }
2285
2286 return result;
2287}
2288
2291{
2292 LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
2293 int result;
2294 assert(originalSize >= 0);
2295
2296 if (lz4sd->prefixSize == 0) {
2297 assert(lz4sd->extDictSize == 0);
2299 if (result <= 0) return result;
2300 lz4sd->prefixSize = (size_t)originalSize;
2301 lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2302 } else if (lz4sd->prefixEnd == (BYTE*)dest) {
2303 if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
2305 else
2307 lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
2308 if (result <= 0) return result;
2309 lz4sd->prefixSize += (size_t)originalSize;
2310 lz4sd->prefixEnd += originalSize;
2311 } else {
2312 lz4sd->extDictSize = lz4sd->prefixSize;
2313 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
2314 result = LZ4_decompress_fast_extDict(source, dest, originalSize,
2315 lz4sd->externalDict, lz4sd->extDictSize);
2316 if (result <= 0) return result;
2317 lz4sd->prefixSize = (size_t)originalSize;
2318 lz4sd->prefixEnd = (BYTE*)dest + originalSize;
2319 }
2320
2321 return result;
2322}
2323
2324
2325/*
2326Advanced decoding functions :
2327*_usingDict() :
2328 These decoding functions work the same as "_continue" ones,
2329 the dictionary must be explicitly provided within parameters
2330*/
2331
2332int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
2333{
2334 if (dictSize==0)
2336 if (dictStart+dictSize == dest) {
2337 if (dictSize >= 64 KB - 1) {
2339 }
2340 assert(dictSize >= 0);
2341 return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
2342 }
2343 assert(dictSize >= 0);
2344 return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
2345}
2346
2347int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
2348{
2349 if (dictSize==0 || dictStart+dictSize == dest)
2351 assert(dictSize >= 0);
2352 return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
2353}
2354
2355
2356/*=*************************************************
2357* Obsolete Functions
2358***************************************************/
2359/* obsolete compression functions */
2364int LZ4_compress(const char* src, char* dest, int srcSize)
2365{
2367}
/* Obsolete API: compress with caller-provided state and a bounded output.
 * Forwards to LZ4_compress_fast_extState with the default acceleration. */
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
{
    int const acceleration = 1;  /* default speed/ratio trade-off */
    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, acceleration);
}
2372int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
2373{
2375}
2384
2385/*
2386These decompression functions are deprecated and should no longer be used.
2387They are only provided here for compatibility with older user programs.
2388- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
2389- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
2390*/
2391int LZ4_uncompress (const char* source, char* dest, int outputSize)
2392{
2394}
2399
2400/* Obsolete Streaming functions */
2401
2403
2404int LZ4_resetStreamState(void* state, char* inputBuffer)
2405{
2408 return 0;
2409}
2410
2412{
2414 return LZ4_createStream();
2415}
2416
2417char* LZ4_slideInputBuffer (void* state)
2418{
2419 /* avoid const char * -> char * conversion warning */
2420 return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
2421}
2422
2423#endif /* LZ4_COMMONDEFS_ONLY */
2424
2425LZ4_END_NAMESPACE // EPIC MOD : Wrap library in an optional namespace
OODEFFUNC typedef void(OODLE_CALLBACK t_fp_OodleCore_Plugin_Free)(void *ptr)
#define NULL
Definition oodle2base.h:134
RAD_U32 U32
Definition egttypes.h:501
RAD_U64 U64
Definition egttypes.h:511
RAD_S32 S32
Definition egttypes.h:496
RAD_U16 U16
Definition egttypes.h:491
#define LZ4_HASHTABLESIZE
Definition lz4.h:641
#define LZ4_COMPRESSBOUND(isize)
Definition lz4.h:198
#define LZ4_MEMORY_USAGE
Definition lz4.h:147
#define LZ4_VERSION_STRING
Definition lz4.h:126
#define LZ4_HASH_SIZE_U32
Definition lz4.h:642
#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize)
Definition lz4.h:430
#define LZ4_MAX_INPUT_SIZE
Definition lz4.h:197
#define LZ4_VERSION_NUMBER
Definition lz4.h:121
#define LZ4_HASHLOG
Definition lz4.h:640
UE_FORCEINLINE_HINT TSharedRef< CastToType, Mode > StaticCastSharedRef(TSharedRef< CastFromType, Mode > const &InSharedRef)
Definition SharedPointer.h:127
float swift_float2 __attribute__((__ext_vector_type__(2)))
Definition MarketplaceKitWrapper.h:67
const char * source
Definition lz4.h:711
char int srcSize
Definition lz4.h:709
char int compressedSize
Definition lz4.h:735
#define LZ4_STREAMSIZE
Definition lz4.h:636
char * inputBuffer
Definition lz4.h:731
const char char int inputSize
Definition lz4.h:711
char * dst
Definition lz4.h:735
#define LZ4_STREAMDECODESIZE
Definition lz4.h:667
char int int maxOutputSize
Definition lz4.h:710
#define LZ4_BEGIN_NAMESPACE
Definition lz4.h:47
char int originalSize
Definition lz4.h:736
#define LZ4_END_NAMESPACE
Definition lz4.h:48
char int outputSize
Definition lz4.h:717
char int isize
Definition lz4.h:718
char * dest
Definition lz4.h:709
memcpy(InputBufferBase, BinkBlocksData, BinkBlocksSize)
int LZ4_decompress_safe_forceExtDict(const char *source, char *dest, int compressedSize, int maxOutputSize, const void *dictStart, size_t dictSize)
Definition lz4.c.inl:2151
LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_partial(const char *src, char *dst, int compressedSize, int targetOutputSize, int dstCapacity)
Definition lz4.c.inl:2107
LZ4_FORCE_O2_INLINE_GCC_PPC64LE void LZ4_wildCopy8(void *dstPtr, const void *srcPtr, void *dstEnd)
Definition lz4.c.inl:362
#define STEPSIZE
Definition lz4.c.inl:537
#define KB
Definition lz4.c.inl:198
int LZ4_compress_fast_extState(void *state, const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration)
Definition lz4.c.inl:1191
int LZ4_decompress_fast_usingDict(const char *source, char *dest, int originalSize, const char *dictStart, int dictSize)
Definition lz4.c.inl:2347
int LZ4_sizeofStreamState()
Definition lz4.c.inl:2402
#define LZ4_STATIC_ASSERT(c)
Definition lz4.c.inl:224
endCondition_directive
Definition lz4.c.inl:1621
@ endOnInputSize
Definition lz4.c.inl:1621
@ endOnOutputSize
Definition lz4.c.inl:1621
unsigned long long U64
Definition lz4.c.inl:259
earlyEnd_directive
Definition lz4.c.inl:1622
@ partial_decode
Definition lz4.c.inl:1622
@ decode_full_block
Definition lz4.c.inl:1622
size_t reg_t
Definition lz4.c.inl:266
#define ACCELERATION_DEFAULT
Definition lz4.c.inl:51
int LZ4_compressBound(int isize)
Definition lz4.c.inl:610
int LZ4_loadDict(LZ4_stream_t *LZ4_dict, const char *dictionary, int dictSize)
Definition lz4.c.inl:1400
#define MINMATCH
Definition lz4.c.inl:189
LZ4_FORCE_INLINE int LZ4_decompress_fast_doubleDict(const char *source, char *dest, int originalSize, size_t prefixSize, const void *dictStart, size_t dictSize)
Definition lz4.c.inl:2183
int LZ4_decompress_safe_usingDict(const char *source, char *dest, int compressedSize, int maxOutputSize, const char *dictStart, int dictSize)
Definition lz4.c.inl:2332
#define MIN(a, b)
Definition lz4.c.inl:1625
#define GB
Definition lz4.c.inl:200
int LZ4_compress_limitedOutput(const char *source, char *dest, int inputSize, int maxOutputSize)
Definition lz4.c.inl:2360
int LZ4_decoderRingBufferSize(int maxBlockSize)
Definition lz4.c.inl:2234
int LZ4_compress_default(const char *src, char *dst, int srcSize, int maxOutputSize)
Definition lz4.c.inl:1278
int LZ4_compress_limitedOutput_continue(LZ4_stream_t *LZ4_stream, const char *src, char *dst, int srcSize, int dstCapacity)
Definition lz4.c.inl:2376
unsigned char BYTE
Definition lz4.c.inl:255
LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest, int compressedSize, int maxOutputSize)
Definition lz4.c.inl:2126
int LZ4_compress_destSize(const char *src, char *dst, int *srcSizePtr, int targetDstSize)
Definition lz4.c.inl:1320
#define LZ4_FORCE_O2_GCC_PPC64LE
Definition lz4.c.inl:157
LZ4_FORCE_INLINE unsigned read_variable_length(const BYTE **ip, const BYTE *lencheck, int loop_check, int initial_check, variable_length_error *error)
Definition lz4.c.inl:1639
signed int S32
Definition lz4.c.inl:258
#define LZ4_FORCE_O2_INLINE_GCC_PPC64LE
Definition lz4.c.inl:158
LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize)
Definition lz4.c.inl:2099
int LZ4_compress(const char *src, char *dest, int srcSize)
Definition lz4.c.inl:2364
void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
Definition lz4.c.inl:1380
#define LZ4_DISTANCE_ABSOLUTE_MAX
Definition lz4.c.inl:202
void LZ4_resetStream_fast(LZ4_stream_t *ctx)
Definition lz4.c.inl:1386
int LZ4_freeStream(LZ4_stream_t *LZ4_stream)
Definition lz4.c.inl:1390
#define MFLIMIT
Definition lz4.c.inl:193
const char * LZ4_versionString(void)
Definition lz4.c.inl:609
LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase, tableType_t tableType, const BYTE *srcBase)
Definition lz4.c.inl:705
int LZ4_compress_fast(const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration)
Definition lz4.c.inl:1259
int LZ4_uncompress(const char *source, char *dest, int outputSize)
Definition lz4.c.inl:2391
char * LZ4_slideInputBuffer(void *state)
Definition lz4.c.inl:2417
int LZ4_compress_continue(LZ4_stream_t *LZ4_stream, const char *source, char *dest, int inputSize)
Definition lz4.c.inl:2380
LZ4_stream_t * LZ4_initStream(void *buffer, size_t size)
Definition lz4.c.inl:1364
#define LASTLITERALS
Definition lz4.c.inl:192
int LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize, int maxOutputSize)
Definition lz4.c.inl:2395
#define DEBUGLOG(l,...)
Definition lz4.c.inl:236
LZ4_END_NAMESPACE LZ4_BEGIN_NAMESPACE int LZ4_compress_forceExtDict(LZ4_stream_t *LZ4_dict, const char *source, char *dest, int srcSize)
Definition lz4.c.inl:1572
LZ4_FORCE_INLINE void LZ4_prepareTable(LZ4_stream_t_internal *const cctx, const int inputSize, const tableType_t tableType)
Definition lz4.c.inl:750
LZ4_FORCE_INLINE int LZ4_decompress_safe_doubleDict(const char *source, char *dest, int compressedSize, int maxOutputSize, size_t prefixSize, const void *dictStart, size_t dictSize)
Definition lz4.c.inl:2174
LZ4_FORCE_INLINE unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit)
Definition lz4.c.inl:539
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, const char *dictionary, int dictSize)
Definition lz4.c.inl:2213
#define ALLOC(s)
Definition lz4.c.inl:179
#define FREEMEM(p)
Definition lz4.c.inl:181
int LZ4_versionNumber(void)
Definition lz4.c.inl:608
variable_length_error
Definition lz4.c.inl:1636
@ length_ok
Definition lz4.c.inl:1636
@ loop_error
Definition lz4.c.inl:1636
@ initial_error
Definition lz4.c.inl:1636
dictIssue_directive
Definition lz4.c.inl:602
@ noDictIssue
Definition lz4.c.inl:602
@ dictSmall
Definition lz4.c.inl:602
LZ4_stream_t * LZ4_createStream(void)
Definition lz4.c.inl:1344
int LZ4_compress_withState(void *state, const char *src, char *dst, int srcSize)
Definition lz4.c.inl:2372
#define likely(expr)
Definition lz4.c.inl:168
LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int compressedSize, int maxOutputSize)
Definition lz4.c.inl:2250
int LZ4_compress_fast_force(const char *src, char *dst, int srcSize, int dstCapacity, int acceleration)
Definition lz4.c.inl:1286
#define FASTLOOP_SAFE_DISTANCE
Definition lz4.c.inl:195
int LZ4_sizeofState()
Definition lz4.c.inl:611
#define ML_BITS
Definition lz4.c.inl:207
#define ML_MASK
Definition lz4.c.inl:208
#define ALLOC_AND_ZERO(s)
Definition lz4.c.inl:180
#define MEM_INIT(p, v, s)
Definition lz4.c.inl:183
LZ4_FORCE_INLINE const BYTE * LZ4_getPosition(const BYTE *p, const void *tableBase, tableType_t tableType, const BYTE *srcBase)
Definition lz4.c.inl:741
#define HASH_UNIT
Definition lz4.c.inl:1399
dict_directive
Definition lz4.c.inl:601
@ noDict
Definition lz4.c.inl:601
@ withPrefix64k
Definition lz4.c.inl:601
@ usingExtDict
Definition lz4.c.inl:601
@ usingDictCtx
Definition lz4.c.inl:601
LZ4_FORCE_INLINE int LZ4_compress_generic(LZ4_stream_t_internal *const cctx, const char *const source, char *const dest, const int inputSize, int *inputConsumed, const int maxOutputSize, const limitedOutput_directive outputDirective, const tableType_t tableType, const dict_directive dictDirective, const dictIssue_directive dictIssue, const int acceleration)
Definition lz4.c.inl:800
size_t uptrval
Definition lz4.c.inl:260
limitedOutput_directive
Definition lz4.c.inl:269
@ limitedOutput
Definition lz4.c.inl:271
@ fillOutput
Definition lz4.c.inl:272
@ notLimited
Definition lz4.c.inl:270
int LZ4_compress_limitedOutput_withState(void *state, const char *src, char *dst, int srcSize, int dstSize)
Definition lz4.c.inl:2368
int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
Definition lz4.c.inl:1599
#define LZ4_FORCE_INLINE
Definition lz4.c.inl:134
int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration)
Definition lz4.c.inl:1496
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void *const p, tableType_t const tableType)
Definition lz4.c.inl:662
unsigned int U32
Definition lz4.c.inl:257
LZ4_FORCE_INLINE int LZ4_decompress_generic(const char *const src, char *const dst, int srcSize, int outputSize, endCondition_directive endOnInput, earlyEnd_directive partialDecoding, dict_directive dict, const BYTE *const lowPrefix, const BYTE *const dictStart, const size_t dictSize)
Definition lz4.c.inl:1667
int LZ4_resetStreamState(void *state, char *inputBuffer)
Definition lz4.c.inl:2404
#define MATCH_SAFEGUARD_DISTANCE
Definition lz4.c.inl:194
#define assert(condition)
Definition lz4.c.inl:220
#define unlikely(expr)
Definition lz4.c.inl:171
int LZ4_compress_fast_extState_fastReset(void *state, const char *src, char *dst, int srcSize, int dstCapacity, int acceleration)
Definition lz4.c.inl:1222
LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode, const char *source, char *dest, int originalSize)
Definition lz4.c.inl:2290
#define WILDCOPYLENGTH
Definition lz4.c.inl:191
unsigned short U16
Definition lz4.c.inl:256
void LZ4_attach_dictionary(LZ4_stream_t *workingStream, const LZ4_stream_t *dictionaryStream)
Definition lz4.c.inl:1442
tableType_t
Definition lz4.c.inl:576
@ clearedTable
Definition lz4.c.inl:576
@ byU16
Definition lz4.c.inl:576
@ byPtr
Definition lz4.c.inl:576
@ byU32
Definition lz4.c.inl:576
#define RUN_MASK
Definition lz4.c.inl:210
int LZ4_decompress_fast_withPrefix64k(const char *source, char *dest, int originalSize)
Definition lz4.c.inl:2134
LZ4_FORCE_O2_GCC_PPC64LE int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
Definition lz4.c.inl:2116
int LZ4_freeStreamDecode(LZ4_streamDecode_t *LZ4_stream)
Definition lz4.c.inl:2200
LZ4_streamDecode_t * LZ4_createStreamDecode(void)
Definition lz4.c.inl:2193
void * LZ4_create(char *inputBuffer)
Definition lz4.c.inl:2411
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE *p, U32 h, void *tableBase, tableType_t const tableType, const BYTE *srcBase)
Definition lz4.cpp:804
earlyEnd_directive
Definition lz4.cpp:1765
size_t reg_t
Definition lz4.cpp:316
unsigned char BYTE
Definition lz4.cpp:305
LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
Definition lz4.cpp:753
LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void *tableBase, tableType_t tableType)
Definition lz4.cpp:829
dictIssue_directive
Definition lz4.cpp:719
LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void *tableBase, tableType_t const tableType)
Definition lz4.cpp:792
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void *tableBase, tableType_t const tableType)
Definition lz4.cpp:780
dict_directive
Definition lz4.cpp:718
limitedOutput_directive
Definition lz4.cpp:319
LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
Definition lz4.cpp:761
tableType_t
Definition lz4.cpp:693
SIZE_T uptrval
Definition lz4.cpp:310
#define op
#define anchor
#define ip
float v
Definition radaudio_mdct.cpp:62
Definition lz4.h:702
SIZE_T prefixSize
Definition lz4.h:706
Definition lz4.h:663
LZ4_u32 currentOffset
Definition lz4.h:667
const LZ4_byte * dictionary
Definition lz4.h:665
const LZ4_stream_t_internal * dictCtx
Definition lz4.h:666
LZ4_u32 dictSize
Definition lz4.h:669
LZ4_u32 hashTable[LZ4_HASH_SIZE_U32]
Definition lz4.h:664
Definition lz4.h:710
Definition lz4.h:674
LZ4_stream_t_internal internal_donotuse
Definition lz4.h:676