author    Cedric BAIL <c.bail@partner.samsung.com>  2014-06-29 12:56:56 +0200
committer Cedric BAIL <c.bail@partner.samsung.com>  2014-06-30 09:27:51 +0200
commit    45f526580bd3dd2aa1c38b409200abd22fd31b95 (patch)
tree      3d9a6ee230b8ec757b3875d4ad47d7f81857b3b8
parent    f309703d2375bb9ac3ab7f66e30b2ee262e4d060 (diff)

lz4: fix possible security issue.

See:
- http://fastcompression.blogspot.fr/2014/06/lets-move-on.html
- http://www.openwall.com/lists/oss-security/2014/06/26/25

@fix

-rw-r--r--  src/static_libs/lz4/lz4.c    2105
-rw-r--r--  src/static_libs/lz4/lz4.h     434
-rw-r--r--  src/static_libs/lz4/lz4hc.c  1563
-rw-r--r--  src/static_libs/lz4/lz4hc.h   233
4 files changed, 2615 insertions, 1720 deletions
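
Both advisories describe how a crafted compressed stream could drive the legacy lz4 decoder past the end of its output buffer. For orientation only, here is a minimal caller-side sketch of the bounds-checked entry points the updated lz4.c below exposes; the helper name and error handling are illustrative, not part of this commit. LZ4_compress_limitedOutput() returns 0 rather than overflowing a too-small output buffer, and LZ4_decompress_safe() returns a negative value on malformed input instead of writing out of bounds.

#include <stdlib.h>
#include <string.h>
#include "lz4.h"

/* Illustrative round-trip through the bounds-checked API (not part of this patch) */
int lz4_roundtrip(const char* src, int srcSize)
{
    int bound = LZ4_compressBound(srcSize);   /* worst-case compressed size */
    char* compressed = malloc(bound);
    char* restored = malloc(srcSize);
    int ok = 0;

    if (compressed && restored)
    {
        /* returns 0 if the output, sized to 'bound', would overflow */
        int csize = LZ4_compress_limitedOutput(src, compressed, srcSize, bound);
        if (csize > 0)
        {
            /* never writes past 'restored'; a negative result flags corrupt input */
            int dsize = LZ4_decompress_safe(compressed, restored, csize, srcSize);
            ok = (dsize == srcSize) && (memcmp(src, restored, srcSize) == 0);
        }
    }
    free(compressed);
    free(restored);
    return ok;
}
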
diff --git a/src/static_libs/lz4/lz4.c b/src/static_libs/lz4/lz4.c
index a651748a7a..a1475dc9a9 100644
--- a/src/static_libs/lz4/lz4.c
+++ b/src/static_libs/lz4/lz4.c
@@ -1,861 +1,1244 @@
/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2014, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 source repository : http://code.google.com/p/lz4/
   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/

/**************************************
   Tuning parameters
**************************************/
/*
 * HEAPMODE :
 * Select how default compression functions will allocate memory for their hash table,
 * in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
 */
#define HEAPMODE 0


/**************************************
   CPU Feature Detection
**************************************/
/* 32 or 64 bits ? */
#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
  || defined(__powerpc64__) || defined(__powerpc64le__) \
  || defined(__ppc64__) || defined(__ppc64le__) \
  || defined(__PPC64__) || defined(__PPC64LE__) \
  || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) )   /* Detects 64 bits mode */
# define LZ4_ARCH64 1
#else
# define LZ4_ARCH64 0
#endif

/*
 * Little Endian or Big Endian ?
 * Overwrite the #define below if you know your architecture endianness
 */
#include <stdlib.h>   /* Apparently required to detect endianness */
#if defined (__GLIBC__)
# include <endian.h>
# if (__BYTE_ORDER == __BIG_ENDIAN)
#  define LZ4_BIG_ENDIAN 1
# endif
#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
# define LZ4_BIG_ENDIAN 1
#elif defined(__sparc) || defined(__sparc__) \
   || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
   || defined(__hpux) || defined(__hppa) \
   || defined(_MIPSEB) || defined(__s390__)
# define LZ4_BIG_ENDIAN 1
#else
/* Little Endian assumed. PDP Endian and other very rare endian formats are unsupported. */
#endif

/*
 * Unaligned memory access is automatically enabled for "common" CPU, such as x86.
 * For other CPUs, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure the aligned access property.
 * If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
 */
#if defined(__ARM_FEATURE_UNALIGNED)
# define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif

/* Define this parameter if your target system or compiler does not support hardware bit count */
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
# define LZ4_FORCE_SW_BITCOUNT
#endif

/*
 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
 * This option may provide a small boost to performance for some big endian cpu, although probably modest.
 * You may set this option to 1 if data will remain within a closed environment.
 * This option is useless on Little_Endian CPU (such as x86).
 */

/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */


/**************************************
   Compiler Options
**************************************/
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
/* "restrict" is a known keyword */
#else
# define restrict   /* Disable restrict */
#endif

#ifdef _MSC_VER    /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h>                    /* For Visual 2005 */
# if LZ4_ARCH64                         /* 64-bits */
#  pragma intrinsic(_BitScanForward64)  /* For Visual 2005 */
#  pragma intrinsic(_BitScanReverse64)  /* For Visual 2005 */
# else                                  /* 32-bits */
#  pragma intrinsic(_BitScanForward)    /* For Visual 2005 */
#  pragma intrinsic(_BitScanReverse)    /* For Visual 2005 */
# endif
# pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#else
# ifdef __GNUC__
#  define FORCE_INLINE static inline __attribute__((always_inline))
# else
#  define FORCE_INLINE static inline
# endif
#endif

#ifdef _MSC_VER    /* Visual Studio */
# define lz4_bswap16(x) _byteswap_ushort(x)
#else
# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
#endif

#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
# define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#else
# define expect(expr,value)    (expr)
#endif

#define likely(expr)     expect((expr) != 0, 1)
#define unlikely(expr)   expect((expr) != 0, 0)


/**************************************
   Memory routines
**************************************/
#include <stdlib.h>   /* malloc, calloc, free */
#define ALLOCATOR(n,s) calloc(n,s)
#define FREEMEM        free
#include <string.h>   /* memset, memcpy */
#define MEM_INIT       memset


/**************************************
   Includes
**************************************/
#include "lz4.h"


/**************************************
   Basic Types
**************************************/
#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
# include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef int32_t  S32;
  typedef uint64_t U64;
#else
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef unsigned int        U32;
  typedef signed int          S32;
  typedef unsigned long long  U64;
#endif

#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
# define _PACKED __attribute__ ((packed))
#else
# define _PACKED
#endif

#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# if defined(__IBMC__) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#  pragma pack(1)
# else
#  pragma pack(push, 1)
# endif
#endif

typedef struct { U16 v; }    _PACKED U16_S;
typedef struct { U32 v; }    _PACKED U32_S;
typedef struct { U64 v; }    _PACKED U64_S;
typedef struct { size_t v; } _PACKED size_t_S;

#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#  pragma pack(0)
# else
#  pragma pack(pop)
# endif
#endif

#define A16(x)   (((U16_S *)(x))->v)
#define A32(x)   (((U32_S *)(x))->v)
#define A64(x)   (((U64_S *)(x))->v)
#define AARCH(x) (((size_t_S *)(x))->v)


/**************************************
   Constants
**************************************/
#define LZ4_HASHLOG   (LZ4_MEMORY_USAGE-2)
#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)

#define MINMATCH 4

#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH+MINMATCH)
static const int LZ4_minLength = (MFLIMIT+1);

#define KB *(1U<<10)
#define MB *(1U<<20)
#define GB *(1U<<30)

#define LZ4_64KLIMIT ((64 KB) + (MFLIMIT-1))
#define SKIPSTRENGTH 6   /* Increasing this value will make the compression run slower on incompressible data */

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
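
/* Annotation for this excerpt (not in upstream lz4.c) : the constants above
 * define the sequence token layout. Each sequence begins with one token byte :
 * the high RUN_BITS bits hold the literal count, the low ML_BITS bits hold
 * matchLength-MINMATCH. A field value of 15 (RUN_MASK / ML_MASK) means the
 * length continues in extra bytes, each adding up to 255, until a byte < 255.
 * Worked example : 20 literals followed by a 19-byte match encode as token
 * 0xFF, extra literal-length byte 5 (20 = 15+5), the 20 literal bytes, the
 * 2-byte little-endian offset, and extra match-length byte 0 (19-4 = 15+0). */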
/**************************************
   Structures and local types
**************************************/
typedef struct {
    U32 hashTable[HASH_SIZE_U32];
    U32 currentOffset;
    U32 initCheck;
    const BYTE* dictionary;
    const BYTE* bufferStart;
    U32 dictSize;
} LZ4_stream_t_internal;

typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;


/**************************************
   Architecture-specific macros
**************************************/
#define STEPSIZE sizeof(size_t)
#define LZ4_COPYSTEP(d,s)   { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
#define LZ4_COPY8(d,s)      { LZ4_COPYSTEP(d,s); if (STEPSIZE<8) LZ4_COPYSTEP(d,s); }

#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
# define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
#else   /* Little Endian */
# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
# define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
#endif


/**************************************
   Macros
**************************************/
#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(!!(c)) }; }   /* use only *after* variable declarations */
#if LZ4_ARCH64 || !defined(__GNUC__)
# define LZ4_WILDCOPY(d,s,e)   { do { LZ4_COPY8(d,s) } while (d<e); }   /* at the end, d>=e; */
#else
# define LZ4_WILDCOPY(d,s,e)   { if (likely(e-d <= 8)) LZ4_COPY8(d,s) else do { LZ4_COPY8(d,s) } while (d<e); }
#endif


/****************************
   Private local functions
****************************/
#if LZ4_ARCH64

int LZ4_NbCommonBytes (register U64 val)
{
# if defined(LZ4_BIG_ENDIAN)
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse64( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clzll(val) >> 3);
#  else
    int r;
    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
    r += (!val);
    return r;
#  endif
# else
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanForward64( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctzll(val) >> 3);
#  else
    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
    return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#  endif
# endif
}

#else

int LZ4_NbCommonBytes (register U32 val)
{
# if defined(LZ4_BIG_ENDIAN)
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clz(val) >> 3);
#  else
    int r;
    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
    r += (!val);
    return r;
#  endif
# else
#  if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r;
    _BitScanForward( &r, val );
    return (int)(r>>3);
#  elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctz(val) >> 3);
#  else
    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#  endif
# endif
}

#endif


/********************************
   Compression functions
********************************/
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }

static int LZ4_hashSequence(U32 sequence, tableType_t tableType)
{
    if (tableType == byU16)
        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}

static int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }

static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    switch (tableType)
    {
    case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; break; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); break; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); break; }
    }
}

static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
    if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
    { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
}

static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}

static unsigned LZ4_count(const BYTE* pIn, const BYTE* pRef, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    while (likely(pIn<pInLimit-(STEPSIZE-1)))
    {
        size_t diff = AARCH(pRef) ^ AARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pRef+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }
    if (sizeof(void*)==8) if ((pIn<(pInLimit-3)) && (A32(pRef) == A32(pIn))) { pIn+=4; pRef+=4; }
    if ((pIn<(pInLimit-1)) && (A16(pRef) == A16(pIn))) { pIn+=2; pRef+=2; }
    if ((pIn<pInLimit) && (*pRef == *pIn)) pIn++;

    return (unsigned)(pIn - pStart);
}


static int LZ4_compress_generic(
                 void* ctx,
                 const char* source,
                 char* dest,
                 int inputSize,
                 int maxOutputSize,

                 limitedOutput_directive outputLimited,
                 tableType_t tableType,
                 dict_directive dict,
                 dictIssue_directive dictIssue)
{
    LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;

    const BYTE* ip = (const BYTE*) source;
    const BYTE* base;
    const BYTE* lowLimit;
    const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
    const BYTE* const dictionary = dictPtr->dictionary;
    const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
    const size_t dictDelta = dictEnd - (const BYTE*)source;
    const BYTE* anchor = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    const int skipStrength = SKIPSTRENGTH;
    U32 forwardH;
    size_t refDelta=0;

    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported input size, too large (or negative) */
    switch(dict)
    {
    case noDict:
    default:
        base = (const BYTE*)source;
        lowLimit = (const BYTE*)source;
        break;
    case withPrefix64k:
        base = (const BYTE*)source - dictPtr->currentOffset;
        lowLimit = (const BYTE*)source - dictPtr->dictSize;
        break;
    case usingExtDict:
        base = (const BYTE*)source - dictPtr->currentOffset;
        lowLimit = (const BYTE*)source;
        break;
    }
    if ((tableType == byU16) && (inputSize>=(int)LZ4_64KLIMIT)) return 0;   /* Size too large (not within 64K limit) */
    if (inputSize<LZ4_minLength) goto _last_literals;                       /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, ctx, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; )
    {
        const BYTE* ref;
        BYTE* token;
        {
            const BYTE* forwardIp = ip;
            unsigned step=1;
            unsigned searchMatchNb = (1U << skipStrength);

            /* Find a match */
            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = searchMatchNb++ >> skipStrength;
                //if (step>8) step=8;   // required for valid forwardIp ; slows down uncompressible data a bit

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
                if (dict==usingExtDict)
                {
                    if (ref<(const BYTE*)source)
                    {
                        refDelta = dictDelta;
                        lowLimit = dictionary;
                    }
                    else
                    {
                        refDelta = 0;
                        lowLimit = (const BYTE*)source;
                    }
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx, tableType, base);

            } while ( ((dictIssue==dictSmall) ? (ref < lowRefLimit) : 0)
                   || ((tableType==byU16) ? 0 : (ref + MAX_DISTANCE < ip))
                   || (A32(ref+refDelta) != A32(ip)) );
        }

        /* Catch up */
        while ((ip>anchor) && (ref+refDelta > lowLimit) && (unlikely(ip[-1]==ref[refDelta-1]))) { ip--; ref--; }

        {
            /* Encode Literal length */
            unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;   /* Check output limit */
            if (litLength>=RUN_MASK)
            {
                int len = (int)litLength-RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            { BYTE* end = op+litLength; LZ4_WILDCOPY(op,anchor,end); op=end; }
        }

_next_match:
        /* Encode Offset */
        LZ4_WRITE_LITTLEENDIAN_16(op, (U16)(ip-ref));

        /* Encode MatchLength */
        {
            unsigned matchLength;

            if ((dict==usingExtDict) && (lowLimit==dictionary))
            {
                const BYTE* limit;
                ref += refDelta;
                limit = ip + (dictEnd-ref);
                if (limit > matchlimit) limit = matchlimit;
                matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, limit);
                ip += MINMATCH + matchLength;
                if (ip==limit)
                {
                    unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit);
                    matchLength += more;
                    ip += more;
                }
            }
            else
            {
                matchLength = LZ4_count(ip+MINMATCH, ref+MINMATCH, matchlimit);
                ip += MINMATCH + matchLength;
            }

            if (matchLength>=ML_MASK)
            {
                if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
                    return 0;   /* Check output limit */
                *token += ML_MASK;
                matchLength -= ML_MASK;
                for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
                if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip > mflimit) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx, tableType, base);

        /* Test next position */
        ref = LZ4_getPosition(ip, ctx, tableType, base);
        if (dict==usingExtDict)
        {
            if (ref<(const BYTE*)source)
            {
                refDelta = dictDelta;
                lowLimit = dictionary;
            }
            else
            {
                refDelta = 0;
                lowLimit = (const BYTE*)source;
            }
        }
        LZ4_putPosition(ip, ctx, tableType, base);
        if ( ((dictIssue==dictSmall) ? (ref>=lowRefLimit) : 1)
          && (ref+MAX_DISTANCE>=ip)
          && (A32(ref+refDelta)==A32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {
        int lastRun = (int)(iend - anchor);
        if ((outputLimited) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize))
            return 0;   /* Check output limit */
        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
        else *op++ = (BYTE)(lastRun<<ML_BITS);
        memcpy(op, anchor, iend - anchor);
        op += iend-anchor;
    }

    /* End */
    return (int) (((char*)op)-dest);
}


int LZ4_compress(const char* source, char* dest, int inputSize)
{
#if (HEAPMODE)
    void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4);   /* Aligned on 4-bytes boundaries */
#else
    U32 ctx[LZ4_STREAMSIZE_U32] = {0};              /* Ensure data is aligned on 4-bytes boundaries */
#endif
    int result;

    if (inputSize < (int)LZ4_64KLIMIT)
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
    else
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);

#if (HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}

int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
#if (HEAPMODE)
    void* ctx = ALLOCATOR(LZ4_STREAMSIZE_U32, 4);   /* Aligned on 4-bytes boundaries */
#else
    U32 ctx[LZ4_STREAMSIZE_U32] = {0};              /* Ensure data is aligned on 4-bytes boundaries */
#endif
    int result;

    if (inputSize < (int)LZ4_64KLIMIT)
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
    else
        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);

#if (HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}


/*****************************************
   Experimental : Streaming functions
*****************************************/

void* LZ4_createStream()
{
    void* lz4s = ALLOCATOR(4, LZ4_STREAMSIZE_U32);
    MEM_INIT(lz4s, 0, LZ4_STREAMSIZE);
    return lz4s;
}

int LZ4_free (void* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return (0);
}


int LZ4_loadDict (void* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));      /* A compilation error here means LZ4_STREAMSIZE is not large enough */
    if (dict->initCheck) MEM_INIT(dict, 0, sizeof(LZ4_stream_t_internal));   /* Uninitialized structure detected */

    if (dictSize < MINMATCH)
    {
        dict->dictionary = NULL;
        dict->dictSize = 0;
        return 1;
    }

    if (p <= dictEnd - 64 KB) p = dictEnd - 64 KB;
    base = p - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += dict->dictSize;

    while (p <= dictEnd-MINMATCH)
    {
        LZ4_putPosition(p, dict, byU32, base);
        p+=3;
    }

    return 1;
}


void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
{
    if ((LZ4_dict->currentOffset > 0x80000000) ||
        ((size_t)LZ4_dict->currentOffset > (size_t)src))   /* address space overflow */
    {
        /* rescale hash table */
        U32 delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        for (i=0; i<HASH_SIZE_U32; i++)
        {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}


FORCE_INLINE int LZ4_compress_continue_generic (void* LZ4_stream, const char* source, char* dest, int inputSize,
                                                int maxOutputSize, limitedOutput_directive limit)
{
    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = (const BYTE*) source;
    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
    LZ4_renormDictT(streamPtr, smallest);

    /* Check overlapping input/dictionary space */
    {
        const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd))
        {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source)
    {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, dictSmall);
        else
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, noDictIssue);
        streamPtr->dictSize += (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }

    /* external dictionary mode */
    {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, dictSmall);
        else
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, noDictIssue);
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }
}


int LZ4_compress_continue (void* LZ4_stream, const char* source, char* dest, int inputSize)
{
    return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, 0, notLimited);
}

int LZ4_compress_limitedOutput_continue (void* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput);
}


// Hidden debug function, to force separate dictionary mode
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
{
    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict;
    int result;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = dictEnd;
    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
    LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest);

    result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue);

    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;

    return result;
}


int LZ4_saveDict (void* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
    const BYTE* previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

    memcpy(safeBuffer, previousDictEnd - dictSize, dictSize);

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return 1;
}
853 853
854 // end of decoding 854
855 return (int) (((char*)op)-dest); 855
856 856/****************************
857 // write overflow error detected 857 Decompression functions
858_output_error: 858****************************/
859 return (int) (-(((char*)ip)-source)); 859/*
 860} 860 * This generic decompression function covers all use cases.
 861 861 * It shall be instantiated several times, using different sets of directives
 862 * Note that it is essential that this generic function is really inlined,
 863 * in order to remove useless branches during compilation optimisation.
864 */
865FORCE_INLINE int LZ4_decompress_generic(
866 const char* source,
867 char* dest,
868 int inputSize,
869 int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
870
871 int endOnInput, /* endOnOutputSize, endOnInputSize */
872 int partialDecoding, /* full, partial */
873 int targetOutputSize, /* only used if partialDecoding==partial */
874 int dict, /* noDict, withPrefix64k, usingExtDict */
875 const char* dictStart, /* only if dict==usingExtDict */
876 int dictSize /* note : = 0 if noDict */
877 )
878{
879 /* Local Variables */
880 const BYTE* restrict ip = (const BYTE*) source;
881 const BYTE* ref;
882 const BYTE* const iend = ip + inputSize;
883
884 BYTE* op = (BYTE*) dest;
885 BYTE* const oend = op + outputSize;
886 BYTE* cpy;
887 BYTE* oexit = op + targetOutputSize;
888 const BYTE* const lowLimit = (const BYTE*)dest - dictSize;
889
890 const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
891//#define OLD
892#ifdef OLD
893 const size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; /* static reduces speed for LZ4_decompress_safe() on GCC64 */
894#else
895 const size_t dec32table[] = {4-0, 4-3, 4-2, 4-3, 4-0, 4-0, 4-0, 4-0}; /* static reduces speed for LZ4_decompress_safe() on GCC64 */
896#endif
897 static const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
898
899 const int checkOffset = (endOnInput) && (dictSize < (int)(64 KB));
900
901
902 /* Special cases */
903 if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */
904 if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
905 if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
906
907
908 /* Main Loop */
909 while (1)
910 {
911 unsigned token;
912 size_t length;
913
914 /* get runlength */
915 token = *ip++;
916 if ((length=(token>>ML_BITS)) == RUN_MASK)
917 {
918 unsigned s;
919 do
920 {
921 s = *ip++;
922 length += s;
923 }
924 while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
925 if ((sizeof(void*)==4) && unlikely(length>LZ4_MAX_INPUT_SIZE)) goto _output_error; /* overflow detection */
926 }
927
928 /* copy literals */
929 cpy = op+length;
930 if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
931 || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
932 {
933 if (partialDecoding)
934 {
935 if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
936 if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
937 }
938 else
939 {
940 if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
941 if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
942 }
943 memcpy(op, ip, length);
944 ip += length;
945 op += length;
946 break; /* Necessarily EOF, due to parsing restrictions */
947 }
948 LZ4_WILDCOPY(op, ip, cpy); ip -= (op-cpy); op = cpy;
949
950 /* get offset */
951 LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
952 if ((checkOffset) && (unlikely(ref < lowLimit))) goto _output_error; /* Error : offset outside destination buffer */
953
954 /* get matchlength */
955 if ((length=(token&ML_MASK)) == ML_MASK)
956 {
957 unsigned s;
958 do
959 {
960 if (endOnInput && (ip > iend-LASTLITERALS)) goto _output_error;
961 s = *ip++;
962 length += s;
963 } while (s==255);
964 if ((sizeof(void*)==4) && unlikely(length>LZ4_MAX_INPUT_SIZE)) goto _output_error; /* overflow detection */
965 }
966
967 /* check external dictionary */
968 if ((dict==usingExtDict) && (ref < (BYTE* const)dest))
969 {
970 if (unlikely(op+length+MINMATCH > oend-LASTLITERALS)) goto _output_error;
971
972 if (length+MINMATCH <= (size_t)(dest-(char*)ref))
973 {
974 ref = dictEnd - (dest-(char*)ref);
975 memcpy(op, ref, length+MINMATCH);
976 op += length+MINMATCH;
977 }
978 else
979 {
980 size_t copySize = (size_t)(dest-(char*)ref);
981 memcpy(op, dictEnd - copySize, copySize);
982 op += copySize;
983 copySize = length+MINMATCH - copySize;
984 if (copySize > (size_t)((char*)op-dest)) /* overlap */
985 {
986 BYTE* const cpy = op + copySize;
987 const BYTE* ref = (BYTE*)dest;
988 while (op < cpy) *op++ = *ref++;
989 }
990 else
991 {
992 memcpy(op, dest, copySize);
993 op += copySize;
994 }
995 }
996 continue;
997 }
998
999 /* copy repeated sequence */
1000 if (unlikely((op-ref)<(int)STEPSIZE))
1001 {
1002 const size_t dec64 = dec64table[(sizeof(void*)==4) ? 0 : op-ref];
1003 op[0] = ref[0];
1004 op[1] = ref[1];
1005 op[2] = ref[2];
1006 op[3] = ref[3];
1007#ifdef OLD
1008 op += 4, ref += 4; ref -= dec32table[op-ref];
1009 A32(op) = A32(ref);
1010 op += STEPSIZE-4; ref -= dec64;
1011#else
1012 ref += dec32table[op-ref];
1013 A32(op+4) = A32(ref);
1014 op += STEPSIZE; ref -= dec64;
1015#endif
1016 } else { LZ4_COPYSTEP(op,ref); }
1017 cpy = op + length - (STEPSIZE-4);
1018
1019 if (unlikely(cpy>oend-COPYLENGTH-(STEPSIZE-4)))
1020 {
1021 if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last 5 bytes must be literals */
1022 if (op<oend-COPYLENGTH) LZ4_WILDCOPY(op, ref, (oend-COPYLENGTH));
1023 while(op<cpy) *op++=*ref++;
1024 op=cpy;
1025 continue;
1026 }
1027 LZ4_WILDCOPY(op, ref, cpy);
1028 op=cpy; /* correction */
1029 }
1030
1031 /* end of decoding */
1032 if (endOnInput)
1033 return (int) (((char*)op)-dest); /* Nb of output bytes decoded */
1034 else
1035 return (int) (((char*)ip)-source); /* Nb of input bytes read */
1036
1037 /* Overflow error detected */
1038_output_error:
1039 return (int) (-(((char*)ip)-source))-1;
1040}
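
The dec32table/dec64table branch in the loop above exists because a match offset smaller than STEPSIZE overlaps the bytes currently being written: a wide 4/8-byte copy would read data that has not been produced yet. A minimal sketch of the underlying idea (overlap_safe_copy is a hypothetical helper, not part of this file):

    /* With offset 1 the match replicates a single byte: each output byte is a
       copy of the byte just written. A byte-at-a-time loop is always correct;
       the tables above merely let the real code keep using wide copies. */
    static void overlap_safe_copy(unsigned char* op, size_t offset, size_t length)
    {
        const unsigned char* match = op - offset;
        while (length--) *op++ = *match++;   /* valid for any offset >= 1 */
    }
    /* Example : op points just after "A", offset = 1, length = 4 -> appends "AAAA" */
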
1041
1042
1043int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxOutputSize)
1044{
1045 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, noDict, NULL, 0);
1046}
1047
1048int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxOutputSize)
1049{
1050 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, partial, targetOutputSize, noDict, NULL, 0);
1051}
1052
1053int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
1054{
1055 return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, NULL, 0);
1056}
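
As a caller-side reference for the safe variant instantiated above, a minimal sketch (decode_block is illustrative; src/srcSize stand for a previously compressed block):

    #include "lz4.h"

    static int decode_block(const char* src, int srcSize, char* dst, int dstCapacity)
    {
        int n = LZ4_decompress_safe(src, dst, srcSize, dstCapacity);
        if (n < 0) return -1;   /* malformed input or dst too small; do not trust dst */
        return n;               /* number of decoded bytes, necessarily <= dstCapacity */
    }
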
1057
1058/* streaming decompression functions */
1059
1060//#define LZ4_STREAMDECODESIZE_U32 4
1061//#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U32 * sizeof(unsigned int))
1062//typedef struct { unsigned int table[LZ4_STREAMDECODESIZE_U32]; } LZ4_streamDecode_t;
1063typedef struct
1064{
1065 const char* dictionary;
1066 int dictSize;
1067} LZ4_streamDecode_t_internal;
1068
1069/*
1070 * If you prefer dynamic allocation methods,
1071 * LZ4_createStreamDecode()
1072 * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
1073 */
1074void* LZ4_createStreamDecode()
1075{
1076 void* lz4s = ALLOCATOR(sizeof(U32), LZ4_STREAMDECODESIZE_U32);
1077 MEM_INIT(lz4s, 0, LZ4_STREAMDECODESIZE);
1078 return lz4s;
1079}
1080
1081/*
1082 * LZ4_setDictDecode
1083 * Use this function to instruct where to find the dictionary
1084 * This function is not necessary if previous data is still available where it was decoded.
1085 * Loading a size of 0 is allowed (same effect as no dictionary).
1086 * Return : 1 if OK, 0 if error
1087 */
1088int LZ4_setDictDecode (void* LZ4_streamDecode, const char* dictionary, int dictSize)
1089{
1090 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
1091 lz4sd->dictionary = dictionary;
1092 lz4sd->dictSize = dictSize;
1093 return 1;
1094}
1095
1096/*
1097*_continue() :
1098 These decoding functions allow decompression of multiple blocks in "streaming" mode.
1099 Previously decoded blocks must still be available at the memory position where they were decoded.
1100 If it's not possible, save the relevant part of decoded data into a safe buffer,
1101 and indicate where it stands using LZ4_setDictDecode()
1102*/
1103int LZ4_decompress_safe_continue (void* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
1104{
1105 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
1106 int result;
1107
1108 result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, lz4sd->dictionary, lz4sd->dictSize);
1109 if (result <= 0) return result;
1110 if (lz4sd->dictionary + lz4sd->dictSize == dest)
1111 {
1112 lz4sd->dictSize += result;
1113 }
1114 else
1115 {
1116 lz4sd->dictionary = dest;
1117 lz4sd->dictSize = result;
1118 }
1119
1120 return result;
1121}
1122
1123int LZ4_decompress_fast_continue (void* LZ4_streamDecode, const char* source, char* dest, int originalSize)
1124{
1125 LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
1126 int result;
1127
1128 result = LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, lz4sd->dictionary, lz4sd->dictSize);
1129 if (result <= 0) return result;
1130 if (lz4sd->dictionary + lz4sd->dictSize == dest)
1131 {
1132 lz4sd->dictSize += result;
1133 }
1134 else
1135 {
1136 lz4sd->dictionary = dest;
1137 lz4sd->dictSize = result;
1138 }
1139
1140 return result;
1141}
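
To put the two _continue() functions in context, a hedged sketch of multi-block streaming decode into one contiguous region (block framing, i.e. how each compressedSize travels, is left to the caller; LZ4_free() is the deallocator declared in lz4.h):

    #include "lz4.h"

    static int decode_stream(const char* const blocks[], const int blockSizes[],
                             int nbBlocks, char* dst, int dstCapacity)
    {
        void* sd = LZ4_createStreamDecode();
        char* op = dst;
        int i;
        if (sd == NULL) return -1;
        for (i = 0; i < nbBlocks; i++)
        {
            /* previously decoded data stays in place just before 'op',
               so no LZ4_setDictDecode() call is needed here */
            int n = LZ4_decompress_safe_continue(sd, blocks[i], op, blockSizes[i],
                                                 dstCapacity - (int)(op - dst));
            if (n < 0) { LZ4_free(sd); return -1; }
            op += n;
        }
        LZ4_free(sd);
        return (int)(op - dst);   /* total decoded size */
    }
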
1142
1143
1144/*
1145Advanced decoding functions :
1146*_usingDict() :
1147 These decoding functions work the same as "_continue" ones,
1148 the dictionary must be explicitly provided within parameters
1149*/
1150
1151int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1152{
1153 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, dictStart, dictSize);
1154}
1155
1156int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
1157{
1158 return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, usingExtDict, dictStart, dictSize);
1159}
1160
1161
1162/***************************************************
1163 Obsolete Functions
1164***************************************************/
1165/*
1166These function names are deprecated and should no longer be used.
1167They are only provided here for compatibility with older user programs.
1168- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
1169- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
1170*/
1171int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
1172int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
1173
1174
1175/* Obsolete Streaming functions */
1176
1177int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }
1178
1179void LZ4_init(LZ4_stream_t_internal* lz4ds, const BYTE* base)
1180{
1181 MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE);
1182 lz4ds->bufferStart = base;
1183}
1184
1185int LZ4_resetStreamState(void* state, const char* inputBuffer)
1186{
1187 if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
1188 LZ4_init((LZ4_stream_t_internal*)state, (const BYTE*)inputBuffer);
1189 return 0;
1190}
1191
1192void* LZ4_create (const char* inputBuffer)
1193{
1194 void* lz4ds = ALLOCATOR(4, LZ4_STREAMSIZE_U32);
1195 LZ4_init ((LZ4_stream_t_internal*)lz4ds, (const BYTE*)inputBuffer);
1196 return lz4ds;
1197}
1198
1199char* LZ4_slideInputBuffer (void* LZ4_Data)
1200{
1201 LZ4_stream_t_internal* lz4ds = (LZ4_stream_t_internal*)LZ4_Data;
1202
1203 LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)lz4ds->bufferStart, 64 KB);
1204
1205 return (char*)(lz4ds->bufferStart + 64 KB);
1206}
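
Read together, LZ4_create(), LZ4_compress_continue() and LZ4_slideInputBuffer() support the old contiguous-input-buffer pattern: chunks are appended to one buffer, and when it fills up, LZ4_slideInputBuffer() preserves the last 64 KB of history and returns where the next chunk should go. A hedged sketch (buffer size, chunk limit and output handling are illustrative placeholders):

    #include <stdlib.h>
    #include <string.h>
    #include "lz4.h"

    #define INBUF_SIZE ((64 + 256) * 1024)   /* 64 KB history + room for chunks */
    #define MAX_CHUNK  4096

    static void stream_old_api(const char* const chunks[], const int sizes[], int nbChunks)
    {
        char* inputBuffer = (char*)malloc(INBUF_SIZE);
        void* state = LZ4_create(inputBuffer);
        char* inPtr = inputBuffer;
        char out[LZ4_COMPRESSBOUND(MAX_CHUNK)];
        int i;

        if (inputBuffer == NULL || state == NULL) { free(state); free(inputBuffer); return; }
        for (i = 0; i < nbChunks; i++)       /* assumes sizes[i] <= MAX_CHUNK */
        {
            memcpy(inPtr, chunks[i], sizes[i]);    /* input must live inside inputBuffer */
            LZ4_compress_continue(state, inPtr, out, sizes[i]);   /* then emit 'out' */
            inPtr += sizes[i];
            if (inPtr > inputBuffer + INBUF_SIZE - MAX_CHUNK)
                inPtr = LZ4_slideInputBuffer(state);  /* keeps last 64 KB as dictionary */
        }
        free(state);          /* LZ4_create() allocates with calloc in this version */
        free(inputBuffer);
    }
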
1207
 1208/* Obsolete compression functions using User-allocated state */
1209
1210int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
1211
1212int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize)
1213{
1214 if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */
1215 MEM_INIT(state, 0, LZ4_STREAMSIZE);
1216
1217 if (inputSize < (int)LZ4_64KLIMIT)
1218 return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue);
1219 else
1220 return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);
1221}
1222
1223int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
1224{
1225 if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */
1226 MEM_INIT(state, 0, LZ4_STREAMSIZE);
1227
1228 if (inputSize < (int)LZ4_64KLIMIT)
1229 return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue);
1230 else
1231 return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue);
1232}
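
A hedged sketch of the user-allocated-state path above (heap allocation shown; any 4-byte-aligned region of LZ4_sizeofState() bytes would do):

    #include <stdlib.h>
    #include "lz4.h"

    static int compress_with_own_state(const char* src, char* dst, int srcSize)
    {
        void* state = malloc(LZ4_sizeofState());  /* malloc alignment satisfies the check */
        int csize = 0;
        if (state != NULL)
        {
            /* dst is assumed to hold LZ4_compressBound(srcSize) bytes */
            csize = LZ4_compress_withState(state, src, dst, srcSize);  /* 0 = failure */
            free(state);
        }
        return csize;
    }
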
1233
1234/* Obsolete streaming decompression functions */
1235
1236int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
1237{
1238 return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, NULL, 64 KB);
1239}
1240
1241int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
1242{
1243 return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, NULL, 64 KB);
1244}
diff --git a/src/static_libs/lz4/lz4.h b/src/static_libs/lz4/lz4.h
index 3680121c0a..1064fa115d 100644
--- a/src/static_libs/lz4/lz4.h
+++ b/src/static_libs/lz4/lz4.h
@@ -1,128 +1,306 @@
1/* 1/*
2 LZ4 - Fast LZ compression algorithm 2 LZ4 - Fast LZ compression algorithm
3 Header File 3 Header File
4 Copyright (C) 2011-2012, Yann Collet. 4 Copyright (C) 2011-2014, Yann Collet.
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 6
7 Redistribution and use in source and binary forms, with or without 7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are 8 modification, are permitted provided that the following conditions are
9 met: 9 met:
10 10
11 * Redistributions of source code must retain the above copyright 11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer. 12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above 13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer 14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the 15 in the documentation and/or other materials provided with the
16 distribution. 16 distribution.
17 17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 29
30 You can contact the author at : 30 You can contact the author at :
31 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html 31 - LZ4 source repository : http://code.google.com/p/lz4/
32 - LZ4 source repository : http://code.google.com/p/lz4/ 32 - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
33*/ 33*/
34#pragma once 34#pragma once
35 35
36#if defined (__cplusplus) 36#if defined (__cplusplus)
37extern "C" { 37extern "C" {
38#endif 38#endif
39 39
40 40
41//************************************** 41/**************************************
42// Compiler Options 42 Version
43//************************************** 43**************************************/
44#ifdef _MSC_VER // Visual Studio 44#define LZ4_VERSION_MAJOR 1 /* for major interface/format changes */
45# define inline __inline // Visual is not C99, but supports some kind of inline 45#define LZ4_VERSION_MINOR 2 /* for minor interface/format changes */
46#endif 46#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */
47 47
48 48
49//**************************** 49/**************************************
50// Simple Functions 50 Tuning parameter
51//**************************** 51**************************************/
52 52/*
53int LZ4_compress (const char* source, char* dest, int isize); 53 * LZ4_MEMORY_USAGE :
54int LZ4_uncompress (const char* source, char* dest, int osize); 54 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
55 55 * Increasing memory usage improves compression ratio
56/* 56 * Reduced memory usage can improve speed, due to cache effect
57LZ4_compress() : 57 * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
58 Compresses 'isize' bytes from 'source' into 'dest'. 58 */
59 Destination buffer must be already allocated, 59#define LZ4_MEMORY_USAGE 14
 60 and must be sized to handle worst case situations (input data not compressible) 60
61 Worst case size evaluation is provided by function LZ4_compressBound() 61
62 62/**************************************
63 isize : is the input size. Max supported value is ~1.9GB 63 Simple Functions
64 return : the number of bytes written in buffer dest 64**************************************/
65 65
66 66int LZ4_compress (const char* source, char* dest, int inputSize);
67LZ4_uncompress() : 67int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxOutputSize);
68 osize : is the output size, therefore the original size 68
69 return : the number of bytes read in the source buffer 69/*
70 If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction 70LZ4_compress() :
71 This function never writes outside of provided buffers, and never modifies input buffer. 71 Compresses 'inputSize' bytes from 'source' into 'dest'.
72 note : destination buffer must be already allocated. 72 Destination buffer must be already allocated,
 73 its size must be a minimum of 'osize' bytes. 73 and must be sized to handle worst case situations (input data not compressible)
74*/ 74 Worst case size evaluation is provided by function LZ4_compressBound()
 75 75 inputSize : Max supported value is LZ4_MAX_INPUT_SIZE
76 76 return : the number of bytes written in buffer dest
77//**************************** 77 or 0 if the compression fails
78// Advanced Functions 78
79//**************************** 79LZ4_decompress_safe() :
80 80 compressedSize : is obviously the source size
81static inline int LZ4_compressBound(int isize) { return ((isize) + ((isize)/255) + 16); } 81 maxOutputSize : is the size of the destination buffer, which must be already allocated.
82#define LZ4_COMPRESSBOUND( isize) ((isize) + ((isize)/255) + 16) 82 return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
83 83 If the destination buffer is not large enough, decoding will stop and output an error code (<0).
84/* 84 If the source stream is detected malformed, the function will stop decoding and return a negative result.
85LZ4_compressBound() : 85 This function is protected against buffer overflow exploits :
86 Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible) 86 it never writes outside of output buffer, and never reads outside of input buffer.
87 primarily useful for memory allocation of output buffer. 87 Therefore, it is protected against malicious data packets.
88 inline function is recommended for the general case, 88*/
89 but macro is also provided when results need to be evaluated at compile time (such as table size allocation). 89
90 90
91 isize : is the input size. Max supported value is ~1.9GB 91/*
92 return : maximum output size in a "worst case" scenario 92Note :
93 note : this function is limited by "int" range (2^31-1) 93 Should you prefer to explicitly allocate compression-table memory using your own allocation method,
94*/ 94 use the streaming functions provided below, simply reset the memory area between each call to LZ4_compress_continue()
95 95*/
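
A minimal round-trip sketch of the two simple functions above plus the worst-case bound defined just below; as a worked instance of that formula, LZ4_compressBound(1000) = 1000 + 1000/255 + 16 = 1019 bytes:

    #include <stdlib.h>
    #include <string.h>
    #include "lz4.h"

    /* returns 1 if data survives a compress/decompress cycle (sizes illustrative) */
    static int roundtrip(const char* data, int dataSize)
    {
        int bound = LZ4_compressBound(dataSize);   /* e.g. 1019 for dataSize = 1000 */
        char* cbuf = bound ? (char*)malloc(bound) : NULL;
        char* dbuf = (char*)malloc(dataSize);
        int ok = 0;
        if (cbuf && dbuf)
        {
            int csize = LZ4_compress(data, cbuf, dataSize);   /* 0 means failure */
            int dsize = (csize > 0) ? LZ4_decompress_safe(cbuf, dbuf, csize, dataSize) : -1;
            ok = (dsize == dataSize) && (memcmp(data, dbuf, dataSize) == 0);
        }
        free(cbuf);
        free(dbuf);
        return ok;
    }
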
96 96
97int LZ4_compress_limitedOutput (const char* source, char* dest, int isize, int maxOutputSize); 97
98 98/**************************************
99/* 99 Advanced Functions
100LZ4_compress_limitedOutput() : 100**************************************/
101 Compress 'isize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. 101#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
102 If it cannot achieve it, compression will stop, and result of the function will be zero. 102#define LZ4_COMPRESSBOUND(isize) ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
103 This function never writes outside of provided output buffer. 103
104 104/*
105 isize : is the input size. Max supported value is ~1.9GB 105LZ4_compressBound() :
106 maxOutputSize : is the size of the destination buffer (which must be already allocated) 106 Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible)
107 return : the number of bytes written in buffer 'dest' 107 primarily useful for memory allocation of output buffer.
108 or 0 if the compression fails 108 macro is also provided when result needs to be evaluated at compilation (such as stack memory allocation).
109*/ 109
110 110 isize : is the input size. Max supported value is LZ4_MAX_INPUT_SIZE
111 111 return : maximum output size in a "worst case" scenario
112int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); 112 or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
113 113*/
114/* 114int LZ4_compressBound(int isize);
115LZ4_uncompress_unknownOutputSize() : 115
116 isize : is the input size, therefore the compressed size 116
117 maxOutputSize : is the size of the destination buffer (which must be already allocated) 117/*
118 return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize) 118LZ4_compress_limitedOutput() :
119 If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction 119 Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
120 This function never writes beyond dest + maxOutputSize, and is therefore protected against malicious data packets 120 If it cannot achieve it, compression will stop, and result of the function will be zero.
121 note : Destination buffer must be already allocated. 121 This function never writes outside of provided output buffer.
122 This version is slightly slower than LZ4_uncompress() 122
 123*/ 123 inputSize : Max supported value is LZ4_MAX_INPUT_SIZE
124 124 maxOutputSize : is the size of the destination buffer (which must be already allocated)
125 125 return : the number of bytes written in buffer 'dest'
126#if defined (__cplusplus) 126 or 0 if the compression fails
127} 127*/
128#endif 128int LZ4_compress_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
129
130
131/*
132LZ4_decompress_fast() :
133 originalSize : is the original and therefore uncompressed size
134 return : the number of bytes read from the source buffer (in other words, the compressed size)
135 If the source stream is malformed, the function will stop decoding and return a negative result.
136 Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes.
137 note : This function is a bit faster than LZ4_decompress_safe()
 138 It provides fast decompression and fully respects memory boundaries for properly formed compressed data.
 139 It does not provide full protection against intentionally modified data streams.
140 Use this function in a trusted environment (data to decode comes from a trusted source).
141*/
142int LZ4_decompress_fast (const char* source, char* dest, int originalSize);
143
144
145/*
146LZ4_decompress_safe_partial() :
 147 This function decompresses a compressed block of size 'compressedSize' at position 'source'
148 into output buffer 'dest' of size 'maxOutputSize'.
149 The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached,
150 reducing decompression time.
151 return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
 152 Note : this number can be < 'targetOutputSize' if the compressed block to decode is smaller.
153 Always control how many bytes were decoded.
154 If the source stream is detected malformed, the function will stop decoding and return a negative result.
155 This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets
156*/
157int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxOutputSize);
158
159
160/***********************************************
161 Experimental Streaming Compression Functions
162***********************************************/
163
164#define LZ4_STREAMSIZE_U32 ((1 << (LZ4_MEMORY_USAGE-2)) + 8)
165#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U32 * sizeof(unsigned int))
166/*
167 * LZ4_stream_t
168 * information structure to track an LZ4 stream.
169 * important : set this structure content to zero before first use !
170 */
171typedef struct { unsigned int table[LZ4_STREAMSIZE_U32]; } LZ4_stream_t;
172
173/*
174 * If you prefer dynamic allocation methods,
175 * LZ4_createStream
176 * provides a pointer (void*) towards an initialized LZ4_stream_t structure.
177 * LZ4_free just frees it.
178 */
179void* LZ4_createStream();
180int LZ4_free (void* LZ4_stream);
181
182
183/*
184 * LZ4_loadDict
185 * Use this function to load a static dictionary into LZ4_stream.
186 * Any previous data will be forgotten, only 'dictionary' will remain in memory.
187 * Loading a size of 0 is allowed (same effect as init).
188 * Return : 1 if OK, 0 if error
189 */
190int LZ4_loadDict (void* LZ4_stream, const char* dictionary, int dictSize);
191
192/*
193 * LZ4_compress_continue
194 * Compress data block 'source', using blocks compressed before as dictionary to improve compression ratio
195 * Previous data blocks are assumed to still be present at their previous location.
196 */
197int LZ4_compress_continue (void* LZ4_stream, const char* source, char* dest, int inputSize);
198
199/*
200 * LZ4_compress_limitedOutput_continue
201 * Same as before, but also specify a maximum target compressed size (maxOutputSize)
202 * If objective cannot be met, compression exits, and returns a zero.
203 */
204int LZ4_compress_limitedOutput_continue (void* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize);
205
206/*
207 * LZ4_saveDict
208 * If previously compressed data block is not guaranteed to remain at its previous memory location
209 * save it into a safe place (char* safeBuffer)
210 * Note : you don't need to call LZ4_loadDict() afterwards,
211 * dictionary is immediately usable, you can therefore call again LZ4_compress_continue()
212 * Return : 1 if OK, 0 if error
213 * Note : any dictSize > 64 KB will be interpreted as 64KB.
214 */
215int LZ4_saveDict (void* LZ4_stream, char* safeBuffer, int dictSize);
216
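
Taken together, a hedged sketch of the streaming pattern these declarations describe, for two consecutive chunks (capacities and error handling are illustrative):

    #include <string.h>
    #include "lz4.h"

    static void compress_two_chunks(const char* chunkA, int sizeA, char* outA,
                                    const char* chunkB, int sizeB, char* outB,
                                    int outCapacity)
    {
        LZ4_stream_t stream;
        char dictBuffer[64 * 1024];

        memset(&stream, 0, sizeof(stream));   /* required before first use, per above */

        if (LZ4_compress_limitedOutput_continue(&stream, chunkA, outA, sizeA, outCapacity) == 0)
            return;   /* outCapacity too small */

        /* if chunkA's memory is about to be reused, preserve the history first */
        LZ4_saveDict(&stream, dictBuffer, (int)sizeof(dictBuffer));

        /* chunkB is now compressed against the saved history */
        LZ4_compress_limitedOutput_continue(&stream, chunkB, outB, sizeB, outCapacity);
    }
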
217
218/************************************************
219 Experimental Streaming Decompression Functions
220************************************************/
221
222#define LZ4_STREAMDECODESIZE_U32 4
223#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U32 * sizeof(unsigned int))
224/*
225 * LZ4_streamDecode_t
226 * information structure to track an LZ4 stream.
227 * important : set this structure content to zero before first use !
228 */
229typedef struct { unsigned int table[LZ4_STREAMDECODESIZE_U32]; } LZ4_streamDecode_t;
230
231/*
232 * If you prefer dynamic allocation methods,
233 * LZ4_createStreamDecode()
234 * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure.
235 * LZ4_free just frees it.
236 */
237void* LZ4_createStreamDecode();
238int LZ4_free (void* LZ4_stream); /* yes, it's the same one as for compression */
239
240/*
241*_continue() :
242 These decoding functions allow decompression of multiple blocks in "streaming" mode.
243 Previously decoded blocks must still be available at the memory position where they were decoded.
244 If it's not possible, save the relevant part of decoded data into a safe buffer,
245 and indicate where it stands using LZ4_setDictDecode()
246*/
247int LZ4_decompress_safe_continue (void* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize);
248int LZ4_decompress_fast_continue (void* LZ4_streamDecode, const char* source, char* dest, int originalSize);
249
250/*
251 * LZ4_setDictDecode
252 * Use this function to instruct where to find the dictionary.
253 * This function can be used to specify a static dictionary,
254 * or to instruct where to find some previously decoded data saved into a different memory space.
255 * Setting a size of 0 is allowed (same effect as no dictionary).
256 * Return : 1 if OK, 0 if error
257 */
258int LZ4_setDictDecode (void* LZ4_streamDecode, const char* dictionary, int dictSize);
259
260
261/*
262Advanced decoding functions :
263*_usingDict() :
 264 These decoding functions work the same as
 265 a combination of LZ4_setDictDecode() followed by LZ4_decompress_x_continue(),
 266 merged into a single function call.
267 It doesn't use nor update an LZ4_streamDecode_t structure.
268*/
269int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize);
270int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize);
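
A minimal sketch of the one-shot dictionary variant (names and sizes are illustrative):

    #include "lz4.h"

    /* decode one block that was compressed against a known dictionary;
       equivalent to LZ4_setDictDecode() + LZ4_decompress_safe_continue(),
       but without carrying an LZ4_streamDecode_t around */
    static int decode_with_dict(const char* src, int srcSize, char* dst, int dstCapacity,
                                const char* dict, int dictSize)
    {
        return LZ4_decompress_safe_usingDict(src, dst, srcSize, dstCapacity, dict, dictSize);
    }
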
271
272
273
274
275/**************************************
276 Obsolete Functions
277**************************************/
278/*
279Obsolete decompression functions
280These function names are deprecated and should no longer be used.
281They are only provided here for compatibility with older user programs.
282- LZ4_uncompress is the same as LZ4_decompress_fast
283- LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe
284*/
285int LZ4_uncompress (const char* source, char* dest, int outputSize);
286int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
287
288/* Obsolete functions for externally allocated state; use streaming interface instead */
289int LZ4_sizeofState(void);
290int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
291int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
292
293/* Obsolete streaming functions; use new streaming interface whenever possible */
294void* LZ4_create (const char* inputBuffer);
295int LZ4_sizeofStreamState(void);
296int LZ4_resetStreamState(void* state, const char* inputBuffer);
297char* LZ4_slideInputBuffer (void* state);
298
299/* Obsolete streaming decoding functions */
300int LZ4_decompress_safe_withPrefix64k (const char* source, char* dest, int compressedSize, int maxOutputSize);
301int LZ4_decompress_fast_withPrefix64k (const char* source, char* dest, int originalSize);
302
303
304#if defined (__cplusplus)
305}
306#endif
diff --git a/src/static_libs/lz4/lz4hc.c b/src/static_libs/lz4/lz4hc.c
index 7cd8bb4e3f..6086749025 100644
--- a/src/static_libs/lz4/lz4hc.c
+++ b/src/static_libs/lz4/lz4hc.c
@@ -1,671 +1,892 @@
1/* 1/*
2 LZ4 HC - High Compression Mode of LZ4 2 LZ4 HC - High Compression Mode of LZ4
3 Copyright (C) 2011-2012, Yann Collet. 3 Copyright (C) 2011-2014, Yann Collet.
4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5 5
6 Redistribution and use in source and binary forms, with or without 6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are 7 modification, are permitted provided that the following conditions are
8 met: 8 met:
9 9
10 * Redistributions of source code must retain the above copyright 10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer. 11 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above 12 * Redistributions in binary form must reproduce the above
13 copyright notice, this list of conditions and the following disclaimer 13 copyright notice, this list of conditions and the following disclaimer
14 in the documentation and/or other materials provided with the 14 in the documentation and/or other materials provided with the
15 distribution. 15 distribution.
16 16
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 You can contact the author at : 29 You can contact the author at :
30 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html 30 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 - LZ4 source repository : http://code.google.com/p/lz4/ 31 - LZ4 source repository : http://code.google.com/p/lz4/
32*/ 32*/
33 33
34 34
35//************************************** 35
36// CPU Feature Detection 36/**************************************
37//************************************** 37 Tuning Parameter
38// 32 or 64 bits ? 38**************************************/
39#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode 39#define LZ4HC_DEFAULT_COMPRESSIONLEVEL 8
40#define LZ4_ARCH64 1 40
41#else 41
42#define LZ4_ARCH64 0 42/**************************************
43#endif 43 Memory routines
44 44**************************************/
45// Little Endian or Big Endian ? 45#include <stdlib.h> /* calloc, free */
46#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) ) 46#define ALLOCATOR(s) calloc(1,s)
47#define LZ4_BIG_ENDIAN 1 47#define FREEMEM free
48#else 48#include <string.h> /* memset, memcpy */
49// Little Endian assumed. PDP Endian and other very rare endian format are unsupported. 49#define MEM_INIT memset
50#endif 50
51 51
52// Unaligned memory access is automatically enabled for "common" CPU, such as x86. 52/**************************************
 53// For other CPUs, the compiler will be more cautious, and insert extra code to ensure aligned access is respected 54**************************************/
54// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance 54**************************************/
55#if defined(__ARM_FEATURE_UNALIGNED) 55/* 32 or 64 bits ? */
56#define LZ4_FORCE_UNALIGNED_ACCESS 1 56#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
57#endif 57 || defined(__powerpc64__) || defined(__powerpc64le__) \
58 58 || defined(__ppc64__) || defined(__ppc64le__) \
59 59 || defined(__PPC64__) || defined(__PPC64LE__) \
60//************************************** 60 || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) ) /* Detects 64 bits mode */
61// Compiler Options 61# define LZ4_ARCH64 1
62//************************************** 62#else
63#if __STDC_VERSION__ >= 199901L // C99 63# define LZ4_ARCH64 0
64 /* "restrict" is a known keyword */ 64#endif
65#else 65
66#define restrict // Disable restrict 66/*
67#endif 67 * Little Endian or Big Endian ?
68 68 * Overwrite the #define below if you know your architecture endianess
69#ifdef _MSC_VER 69 */
70#define inline __forceinline // Visual is not C99, but supports some kind of inline 70#include <stdlib.h> /* Apparently required to detect endianess */
71#include <intrin.h> // For Visual 2005 71#if defined (__GLIBC__)
72# if LZ4_ARCH64 // 64-bit 72# include <endian.h>
73# pragma intrinsic(_BitScanForward64) // For Visual 2005 73# if (__BYTE_ORDER == __BIG_ENDIAN)
74# pragma intrinsic(_BitScanReverse64) // For Visual 2005 74# define LZ4_BIG_ENDIAN 1
75# else 75# endif
76# pragma intrinsic(_BitScanForward) // For Visual 2005 76#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
77# pragma intrinsic(_BitScanReverse) // For Visual 2005 77# define LZ4_BIG_ENDIAN 1
78# endif 78#elif defined(__sparc) || defined(__sparc__) \
79#endif 79 || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
80 80 || defined(__hpux) || defined(__hppa) \
81#ifdef _MSC_VER // Visual Studio 81 || defined(_MIPSEB) || defined(__s390__)
82#define lz4_bswap16(x) _byteswap_ushort(x) 82# define LZ4_BIG_ENDIAN 1
83#else 83#else
84#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))) 84/* Little Endian assumed. PDP Endian and other very rare endian format are unsupported. */
85#endif 85#endif
86 86
87 87/*
88//************************************** 88 * Unaligned memory access is automatically enabled for "common" CPU, such as x86.
89// Includes 89 * For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
90//************************************** 90 * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
91#include <stdlib.h> // calloc, free 91 */
92#include <string.h> // memset, memcpy 92#if defined(__ARM_FEATURE_UNALIGNED)
93#include "lz4hc.h" 93# define LZ4_FORCE_UNALIGNED_ACCESS 1
94 94#endif
95#define ALLOCATOR(s) calloc(1,s) 95
96#define FREEMEM free 96/* Define this parameter if your target system or compiler does not support hardware bit count */
97#define MEM_INIT memset 97#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
98 98# define LZ4_FORCE_SW_BITCOUNT
99 99#endif
100//************************************** 100
101// Basic Types 101
102//************************************** 102/**************************************
103#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively 103 Compiler Options
104#define BYTE unsigned __int8 104**************************************/
105#define U16 unsigned __int16 105#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
106#define U32 unsigned __int32 106/* "restrict" is a known keyword */
107#define S32 __int32 107#else
108#define U64 unsigned __int64 108# define restrict /* Disable restrict */
109#else 109#endif
110#include <stdint.h> 110
111#define BYTE uint8_t 111#ifdef _MSC_VER /* Visual Studio */
112#define U16 uint16_t 112# define FORCE_INLINE static __forceinline
113#define U32 uint32_t 113# include <intrin.h> /* For Visual 2005 */
114#define S32 int32_t 114# if LZ4_ARCH64 /* 64-bits */
115#define U64 uint64_t 115# pragma intrinsic(_BitScanForward64) /* For Visual 2005 */
116#endif 116# pragma intrinsic(_BitScanReverse64) /* For Visual 2005 */
117 117# else /* 32-bits */
118#ifndef LZ4_FORCE_UNALIGNED_ACCESS 118# pragma intrinsic(_BitScanForward) /* For Visual 2005 */
119#pragma pack(push, 1) 119# pragma intrinsic(_BitScanReverse) /* For Visual 2005 */
120#endif 120# endif
121 121# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
122typedef struct _U16_S { U16 v; } U16_S; 122# pragma warning(disable : 4701) /* disable: C4701: potentially uninitialized local variable used */
123typedef struct _U32_S { U32 v; } U32_S; 123#else
124typedef struct _U64_S { U64 v; } U64_S; 124# ifdef __GNUC__
125 125# define FORCE_INLINE static inline __attribute__((always_inline))
126#ifndef LZ4_FORCE_UNALIGNED_ACCESS 126# else
127#pragma pack(pop) 127# define FORCE_INLINE static inline
128#endif 128# endif
129 129#endif
130#define A64(x) (((U64_S *)(x))->v) 130
131#define A32(x) (((U32_S *)(x))->v) 131#ifdef _MSC_VER /* Visual Studio */
132#define A16(x) (((U16_S *)(x))->v) 132# define lz4_bswap16(x) _byteswap_ushort(x)
133 133#else
134 134# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
135//************************************** 135#endif
136// Constants 136
137//************************************** 137
138#define MINMATCH 4 138/**************************************
139 139 Includes
140#define DICTIONARY_LOGSIZE 16 140**************************************/
141#define MAXD (1<<DICTIONARY_LOGSIZE) 141#include "lz4hc.h"
142#define MAXD_MASK ((U32)(MAXD - 1)) 142#include "lz4.h"
143#define MAX_DISTANCE (MAXD - 1) 143
144 144
145#define HASH_LOG (DICTIONARY_LOGSIZE-1) 145/**************************************
146#define HASHTABLESIZE (1 << HASH_LOG) 146 Basic Types
147#define HASH_MASK (HASHTABLESIZE - 1) 147**************************************/
148 148#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
149#define MAX_NB_ATTEMPTS 256 149# include <stdint.h>
150 150 typedef uint8_t BYTE;
151#define ML_BITS 4 151 typedef uint16_t U16;
152#define ML_MASK (size_t)((1U<<ML_BITS)-1) 152 typedef uint32_t U32;
153#define RUN_BITS (8-ML_BITS) 153 typedef int32_t S32;
154#define RUN_MASK ((1U<<RUN_BITS)-1) 154 typedef uint64_t U64;
155 155#else
156#define COPYLENGTH 8 156 typedef unsigned char BYTE;
157#define LASTLITERALS 5 157 typedef unsigned short U16;
158#define MFLIMIT (COPYLENGTH+MINMATCH) 158 typedef unsigned int U32;
159#define MINLENGTH (MFLIMIT+1) 159 typedef signed int S32;
160#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) 160 typedef unsigned long long U64;
161 161#endif
162 162
163//************************************** 163#if defined(__GNUC__) && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
164// Architecture-specific macros 164# define _PACKED __attribute__ ((packed))
165//************************************** 165#else
166#if LZ4_ARCH64 // 64-bit 166# define _PACKED
167#define STEPSIZE 8 167#endif
168#define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8; 168
169#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d) 169#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
170#define UARCH U64 170# ifdef __IBMC__
171#define AARCH A64 171# pragma pack(1)
172#define HTYPE U32 172# else
173#define INITBASE(b,s) const BYTE* const b = s 173# pragma pack(push, 1)
174#else // 32-bit 174# endif
175#define STEPSIZE 4 175#endif
176#define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4; 176
177#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d); 177typedef struct _U16_S { U16 v; } _PACKED U16_S;
178#define UARCH U32 178typedef struct _U32_S { U32 v; } _PACKED U32_S;
179#define AARCH A32 179typedef struct _U64_S { U64 v; } _PACKED U64_S;
180#define HTYPE const BYTE* 180
181#define INITBASE(b,s) const int b = 0 181#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
182#endif 182# pragma pack(pop)
183 183#endif
184#if defined(LZ4_BIG_ENDIAN) 184
185#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; } 185#define A64(x) (((U64_S *)(x))->v)
186#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; } 186#define A32(x) (((U32_S *)(x))->v)
187#else // Little Endian 187#define A16(x) (((U16_S *)(x))->v)
188#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); } 188
189#define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; } 189
190#endif 190/**************************************
191 191 Constants
192 192**************************************/
193//************************************************************ 193#define MINMATCH 4
194// Local Types 194
195//************************************************************ 195#define DICTIONARY_LOGSIZE 16
196typedef struct 196#define MAXD (1<<DICTIONARY_LOGSIZE)
197{ 197#define MAXD_MASK ((U32)(MAXD - 1))
198 const BYTE* base; 198#define MAX_DISTANCE (MAXD - 1)
199 HTYPE hashTable[HASHTABLESIZE]; 199
200 U16 chainTable[MAXD]; 200#define HASH_LOG (DICTIONARY_LOGSIZE-1)
201 const BYTE* nextToUpdate; 201#define HASHTABLESIZE (1 << HASH_LOG)
202} LZ4HC_Data_Structure; 202#define HASH_MASK (HASHTABLESIZE - 1)
203 203
204 204#define ML_BITS 4
205//************************************** 205#define ML_MASK (size_t)((1U<<ML_BITS)-1)
206// Macros 206#define RUN_BITS (8-ML_BITS)
207//************************************** 207#define RUN_MASK ((1U<<RUN_BITS)-1)
208#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e); 208
209#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; } 209#define COPYLENGTH 8
210#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG)) 210#define LASTLITERALS 5
211#define HASH_VALUE(p) HASH_FUNCTION(*(U32*)(p)) 211#define MFLIMIT (COPYLENGTH+MINMATCH)
212#define HASH_POINTER(p) (HashTable[HASH_VALUE(p)] + base) 212#define MINLENGTH (MFLIMIT+1)
213#define DELTANEXT(p) chainTable[(size_t)(p) & MAXD_MASK] 213#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
214#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p)) 214
215#define ADD_HASH(p) { size_t delta = (p) - HASH_POINTER(p); if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; DELTANEXT(p) = (U16)delta; HashTable[HASH_VALUE(p)] = (p) - base; } 215#define KB *(1U<<10)
216 216#define MB *(1U<<20)
217 217#define GB *(1U<<30)
218//************************************** 218
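
The HASH_FUNCTION macro, identical on both sides of this diff, is Knuth-style multiplicative hashing: the four bytes at p are multiplied by 2654435761 (~ 2^32 / golden ratio) and the top HASH_LOG bits of the 32-bit product become the table index. A standalone sketch using this file's constants (MINMATCH = 4, HASH_LOG = DICTIONARY_LOGSIZE - 1 = 15):

    /* index = top 15 bits of (sequence * 2654435761), i.e. >> (4*8 - 15) = >> 17 */
    static unsigned hashHC(unsigned sequence)   /* 'sequence' = 4 input bytes */
    {
        return (sequence * 2654435761U) >> ((4 * 8) - 15);
    }
    /* every result fits the half-size table: 0 <= hashHC(x) <= 32767 = HASHTABLESIZE-1 */
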
219// Private functions 219
220//************************************** 220/**************************************
221#if LZ4_ARCH64 221 Architecture-specific macros
222 222**************************************/
223inline static int LZ4_NbCommonBytes (register U64 val) 223#if LZ4_ARCH64 /* 64-bit */
224{ 224# define STEPSIZE 8
225#if defined(LZ4_BIG_ENDIAN) 225# define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
226 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) 226# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
227 unsigned long r = 0; 227# define AARCH A64
228 _BitScanReverse64( &r, val ); 228# define HTYPE U32
229 return (int)(r>>3); 229# define INITBASE(b,s) const BYTE* const b = s
230 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) 230#else /* 32-bit */
231 return (__builtin_clzll(val) >> 3); 231# define STEPSIZE 4
232 #else 232# define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
233 int r; 233# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
234 if (!(val>>32)) { r=4; } else { r=0; val>>=32; } 234# define AARCH A32
235 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } 235# define HTYPE U32
236 r += (!val); 236# define INITBASE(b,s) const BYTE* const b = s
237 return r; 237#endif
238 #endif 238
239#else 239#if defined(LZ4_BIG_ENDIAN)
240 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) 240# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
241 unsigned long r = 0; 241# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
242 _BitScanForward64( &r, val ); 242#else /* Little Endian */
243 return (int)(r>>3); 243# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
244 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) 244# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
245 return (__builtin_ctzll(val) >> 3); 245#endif
246 #else 246
247 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; 247
248 return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58]; 248/**************************************
249 #endif 249 Local Types
250#endif 250**************************************/
251} 251typedef struct
252 252{
253#else 253 const BYTE* inputBuffer;
254 254 const BYTE* base;
255inline static int LZ4_NbCommonBytes (register U32 val) 255 const BYTE* end;
256{ 256 HTYPE hashTable[HASHTABLESIZE];
257#if defined(LZ4_BIG_ENDIAN) 257 U16 chainTable[MAXD];
258 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) 258 const BYTE* nextToUpdate;
259 unsigned long r = 0; 259} LZ4HC_Data_Structure;
260 _BitScanReverse( &r, val ); 260
261 return (int)(r>>3); 261
262 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) 262/**************************************
263 return (__builtin_clz(val) >> 3); 263 Macros
264 #else 264**************************************/
265 int r; 265#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
266 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } 266#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }
267 r += (!val); 267#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
268 return r; 268#define HASH_VALUE(p) HASH_FUNCTION(A32(p))
269 #endif 269#define HASH_POINTER(p) (HashTable[HASH_VALUE(p)] + base)
270#else 270#define DELTANEXT(p) chainTable[(size_t)(p) & MAXD_MASK]
271 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) 271#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p))
272 unsigned long r = 0; 272
273 _BitScanForward( &r, val ); 273
274 return (int)(r>>3); 274/**************************************
275 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) 275 Private functions
276 return (__builtin_ctz(val) >> 3); 276**************************************/
277 #else 277#if LZ4_ARCH64
278 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; 278
279 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; 279FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)
280 #endif 280{
281#endif 281#if defined(LZ4_BIG_ENDIAN)
282} 282# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
283 283 unsigned long r = 0;
284#endif 284 _BitScanReverse64( &r, val );
285 285 return (int)(r>>3);
286 286# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
287inline static int LZ4HC_Init (LZ4HC_Data_Structure* hc4, const BYTE* base) 287 return (__builtin_clzll(val) >> 3);
288{ 288# else
289 MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable)); 289 int r;
290 MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); 290 if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
291 hc4->nextToUpdate = base + LZ4_ARCH64; 291 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
292 hc4->base = base; 292 r += (!val);
293 return 1; 293 return r;
294} 294# endif
295 295#else
296 296# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
297inline static void* LZ4HC_Create (const BYTE* base) 297 unsigned long r = 0;
298{ 298 _BitScanForward64( &r, val );
299 void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure)); 299 return (int)(r>>3);
300 300# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
301 LZ4HC_Init (hc4, base); 301 return (__builtin_ctzll(val) >> 3);
302 return hc4; 302# else
303} 303 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
304 304 return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
305 305# endif
306inline static int LZ4HC_Free (void** LZ4HC_Data) 306#endif
307{ 307}
308 FREEMEM(*LZ4HC_Data); 308
309 *LZ4HC_Data = NULL; 309#else
310 return (1); 310
311} 311FORCE_INLINE int LZ4_NbCommonBytes (register U32 val)
312 312{
313 313#if defined(LZ4_BIG_ENDIAN)
314inline static void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip) 314# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
315{ 315 unsigned long r;
316 U16* chainTable = hc4->chainTable; 316 _BitScanReverse( &r, val );
317 HTYPE* HashTable = hc4->hashTable; 317 return (int)(r>>3);
318 INITBASE(base,hc4->base); 318# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
319 319 return (__builtin_clz(val) >> 3);
320 while(hc4->nextToUpdate < ip) 320# else
321 { 321 int r;
322 ADD_HASH(hc4->nextToUpdate); 322 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
323 hc4->nextToUpdate++; 323 r += (!val);
324 } 324 return r;
325} 325# endif
326 326#else
327 327# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
328inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos) 328 unsigned long r;
329{ 329 _BitScanForward( &r, val );
330 U16* const chainTable = hc4->chainTable; 330 return (int)(r>>3);
331 HTYPE* const HashTable = hc4->hashTable; 331# elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
332 const BYTE* ref; 332 return (__builtin_ctz(val) >> 3);
333 INITBASE(base,hc4->base); 333# else
334 int nbAttempts=MAX_NB_ATTEMPTS; 334 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
335 int ml=0; 335 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
336 336# endif
337 // HC4 match finder 337#endif
338 LZ4HC_Insert(hc4, ip); 338}
339 ref = HASH_POINTER(ip); 339
340 while ((ref >= (ip-MAX_DISTANCE)) && (nbAttempts)) 340#endif
341 { 341
342 nbAttempts--; 342
343 if (*(ref+ml) == *(ip+ml)) 343int LZ4_sizeofStreamStateHC()
344 if (*(U32*)ref == *(U32*)ip) 344{
345 { 345 return sizeof(LZ4HC_Data_Structure);
346 const BYTE* reft = ref+MINMATCH; 346}
347 const BYTE* ipt = ip+MINMATCH; 347
348 348FORCE_INLINE void LZ4_initHC (LZ4HC_Data_Structure* hc4, const BYTE* base)
349 while (ipt<matchlimit-(STEPSIZE-1)) 349{
350 { 350 MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
351 UARCH diff = AARCH(reft) ^ AARCH(ipt); 351 MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
352 if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; } 352 hc4->nextToUpdate = base + 1;
353 ipt += LZ4_NbCommonBytes(diff); 353 hc4->base = base;
354 goto _endCount; 354 hc4->inputBuffer = base;
355 } 355 hc4->end = base;
356 if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; } 356}
357 if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; } 357
358 if ((ipt<matchlimit) && (*reft == *ipt)) ipt++; 358int LZ4_resetStreamStateHC(void* state, const char* inputBuffer)
359_endCount: 359{
360 360 if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; /* Error : pointer is not aligned for pointer (32 or 64 bits) */
361 if (ipt-ip > ml) { ml = (int)(ipt-ip); *matchpos = ref; } 361 LZ4_initHC((LZ4HC_Data_Structure*)state, (const BYTE*)inputBuffer);
362 } 362 return 0;
363 ref = GETNEXT(ref); 363}
364 } 364
365 365
366 return ml; 366void* LZ4_createHC (const char* inputBuffer)
367} 367{
368 368 void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure));
369 369 LZ4_initHC ((LZ4HC_Data_Structure*)hc4, (const BYTE*)inputBuffer);
370inline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos) 370 return hc4;
371{ 371}
372 U16* const chainTable = hc4->chainTable; 372
373 HTYPE* const HashTable = hc4->hashTable; 373
374 INITBASE(base,hc4->base); 374int LZ4_freeHC (void* LZ4HC_Data)
375 const BYTE* ref; 375{
376 int nbAttempts = MAX_NB_ATTEMPTS; 376 FREEMEM(LZ4HC_Data);
377 int delta = (int)(ip-startLimit); 377 return (0);
378 378}
379 // First Match 379
380 LZ4HC_Insert(hc4, ip); 380
381 ref = HASH_POINTER(ip); 381/* Update chains up to ip (excluded) */
382 382FORCE_INLINE void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip)
383 while ((ref >= ip-MAX_DISTANCE) && (ref >= hc4->base) && (nbAttempts)) 383{
384 { 384 U16* chainTable = hc4->chainTable;
385 nbAttempts--; 385 HTYPE* HashTable = hc4->hashTable;
386 if (*(startLimit + longest) == *(ref - delta + longest)) 386 INITBASE(base,hc4->base);
387 if (*(U32*)ref == *(U32*)ip) 387
388 { 388 while(hc4->nextToUpdate < ip)
389 const BYTE* reft = ref+MINMATCH; 389 {
390 const BYTE* ipt = ip+MINMATCH; 390 const BYTE* const p = hc4->nextToUpdate;
391 const BYTE* startt = ip; 391 size_t delta = (p) - HASH_POINTER(p);
392 392 if (delta>MAX_DISTANCE) delta = MAX_DISTANCE;
393 while (ipt<matchlimit-(STEPSIZE-1)) 393 DELTANEXT(p) = (U16)delta;
394 { 394 HashTable[HASH_VALUE(p)] = (HTYPE)((p) - base);
395 UARCH diff = AARCH(reft) ^ AARCH(ipt); 395 hc4->nextToUpdate++;
396 if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; } 396 }
397 ipt += LZ4_NbCommonBytes(diff); 397}
398 goto _endCount; 398
399 } 399
400 if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; } 400char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
401 if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; } 401{
402 if ((ipt<matchlimit) && (*reft == *ipt)) ipt++; 402 LZ4HC_Data_Structure* hc4 = (LZ4HC_Data_Structure*)LZ4HC_Data;
403_endCount: 403 U32 distance = (U32)(hc4->end - hc4->inputBuffer) - 64 KB;
404 404 distance = (distance >> 16) << 16; /* Must be a multiple of 64 KB */
405 reft = ref; 405 LZ4HC_Insert(hc4, hc4->end - MINMATCH);
406 while ((startt>startLimit) && (reft > hc4->base) && (startt[-1] == reft[-1])) {startt--; reft--;} 406 memcpy((void*)(hc4->end - 64 KB - distance), (const void*)(hc4->end - 64 KB), 64 KB);
407 407 hc4->nextToUpdate -= distance;
408 if ((ipt-startt) > longest) 408 hc4->base -= distance;
409 { 409 if ((U32)(hc4->inputBuffer - hc4->base) > 1 GB + 64 KB) /* Avoid overflow */
410 longest = (int)(ipt-startt); 410 {
411 *matchpos = reft; 411 int i;
412 *startpos = startt; 412 hc4->base += 1 GB;
413 } 413 for (i=0; i<HASHTABLESIZE; i++) hc4->hashTable[i] -= 1 GB;
414 } 414 }
415 ref = GETNEXT(ref); 415 hc4->end -= distance;
416 } 416 return (char*)(hc4->end);
417 417}
418 return longest; 418
419} 419
420 420FORCE_INLINE size_t LZ4HC_CommonLength (const BYTE* p1, const BYTE* p2, const BYTE* const matchlimit)
421 421{
422inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** anchor, int ml, const BYTE* ref) 422 const BYTE* p1t = p1;
423{ 423
424 int length, len; 424 while (p1t<matchlimit-(STEPSIZE-1))
425 BYTE* token; 425 {
426 426 size_t diff = AARCH(p2) ^ AARCH(p1t);
427 // Encode Literal length 427 if (!diff) { p1t+=STEPSIZE; p2+=STEPSIZE; continue; }
428 length = (int)(*ip - *anchor); 428 p1t += LZ4_NbCommonBytes(diff);
429 token = (*op)++; 429 return (p1t - p1);
430 if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; } 430 }
431 else *token = (length<<ML_BITS); 431 if (LZ4_ARCH64) if ((p1t<(matchlimit-3)) && (A32(p2) == A32(p1t))) { p1t+=4; p2+=4; }
432 432 if ((p1t<(matchlimit-1)) && (A16(p2) == A16(p1t))) { p1t+=2; p2+=2; }
433 // Copy Literals 433 if ((p1t<matchlimit) && (*p2 == *p1t)) p1t++;
434 LZ4_BLINDCOPY(*anchor, *op, length); 434 return (p1t - p1);
435 435}
436 // Encode Offset 436
437 LZ4_WRITE_LITTLEENDIAN_16(*op,(U16)(*ip-ref)); 437
438 438FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos, const int maxNbAttempts)
439 // Encode MatchLength 439{
440 len = (int)(ml-MINMATCH); 440 U16* const chainTable = hc4->chainTable;
441 if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (len > 254) { len-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)len; } 441 HTYPE* const HashTable = hc4->hashTable;
442 else *token += len; 442 const BYTE* ref;
443 443 INITBASE(base,hc4->base);
444 // Prepare next loop 444 int nbAttempts=maxNbAttempts;
445 *ip += ml; 445 size_t repl=0, ml=0;
446 *anchor = *ip; 446 U16 delta=0; /* useless assignment, to silence an uninitialized-variable warning */
447 447
448 return 0; 448 /* HC4 match finder */
449} 449 LZ4HC_Insert(hc4, ip);
450 450 ref = HASH_POINTER(ip);
451 451
452//**************************** 452#define REPEAT_OPTIMIZATION
453// Compression CODE 453#ifdef REPEAT_OPTIMIZATION
454//**************************** 454 /* Detect repetitive sequences of length <= 4 */
455 455 if ((U32)(ip-ref) <= 4) /* potential repetition */
456int LZ4_compressHCCtx(LZ4HC_Data_Structure* ctx, 456 {
457 const char* source, 457 if (A32(ref) == A32(ip)) /* confirmed */
458 char* dest, 458 {
459 int isize) 459 delta = (U16)(ip-ref);
460{ 460 repl = ml = LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit) + MINMATCH;
461 const BYTE* ip = (const BYTE*) source; 461 *matchpos = ref;
462 const BYTE* anchor = ip; 462 }
463 const BYTE* const iend = ip + isize; 463 ref = GETNEXT(ref);
464 const BYTE* const mflimit = iend - MFLIMIT; 464 }
465 const BYTE* const matchlimit = (iend - LASTLITERALS); 465#endif
466 466
467 BYTE* op = (BYTE*) dest; 467 while (((U32)(ip-ref) <= MAX_DISTANCE) && (nbAttempts))
468 468 {
469 int ml, ml2, ml3, ml0; 469 nbAttempts--;
470 const BYTE* ref=NULL; 470 if (*(ref+ml) == *(ip+ml))
471 const BYTE* start2=NULL; 471 if (A32(ref) == A32(ip))
472 const BYTE* ref2=NULL; 472 {
473 const BYTE* start3=NULL; 473 size_t mlt = LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit) + MINMATCH;
474 const BYTE* ref3=NULL; 474 if (mlt > ml) { ml = mlt; *matchpos = ref; }
475 const BYTE* start0; 475 }
476 const BYTE* ref0; 476 ref = GETNEXT(ref);
477 477 }
478 ip++; 478
479 479#ifdef REPEAT_OPTIMIZATION
480 // Main Loop 480 /* Complete table */
481 while (ip < mflimit) 481 if (repl)
482 { 482 {
483 ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref)); 483 const BYTE* ptr = ip;
484 if (!ml) { ip++; continue; } 484 const BYTE* end;
485 485
486 // saved, in case we would skip too much 486 end = ip + repl - (MINMATCH-1);
487 start0 = ip; 487 while(ptr < end-delta)
488 ref0 = ref; 488 {
489 ml0 = ml; 489 DELTANEXT(ptr) = delta; /* Pre-Load */
490 490 ptr++;
491_Search2: 491 }
492 if (ip+ml < mflimit) 492 do
493 ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2); 493 {
494 else ml2=ml; 494 DELTANEXT(ptr) = delta;
495 495 HashTable[HASH_VALUE(ptr)] = (HTYPE)((ptr) - base); /* Head of chain */
496 if (ml2 == ml) // No better match 496 ptr++;
497 { 497 } while(ptr < end);
498 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); 498 hc4->nextToUpdate = end;
499 continue; 499 }
500 } 500#endif
501 501
502 if (start0 < ip) 502 return (int)ml;
503 { 503}
504 if (start2 < ip + ml0) // empirical 504
505 { 505
506 ip = start0; 506FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos, const int maxNbAttempts)
507 ref = ref0; 507{
508 ml = ml0; 508 U16* const chainTable = hc4->chainTable;
509 } 509 HTYPE* const HashTable = hc4->hashTable;
510 } 510 INITBASE(base,hc4->base);
511 511 const BYTE* ref;
512 // Here, start0==ip 512 int nbAttempts = maxNbAttempts;
513 if ((start2 - ip) < 3) // First Match too small : removed 513 int delta = (int)(ip-startLimit);
514 { 514
515 ml = ml2; 515 /* First Match */
516 ip = start2; 516 LZ4HC_Insert(hc4, ip);
517 ref =ref2; 517 ref = HASH_POINTER(ip);
518 goto _Search2; 518
519 } 519 while (((U32)(ip-ref) <= MAX_DISTANCE) && (nbAttempts))
520 520 {
521_Search3: 521 nbAttempts--;
522 // Currently we have : 522 if (*(startLimit + longest) == *(ref - delta + longest))
523 // ml2 > ml1, and 523 if (A32(ref) == A32(ip))
524 // ip1+3 <= ip2 (usually < ip1+ml1) 524 {
525 if ((start2 - ip) < OPTIMAL_ML) 525#if 1
526 { 526 const BYTE* reft = ref+MINMATCH;
527 int correction; 527 const BYTE* ipt = ip+MINMATCH;
528 int new_ml = ml; 528 const BYTE* startt = ip;
529 if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; 529
530 if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH; 530 while (ipt<matchlimit-(STEPSIZE-1))
531 correction = new_ml - (int)(start2 - ip); 531 {
532 if (correction > 0) 532 size_t diff = AARCH(reft) ^ AARCH(ipt);
533 { 533 if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }
534 start2 += correction; 534 ipt += LZ4_NbCommonBytes(diff);
535 ref2 += correction; 535 goto _endCount;
536 ml2 -= correction; 536 }
537 } 537 if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; }
538 } 538 if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; }
539 // Now, we have start2 = ip+new_ml, with new_ml=min(ml, OPTIMAL_ML=18) 539 if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;
540 540_endCount:
541 if (start2 + ml2 < mflimit) 541 reft = ref;
542 ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3); 542#else
543 else ml3=ml2; 543 /* Easier for code maintenance, but unfortunately slower too */
544 544 const BYTE* startt = ip;
545 if (ml3 == ml2) // No better match : 2 sequences to encode 545 const BYTE* reft = ref;
546 { 546 const BYTE* ipt = ip + MINMATCH + LZ4HC_CommonLength(ip+MINMATCH, ref+MINMATCH, matchlimit);
547 // ip & ref are known; Now for ml 547#endif
548 if (start2 < ip+ml) 548
549 { 549 while ((startt>startLimit) && (reft > hc4->inputBuffer) && (startt[-1] == reft[-1])) {startt--; reft--;}
550 if ((start2 - ip) < OPTIMAL_ML) 550
551 { 551 if ((ipt-startt) > longest)
552 int correction; 552 {
553 if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; 553 longest = (int)(ipt-startt);
554 if (ip+ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; 554 *matchpos = reft;
555 correction = ml - (int)(start2 - ip); 555 *startpos = startt;
556 if (correction > 0) 556 }
557 { 557 }
558 start2 += correction; 558 ref = GETNEXT(ref);
559 ref2 += correction; 559 }
560 ml2 -= correction; 560
561 } 561 return longest;
562 } 562}
563 else 563
564 { 564
565 ml = (int)(start2 - ip); 565typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
566 } 566
567 } 567FORCE_INLINE int LZ4HC_encodeSequence (
568 // Now, encode 2 sequences 568 const BYTE** ip,
569 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); 569 BYTE** op,
570 ip = start2; 570 const BYTE** anchor,
571 LZ4_encodeSequence(&ip, &op, &anchor, ml2, ref2); 571 int matchLength,
572 continue; 572 const BYTE* ref,
573 } 573 limitedOutput_directive limitedOutputBuffer,
574 574 BYTE* oend)
575 if (start3 < ip+ml+3) // Not enough space for match 2 : remove it 575{
576 { 576 int length;
577 if (start3 >= (ip+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 577 BYTE* token;
578 { 578
579 if (start2 < ip+ml) 579 /* Encode Literal length */
580 { 580 length = (int)(*ip - *anchor);
581 int correction = (int)(ip+ml - start2); 581 token = (*op)++;
582 start2 += correction; 582 if ((limitedOutputBuffer) && ((*op + length + (2 + 1 + LASTLITERALS) + (length>>8)) > oend)) return 1; /* Check output limit */
583 ref2 += correction; 583 if (length>=(int)RUN_MASK) { int len; *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; }
584 ml2 -= correction; 584 else *token = (BYTE)(length<<ML_BITS);
585 if (ml2 < MINMATCH) 585
586 { 586 /* Copy Literals */
587 start2 = start3; 587 LZ4_BLINDCOPY(*anchor, *op, length);
588 ref2 = ref3; 588
589 ml2 = ml3; 589 /* Encode Offset */
590 } 590 LZ4_WRITE_LITTLEENDIAN_16(*op,(U16)(*ip-ref));
591 } 591
592 592 /* Encode MatchLength */
593 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); 593 length = (int)(matchLength-MINMATCH);
594 ip = start3; 594 if ((limitedOutputBuffer) && (*op + (1 + LASTLITERALS) + (length>>8) > oend)) return 1; /* Check output limit */
595 ref = ref3; 595 if (length>=(int)ML_MASK) { *token+=ML_MASK; length-=ML_MASK; for(; length > 509 ; length-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (length > 254) { length-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)length; }
596 ml = ml3; 596 else *token += (BYTE)(length);
597 597
598 start0 = start2; 598 /* Prepare next loop */
599 ref0 = ref2; 599 *ip += matchLength;
600 ml0 = ml2; 600 *anchor = *ip;
601 goto _Search2; 601
602 } 602 return 0;
603 603}
604 start2 = start3; 604
605 ref2 = ref3; 605
606 ml2 = ml3; 606#define MAX_COMPRESSION_LEVEL 16
607 goto _Search3; 607static int LZ4HC_compress_generic (
608 } 608 void* ctxvoid,
609 609 const char* source,
610 // OK, now we have 3 ascending matches; let's write at least the first one 610 char* dest,
611 // ip & ref are known; Now for ml 611 int inputSize,
612 if (start2 < ip+ml) 612 int maxOutputSize,
613 { 613 int compressionLevel,
614 if ((start2 - ip) < (int)ML_MASK) 614 limitedOutput_directive limit
615 { 615 )
616 int correction; 616{
617 if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; 617 LZ4HC_Data_Structure* ctx = (LZ4HC_Data_Structure*) ctxvoid;
618 if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; 618 const BYTE* ip = (const BYTE*) source;
619 correction = ml - (int)(start2 - ip); 619 const BYTE* anchor = ip;
620 if (correction > 0) 620 const BYTE* const iend = ip + inputSize;
621 { 621 const BYTE* const mflimit = iend - MFLIMIT;
622 start2 += correction; 622 const BYTE* const matchlimit = (iend - LASTLITERALS);
623 ref2 += correction; 623
624 ml2 -= correction; 624 BYTE* op = (BYTE*) dest;
625 } 625 BYTE* const oend = op + maxOutputSize;
626 } 626
627 else 627 const int maxNbAttempts = compressionLevel > MAX_COMPRESSION_LEVEL ? 1 << MAX_COMPRESSION_LEVEL : compressionLevel ? 1<<(compressionLevel-1) : 1<<LZ4HC_DEFAULT_COMPRESSIONLEVEL;
628 { 628 int ml, ml2, ml3, ml0;
629 ml = (int)(start2 - ip); 629 const BYTE* ref=NULL;
630 } 630 const BYTE* start2=NULL;
631 } 631 const BYTE* ref2=NULL;
632 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref); 632 const BYTE* start3=NULL;
633 633 const BYTE* ref3=NULL;
634 ip = start2; 634 const BYTE* start0;
635 ref = ref2; 635 const BYTE* ref0;
636 ml = ml2; 636
637 637
638 start2 = start3; 638 /* Ensure blocks follow each other */
639 ref2 = ref3; 639 if (ip != ctx->end) return 0;
640 ml2 = ml3; 640 ctx->end += inputSize;
641 641
642 goto _Search3; 642 ip++;
643 643
644 } 644 /* Main Loop */
645 645 while (ip < mflimit)
646 // Encode Last Literals 646 {
647 { 647 ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts);
648 int lastRun = (int)(iend - anchor); 648 if (!ml) { ip++; continue; }
649 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } 649
650 else *op++ = (lastRun<<ML_BITS); 650 /* saved, in case we would skip too much */
651 memcpy(op, anchor, iend - anchor); 651 start0 = ip;
652 op += iend-anchor; 652 ref0 = ref;
653 } 653 ml0 = ml;
654 654
655 // End 655_Search2:
656 return (int) (((char*)op)-dest); 656 if (ip+ml < mflimit)
657} 657 ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2, maxNbAttempts);
658 658 else ml2 = ml;
659 659
660int LZ4_compressHC(const char* source, 660 if (ml2 == ml) /* No better match */
661 char* dest, 661 {
662 int isize) 662 if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
663{ 663 continue;
664 void* ctx = LZ4HC_Create((const BYTE*)source); 664 }
665 int result = LZ4_compressHCCtx(ctx, source, dest, isize); 665
666 LZ4HC_Free (&ctx); 666 if (start0 < ip)
667 667 {
668 return result; 668 if (start2 < ip + ml0) /* empirical */
669} 669 {
670 670 ip = start0;
671 671 ref = ref0;
672 ml = ml0;
673 }
674 }
675
676 /* Here, start0==ip */
677 if ((start2 - ip) < 3) /* First Match too small : removed */
678 {
679 ml = ml2;
680 ip = start2;
681 ref = ref2;
682 goto _Search2;
683 }
684
685_Search3:
686 /*
687 * Currently we have :
688 * ml2 > ml1, and
689 * ip1+3 <= ip2 (usually < ip1+ml1)
690 */
691 if ((start2 - ip) < OPTIMAL_ML)
692 {
693 int correction;
694 int new_ml = ml;
695 if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
696 if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
697 correction = new_ml - (int)(start2 - ip);
698 if (correction > 0)
699 {
700 start2 += correction;
701 ref2 += correction;
702 ml2 -= correction;
703 }
704 }
705 /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
706
707 if (start2 + ml2 < mflimit)
708 ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, maxNbAttempts);
709 else ml3 = ml2;
710
711 if (ml3 == ml2) /* No better match : 2 sequences to encode */
712 {
713 /* ip & ref are known; Now for ml */
714 if (start2 < ip+ml) ml = (int)(start2 - ip);
715 /* Now, encode 2 sequences */
716 if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
717 ip = start2;
718 if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml2, ref2, limit, oend)) return 0;
719 continue;
720 }
721
722 if (start3 < ip+ml+3) /* Not enough space for match 2 : remove it */
723 {
724 if (start3 >= (ip+ml)) /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
725 {
726 if (start2 < ip+ml)
727 {
728 int correction = (int)(ip+ml - start2);
729 start2 += correction;
730 ref2 += correction;
731 ml2 -= correction;
732 if (ml2 < MINMATCH)
733 {
734 start2 = start3;
735 ref2 = ref3;
736 ml2 = ml3;
737 }
738 }
739
740 if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
741 ip = start3;
742 ref = ref3;
743 ml = ml3;
744
745 start0 = start2;
746 ref0 = ref2;
747 ml0 = ml2;
748 goto _Search2;
749 }
750
751 start2 = start3;
752 ref2 = ref3;
753 ml2 = ml3;
754 goto _Search3;
755 }
756
757 /*
758 * OK, now we have 3 ascending matches; let's write at least the first one
759 * ip & ref are known; Now for ml
760 */
761 if (start2 < ip+ml)
762 {
763 if ((start2 - ip) < (int)ML_MASK)
764 {
765 int correction;
766 if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
767 if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
768 correction = ml - (int)(start2 - ip);
769 if (correction > 0)
770 {
771 start2 += correction;
772 ref2 += correction;
773 ml2 -= correction;
774 }
775 }
776 else
777 {
778 ml = (int)(start2 - ip);
779 }
780 }
781 if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
782
783 ip = start2;
784 ref = ref2;
785 ml = ml2;
786
787 start2 = start3;
788 ref2 = ref3;
789 ml2 = ml3;
790
791 goto _Search3;
792
793 }
794
795 /* Encode Last Literals */
796 {
797 int lastRun = (int)(iend - anchor);
798 if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; /* Check output limit */
799 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
800 else *op++ = (BYTE)(lastRun<<ML_BITS);
801 memcpy(op, anchor, iend - anchor);
802 op += iend-anchor;
803 }
804
805 /* End */
806 return (int) (((char*)op)-dest);
807}
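/* Editor's illustration (not part of this patch) : how 'compressionLevel'
   maps to the chain-search depth 'maxNbAttempts' computed above, restated
   as a standalone helper for clarity. */
static int LZ4HC_maxAttempts_example (int compressionLevel)
{
    if (compressionLevel > MAX_COMPRESSION_LEVEL) return 1 << MAX_COMPRESSION_LEVEL;  /* clamp for levels above 16 */
    if (compressionLevel == 0) return 1 << LZ4HC_DEFAULT_COMPRESSIONLEVEL;            /* 0 selects the default level */
    return 1 << (compressionLevel-1);   /* e.g. level 9 -> 256 attempts per position */
}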
808
809
810int LZ4_compressHC2(const char* source, char* dest, int inputSize, int compressionLevel)
811{
812 void* ctx = LZ4_createHC(source);
813 int result;
814 if (ctx==NULL) return 0;
815
816 result = LZ4HC_compress_generic (ctx, source, dest, inputSize, 0, compressionLevel, noLimit);
817
818 LZ4_freeHC(ctx);
819 return result;
820}
821
822int LZ4_compressHC(const char* source, char* dest, int inputSize) { return LZ4_compressHC2(source, dest, inputSize, 0); }
823
824int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel)
825{
826 void* ctx = LZ4_createHC(source);
827 int result;
828 if (ctx==NULL) return 0;
829
830 result = LZ4HC_compress_generic (ctx, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput);
831
832 LZ4_freeHC(ctx);
833 return result;
834}
835
836int LZ4_compressHC_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
837{
838 return LZ4_compressHC2_limitedOutput(source, dest, inputSize, maxOutputSize, 0);
839}
840
841
842/*****************************
843 Using external allocation
844*****************************/
845int LZ4_sizeofStateHC() { return sizeof(LZ4HC_Data_Structure); }
846
847
848int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel)
849{
850 if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
851 LZ4_initHC ((LZ4HC_Data_Structure*)state, (const BYTE*)source);
852 return LZ4HC_compress_generic (state, source, dest, inputSize, 0, compressionLevel, noLimit);
853}
854
855int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize)
856{ return LZ4_compressHC2_withStateHC (state, source, dest, inputSize, 0); }
857
858
859int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel)
860{
861 if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */
862 LZ4_initHC ((LZ4HC_Data_Structure*)state, (const BYTE*)source);
863 return LZ4HC_compress_generic (state, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput);
864}
865
866int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize)
867{ return LZ4_compressHC2_limitedOutput_withStateHC (state, source, dest, inputSize, maxOutputSize, 0); }
868
869
870/****************************
871 Stream functions
872****************************/
873
874int LZ4_compressHC_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize)
875{
876 return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, 0, 0, noLimit);
877}
878
879int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel)
880{
881 return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, 0, compressionLevel, noLimit);
882}
883
884int LZ4_compressHC_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize)
885{
886 return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, maxOutputSize, 0, limitedOutput);
887}
888
889int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel)
890{
891 return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput);
892}
diff --git a/src/static_libs/lz4/lz4hc.h b/src/static_libs/lz4/lz4hc.h
index cb74689f07..deb2394062 100644
--- a/src/static_libs/lz4/lz4hc.h
+++ b/src/static_libs/lz4/lz4hc.h
@@ -1,60 +1,173 @@
1/* 1/*
2 LZ4 HC - High Compression Mode of LZ4 2 LZ4 HC - High Compression Mode of LZ4
3 Header File 3 Header File
4 Copyright (C) 2011-2012, Yann Collet. 4 Copyright (C) 2011-2014, Yann Collet.
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 6
7 Redistribution and use in source and binary forms, with or without 7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are 8 modification, are permitted provided that the following conditions are
9 met: 9 met:
10 10
11 * Redistributions of source code must retain the above copyright 11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer. 12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above 13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer 14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the 15 in the documentation and/or other materials provided with the
16 distribution. 16 distribution.
17 17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 29
30 You can contact the author at : 30 You can contact the author at :
31 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html 31 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 - LZ4 source repository : http://code.google.com/p/lz4/ 32 - LZ4 source repository : http://code.google.com/p/lz4/
33*/ 33*/
34#pragma once 34#pragma once
35 35
36 36
37#if defined (__cplusplus) 37#if defined (__cplusplus)
38extern "C" { 38extern "C" {
39#endif 39#endif
40 40
41 41
42int LZ4_compressHC (const char* source, char* dest, int isize); 42int LZ4_compressHC (const char* source, char* dest, int inputSize);
43 43/*
44/* 44LZ4_compressHC :
45LZ4_compressHC : 45 return : the number of bytes in compressed buffer dest
46 return : the number of bytes in compressed buffer dest 46 or 0 if compression fails.
47 note : destination buffer must be already allocated. 47 note : destination buffer must be already allocated.
48 To avoid any problem, size it to handle worst cases situations (input data not compressible) 48 To avoid any problem, size it to handle worst cases situations (input data not compressible)
49 Worst case size evaluation is provided by function LZ4_compressBound() (see "lz4.h") 49 Worst case size evaluation is provided by function LZ4_compressBound() (see "lz4.h")
50*/ 50*/
51 51
52 52int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
53/* Note : 53/*
54Decompression functions are provided within regular LZ4 source code (see "lz4.h") (BSD license) 54LZ4_compressHC_limitedOutput() :
55*/ 55 Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
56 56 If it cannot achieve it, compression will stop, and the result of the function will be zero.
57 57 This function never writes outside of the provided output buffer.
58#if defined (__cplusplus) 58
59} 59 inputSize : Max supported value is 1 GB
60#endif 60 maxOutputSize : the maximum allowed size of the destination buffer (which must be already allocated)
61 return : the number of output bytes written in buffer 'dest'
62 or 0 if compression fails.
63*/
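/* Usage sketch (editorial addition, not part of this header) : one-shot HC
   compression into a worst-case-sized buffer, as recommended above.
   LZ4_compressBound() is declared in "lz4.h"; assumes <stdlib.h>. */

#include <stdlib.h>
#include "lz4.h"
#include "lz4hc.h"

static int compressBlockHC (const char* src, int srcSize, char** dstPtr)
{
    int cSize;
    char* dst = (char*)malloc(LZ4_compressBound(srcSize));  /* worst-case output size */
    if (dst == NULL) return 0;
    cSize = LZ4_compressHC(src, dst, srcSize);              /* 0 means compression failed */
    if (cSize == 0) { free(dst); dst = NULL; }
    *dstPtr = dst;
    return cSize;
}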
64
65
66int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
67int LZ4_compressHC2_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
68/*
69 Same functions as above, but with programmable 'compressionLevel'.
70 Recommended values are between 4 and 9, although any value between 0 and 16 will work.
71 'compressionLevel'==0 means use the default 'compressionLevel' value.
72 Values above 16 behave the same as 16.
73 Equivalent variants exist for all other compression functions below.
74*/
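/* Sketch (editorial) : the limitedOutput variant with an explicit level.
   Level 9 trades speed for ratio; 0 would select the default. */
static int compressLevel9 (const char* src, char* dst, int srcSize, int dstCapacity)
{
    /* returns the compressed size, or 0 if 'dst' is too small or compression fails */
    return LZ4_compressHC2_limitedOutput(src, dst, srcSize, dstCapacity, 9);
}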
75
76/* Note :
77Decompression functions are provided within LZ4 source code (see "lz4.h") (BSD license)
78*/
79
80
81/**************************************
82 Using an external allocation
83**************************************/
84int LZ4_sizeofStateHC(void);
85int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize);
86int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
87
88int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
89int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
90
91/*
92These functions are provided should you prefer to allocate memory for compression tables with your own allocation methods.
93To know how much memory must be allocated for the compression tables, use :
94int LZ4_sizeofStateHC();
95
96Note that tables must be aligned for pointers (32 or 64 bits), otherwise compression will fail (return code 0).
97
98The allocated memory can be provided to the compression functions using the 'void* state' parameter.
99LZ4_compressHC_withStateHC() and LZ4_compressHC_limitedOutput_withStateHC() are equivalent to the previously described functions.
100They just use the externally allocated memory area instead of allocating their own (on stack, or on heap).
101*/
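/* Sketch (editorial) : caller-allocated compression state. malloc() returns
   memory aligned for pointers, satisfying the requirement above; 'dst' is
   assumed sized via LZ4_compressBound(). */
static int compressWithOwnState (const char* src, char* dst, int srcSize)
{
    int cSize;
    void* state = malloc(LZ4_sizeofStateHC());
    if (state == NULL) return 0;
    cSize = LZ4_compressHC_withStateHC(state, src, dst, srcSize);  /* 0 on failure */
    free(state);
    return cSize;
}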
102
103
104/**************************************
105 Streaming Functions
106**************************************/
107/* Note : these streaming functions still follow the older model */
108void* LZ4_createHC (const char* inputBuffer);
109int LZ4_compressHC_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize);
110int LZ4_compressHC_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize);
111char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
112int LZ4_freeHC (void* LZ4HC_Data);
113
114int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
115int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
116
117/*
118These functions allow the compression of dependent blocks, where each block benefits from the prior 64 KB within preceding blocks.
119In order to achieve this, it is necessary to first create the LZ4HC Data Structure, using the function :
120
121void* LZ4_createHC (const char* inputBuffer);
122The result of the function is a (void*) pointer to the LZ4HC Data Structure.
123This pointer will be needed in all other functions.
124If the pointer returned is NULL, then the allocation has failed, and compression must be aborted.
125The only parameter 'const char* inputBuffer' must, obviously, point at the beginning of the input buffer.
126The input buffer must be already allocated, and sized at least 192KB.
127'inputBuffer' will also be the 'const char* source' of the first block.
128
129All blocks are expected to lie next to each other within the input buffer, starting from 'inputBuffer'.
130To compress each block, use either LZ4_compressHC_continue() or LZ4_compressHC_limitedOutput_continue().
131Their behavior is identical to LZ4_compressHC() or LZ4_compressHC_limitedOutput(),
132but they require the LZ4HC Data Structure as their first argument, and check that each block starts right after the previous one.
133If the next block does not begin immediately after the previous one, the compression will fail (return 0).
134
135When it's no longer possible to lay the next block after the previous one (not enough space left in the input buffer), a call to :
136char* LZ4_slideInputBufferHC(void* LZ4HC_Data);
137must be performed. It will typically copy the latest 64KB of input to the beginning of the input buffer.
138Note that, for this function to work properly, the minimum size of the input buffer must be 192KB.
139==> The memory position where the next input data block must start is provided as the result of the function.
140
141Compression can then resume, using LZ4_compressHC_continue() or LZ4_compressHC_limitedOutput_continue(), as usual.
142
143When compression is completed, a call to LZ4_freeHC() will release the memory used by the LZ4HC Data Structure.
144*/
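/* Sketch (editorial) : the dependent-block sequence described above, with a
   192KB input buffer and 64KB blocks. readBlock()/writeBlock() are
   hypothetical I/O placeholders; error paths are simplified. */

#include <stdlib.h>
#include "lz4.h"       /* LZ4_compressBound() */
#include "lz4hc.h"

extern int  readBlock (char* dst, int maxSize);     /* placeholder : bytes read, 0 at end of input */
extern void writeBlock(const char* src, int size);  /* placeholder */

static void streamHCExample (void)
{
    enum { BLOCK_SIZE = 64*1024, INPUT_BUFFER_SIZE = 192*1024 };  /* 192KB minimum, see above */
    char* inputBuffer = (char*)malloc(INPUT_BUFFER_SIZE);
    char* out = (char*)malloc(LZ4_compressBound(BLOCK_SIZE));
    void* hc = (inputBuffer && out) ? LZ4_createHC(inputBuffer) : NULL;
    char* inPtr = inputBuffer;

    if (hc != NULL) for (;;)
    {
        int inSize = readBlock(inPtr, BLOCK_SIZE);
        if (inSize == 0) break;                                           /* end of input */
        writeBlock(out, LZ4_compressHC_continue(hc, inPtr, out, inSize)); /* blocks are contiguous */
        inPtr += inSize;
        if (inPtr + BLOCK_SIZE > inputBuffer + INPUT_BUFFER_SIZE)
            inPtr = LZ4_slideInputBufferHC(hc);                           /* keeps the last 64KB */
    }
    if (hc != NULL) LZ4_freeHC(hc);
    free(out);
    free(inputBuffer);
}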
145
146int LZ4_sizeofStreamStateHC(void);
147int LZ4_resetStreamStateHC(void* state, const char* inputBuffer);
148
149/*
150These functions achieve the same result as :
151void* LZ4_createHC (const char* inputBuffer);
152
153They are provided here to allow the user program to allocate memory using its own routines.
154
155To know how much space must be allocated, use LZ4_sizeofStreamStateHC();
156Note also that space must be aligned for pointers (32 or 64 bits).
157
158Once space is allocated, you must initialize it using : LZ4_resetStreamStateHC(void* state, const char* inputBuffer);
159void* state is a pointer to the space allocated.
160It must be aligned for pointers (32 or 64 bits), and be large enough.
161The parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer.
162The input buffer must be already allocated, and sized at least 192KB.
163'inputBuffer' will also be the 'const char* source' of the first block.
164
165The same space can be re-used multiple times, just by initializing it each time with LZ4_resetStreamStateHC().
166The return value of LZ4_resetStreamStateHC() is 0 if OK.
167Any other value means there was an error (typically, the state is not aligned for pointers (32 or 64 bits)).
168*/
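/* Sketch (editorial) : allocating the stream state yourself instead of
   calling LZ4_createHC(). malloc() guarantees pointer alignment, so
   LZ4_resetStreamStateHC() should return 0 here. Assumes <stdlib.h>. */
static void* makeOwnStreamState (char* inputBuffer)
{
    void* state = malloc(LZ4_sizeofStreamStateHC());
    if (state == NULL) return NULL;
    if (LZ4_resetStreamStateHC(state, inputBuffer) != 0)  /* non-zero means error */
    {
        free(state);
        return NULL;
    }
    return state;  /* ready for the _continue() functions above */
}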
169
170
171#if defined (__cplusplus)
172}
173#endif