author    Vincent Torri <vincent.torri@gmail.com>    2012-10-04 17:25:48 +0000
committer Vincent Torri <vincent.torri@gmail.com>    2012-10-04 17:25:48 +0000
commit    079b6c94c3d6acdf3ca5be5ae6c0711e9fade07d (patch)
tree      ceaf227f433f71596e69ac981cd882bc057bea05 /src/lib/eet
parent    1c96844922935e8f03175d627c8003b2a2b4f7ee (diff)
Eet: Update lz4 code to rev. 77. This fixes compilation on NetBSD 5.0.
SVN revision: 77449
Diffstat (limited to 'src/lib/eet')
 src/lib/eet/lz4/lz4.c   (-rw-r--r--) | 158
 src/lib/eet/lz4/lz4.h   (-rw-r--r--) |  51
 src/lib/eet/lz4/lz4hc.c (-rw-r--r--) |  46
 3 files changed, 152 insertions(+), 103 deletions(-)
diff --git a/src/lib/eet/lz4/lz4.c b/src/lib/eet/lz4/lz4.c
index 06e2829707..eeefa67388 100644
--- a/src/lib/eet/lz4/lz4.c
+++ b/src/lib/eet/lz4/lz4.c
@@ -34,31 +34,24 @@
 //**************************************
 // Tuning parameters
 //**************************************
-// COMPRESSIONLEVEL :
-// Increasing this value improves compression ratio
-// Lowering this value reduces memory usage
-// Reduced memory usage typically improves speed, due to cache effect (ex : L1 32KB for Intel, L1 64KB for AMD)
-// Memory usage formula : N->2^(N+2) Bytes (examples : 12 -> 16KB ; 17 -> 512KB)
-#define COMPRESSIONLEVEL 12
+// MEMORY_USAGE :
+// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+// Increasing memory usage improves compression ratio
+// Reduced memory usage can improve speed, due to cache effect
+// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+#define MEMORY_USAGE 14
 
-// NOTCOMPRESSIBLE_CONFIRMATION :
+// NOTCOMPRESSIBLE_DETECTIONLEVEL :
 // Decreasing this value will make the algorithm skip faster data segments considered "incompressible"
 // This may decrease compression ratio dramatically, but will be faster on incompressible data
 // Increasing this value will make the algorithm search more before declaring a segment "incompressible"
 // This could improve compression a bit, but will be slower on incompressible data
 // The default value (6) is recommended
-#define NOTCOMPRESSIBLE_CONFIRMATION 6
-
-// LZ4_COMPRESSMIN :
-// Compression function will *fail* if it is not successful at compressing input by at least LZ4_COMPRESSMIN bytes
-// Since the compression function stops working prematurely, it results in a speed gain
-// The output however is unusable. Compression function result will be zero.
-// Default : 0 = disabled
-#define LZ4_COMPRESSMIN 0
+#define NOTCOMPRESSIBLE_DETECTIONLEVEL 6
 
 // BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
-// This will provide a boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
-// You can set this option to 1 in situations where data will stay within closed environment
+// This will provide a small boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
+// You can set this option to 1 in situations where data will remain within closed environment
 // This option is useless on Little_Endian CPU (such as x86)
 //#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
 
@@ -108,6 +101,7 @@
 
 #ifdef _MSC_VER // Visual Studio
 # define inline __forceinline // Visual is not C99, but supports some kind of inline
+# include <intrin.h> // For Visual 2005
 # if LZ4_ARCH64 // 64-bit
 # pragma intrinsic(_BitScanForward64) // For Visual 2005
 # pragma intrinsic(_BitScanReverse64) // For Visual 2005
@@ -181,11 +175,11 @@ typedef struct _U64_S { U64 v; } U64_S;
 //**************************************
 #define MINMATCH 4
 
-#define HASH_LOG COMPRESSIONLEVEL
+#define HASH_LOG (MEMORY_USAGE-2)
 #define HASHTABLESIZE (1 << HASH_LOG)
 #define HASH_MASK (HASHTABLESIZE - 1)
 
-#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION>2?NOTCOMPRESSIBLE_CONFIRMATION:2)
+#define SKIPSTRENGTH (NOTCOMPRESSIBLE_DETECTIONLEVEL>2?NOTCOMPRESSIBLE_DETECTIONLEVEL:2)
 #define STACKLIMIT 13
 #define HEAPMODE (HASH_LOG>STACKLIMIT)  // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
 #define COPYLENGTH 8
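
[Editor's note] The two renamed tuning knobs above are consistent with each other: MEMORY_USAGE expresses the hash table footprint directly as N -> 2^N bytes, and HASH_LOG is (MEMORY_USAGE-2) because each of the 2^HASH_LOG slots holds a 4-byte entry. A standalone sketch of that arithmetic, assuming 4-byte table entries (the main() harness is illustrative, not part of the patch):

    #include <stdio.h>

    #define MEMORY_USAGE 14                /* default from the patch */
    #define HASH_LOG (MEMORY_USAGE-2)      /* 12 */

    int main(void)
    {
        unsigned long entries = 1UL << HASH_LOG;   /* 4096 slots   */
        unsigned long bytes   = entries * 4;       /* 4-byte slots */
        printf("%lu entries = %lu bytes\n", entries, bytes); /* 16384 = 2^14 */
        return 0;
    }
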
@@ -257,7 +251,7 @@ struct refTables
 //****************************
 #if LZ4_ARCH64
 
-inline static int LZ4_NbCommonBytes (register U64 val)
+static inline int LZ4_NbCommonBytes (register U64 val)
 {
 #if defined(LZ4_BIG_ENDIAN)
     #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -289,7 +283,7 @@ inline static int LZ4_NbCommonBytes (register U64 val)
 
 #else
 
-inline static int LZ4_NbCommonBytes (register U32 val)
+static inline int LZ4_NbCommonBytes (register U32 val)
 {
 #if defined(LZ4_BIG_ENDIAN)
     #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
@@ -321,25 +315,22 @@ inline static int LZ4_NbCommonBytes (register U32 val)
 #endif
 
 
-//****************************
-// Public functions
-//****************************
-
-int LZ4_compressBound(int isize)
-{
-    return (isize + (isize/255) + 16);
-}
-
-
 
 //******************************
 // Compression functions
 //******************************
 
-int LZ4_compressCtx(void** ctx,
+// LZ4_compressCtx :
+// -----------------
+// Compress 'isize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
+// If it cannot achieve it, compression will stop, and result of the function will be zero.
+// return : the number of bytes written in buffer 'dest', or 0 if the compression fails
+
+static inline int LZ4_compressCtx(void** ctx,
                     const char* source,
                     char* dest,
-                    int isize)
+                    int isize,
+                    int maxOutputSize)
 {
 #if HEAPMODE
     struct refTables *srt = (struct refTables *) (*ctx);
@@ -356,6 +347,7 @@ int LZ4_compressCtx(void** ctx,
 #define matchlimit (iend - LASTLITERALS)
 
     BYTE* op = (BYTE*) dest;
+    BYTE* const oend = op + maxOutputSize;
 
     int len, length;
     const int skipStrength = SKIPSTRENGTH;
@@ -408,17 +400,37 @@ int LZ4_compressCtx(void** ctx,
         while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }
 
         // Encode Literal length
-        length = ip - anchor;
+        length = (int)(ip - anchor);
         token = op++;
+        if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) >= oend) return 0;   // Check output limit
+#ifdef _MSC_VER
+        if (length>=(int)RUN_MASK)
+        {
+            int len = length-RUN_MASK;
+            *token=(RUN_MASK<<ML_BITS);
+            if (len>254)
+            {
+                do { *op++ = 255; len -= 255; } while (len>254);
+                *op++ = (BYTE)len;
+                memcpy(op, anchor, length);
+                op += length;
+                goto _next_match;
+            }
+            else
+                *op++ = (BYTE)len;
+        }
+        else *token = (length<<ML_BITS);
+#else
         if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
         else *token = (length<<ML_BITS);
+#endif
 
         // Copy Literals
         LZ4_BLINDCOPY(anchor, op, length);
 
 _next_match:
         // Encode Offset
-        LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);
+        LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
 
         // Start Counting
         ip+=MINMATCH; ref+=MINMATCH;    // MinMatch verified
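
[Editor's note] The new checks against 'oend' are what make the limited-output variant safe: before emitting each sequence, the encoder bounds the bytes it could write. For a literal run that is 1 token byte, roughly length/255 length-extension bytes (approximated as length>>8), the 'length' literal bytes themselves, the 2-byte match offset, and the LASTLITERALS reserve. A standalone sketch of the same budget (the function name is illustrative):

    #include <stddef.h>

    #define LASTLITERALS 5   /* trailing-literals reserve, as in lz4.c */

    /* Bytes the encoder may emit for a literal run of 'length' bytes
     * followed by a match offset: token + extension bytes + literals
     * + 2 offset bytes + reserve. Mirrors the oend check in the patch. */
    static size_t literal_run_budget(size_t length)
    {
        return length + (2 + 1 + LASTLITERALS) + (length >> 8);
    }
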
@@ -436,7 +448,8 @@ _next_match:
 _endCount:
 
         // Encode MatchLength
-        len = (ip - anchor);
+        len = (int)(ip - anchor);
+        if unlikely(op + (1 + LASTLITERALS) + (len>>8) >= oend) return 0;    // Check output limit
         if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
         else *token += len;
 
@@ -459,8 +472,8 @@ _endCount:
 _last_literals:
     // Encode Last Literals
     {
-        int lastRun = iend - anchor;
-        if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
+        int lastRun = (int)(iend - anchor);
+        if (((char*)op - dest) + lastRun + 1 + ((lastRun-15)/255) >= maxOutputSize) return 0;
         if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
         else *op++ = (lastRun<<ML_BITS);
         memcpy(op, anchor, iend - anchor);
@@ -479,10 +492,11 @@ _last_literals:
 #define HASH64KTABLESIZE (1U<<HASHLOG64K)
 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
-int LZ4_compress64kCtx(void** ctx,
+static inline int LZ4_compress64kCtx(void** ctx,
                     const char* source,
                     char* dest,
-                    int isize)
+                    int isize,
+                    int maxOutputSize)
 {
 #if HEAPMODE
     struct refTables *srt = (struct refTables *) (*ctx);
@@ -499,6 +513,7 @@ int LZ4_compress64kCtx(void** ctx,
 #define matchlimit (iend - LASTLITERALS)
 
     BYTE* op = (BYTE*) dest;
+    BYTE* const oend = op + maxOutputSize;
 
     int len, length;
     const int skipStrength = SKIPSTRENGTH;
@@ -542,7 +557,7 @@ int LZ4_compress64kCtx(void** ctx,
 
             forwardH = LZ4_HASH64K_VALUE(forwardIp);
             ref = base + HashTable[h];
-            HashTable[h] = ip - base;
+            HashTable[h] = (U16)(ip - base);
 
         } while (A32(ref) != A32(ip));
 
@@ -550,17 +565,37 @@ int LZ4_compress64kCtx(void** ctx,
         while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }
 
         // Encode Literal length
-        length = ip - anchor;
+        length = (int)(ip - anchor);
         token = op++;
+        if unlikely(op + length + (2 + 1 + LASTLITERALS) + (length>>8) >= oend) return 0;   // Check output limit
+#ifdef _MSC_VER
+        if (length>=(int)RUN_MASK)
+        {
+            int len = length-RUN_MASK;
+            *token=(RUN_MASK<<ML_BITS);
+            if (len>254)
+            {
+                do { *op++ = 255; len -= 255; } while (len>254);
+                *op++ = (BYTE)len;
+                memcpy(op, anchor, length);
+                op += length;
+                goto _next_match;
+            }
+            else
+                *op++ = (BYTE)len;
+        }
+        else *token = (length<<ML_BITS);
+#else
         if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
         else *token = (length<<ML_BITS);
+#endif
 
         // Copy Literals
         LZ4_BLINDCOPY(anchor, op, length);
 
 _next_match:
         // Encode Offset
-        LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);
+        LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));
 
         // Start Counting
         ip+=MINMATCH; ref+=MINMATCH;    // MinMatch verified
@@ -578,7 +613,8 @@ _next_match:
 _endCount:
 
         // Encode MatchLength
-        len = (ip - anchor);
+        len = (int)(ip - anchor);
+        if unlikely(op + (1 + LASTLITERALS) + (len>>8) >= oend) return 0;    // Check output limit
         if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
         else *token += len;
 
@@ -586,11 +622,11 @@ _endCount:
         if (ip > mflimit) { anchor = ip; break; }
 
         // Fill table
-        HashTable[LZ4_HASH64K_VALUE(ip-2)] = ip - 2 - base;
+        HashTable[LZ4_HASH64K_VALUE(ip-2)] = (U16)(ip - 2 - base);
 
         // Test next position
         ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
-        HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
+        HashTable[LZ4_HASH64K_VALUE(ip)] = (U16)(ip - base);
         if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }
 
         // Prepare next loop
@@ -601,8 +637,8 @@ _endCount:
 _last_literals:
     // Encode Last Literals
     {
-        int lastRun = iend - anchor;
-        if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
+        int lastRun = (int)(iend - anchor);
+        if (((char*)op - dest) + lastRun + 1 + ((lastRun)>>8) >= maxOutputSize) return 0;
         if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
         else *op++ = (lastRun<<ML_BITS);
         memcpy(op, anchor, iend - anchor);
@@ -614,26 +650,34 @@ _last_literals:
 }
 
 
-
-int LZ4_compress(const char* source,
-                char* dest,
-                int isize)
+int LZ4_compress_limitedOutput(const char* source,
+                char* dest,
+                int isize,
+                int maxOutputSize)
 {
 #if HEAPMODE
     void* ctx = malloc(sizeof(struct refTables));
     int result;
     if (isize < LZ4_64KLIMIT)
-        result = LZ4_compress64kCtx(&ctx, source, dest, isize);
-    else result = LZ4_compressCtx(&ctx, source, dest, isize);
+        result = LZ4_compress64kCtx(&ctx, source, dest, isize, maxOutputSize);
+    else result = LZ4_compressCtx(&ctx, source, dest, isize, maxOutputSize);
     free(ctx);
     return result;
 #else
-    if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);
-    return LZ4_compressCtx(NULL, source, dest, isize);
+    if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize, maxOutputSize);
+    return LZ4_compressCtx(NULL, source, dest, isize, maxOutputSize);
 #endif
 }
 
 
+int LZ4_compress(const char* source,
+                char* dest,
+                int isize)
+{
+    return LZ4_compress_limitedOutput(source, dest, isize, LZ4_compressBound(isize));
+}
+
+
 
 
 //****************************
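
[Editor's note] With this hunk the limited-output variant becomes the primary entry point, and the classic LZ4_compress() is a wrapper that sizes the worst case via LZ4_compressBound(), so it can no longer return 0 for lack of room. A minimal usage sketch (helper name and error handling are illustrative):

    #include <stdlib.h>
    #include "lz4.h"

    /* Compress 'len' bytes from 'src'; returns the compressed size,
     * or 0 on allocation failure. Caller frees *out. */
    static int compress_block(const char* src, int len, char** out)
    {
        int max = LZ4_compressBound(len);     /* len + len/255 + 16 */
        *out = malloc(max);
        if (*out == NULL) return 0;
        return LZ4_compress(src, *out, len);  /* always fits in 'max' */
    }
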
diff --git a/src/lib/eet/lz4/lz4.h b/src/lib/eet/lz4/lz4.h
index ebd62b69a4..e3df7bd70c 100644
--- a/src/lib/eet/lz4/lz4.h
+++ b/src/lib/eet/lz4/lz4.h
@@ -47,19 +47,22 @@ int LZ4_uncompress (const char* source, char* dest, int osize);
 
 /*
 LZ4_compress() :
+    Compresses 'isize' bytes from 'source' into 'dest'.
+    Destination buffer must be already allocated,
+    and must be sized to handle worst cases situations (input data not compressible)
+    Worst case size evaluation is provided by macro LZ4_compressBound()
+
     isize  : is the input size. Max supported value is ~1.9GB
     return : the number of bytes written in buffer dest
-             or 0 if the compression fails (if LZ4_COMPRESSMIN is set)
-    note : destination buffer must be already allocated.
-        destination buffer must be sized to handle worst cases situations (input data not compressible)
-        worst case size evaluation is provided by function LZ4_compressBound()
+
 
 LZ4_uncompress() :
     osize  : is the output size, therefore the original size
     return : the number of bytes read in the source buffer
              If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction
              This function never writes beyond dest + osize, and is therefore protected against malicious data packets
-    note : destination buffer must be already allocated
+    note : destination buffer must be already allocated.
+           its size must be a minimum of 'osize' bytes.
 */
 
 
@@ -67,7 +70,7 @@ LZ4_uncompress() :
 // Advanced Functions
 //****************************
 
-int LZ4_compressBound(int isize);
+#define LZ4_compressBound(isize) (isize + (isize/255) + 16)
 
 /*
 LZ4_compressBound() :
@@ -80,6 +83,21 @@ LZ4_compressBound() :
80*/ 83*/
81 84
82 85
86int LZ4_compress_limitedOutput (const char* source, char* dest, int isize, int maxOutputSize);
87
88/*
89LZ4_compress_limitedOutput() :
90 Compress 'isize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
91 If it cannot achieve it, compression will stop, and result of the function will be zero.
92 This function never writes outside of provided output buffer.
93
94 isize : is the input size. Max supported value is ~1.9GB
95 maxOutputSize : is the size of the destination buffer (which must be already allocated)
96 return : the number of bytes written in buffer 'dest'
97 or 0 if the compression fails
98*/
99
100
83int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); 101int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
84 102
85/* 103/*
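
[Editor's note] A minimal sketch of the new limited-output API from the hunk above, compressing into a fixed-size packet and detecting the does-not-fit case (packet size and names are illustrative):

    #include "lz4.h"

    enum { PACKET_SIZE = 4096 };

    /* Try to fit 'len' input bytes into a fixed-size packet.
     * Returns the compressed size, or 0 if the data did not fit
     * (in which case the packet contents are unusable). */
    static int pack(const char* src, int len, char packet[PACKET_SIZE])
    {
        return LZ4_compress_limitedOutput(src, packet, len, PACKET_SIZE);
    }
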
@@ -94,27 +112,6 @@ LZ4_uncompress_unknownOutputSize() :
 */
 
 
-int LZ4_compressCtx(void** ctx, const char* source, char* dest, int isize);
-int LZ4_compress64kCtx(void** ctx, const char* source, char* dest, int isize);
-
-/*
-LZ4_compressCtx() :
-    This function explicitly handles the CTX memory structure.
-    It avoids allocating/deallocating memory between each call, improving performance when malloc is heavily invoked.
-    This function is only useful when memory is allocated into the heap (HASH_LOG value beyond STACK_LIMIT)
-    Performance difference will be noticeable only when repetitively calling the compression function over many small segments.
-    Note : by default, memory is allocated into the stack, therefore "malloc" is not invoked.
-LZ4_compress64kCtx() :
-    Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
-    isize *Must* be <64KB, otherwise the output will be corrupted.
-
-    On first call : provide a *ctx=NULL; It will be automatically allocated.
-    On next calls : reuse the same ctx pointer.
-    Use different pointers for different threads when doing multi-threading.
-
-*/
-
-
 #if defined (__cplusplus)
 }
 #endif
diff --git a/src/lib/eet/lz4/lz4hc.c b/src/lib/eet/lz4/lz4hc.c
index cca755c26c..2ab507e1c9 100644
--- a/src/lib/eet/lz4/lz4hc.c
+++ b/src/lib/eet/lz4/lz4hc.c
@@ -68,12 +68,20 @@
 
 #ifdef _MSC_VER
 #define inline __forceinline // Visual is not C99, but supports some kind of inline
+#include <intrin.h> // For Visual 2005
+# if LZ4_ARCH64 // 64-bit
+# pragma intrinsic(_BitScanForward64) // For Visual 2005
+# pragma intrinsic(_BitScanReverse64) // For Visual 2005
+# else
+# pragma intrinsic(_BitScanForward) // For Visual 2005
+# pragma intrinsic(_BitScanReverse) // For Visual 2005
+# endif
 #endif
 
 #ifdef _MSC_VER // Visual Studio
-#define bswap16(x) _byteswap_ushort(x)
+#define lz4_bswap16(x) _byteswap_ushort(x)
 #else
-#define bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
+#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
 #endif
 
 
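[Editor's note] The bswap16 -> lz4_bswap16 rename is very likely the NetBSD 5.0 fix named in the commit message: NetBSD's system headers already provide a bswap16(), so defining an unprefixed macro of the same name broke the build there (this motivation is an inference, not stated in the patch). The renamed macro in isolation (the test harness is illustrative):

    #include <assert.h>

    #define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))

    int main(void)
    {
        assert(lz4_bswap16(0x1234u) == 0x3412u);  /* swaps the two bytes */
        return 0;
    }
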
@@ -174,8 +182,8 @@ typedef struct _U64_S { U64 v; } U64_S;
 #endif
 
 #if defined(LZ4_BIG_ENDIAN)
-#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = bswap16(v); d = (s) - v; }
-#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = bswap16(v); A16(p) = v; p+=2; }
+#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
+#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
 #else // Little Endian
 #define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
 #define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
@@ -350,7 +358,7 @@ inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const
             if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;
 _endCount:
 
-            if (ipt-ip > ml) { ml = ipt-ip; *matchpos = ref; }
+            if (ipt-ip > ml) { ml = (int)(ipt-ip); *matchpos = ref; }
         }
         ref = GETNEXT(ref);
     }
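
[Editor's note] Most of the lz4hc.c changes in this revision are (int) casts on pointer differences, as in the hunk above. Subtracting two pointers yields a ptrdiff_t, which is 64-bit on 64-bit targets, so assigning the result to the int variables used here narrows it; the casts make that narrowing explicit and silence compiler warnings (e.g. MSVC C4244), presumably safe because LZ4 inputs are far below 2GB. A standalone illustration (function name is illustrative):

    #include <stddef.h>

    static int match_length(const char* ip, const char* anchor)
    {
        ptrdiff_t d = ip - anchor;  /* 64-bit wide on LP64/LLP64 targets */
        return (int)d;              /* explicit narrowing; valid while d < 2^31 */
    }
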
@@ -366,7 +374,7 @@ inline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const
     INITBASE(base,hc4->base);
     const BYTE* ref;
     int nbAttempts = MAX_NB_ATTEMPTS;
-    int delta = ip-startLimit;
+    int delta = (int)(ip-startLimit);
 
     // First Match
     LZ4HC_Insert(hc4, ip);
@@ -399,7 +407,7 @@ _endCount:
 
             if ((ipt-startt) > longest)
             {
-                longest = ipt-startt;
+                longest = (int)(ipt-startt);
                 *matchpos = reft;
                 *startpos = startt;
             }
@@ -417,7 +425,7 @@ inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** an
     BYTE* token;
 
     // Encode Literal length
-    length = *ip - *anchor;
+    length = (int)(*ip - *anchor);
     token = (*op)++;
     if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; }
     else *token = (length<<ML_BITS);
@@ -426,7 +434,7 @@ inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** an
     LZ4_BLINDCOPY(*anchor, *op, length);
 
     // Encode Offset
-    LZ4_WRITE_LITTLEENDIAN_16(*op,*ip-ref);
+    LZ4_WRITE_LITTLEENDIAN_16(*op,(U16)(*ip-ref));
 
     // Encode MatchLength
     len = (int)(ml-MINMATCH);
@@ -519,8 +527,8 @@ _Search3:
             int correction;
             int new_ml = ml;
             if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
-            if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = start2 - ip + ml2 - MINMATCH;
-            correction = new_ml - (start2 - ip);
+            if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+            correction = new_ml - (int)(start2 - ip);
             if (correction > 0)
             {
                 start2 += correction;
@@ -543,8 +551,8 @@ _Search3:
         {
             int correction;
             if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
-            if (ip+ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
-            correction = ml - (start2 - ip);
+            if (ip+ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
+            correction = ml - (int)(start2 - ip);
             if (correction > 0)
             {
                 start2 += correction;
@@ -554,7 +562,7 @@ _Search3:
         }
         else
         {
-            ml = start2 - ip;
+            ml = (int)(start2 - ip);
         }
     }
     // Now, encode 2 sequences
@@ -570,7 +578,7 @@ _Search3:
     {
         if (start2 < ip+ml)
         {
-            int correction = (ip+ml) - start2;
+            int correction = (int)(ip+ml - start2);
             start2 += correction;
             ref2 += correction;
             ml2 -= correction;
@@ -607,8 +615,8 @@ _Search3:
         {
             int correction;
             if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
-            if (ip + ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
-            correction = ml - (start2 - ip);
+            if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
+            correction = ml - (int)(start2 - ip);
             if (correction > 0)
             {
                 start2 += correction;
@@ -618,7 +626,7 @@ _Search3:
         }
         else
         {
-            ml = start2 - ip;
+            ml = (int)(start2 - ip);
         }
     }
     LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
@@ -637,7 +645,7 @@ _Search3:
 
     // Encode Last Literals
     {
-        int lastRun = iend - anchor;
+        int lastRun = (int)(iend - anchor);
         if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
         else *op++ = (lastRun<<ML_BITS);
         memcpy(op, anchor, iend - anchor);