author    Carsten Haitzler <raster@rasterman.com>  2012-05-15 12:27:56 +0000
committer Carsten Haitzler <raster@rasterman.com>  2012-05-15 12:27:56 +0000
commit    6d8004a29dfa10c37a9ee55fb567c36d64a53a1d (patch)
tree      e6f127818774c8489f3106fcbce4a47e5d9c0899 /legacy/eet
parent    726b718812c5d9b966e8e52b2c9de92549f5ddfb (diff)

add lz4 compression options to eet. remove amalgamated build too.

SVN revision: 71111
Diffstat (limited to 'legacy/eet')
-rw-r--r--  legacy/eet/ChangeLog                4
-rw-r--r--  legacy/eet/configure.ac            18
-rw-r--r--  legacy/eet/src/lib/Eet.h           33
-rw-r--r--  legacy/eet/src/lib/Eet_private.h    2
-rw-r--r--  legacy/eet/src/lib/Makefile.am    130
-rw-r--r--  legacy/eet/src/lib/eet_image.c    160
-rw-r--r--  legacy/eet/src/lib/eet_lib.c      276
-rw-r--r--  legacy/eet/src/lib/lz4/lz4.c      819
-rw-r--r--  legacy/eet/src/lib/lz4/lz4.h      120
-rw-r--r--  legacy/eet/src/lib/lz4/lz4hc.c    663
-rw-r--r--  legacy/eet/src/lib/lz4/lz4hc.h     60
11 files changed, 2017 insertions, 268 deletions
diff --git a/legacy/eet/ChangeLog b/legacy/eet/ChangeLog
index 43e85b2e45..9df47e5991 100644
--- a/legacy/eet/ChangeLog
+++ b/legacy/eet/ChangeLog
@@ -587,6 +587,10 @@
587      587
588      588  	* Force destruction of all pending file when shuting down eet.
589      589
         590  2012-05-14 Carsten Haitzler (The Rasterman)
         591
         592  	* Add LZ4/LZ4HC compression & decompression capabilities
         593
590      594  2012-05-15 Cedric Bail
591      595
592      596  	* Make eet_dictionary thread safe.
diff --git a/legacy/eet/configure.ac b/legacy/eet/configure.ac
index 882fda8b49..8e9a0c3a74 100644
--- a/legacy/eet/configure.ac
+++ b/legacy/eet/configure.ac
@@ -72,23 +72,6 @@ want_signature="yes"
72 72
73requirement_eet="" 73requirement_eet=""
74 74
75
76### Additional options to configure
77
78# Amalgamation
79
80AC_ARG_ENABLE([amalgamation],
81 [AC_HELP_STRING([--enable-amalgamation], [enable generation of one single file with all source code in it, helps compiler optimizations.])],
82 [if test "x${enableval}" = "xyes"; then
83 do_amalgamation="yes"
84 else
85 do_amalgamation="no"
86 fi
87 ],
88 [do_amalgamation="no"]
89)
90AM_CONDITIONAL(EET_AMALGAMATION, test "x${do_amalgamation}" = "xyes")
91
92EFL_ENABLE_BIN([eet]) 75EFL_ENABLE_BIN([eet])
93 76
94# Old eet file format support 77# Old eet file format support
@@ -516,7 +499,6 @@ echo "------------------------------------------------------------------------"
516echo 499echo
517echo "Configuration Options Summary:" 500echo "Configuration Options Summary:"
518echo 501echo
519echo " Amalgamation.........: ${do_amalgamation}"
520echo " Secure layer.........: ${secure_layer}" 502echo " Secure layer.........: ${secure_layer}"
521if test "x${have_gnutls}" = "xyes" || test "x${have_openssl}" = "xyes" ; then 503if test "x${have_gnutls}" = "xyes" || test "x${have_openssl}" = "xyes" ; then
522 echo " Cipher support.....: ${have_cipher}" 504 echo " Cipher support.....: ${have_cipher}"
diff --git a/legacy/eet/src/lib/Eet.h b/legacy/eet/src/lib/Eet.h
index 6207294aae..c525a9b078 100644
--- a/legacy/eet/src/lib/Eet.h
+++ b/legacy/eet/src/lib/Eet.h
@@ -259,6 +259,39 @@ typedef enum _Eet_Error
259/** 259/**
260 * @} 260 * @}
261 */ 261 */
262
263/**
264 * @defgroup Eet_Compression Eet Compression Levels
265 * Compression modes/levels supported by Eet.
266 *
267 * @{
268 */
269
270/**
271 * @enum _Eet_Compression
272 * All the compression modes known by Eet.
273 */
274
275typedef enum _Eet_Compression
276{
277 EET_COMPRESSION_NONE = 0, /**< No compression at all */
278 EET_COMPRESSION_DEFAULT = 1, /**< Default compression (Zlib) */
279 EET_COMPRESSION_LOW = 2, /**< Fast but minimal compression (Zlib) */
280 EET_COMPRESSION_MED = 6, /**< Medium compression level (Zlib) */
281 EET_COMPRESSION_HI = 9, /**< Slow but high compression level (Zlib) */
282 EET_COMPRESSION_VERYFAST = 10, /**< Very fast, but lower compression ratio (LZ4HC) */
283 EET_COMPRESSION_SUPERFAST = 11, /**< Very fast, but lower compression ratio (faster to compress than EET_COMPRESSION_VERYFAST) (LZ4) */
284
285 EET_COMPRESSION_LOW2 = 3, /**< Space filler for compatibility. Don't use it */
286 EET_COMPRESSION_MED1 = 4, /**< Space filler for compatibility. Don't use it */
287 EET_COMPRESSION_MED2 = 5, /**< Space filler for compatibility. Don't use it */
288 EET_COMPRESSION_HI1 = 7, /**< Space filler for compatibility. Don't use it */
289 EET_COMPRESSION_HI2 = 8 /**< Space filler for compatibility. Don't use it */
290} Eet_Compression; /**< Eet compression modes */
291
292/**
293 * @}
294 */
262 295
263/** 296/**
264 * Initialize the EET library. 297 * Initialize the EET library.
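Note: the new levels slot into the integer compress argument that Eet entry points already take, so callers opt in per write. A minimal, illustrative sketch (not part of this commit), assuming the usual eet_write() prototype:

#include <Eet.h>
#include <string.h>

/* Illustrative only: write the same payload with three different
 * compression modes. Values 0..9 keep their historical zlib meaning;
 * 10 and 11 select the LZ4HC/LZ4 paths added by this commit. */
static void
write_samples(Eet_File *ef, const char *payload)
{
   int len = (int)strlen(payload) + 1;

   eet_write(ef, "zlib/default",   payload, len, EET_COMPRESSION_DEFAULT);
   eet_write(ef, "lz4hc/veryfast", payload, len, EET_COMPRESSION_VERYFAST);
   eet_write(ef, "lz4/superfast",  payload, len, EET_COMPRESSION_SUPERFAST);
}

Existing callers passing 0 or 1 are unaffected: those values are EET_COMPRESSION_NONE and EET_COMPRESSION_DEFAULT, and zlib levels 2..9 keep their numeric meaning via the filler enum entries.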
diff --git a/legacy/eet/src/lib/Eet_private.h b/legacy/eet/src/lib/Eet_private.h
index afd4685dbe..20c12d75be 100644
--- a/legacy/eet/src/lib/Eet_private.h
+++ b/legacy/eet/src/lib/Eet_private.h
@@ -125,6 +125,8 @@ struct _Eet_File_Node
125 unsigned int size; 125 unsigned int size;
126 unsigned int data_size; 126 unsigned int data_size;
127 127
128 unsigned char compression_type;
129
128 unsigned char free_name : 1; 130 unsigned char free_name : 1;
129 unsigned char compression : 1; 131 unsigned char compression : 1;
130 unsigned char ciphered : 1; 132 unsigned char ciphered : 1;
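Note: compression_type records the full Eet_Compression value per entry; on disk it rides in the spare bits of the existing per-entry flag word (see the eet_flush2()/eet_internal_read2() hunks below). A sketch of that packing, shown here only for illustration:

/* Sketch of the per-entry flag packing used by eet_flush2() and
 * eet_internal_read2() in this patch (illustration, not tree code). */
static unsigned int
efn_flag_pack(int alias, int ciphered, int compression, int compression_type)
{
   unsigned int flag;

   flag  = (alias << 2) | (ciphered << 1) | compression;
   flag |= compression_type << 3;   /* e.g. EET_COMPRESSION_SUPERFAST */
   return flag;
}

static void
efn_flag_unpack(unsigned int flag, int *alias, int *ciphered,
                int *compression, int *compression_type)
{
   *compression      = (flag & 0x1) ? 1 : 0;
   *ciphered         = (flag & 0x2) ? 1 : 0;
   *alias            = (flag & 0x4) ? 1 : 0;
   *compression_type = (flag >> 3) & 0xff;
}

Entries written by older versions carry zeros in bits 3 and up, so they decode as compression_type 0 and fall through to the zlib path of the new switches.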
diff --git a/legacy/eet/src/lib/Makefile.am b/legacy/eet/src/lib/Makefile.am
index ae60168596..952cc9de37 100644
--- a/legacy/eet/src/lib/Makefile.am
+++ b/legacy/eet/src/lib/Makefile.am
@@ -4,6 +4,7 @@ MAINTAINERCLEANFILES = Makefile.in
4AM_CPPFLAGS = \ 4AM_CPPFLAGS = \
5-I. \ 5-I. \
6-I$(top_srcdir)/src/lib \ 6-I$(top_srcdir)/src/lib \
7-I$(top_srcdir)/src/lib/lz4 \
7-I$(top_builddir)/src/lib \ 8-I$(top_builddir)/src/lib \
8-DPACKAGE_BIN_DIR=\"$(bindir)\" \ 9-DPACKAGE_BIN_DIR=\"$(bindir)\" \
9-DPACKAGE_LIB_DIR=\"$(libdir)\" \ 10-DPACKAGE_LIB_DIR=\"$(libdir)\" \
@@ -21,7 +22,7 @@ includesdir = $(includedir)/eet-@VMAJ@
21 22
22lib_LTLIBRARIES = libeet.la 23lib_LTLIBRARIES = libeet.la
23 24
24base_sources = \ 25libeet_la_SOURCES = \
25eet_alloc.c \ 26eet_alloc.c \
26eet_lib.c \ 27eet_lib.c \
27eet_data.c \ 28eet_data.c \
@@ -30,128 +31,11 @@ eet_cipher.c \
30eet_dictionary.c \ 31eet_dictionary.c \
31eet_node.c \ 32eet_node.c \
32eet_utils.c \ 33eet_utils.c \
33eet_connection.c 34eet_connection.c \
34 35lz4/lz4.c \
35if EET_AMALGAMATION 36lz4/lz4.h \
36nodist_libeet_la_SOURCES = eet_amalgamation.c 37lz4/lz4hc.c \
37 38lz4/lz4hc.h
38eet_amalgamation.c: $(base_sources) Makefile
39 -rm -f eet_amalgamation.c
40
41 @echo "#ifdef HAVE_CONFIG_H" >> eet_amalgamation.c
42 @echo "# include \"config.h\"" >> eet_amalgamation.c
43 @echo "#endif" >> eet_amalgamation.c
44
45 @echo "#ifdef HAVE_ALLOCA_H" >> eet_amalgamation.c
46 @echo "# include <alloca.h>" >> eet_amalgamation.c
47 @echo "#elif defined __GNUC__" >> eet_amalgamation.c
48 @echo "# define alloca __builtin_alloca" >> eet_amalgamation.c
49 @echo "#elif defined _AIX" >> eet_amalgamation.c
50 @echo "# define alloca __alloca" >> eet_amalgamation.c
51 @echo "#elif defined _MSC_VER" >> eet_amalgamation.c
52 @echo "# include <malloc.h>" >> eet_amalgamation.c
53 @echo "# define alloca _alloca" >> eet_amalgamation.c
54 @echo "#else" >> eet_amalgamation.c
55 @echo "# include <stddef.h>" >> eet_amalgamation.c
56 @echo "# ifdef __cplusplus" >> eet_amalgamation.c
57 @echo "#extern \"C\"" >> eet_amalgamation.c
58 @echo "# endif" >> eet_amalgamation.c
59 @echo "#void *alloca (size_t);" >> eet_amalgamation.c
60 @echo "#endif" >> eet_amalgamation.c
61
62 @echo "#include <stdio.h>" >> eet_amalgamation.c
63 @echo "#include <string.h>" >> eet_amalgamation.c
64 @echo "#include <math.h>" >> eet_amalgamation.c
65 @echo "#include <ctype.h>" >> eet_amalgamation.c
66 @echo "#include <limits.h>" >> eet_amalgamation.c
67 @echo "#include <sys/types.h>" >> eet_amalgamation.c
68 @echo "#include <sys/stat.h>" >> eet_amalgamation.c
69 @echo "#ifdef HAVE_SIGNATURE" >> eet_amalgamation.c
70 @echo "# include <sys/mman.h>" >> eet_amalgamation.c
71 @echo "#endif" >> eet_amalgamation.c
72 @echo "#include <setjmp.h>" >> eet_amalgamation.c
73 @echo "#include <errno.h>" >> eet_amalgamation.c
74 @echo "#include <time.h>" >> eet_amalgamation.c
75 @echo "#include <fnmatch.h>" >> eet_amalgamation.c
76 @echo "#include <fcntl.h>" >> eet_amalgamation.c
77
78 @echo "#ifdef _WIN32" >> eet_amalgamation.c
79 @echo "# include <winsock2.h>" >> eet_amalgamation.c
80 @echo "#endif" >> eet_amalgamation.c
81
82 @echo "#ifndef _MSC_VER" >> eet_amalgamation.c
83 @echo "# include <unistd.h>" >> eet_amalgamation.c
84 @echo "#endif" >> eet_amalgamation.c
85
86 @echo "#ifdef HAVE_NETINET_IN_H" >> eet_amalgamation.c
87 @echo "# include <netinet/in.h>" >> eet_amalgamation.c
88 @echo "#endif" >> eet_amalgamation.c
89
90 @echo "#ifdef HAVE_GNUTLS" >> eet_amalgamation.c
91 @echo "# include <gnutls/gnutls.h>" >> eet_amalgamation.c
92 @echo "# include <gcrypt.h>" >> eet_amalgamation.c
93 @echo "#endif" >> eet_amalgamation.c
94
95 @echo "#ifdef HAVE_OPENSSL" >> eet_amalgamation.c
96 @echo "# include <openssl/err.h>" >> eet_amalgamation.c
97 @echo "# include <openssl/evp.h>" >> eet_amalgamation.c
98 @echo "# include <openssl/sha.h>" >> eet_amalgamation.c
99 @echo "#endif" >> eet_amalgamation.c
100
101 @echo "#ifdef HAVE_SIGNATURE" >> eet_amalgamation.c
102 @echo "# ifdef HAVE_GNUTLS" >> eet_amalgamation.c
103 @echo "# include <gnutls/gnutls.h>" >> eet_amalgamation.c
104 @echo "# include <gnutls/x509.h>" >> eet_amalgamation.c
105 @echo "# else" >> eet_amalgamation.c
106 @echo "# include <openssl/rsa.h>" >> eet_amalgamation.c
107 @echo "# include <openssl/objects.h>" >> eet_amalgamation.c
108 @echo "# include <openssl/err.h>" >> eet_amalgamation.c
109 @echo "# include <openssl/ssl.h>" >> eet_amalgamation.c
110 @echo "# include <openssl/dh.h>" >> eet_amalgamation.c
111 @echo "# include <openssl/dsa.h>" >> eet_amalgamation.c
112 @echo "# include <openssl/evp.h>" >> eet_amalgamation.c
113 @echo "# include <openssl/x509.h>" >> eet_amalgamation.c
114 @echo "# include <openssl/pem.h>" >> eet_amalgamation.c
115 @echo "# endif" >> eet_amalgamation.c
116 @echo "#endif" >> eet_amalgamation.c
117
118 @echo "#ifdef HAVE_CIPHER" >> eet_amalgamation.c
119 @echo "# ifdef HAVE_GNUTLS" >> eet_amalgamation.c
120 @echo "# include <gnutls/x509.h>" >> eet_amalgamation.c
121 @echo "# include <gcrypt.h>" >> eet_amalgamation.c
122 @echo "# else" >> eet_amalgamation.c
123 @echo "# include <openssl/evp.h>" >> eet_amalgamation.c
124 @echo "# include <openssl/hmac.h>" >> eet_amalgamation.c
125 @echo "# include <openssl/rand.h>" >> eet_amalgamation.c
126 @echo "# endif" >> eet_amalgamation.c
127 @echo "#endif" >> eet_amalgamation.c
128
129 @echo "#include <zlib.h>" >> eet_amalgamation.c
130 @echo "#include <jpeglib.h>" >> eet_amalgamation.c
131
132 @echo "#ifdef HAVE_EVIL" >> eet_amalgamation.c
133 @echo "# include <Evil.h>" >> eet_amalgamation.c
134 @echo "#endif" >> eet_amalgamation.c
135
136 @echo "#include <Eet.h>" >> eet_amalgamation.c
137
138 @echo "#include \"Eet_private.h\"" >> eet_amalgamation.c
139 @echo "#include \"Eet.h\"" >> eet_amalgamation.c
140
141 @for f in $(base_sources); do \
142 if [ `echo $$f | sed -e 's/^...\(.\).*/\1/'` != '/' ]; then \
143 file="$(srcdir)/$$f" ; \
144 else \
145 file="$$f" ; \
146 fi ; \
147 echo "/* file: $$file */" >> eet_amalgamation.c; \
148 grep -v -e '^# *include \+.\(config\|\|Evil\|Eina\|stdio\|string\|math\|ctype\|limits\|sys/types\|sys/stat\|sys/mman\|setjmp\|errno\|time\|fnmatch\|fcntl\|winsock2\|unistd\|netinet/in\|gnutls/gnutls\|gcrypt\|gnutls/x509\|openssl/rsa\|openssl/objects\|openssl/err\|openssl/ssl\|openssl/dh\|openssl/dsa\|openssl/evp\|openssl/pem\|openssl/sha\|openssl/hmac\|openssl/x509\|openssl/rand\|zlib\|jpeglib\|Eet_private\|Eet\)[.]h.*' $$file >> eet_amalgamation.c; \
149 done
150 @echo "eet_amalgamation.c generated"
151
152else
153libeet_la_SOURCES = $(base_sources)
154endif
155 39
156libeet_la_CFLAGS = @EET_CFLAGS@ @DEBUG_CFLAGS@ 40libeet_la_CFLAGS = @EET_CFLAGS@ @DEBUG_CFLAGS@
157libeet_la_LIBADD = @GNUTLS_LIBS@ @OPENSSL_LIBS@ @EFL_COVERAGE_LIBS@ @EET_LIBS@ @EINA_LIBS@ @EVIL_LIBS@ -lz -ljpeg -lm 41libeet_la_LIBADD = @GNUTLS_LIBS@ @OPENSSL_LIBS@ @EFL_COVERAGE_LIBS@ @EET_LIBS@ @EINA_LIBS@ @EVIL_LIBS@ -lz -ljpeg -lm
diff --git a/legacy/eet/src/lib/eet_image.c b/legacy/eet/src/lib/eet_image.c
index b622236e7a..00acda8d2b 100644
--- a/legacy/eet/src/lib/eet_image.c
+++ b/legacy/eet/src/lib/eet_image.c
@@ -40,6 +40,9 @@ void *alloca(size_t);
40#include "Eet.h" 40#include "Eet.h"
41#include "Eet_private.h" 41#include "Eet_private.h"
42 42
43#include "lz4.h"
44#include "lz4hc.h"
45
43/*---*/ 46/*---*/
44 47
45typedef struct _JPEG_error_mgr *emptr; 48typedef struct _JPEG_error_mgr *emptr;
@@ -742,55 +745,68 @@ eet_data_image_lossless_compressed_convert(int *size,
742 } 745 }
743 746
744 { 747 {
745 unsigned char *d; 748 unsigned char *d, *comp;
746 unsigned char *comp; 749 int *header, ret, ok = 1;
747 int *header; 750 uLongf buflen = 0;
748 int ret;
749 uLongf buflen;
750
751 d = malloc((w * h * 4) + (8 * 4));
752 if (!d)
753 return NULL;
754 751
755 buflen = (((w * h * 101) / 100) + 3) * 4; 752 buflen = (((w * h * 101) / 100) + 3) * 4;
753 ret = LZ4_compressBound((w * h * 4));
754 if ((ret > 0) && ((uLongf)ret > buflen)) buflen = ret;
755
756 comp = malloc(buflen); 756 comp = malloc(buflen);
757 if (!comp) 757 if (!comp) return NULL;
758
759 switch (compression)
760 {
761 case EET_COMPRESSION_VERYFAST:
762 ret = LZ4_compressHC((const char *)data, (char *)comp,
763 (w * h * 4));
764 if (ret <= 0) ok = 0;
765 buflen = ret;
766 break;
767 case EET_COMPRESSION_SUPERFAST:
768 ret = LZ4_compress((const char *)data, (char *)comp,
769 (w * h * 4));
770 if (ret <= 0) ok = 0;
771 buflen = ret;
772 break;
773 default: /* zlib etc. */
774 ret = compress2((Bytef *)comp, &buflen, (Bytef *)(data),
775 (uLong)(w * h * 4), compression);
776 if (ret != Z_OK) ok = 0;
777 break;
778 }
779 if ((!ok) || (buflen > (w * h * 4)))
758 { 780 {
759 free(d); 781 free(comp);
782 *size = -1;
760 return NULL; 783 return NULL;
761 } 784 }
762 785
763 header = (int *)d; 786 d = malloc((8 * sizeof(int)) + buflen);
764 memset(d, 0, 32); 787 if (!d)
788 {
789 free(comp);
790 return NULL;
791 }
765 792
793 header = (int *)d;
794 memset(d, 0, 8 * sizeof(int));
766 header[0] = 0xac1dfeed; 795 header[0] = 0xac1dfeed;
767 header[1] = w; 796 header[1] = w;
768 header[2] = h; 797 header[2] = h;
769 header[3] = alpha; 798 header[3] = alpha;
770 header[4] = compression; 799 header[4] = compression;
771 memcpy(d + 32, data, w * h * 4);
772 800
773 if (_eet_image_words_bigendian) 801 if (_eet_image_words_bigendian)
774 { 802 {
775 unsigned int i; 803 unsigned int i;
776 804
777 for (i = 0; i < ((w * h) + 8); i++) SWAP32(header[i]); 805 for (i = 0; i < ((w * h) + 8); i++) SWAP32(header[i]);
778 } 806 }
779 807
780 ret = compress2((Bytef *)comp, &buflen, 808 memcpy(d + (8 * sizeof(int)), comp, buflen);
781 (Bytef *)(d + 32), 809 *size = (8 * sizeof(int)) + buflen;
782 (uLong)(w * h * 4),
783 compression);
784 if (ret != Z_OK || buflen > (w * h * 4))
785 {
786 free(comp);
787 free(d);
788 *size = -1;
789 return NULL;
790 }
791
792 memcpy(d + 32, comp, buflen);
793 *size = (8 * 4) + buflen;
794 free(comp); 810 free(comp);
795 return d; 811 return d;
796 } 812 }
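Note: the rewritten converter keeps the existing container for lossless compressed images: an eight-int header followed by the compressed pixel stream at byte offset 32, with header[4] now recording which Eet_Compression mode produced the payload; if the codec fails or does not shrink the data below w * h * 4 bytes, the function bails out (*size = -1, NULL return). A purely illustrative view of that layout (the struct and its name are hypothetical; the code above writes the ints directly):

/* Illustrative view of a compressed lossless image entry after this
 * patch. The compressed stream starts at byte 32. */
typedef struct
{
   unsigned int magic;       /* 0xac1dfeed                          */
   unsigned int w, h;        /* pixel dimensions                    */
   unsigned int alpha;       /* non-zero if the image carries alpha */
   unsigned int compression; /* Eet_Compression mode used to encode */
   unsigned int reserved[3]; /* zeroed padding up to 32 bytes       */
} Eet_Image_Comp_Header;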
@@ -1577,35 +1593,75 @@ _eet_data_image_decode_inside(const void *data,
1577 w, h, row_stride); 1593 w, h, row_stride);
1578 else 1594 else
1579 { 1595 {
1580 if (src_h == h && src_w == w && row_stride == src_w * 4) 1596 if ((src_h == h) && (src_w == w) && (row_stride == src_w * 4))
1581 { 1597 {
1582 uLongf dlen; 1598 switch (comp)
1583 1599 {
1584 dlen = w * h * 4; 1600 case EET_COMPRESSION_VERYFAST:
1585 uncompress((Bytef *)d, &dlen, (Bytef *)body, 1601 case EET_COMPRESSION_SUPERFAST:
1586 (uLongf)(size - 32)); 1602 if (LZ4_uncompress((const char *)body,
1603 (char *)d, w * h * 4)
1604 != (size - 32)) return 0;
1605 break;
1606 default:
1607 {
1608 uLongf dlen = w * h * 4;
1609
1610 if (uncompress((Bytef *)d, &dlen, (Bytef *)body,
1611 (uLongf)(size - 32)) != Z_OK)
1612 return 0;
1613 }
1614 break;
1615 }
1587 } 1616 }
1588 else 1617 else
1589 { 1618 {
1590 Bytef *dtmp; 1619 switch (comp)
1591 uLongf dlen = src_w * src_h * 4; 1620 {
1592 1621 case EET_COMPRESSION_VERYFAST:
1593 /* FIXME: This could create a huge alloc. So compressed 1622 case EET_COMPRESSION_SUPERFAST:
1594 data and tile could not always work. */ 1623 {
1595 dtmp = malloc(dlen); 1624 char *dtmp;
1596 if (!dtmp) 1625
1597 return 0; 1626 dtmp = malloc(src_w * src_h * 4);
1598 1627 if (!dtmp) return 0;
1599 uncompress(dtmp, &dlen, (Bytef *)body, (uLongf)(size - 32)); 1628 if (LZ4_uncompress((const char *)body,
1600 1629 dtmp, w * h * 4)
1601 _eet_data_image_copy_buffer((unsigned int *)dtmp, 1630 != (size - 32))
1602 src_x, src_y, src_w, d, 1631 {
1603 w, h, row_stride); 1632 free(dtmp);
1604 1633 return 0;
1605 free(dtmp); 1634 }
1635 _eet_data_image_copy_buffer((unsigned int *)dtmp,
1636 src_x, src_y, src_w, d,
1637 w, h, row_stride);
1638 free(dtmp);
1639 }
1640 break;
1641 default:
1642 {
1643 Bytef *dtmp;
1644 uLongf dlen = src_w * src_h * 4;
1645
1646 /* FIXME: This could create a huge alloc. So
1647 compressed data and tile could not always work.*/
1648 dtmp = malloc(dlen);
1649 if (!dtmp) return 0;
1650
1651 if (uncompress(dtmp, &dlen, (Bytef *)body,
1652 (uLongf)(size - 32)) != Z_OK)
1653 {
1654 free(dtmp);
1655 return 0;
1656 }
1657 _eet_data_image_copy_buffer((unsigned int *)dtmp,
1658 src_x, src_y, src_w, d,
1659 w, h, row_stride);
1660 free(dtmp);
1661 }
1662 }
1606 } 1663 }
1607 } 1664 }
1608
1609 /* Fix swapiness. */ 1665 /* Fix swapiness. */
1610 if (_eet_image_words_bigendian) 1666 if (_eet_image_words_bigendian)
1611 { 1667 {
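Note: on decode, the new LZ4 branches rely on an API asymmetry worth spelling out: LZ4_uncompress() is passed the expected decompressed size and returns the number of compressed bytes it consumed, so success is checked against size - 32 (the entry size minus the header). A condensed sketch of that check, not code from the tree:

#include "lz4.h"

/* Condensed sketch of the LZ4 success check used above: 'body' points
 * just past the 32-byte header, 'size' is the whole entry including it.
 * Returns 1 on success, 0 on a corrupt or truncated stream. */
static int
inflate_lz4_pixels(const void *body, int size, unsigned int *pixels,
                   unsigned int w, unsigned int h)
{
   /* LZ4_uncompress() is told how many bytes to produce and reports how
    * many compressed bytes it read; both must line up exactly. */
   if (LZ4_uncompress((const char *)body, (char *)pixels, w * h * 4)
       != (size - 32))
     return 0;
   return 1;
}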
diff --git a/legacy/eet/src/lib/eet_lib.c b/legacy/eet/src/lib/eet_lib.c
index a3295f47af..a350d11f3c 100644
--- a/legacy/eet/src/lib/eet_lib.c
+++ b/legacy/eet/src/lib/eet_lib.c
@@ -66,6 +66,9 @@ GCRY_THREAD_OPTION_PTHREAD_IMPL;
66#include "Eet.h" 66#include "Eet.h"
67#include "Eet_private.h" 67#include "Eet_private.h"
68 68
69#include "lz4.h"
70#include "lz4hc.h"
71
69#ifndef O_BINARY 72#ifndef O_BINARY
70# define O_BINARY 0 73# define O_BINARY 0
71#endif 74#endif
@@ -442,6 +445,7 @@ eet_flush2(Eet_File *ef)
442 int ibuf[EET_FILE2_DIRECTORY_ENTRY_COUNT]; 445 int ibuf[EET_FILE2_DIRECTORY_ENTRY_COUNT];
443 446
444 flag = (efn->alias << 2) | (efn->ciphered << 1) | efn->compression; 447 flag = (efn->alias << 2) | (efn->ciphered << 1) | efn->compression;
448 flag |= efn->compression_type << 3;
445 449
446 ibuf[0] = (int)htonl((unsigned int)efn->offset); 450 ibuf[0] = (int)htonl((unsigned int)efn->offset);
447 ibuf[1] = (int)htonl((unsigned int)efn->size); 451 ibuf[1] = (int)htonl((unsigned int)efn->size);
@@ -864,6 +868,7 @@ eet_internal_read2(Eet_File *ef)
864 efn->compression = flag & 0x1 ? 1 : 0; 868 efn->compression = flag & 0x1 ? 1 : 0;
865 efn->ciphered = flag & 0x2 ? 1 : 0; 869 efn->ciphered = flag & 0x2 ? 1 : 0;
866 efn->alias = flag & 0x4 ? 1 : 0; 870 efn->alias = flag & 0x4 ? 1 : 0;
871 efn->compression_type = (flag >> 3) & 0xff;
867 872
868#define EFN_TEST(Test, Ef, Efn) \ 873#define EFN_TEST(Test, Ef, Efn) \
869 if (eet_test_close(Test, Ef)) \ 874 if (eet_test_close(Test, Ef)) \
@@ -1719,7 +1724,7 @@ eet_read_cipher(Eet_File *ef,
1719 void *tmp_data = NULL; 1724 void *tmp_data = NULL;
1720 void *data_deciphered = NULL; 1725 void *data_deciphered = NULL;
1721 unsigned int data_deciphered_sz = 0; 1726 unsigned int data_deciphered_sz = 0;
1722 int free_tmp = 0; 1727 int free_tmp = 0, ret;
1723 int compr_size = efn->size; 1728 int compr_size = efn->size;
1724 uLongf dlen; 1729 uLongf dlen;
1725 1730
@@ -1765,12 +1770,27 @@ eet_read_cipher(Eet_File *ef,
1765 1770
1766 /* decompress it */ 1771 /* decompress it */
1767 dlen = size; 1772 dlen = size;
1768 if (uncompress((Bytef *)data, &dlen, 1773 switch (efn->compression_type)
1769 tmp_data, (uLongf)compr_size))
1770 { 1774 {
1771 if (free_tmp) 1775 case EET_COMPRESSION_VERYFAST:
1772 free(tmp_data); 1776 case EET_COMPRESSION_SUPERFAST:
1773 goto on_error; 1777 ret = LZ4_uncompress(tmp_data, data, dlen);
1778 if (ret != compr_size)
1779 {
1780 if (free_tmp)
1781 free(tmp_data);
1782 goto on_error;
1783 }
1784 break;
1785 default:
1786 if (uncompress((Bytef *)data, &dlen,
1787 tmp_data, (uLongf)compr_size) != Z_OK)
1788 {
1789 if (free_tmp)
1790 free(tmp_data);
1791 goto on_error;
1792 }
1793 break;
1774 } 1794 }
1775 1795
1776 if (free_tmp) 1796 if (free_tmp)
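Note: the same per-entry dispatch appears again in eet_read_direct() and eet_alias_get() below. Condensing the repeated switch into one hypothetical helper (illustration only, no such function exists in the patch):

#include <Eet.h>
#include <zlib.h>
#include "lz4.h"

/* Hypothetical summary of the decompression dispatch repeated in this
 * patch: pick the codec from the entry's compression_type.
 * Returns 0 on success, -1 on failure. */
static int
entry_decompress(unsigned char type, const void *src, int compr_size,
                 void *dst, int dst_size)
{
   switch (type)
     {
      case EET_COMPRESSION_VERYFAST:   /* written with LZ4HC            */
      case EET_COMPRESSION_SUPERFAST:  /* written with LZ4; both decode as LZ4 */
        if (LZ4_uncompress((const char *)src, (char *)dst, dst_size)
            != compr_size) return -1;
        return 0;

      default:                         /* zlib levels 1..9 */
        {
           uLongf dlen = (uLongf)dst_size;

           if (uncompress((Bytef *)dst, &dlen, (const Bytef *)src,
                          (uLong)compr_size) != Z_OK) return -1;
           return 0;
        }
     }
}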
@@ -1821,7 +1841,7 @@ eet_read_direct(Eet_File *ef,
1821{ 1841{
1822 Eet_File_Node *efn; 1842 Eet_File_Node *efn;
1823 const char *data = NULL; 1843 const char *data = NULL;
1824 int size = 0; 1844 int size = 0, ret;
1825 1845
1826 if (size_ret) 1846 if (size_ret)
1827 *size_ret = 0; 1847 *size_ret = 0;
@@ -1862,23 +1882,46 @@ eet_read_direct(Eet_File *ef,
1862 /* handle alias case */ 1882 /* handle alias case */
1863 if (efn->compression) 1883 if (efn->compression)
1864 { 1884 {
1885 const void *retptr;
1865 char *tmp; 1886 char *tmp;
1866 int compr_size = efn->size; 1887 int compr_size = efn->size;
1867 uLongf dlen; 1888 uLongf dlen;
1868 1889
1869 tmp = alloca(sizeof (compr_size)); 1890 tmp = malloc(compr_size);
1870 dlen = size; 1891 if (!tmp) goto on_error;
1871 1892 switch (efn->compression_type)
1872 if (uncompress((Bytef *)tmp, &dlen, (Bytef *)data, 1893 {
1873 (uLongf)compr_size)) 1894 case EET_COMPRESSION_VERYFAST:
1874 goto on_error; 1895 case EET_COMPRESSION_SUPERFAST:
1875 1896 ret = LZ4_uncompress(data, tmp, size);
1897 if (ret != compr_size)
1898 {
1899 free(tmp);
1900 goto on_error;
1901 }
1902 break;
1903 default:
1904 dlen = size;
1905
1906 if (uncompress((Bytef *)tmp, &dlen, (Bytef *)data,
1907 (uLongf)compr_size))
1908 {
1909 free(tmp);
1910 goto on_error;
1911 }
1912 }
1913
1876 if (tmp[compr_size - 1] != '\0') 1914 if (tmp[compr_size - 1] != '\0')
1877 goto on_error; 1915 {
1878 1916 free(tmp);
1917 goto on_error;
1918 }
1919
1879 UNLOCK_FILE(ef); 1920 UNLOCK_FILE(ef);
1880 1921
1881 return eet_read_direct(ef, tmp, size_ret); 1922 retptr = eet_read_direct(ef, tmp, size_ret);
1923 free(tmp);
1924 return retptr;
1882 } 1925 }
1883 1926
1884 if (!data) 1927 if (!data)
@@ -1893,8 +1936,7 @@ eet_read_direct(Eet_File *ef,
1893 } 1936 }
1894 else 1937 else
1895 /* uncompressed data */ 1938 /* uncompressed data */
1896 if (efn->compression == 0 1939 if ((efn->compression == 0) && (efn->ciphered == 0))
1897 && efn->ciphered == 0)
1898 data = efn->data ? efn->data : ef->data + efn->offset; /* compressed data */ 1940 data = efn->data ? efn->data : ef->data + efn->offset; /* compressed data */
1899 else 1941 else
1900 data = NULL; 1942 data = NULL;
@@ -1918,7 +1960,7 @@ eet_alias_get(Eet_File *ef,
1918{ 1960{
1919 Eet_File_Node *efn; 1961 Eet_File_Node *efn;
1920 const char *data = NULL; 1962 const char *data = NULL;
1921 int size = 0; 1963 int size = 0, ret;
1922 1964
1923 /* check to see its' an eet file pointer */ 1965 /* check to see its' an eet file pointer */
1924 if (eet_check_pointer(ef)) 1966 if (eet_check_pointer(ef))
@@ -1955,23 +1997,43 @@ eet_alias_get(Eet_File *ef,
1955 /* handle alias case */ 1997 /* handle alias case */
1956 if (efn->compression) 1998 if (efn->compression)
1957 { 1999 {
2000 const char *retptr;
1958 char *tmp; 2001 char *tmp;
1959 int compr_size = efn->size; 2002 int compr_size = efn->size;
1960 uLongf dlen; 2003 uLongf dlen;
1961 2004
1962 tmp = alloca(sizeof (compr_size)); 2005 tmp = malloc(compr_size);
1963 dlen = size; 2006 if (!tmp) goto on_error;
1964 2007 switch (efn->compression_type)
1965 if (uncompress((Bytef *)tmp, &dlen, (Bytef *)data, 2008 {
1966 (uLongf)compr_size)) 2009 case EET_COMPRESSION_VERYFAST:
1967 goto on_error; 2010 case EET_COMPRESSION_SUPERFAST:
1968 2011 ret = LZ4_uncompress(data, tmp, size);
2012 if (ret != compr_size)
2013 {
2014 free(tmp);
2015 goto on_error;
2016 }
2017 break;
2018 default:
2019 dlen = size;
2020
2021 if (uncompress((Bytef *)tmp, &dlen, (Bytef *)data,
2022 (uLongf)compr_size))
2023 {
2024 free(tmp);
2025 goto on_error;
2026 }
2027 }
2028
1969 if (tmp[compr_size - 1] != '\0') 2029 if (tmp[compr_size - 1] != '\0')
1970 goto on_error; 2030 goto on_error;
1971 2031
1972 UNLOCK_FILE(ef); 2032 UNLOCK_FILE(ef);
1973 2033
1974 return eina_stringshare_add(tmp); 2034 retptr = eina_stringshare_add(tmp);
2035 free(tmp);
2036 return retptr;
1975 } 2037 }
1976 2038
1977 if (!data) 2039 if (!data)
@@ -1998,8 +2060,7 @@ eet_alias(Eet_File *ef,
1998 Eet_File_Node *efn; 2060 Eet_File_Node *efn;
1999 void *data2; 2061 void *data2;
2000 Eina_Bool exists_already = EINA_FALSE; 2062 Eina_Bool exists_already = EINA_FALSE;
2001 int data_size; 2063 int data_size, ret, hash, slen;
2002 int hash;
2003 2064
2004 /* check to see its' an eet file pointer */ 2065 /* check to see its' an eet file pointer */
2005 if (eet_check_pointer(ef)) 2066 if (eet_check_pointer(ef))
@@ -2048,32 +2109,64 @@ eet_alias(Eet_File *ef,
2048 /* figure hash bucket */ 2109 /* figure hash bucket */
2049 hash = _eet_hash_gen(name, ef->header->directory->size); 2110 hash = _eet_hash_gen(name, ef->header->directory->size);
2050 2111
2112 slen = strlen(destination) + 1;
2051 data_size = comp ? 2113 data_size = comp ?
2052 12 + (((strlen(destination) + 1) * 101) / 100) 2114 12 + ((slen * 101) / 100)
2053 : strlen(destination) + 1; 2115 : slen;
2054 2116 if (comp)
2117 {
2118 ret = LZ4_compressBound(slen);
2119 if ((ret > 0) && (ret > data_size)) data_size = ret;
2120 }
2121
2055 data2 = malloc(data_size); 2122 data2 = malloc(data_size);
2056 if (!data2) 2123 if (!data2) goto on_error;
2057 goto on_error;
2058 2124
2059 /* if we want to compress */ 2125 /* if we want to compress */
2060 if (comp) 2126 if (comp)
2061 { 2127 {
2062 uLongf buflen; 2128 switch (comp)
2063
2064 /* compress the data with max compression */
2065 buflen = (uLongf)data_size;
2066 if (compress2((Bytef *)data2, &buflen, (Bytef *)destination,
2067 (uLong)strlen(destination) + 1,
2068 Z_BEST_COMPRESSION) != Z_OK)
2069 { 2129 {
2070 free(data2); 2130 case EET_COMPRESSION_VERYFAST:
2071 goto on_error; 2131 ret = LZ4_compressHC((const char *)destination, (char *)data2,
2132 slen);
2133 if (ret <= 0)
2134 {
2135 free(data2);
2136 goto on_error;
2137 }
2138 data_size = ret;
2139 break;
2140 case EET_COMPRESSION_SUPERFAST:
2141 ret = LZ4_compress((const char *)destination, (char *)data2,
2142 slen);
2143 if (ret <= 0)
2144 {
2145 free(data2);
2146 goto on_error;
2147 }
2148 data_size = ret;
2149 break;
2150 default:
2151 {
2152 uLongf buflen;
2153
2154 /* compress the data with max compression */
2155 buflen = (uLongf)data_size;
2156 if (compress2((Bytef *)data2, &buflen,
2157 (const Bytef *)destination,
2158 (uLong)slen, Z_BEST_COMPRESSION) != Z_OK)
2159 {
2160 free(data2);
2161 goto on_error;
2162 }
2163 /* record compressed chunk size */
2164 data_size = (int)buflen;
2165 }
2166 break;
2072 } 2167 }
2073 2168 if ((data_size < 0) ||
2074 /* record compressed chunk size */ 2169 (data_size >= (int)(strlen(destination) + 1)))
2075 data_size = (int)buflen;
2076 if (data_size < 0 || data_size >= (int)(strlen(destination) + 1))
2077 { 2170 {
2078 comp = 0; 2171 comp = 0;
2079 data_size = strlen(destination) + 1; 2172 data_size = strlen(destination) + 1;
@@ -2083,13 +2176,11 @@ eet_alias(Eet_File *ef,
2083 void *data3; 2176 void *data3;
2084 2177
2085 data3 = realloc(data2, data_size); 2178 data3 = realloc(data2, data_size);
2086 if (data3) 2179 if (data3) data2 = data3;
2087 data2 = data3;
2088 } 2180 }
2089 } 2181 }
2090 2182
2091 if (!comp) 2183 if (!comp) memcpy(data2, destination, data_size);
2092 memcpy(data2, destination, data_size);
2093 2184
2094 /* Does this node already exist? */ 2185 /* Does this node already exist? */
2095 for (efn = ef->header->directory->nodes[hash]; efn; efn = efn->next) 2186 for (efn = ef->header->directory->nodes[hash]; efn; efn = efn->next)
@@ -2101,13 +2192,13 @@ eet_alias(Eet_File *ef,
2101 efn->alias = 1; 2192 efn->alias = 1;
2102 efn->ciphered = 0; 2193 efn->ciphered = 0;
2103 efn->compression = !!comp; 2194 efn->compression = !!comp;
2195 efn->compression_type = comp;
2104 efn->size = data_size; 2196 efn->size = data_size;
2105 efn->data_size = strlen(destination) + 1; 2197 efn->data_size = strlen(destination) + 1;
2106 efn->data = data2; 2198 efn->data = data2;
2107 /* Put the offset above the limit to avoid direct access */ 2199 /* Put the offset above the limit to avoid direct access */
2108 efn->offset = ef->data_size + 1; 2200 efn->offset = ef->data_size + 1;
2109 exists_already = EINA_TRUE; 2201 exists_already = EINA_TRUE;
2110
2111 break; 2202 break;
2112 } 2203 }
2113 } 2204 }
@@ -2131,6 +2222,7 @@ eet_alias(Eet_File *ef,
2131 efn->alias = 1; 2222 efn->alias = 1;
2132 efn->ciphered = 0; 2223 efn->ciphered = 0;
2133 efn->compression = !!comp; 2224 efn->compression = !!comp;
2225 efn->compression_type = comp;
2134 efn->size = data_size; 2226 efn->size = data_size;
2135 efn->data_size = strlen(destination) + 1; 2227 efn->data_size = strlen(destination) + 1;
2136 efn->data = data2; 2228 efn->data = data2;
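Note: in eet_alias() above (and eet_write_cipher() below), compression stays a 1-bit did-we-compress flag while compression_type stores the requested Eet_Compression value, and if the codec did not actually shrink the data the code falls back to storing it verbatim. A sketch of that bookkeeping using the private Eet_File_Node fields; illustrative only:

#include "Eet_private.h"   /* Eet_File_Node, private to the library */

/* Sketch (not tree code) of how eet_alias() records the outcome: 'comp'
 * is the caller-supplied Eet_Compression value, 'data_size' the codec
 * output size, 'slen' the alias string length including its NUL. */
static void
record_alias_entry(Eet_File_Node *efn, int comp, int data_size, int slen)
{
   if ((data_size < 0) || (data_size >= slen))
     {
        /* compression did not pay off: store the string verbatim */
        comp = 0;
        data_size = slen;
     }
   efn->compression      = !!comp;  /* 1-bit "is compressed" flag       */
   efn->compression_type = comp;    /* which codec/level to use on read */
   efn->size             = data_size;
   efn->data_size        = slen;
}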
@@ -2157,9 +2249,7 @@ eet_write_cipher(Eet_File *ef,
2157{ 2249{
2158 Eet_File_Node *efn; 2250 Eet_File_Node *efn;
2159 void *data2 = NULL; 2251 void *data2 = NULL;
2160 int exists_already = 0; 2252 int exists_already = 0, data_size, hash, ret;
2161 int data_size;
2162 int hash;
2163 2253
2164 /* check to see its' an eet file pointer */ 2254 /* check to see its' an eet file pointer */
2165 if (eet_check_pointer(ef)) 2255 if (eet_check_pointer(ef))
@@ -2208,8 +2298,15 @@ eet_write_cipher(Eet_File *ef,
2208 /* figure hash bucket */ 2298 /* figure hash bucket */
2209 hash = _eet_hash_gen(name, ef->header->directory->size); 2299 hash = _eet_hash_gen(name, ef->header->directory->size);
2210 2300
2301 UNLOCK_FILE(ef);
2302
2211 data_size = comp ? 12 + ((size * 101) / 100) : size; 2303 data_size = comp ? 12 + ((size * 101) / 100) : size;
2212 2304 if (comp)
2305 {
2306 ret = LZ4_compressBound(size);
2307 if ((ret > 0) && (ret > data_size)) data_size = ret;
2308 }
2309
2213 if (comp || !cipher_key) 2310 if (comp || !cipher_key)
2214 { 2311 {
2215 data2 = malloc(data_size); 2312 data2 = malloc(data_size);
@@ -2220,20 +2317,46 @@ eet_write_cipher(Eet_File *ef,
2220 /* if we want to compress */ 2317 /* if we want to compress */
2221 if (comp) 2318 if (comp)
2222 { 2319 {
2223 uLongf buflen; 2320 switch (comp)
2224
2225 /* compress the data with max compression */
2226 buflen = (uLongf)data_size;
2227 if (compress2((Bytef *)data2, &buflen, (Bytef *)data,
2228 (uLong)size, Z_BEST_COMPRESSION) != Z_OK)
2229 { 2321 {
2230 free(data2); 2322 case EET_COMPRESSION_VERYFAST:
2231 goto on_error; 2323 ret = LZ4_compressHC((const char *)data, (char *)data2, size);
2232 } 2324 if (ret <= 0)
2325 {
2326 free(data2);
2327 LOCK_FILE(ef);
2328 goto on_error;
2329 }
2330 data_size = ret;
2331 break;
2332 case EET_COMPRESSION_SUPERFAST:
2333 ret = LZ4_compress((const char *)data, (char *)data2, size);
2334 if (ret <= 0)
2335 {
2336 free(data2);
2337 LOCK_FILE(ef);
2338 goto on_error;
2339 }
2340 data_size = ret;
2341 break;
2342 default:
2343 {
2344 uLongf buflen;
2233 2345
2234 /* record compressed chunk size */ 2346 /* compress the data with max compression */
2235 data_size = (int)buflen; 2347 buflen = (uLongf)data_size;
2236 if (data_size < 0 || data_size >= size) 2348 if (compress2((Bytef *)data2, &buflen, (Bytef *)data,
2349 (uLong)size, Z_BEST_COMPRESSION) != Z_OK)
2350 {
2351 free(data2);
2352 LOCK_FILE(ef);
2353 goto on_error;
2354 }
2355 /* record compressed chunk size */
2356 data_size = (int)buflen;
2357 }
2358 }
2359 if ((data_size < 0) || (data_size >= size))
2237 { 2360 {
2238 comp = 0; 2361 comp = 0;
2239 data_size = size; 2362 data_size = size;
@@ -2276,6 +2399,7 @@ eet_write_cipher(Eet_File *ef,
2276 if (!comp) 2399 if (!comp)
2277 memcpy(data2, data, size); 2400 memcpy(data2, data, size);
2278 2401
2402 LOCK_FILE(ef);
2279 /* Does this node already exist? */ 2403 /* Does this node already exist? */
2280 for (efn = ef->header->directory->nodes[hash]; efn; efn = efn->next) 2404 for (efn = ef->header->directory->nodes[hash]; efn; efn = efn->next)
2281 { 2405 {
@@ -2286,10 +2410,11 @@ eet_write_cipher(Eet_File *ef,
2286 efn->alias = 0; 2410 efn->alias = 0;
2287 efn->ciphered = cipher_key ? 1 : 0; 2411 efn->ciphered = cipher_key ? 1 : 0;
2288 efn->compression = !!comp; 2412 efn->compression = !!comp;
2413 efn->compression_type = comp;
2289 efn->size = data_size; 2414 efn->size = data_size;
2290 efn->data_size = size; 2415 efn->data_size = size;
2291 efn->data = data2; 2416 efn->data = data2;
2292 /* Put the offset above the limit to avoid direct access */ 2417 /* Put the offset above the limit to avoid direct access */
2293 efn->offset = ef->data_size + 1; 2418 efn->offset = ef->data_size + 1;
2294 exists_already = 1; 2419 exists_already = 1;
2295 break; 2420 break;
@@ -2315,6 +2440,7 @@ eet_write_cipher(Eet_File *ef,
2315 efn->alias = 0; 2440 efn->alias = 0;
2316 efn->ciphered = cipher_key ? 1 : 0; 2441 efn->ciphered = cipher_key ? 1 : 0;
2317 efn->compression = !!comp; 2442 efn->compression = !!comp;
2443 efn->compression_type = comp;
2318 efn->size = data_size; 2444 efn->size = data_size;
2319 efn->data_size = size; 2445 efn->data_size = size;
2320 efn->data = data2; 2446 efn->data = data2;
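Note: throughout the patch the scratch buffer for compression is sized as the larger of zlib's historical worst case (12 + size * 101 / 100) and LZ4_compressBound(size), which per the bundled lz4.c below is size + size/255 + 16. A sketch of that sizing rule, for illustration:

#include "lz4.h"

/* Worst-case scratch buffer size, mirroring the sizing rule used in
 * eet_alias()/eet_write_cipher() in this patch (illustration only). */
static int
compress_buffer_size(int size)
{
   int data_size = 12 + ((size * 101) / 100); /* historical zlib upper bound */
   int lz4_bound = LZ4_compressBound(size);   /* size + size/255 + 16        */

   if ((lz4_bound > 0) && (lz4_bound > data_size)) data_size = lz4_bound;
   return data_size;
}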
diff --git a/legacy/eet/src/lib/lz4/lz4.c b/legacy/eet/src/lib/lz4/lz4.c
new file mode 100644
index 0000000000..06e2829707
--- /dev/null
+++ b/legacy/eet/src/lib/lz4/lz4.c
@@ -0,0 +1,819 @@
1/*
2 LZ4 - Fast LZ compression algorithm
3 Copyright (C) 2011-2012, Yann Collet.
4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are
8 met:
9
10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above
13 copyright notice, this list of conditions and the following disclaimer
14 in the documentation and/or other materials provided with the
15 distribution.
16
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 You can contact the author at :
30 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 - LZ4 source repository : http://code.google.com/p/lz4/
32*/
33
34//**************************************
35// Tuning parameters
36//**************************************
37// COMPRESSIONLEVEL :
38// Increasing this value improves compression ratio
39// Lowering this value reduces memory usage
40// Reduced memory usage typically improves speed, due to cache effect (ex : L1 32KB for Intel, L1 64KB for AMD)
41// Memory usage formula : N->2^(N+2) Bytes (examples : 12 -> 16KB ; 17 -> 512KB)
42#define COMPRESSIONLEVEL 12
43
44// NOTCOMPRESSIBLE_CONFIRMATION :
45// Decreasing this value will make the algorithm skip faster data segments considered "incompressible"
46// This may decrease compression ratio dramatically, but will be faster on incompressible data
47// Increasing this value will make the algorithm search more before declaring a segment "incompressible"
48// This could improve compression a bit, but will be slower on incompressible data
49// The default value (6) is recommended
50#define NOTCOMPRESSIBLE_CONFIRMATION 6
51
52// LZ4_COMPRESSMIN :
53// Compression function will *fail* if it is not successful at compressing input by at least LZ4_COMPRESSMIN bytes
54// Since the compression function stops working prematurely, it results in a speed gain
55// The output however is unusable. Compression function result will be zero.
56// Default : 0 = disabled
57#define LZ4_COMPRESSMIN 0
58
59// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
60// This will provide a boost to performance for big endian cpu, but the resulting compressed stream will be incompatible with little-endian CPU.
61// You can set this option to 1 in situations where data will stay within closed environment
62// This option is useless on Little_Endian CPU (such as x86)
63//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
64
65
66
67//**************************************
68// CPU Feature Detection
69//**************************************
70// 32 or 64 bits ?
71#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode
72# define LZ4_ARCH64 1
73#else
74# define LZ4_ARCH64 0
75#endif
76
77// Little Endian or Big Endian ?
78// Note : overwrite the below #define if you know your architecture endianess
79#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )
80# define LZ4_BIG_ENDIAN 1
81#else
82// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
83#endif
84
85// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
86// For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected
87// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance
88#if defined(__ARM_FEATURE_UNALIGNED)
89# define LZ4_FORCE_UNALIGNED_ACCESS 1
90#endif
91
92// Define this parameter if your target system or compiler does not support hardware bit count
93#if defined(_MSC_VER) && defined(_WIN32_WCE) // Visual Studio for Windows CE does not support Hardware bit count
94# define LZ4_FORCE_SW_BITCOUNT
95#endif
96
97
98//**************************************
99// Compiler Options
100//**************************************
101#if __STDC_VERSION__ >= 199901L // C99
102/* "restrict" is a known keyword */
103#else
104# define restrict // Disable restrict
105#endif
106
107#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
108
109#ifdef _MSC_VER // Visual Studio
110# define inline __forceinline // Visual is not C99, but supports some kind of inline
111# if LZ4_ARCH64 // 64-bit
112# pragma intrinsic(_BitScanForward64) // For Visual 2005
113# pragma intrinsic(_BitScanReverse64) // For Visual 2005
114# else
115# pragma intrinsic(_BitScanForward) // For Visual 2005
116# pragma intrinsic(_BitScanReverse) // For Visual 2005
117# endif
118#endif
119
120#ifdef _MSC_VER
121# define lz4_bswap16(x) _byteswap_ushort(x)
122#else
123# define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
124#endif
125
126#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
127# define expect(expr,value) (__builtin_expect ((expr),(value)) )
128#else
129# define expect(expr,value) (expr)
130#endif
131
132#define likely(expr) expect((expr) != 0, 1)
133#define unlikely(expr) expect((expr) != 0, 0)
134
135
136//**************************************
137// Includes
138//**************************************
139#include <stdlib.h> // for malloc
140#include <string.h> // for memset
141#include "lz4.h"
142
143
144//**************************************
145// Basic Types
146//**************************************
147#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively
148# define BYTE unsigned __int8
149# define U16 unsigned __int16
150# define U32 unsigned __int32
151# define S32 __int32
152# define U64 unsigned __int64
153#else
154# include <stdint.h>
155# define BYTE uint8_t
156# define U16 uint16_t
157# define U32 uint32_t
158# define S32 int32_t
159# define U64 uint64_t
160#endif
161
162#ifndef LZ4_FORCE_UNALIGNED_ACCESS
163# pragma pack(push, 1)
164#endif
165
166typedef struct _U16_S { U16 v; } U16_S;
167typedef struct _U32_S { U32 v; } U32_S;
168typedef struct _U64_S { U64 v; } U64_S;
169
170#ifndef LZ4_FORCE_UNALIGNED_ACCESS
171# pragma pack(pop)
172#endif
173
174#define A64(x) (((U64_S *)(x))->v)
175#define A32(x) (((U32_S *)(x))->v)
176#define A16(x) (((U16_S *)(x))->v)
177
178
179//**************************************
180// Constants
181//**************************************
182#define MINMATCH 4
183
184#define HASH_LOG COMPRESSIONLEVEL
185#define HASHTABLESIZE (1 << HASH_LOG)
186#define HASH_MASK (HASHTABLESIZE - 1)
187
188#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION>2?NOTCOMPRESSIBLE_CONFIRMATION:2)
189#define STACKLIMIT 13
190#define HEAPMODE (HASH_LOG>STACKLIMIT) // Defines if memory is allocated into the stack (local variable), or into the heap (malloc()).
191#define COPYLENGTH 8
192#define LASTLITERALS 5
193#define MFLIMIT (COPYLENGTH+MINMATCH)
194#define MINLENGTH (MFLIMIT+1)
195
196#define MAXD_LOG 16
197#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
198
199#define ML_BITS 4
200#define ML_MASK ((1U<<ML_BITS)-1)
201#define RUN_BITS (8-ML_BITS)
202#define RUN_MASK ((1U<<RUN_BITS)-1)
203
204
205//**************************************
206// Architecture-specific macros
207//**************************************
208#if LZ4_ARCH64 // 64-bit
209# define STEPSIZE 8
210# define UARCH U64
211# define AARCH A64
212# define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
213# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
214# define LZ4_SECURECOPY(s,d,e) if (d<e) LZ4_WILDCOPY(s,d,e)
215# define HTYPE U32
216# define INITBASE(base) const BYTE* const base = ip
217#else // 32-bit
218# define STEPSIZE 4
219# define UARCH U32
220# define AARCH A32
221# define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
222# define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
223# define LZ4_SECURECOPY LZ4_WILDCOPY
224# define HTYPE const BYTE*
225# define INITBASE(base) const int base = 0
226#endif
227
228#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
229# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
230# define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
231#else // Little Endian
232# define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
233# define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
234#endif
235
236
237//**************************************
238// Local structures
239//**************************************
240struct refTables
241{
242 HTYPE hashTable[HASHTABLESIZE];
243};
244
245
246//**************************************
247// Macros
248//**************************************
249#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
250#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
251#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
252#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=(d)+l; LZ4_WILDCOPY(s,d,e); d=e; }
253
254
255//****************************
256// Private functions
257//****************************
258#if LZ4_ARCH64
259
260inline static int LZ4_NbCommonBytes (register U64 val)
261{
262#if defined(LZ4_BIG_ENDIAN)
263 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
264 unsigned long r = 0;
265 _BitScanReverse64( &r, val );
266 return (int)(r>>3);
267 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
268 return (__builtin_clzll(val) >> 3);
269 #else
270 int r;
271 if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
272 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
273 r += (!val);
274 return r;
275 #endif
276#else
277 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
278 unsigned long r = 0;
279 _BitScanForward64( &r, val );
280 return (int)(r>>3);
281 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
282 return (__builtin_ctzll(val) >> 3);
283 #else
284 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
285 return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
286 #endif
287#endif
288}
289
290#else
291
292inline static int LZ4_NbCommonBytes (register U32 val)
293{
294#if defined(LZ4_BIG_ENDIAN)
295 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
296 unsigned long r = 0;
297 _BitScanReverse( &r, val );
298 return (int)(r>>3);
299 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
300 return (__builtin_clz(val) >> 3);
301 #else
302 int r;
303 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
304 r += (!val);
305 return r;
306 #endif
307#else
308 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
309 unsigned long r = 0;
310 _BitScanForward( &r, val );
311 return (int)(r>>3);
312 #elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
313 return (__builtin_ctz(val) >> 3);
314 #else
315 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
316 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
317 #endif
318#endif
319}
320
321#endif
322
323
324//****************************
325// Public functions
326//****************************
327
328int LZ4_compressBound(int isize)
329{
330 return (isize + (isize/255) + 16);
331}
332
333
334
335//******************************
336// Compression functions
337//******************************
338
339int LZ4_compressCtx(void** ctx,
340 const char* source,
341 char* dest,
342 int isize)
343{
344#if HEAPMODE
345 struct refTables *srt = (struct refTables *) (*ctx);
346 HTYPE* HashTable;
347#else
348 HTYPE HashTable[HASHTABLESIZE] = {0};
349#endif
350
351 const BYTE* ip = (BYTE*) source;
352 INITBASE(base);
353 const BYTE* anchor = ip;
354 const BYTE* const iend = ip + isize;
355 const BYTE* const mflimit = iend - MFLIMIT;
356#define matchlimit (iend - LASTLITERALS)
357
358 BYTE* op = (BYTE*) dest;
359
360 int len, length;
361 const int skipStrength = SKIPSTRENGTH;
362 U32 forwardH;
363
364
365 // Init
366 if (isize<MINLENGTH) goto _last_literals;
367#if HEAPMODE
368 if (*ctx == NULL)
369 {
370 srt = (struct refTables *) malloc ( sizeof(struct refTables) );
371 *ctx = (void*) srt;
372 }
373 HashTable = (HTYPE*)(srt->hashTable);
374 memset((void*)HashTable, 0, sizeof(srt->hashTable));
375#else
376 (void) ctx;
377#endif
378
379
380 // First Byte
381 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
382 ip++; forwardH = LZ4_HASH_VALUE(ip);
383
384 // Main Loop
385 for ( ; ; )
386 {
387 int findMatchAttempts = (1U << skipStrength) + 3;
388 const BYTE* forwardIp = ip;
389 const BYTE* ref;
390 BYTE* token;
391
392 // Find a match
393 do {
394 U32 h = forwardH;
395 int step = findMatchAttempts++ >> skipStrength;
396 ip = forwardIp;
397 forwardIp = ip + step;
398
399 if unlikely(forwardIp > mflimit) { goto _last_literals; }
400
401 forwardH = LZ4_HASH_VALUE(forwardIp);
402 ref = base + HashTable[h];
403 HashTable[h] = ip - base;
404
405 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
406
407 // Catch up
408 while ((ip>anchor) && (ref>(BYTE*)source) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }
409
410 // Encode Literal length
411 length = ip - anchor;
412 token = op++;
413 if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
414 else *token = (length<<ML_BITS);
415
416 // Copy Literals
417 LZ4_BLINDCOPY(anchor, op, length);
418
419_next_match:
420 // Encode Offset
421 LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);
422
423 // Start Counting
424 ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified
425 anchor = ip;
426 while likely(ip<matchlimit-(STEPSIZE-1))
427 {
428 UARCH diff = AARCH(ref) ^ AARCH(ip);
429 if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
430 ip += LZ4_NbCommonBytes(diff);
431 goto _endCount;
432 }
433 if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
434 if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
435 if ((ip<matchlimit) && (*ref == *ip)) ip++;
436_endCount:
437
438 // Encode MatchLength
439 len = (ip - anchor);
440 if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
441 else *token += len;
442
443 // Test end of chunk
444 if (ip > mflimit) { anchor = ip; break; }
445
446 // Fill table
447 HashTable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
448
449 // Test next position
450 ref = base + HashTable[LZ4_HASH_VALUE(ip)];
451 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
452 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }
453
454 // Prepare next loop
455 anchor = ip++;
456 forwardH = LZ4_HASH_VALUE(ip);
457 }
458
459_last_literals:
460 // Encode Last Literals
461 {
462 int lastRun = iend - anchor;
463 if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
464 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
465 else *op++ = (lastRun<<ML_BITS);
466 memcpy(op, anchor, iend - anchor);
467 op += iend-anchor;
468 }
469
470 // End
471 return (int) (((char*)op)-dest);
472}
473
474
475
476// Note : this function is valid only if isize < LZ4_64KLIMIT
477#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
478#define HASHLOG64K (HASH_LOG+1)
479#define HASH64KTABLESIZE (1U<<HASHLOG64K)
480#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASHLOG64K))
481#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
482int LZ4_compress64kCtx(void** ctx,
483 const char* source,
484 char* dest,
485 int isize)
486{
487#if HEAPMODE
488 struct refTables *srt = (struct refTables *) (*ctx);
489 U16* HashTable;
490#else
491 U16 HashTable[HASH64KTABLESIZE] = {0};
492#endif
493
494 const BYTE* ip = (BYTE*) source;
495 const BYTE* anchor = ip;
496 const BYTE* const base = ip;
497 const BYTE* const iend = ip + isize;
498 const BYTE* const mflimit = iend - MFLIMIT;
499#define matchlimit (iend - LASTLITERALS)
500
501 BYTE* op = (BYTE*) dest;
502
503 int len, length;
504 const int skipStrength = SKIPSTRENGTH;
505 U32 forwardH;
506
507
508 // Init
509 if (isize<MINLENGTH) goto _last_literals;
510#if HEAPMODE
511 if (*ctx == NULL)
512 {
513 srt = (struct refTables *) malloc ( sizeof(struct refTables) );
514 *ctx = (void*) srt;
515 }
516 HashTable = (U16*)(srt->hashTable);
517 memset((void*)HashTable, 0, sizeof(srt->hashTable));
518#else
519 (void) ctx;
520#endif
521
522
523 // First Byte
524 ip++; forwardH = LZ4_HASH64K_VALUE(ip);
525
526 // Main Loop
527 for ( ; ; )
528 {
529 int findMatchAttempts = (1U << skipStrength) + 3;
530 const BYTE* forwardIp = ip;
531 const BYTE* ref;
532 BYTE* token;
533
534 // Find a match
535 do {
536 U32 h = forwardH;
537 int step = findMatchAttempts++ >> skipStrength;
538 ip = forwardIp;
539 forwardIp = ip + step;
540
541 if (forwardIp > mflimit) { goto _last_literals; }
542
543 forwardH = LZ4_HASH64K_VALUE(forwardIp);
544 ref = base + HashTable[h];
545 HashTable[h] = ip - base;
546
547 } while (A32(ref) != A32(ip));
548
549 // Catch up
550 while ((ip>anchor) && (ref>(BYTE*)source) && (ip[-1]==ref[-1])) { ip--; ref--; }
551
552 // Encode Literal length
553 length = ip - anchor;
554 token = op++;
555 if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *op++ = 255; *op++ = (BYTE)len; }
556 else *token = (length<<ML_BITS);
557
558 // Copy Literals
559 LZ4_BLINDCOPY(anchor, op, length);
560
561_next_match:
562 // Encode Offset
563 LZ4_WRITE_LITTLEENDIAN_16(op,ip-ref);
564
565 // Start Counting
566 ip+=MINMATCH; ref+=MINMATCH; // MinMatch verified
567 anchor = ip;
568 while (ip<matchlimit-(STEPSIZE-1))
569 {
570 UARCH diff = AARCH(ref) ^ AARCH(ip);
571 if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
572 ip += LZ4_NbCommonBytes(diff);
573 goto _endCount;
574 }
575 if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
576 if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
577 if ((ip<matchlimit) && (*ref == *ip)) ip++;
578_endCount:
579
580 // Encode MatchLength
581 len = (ip - anchor);
582 if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *op++ = 255; *op++ = 255; } if (len > 254) { len-=255; *op++ = 255; } *op++ = (BYTE)len; }
583 else *token += len;
584
585 // Test end of chunk
586 if (ip > mflimit) { anchor = ip; break; }
587
588 // Fill table
589 HashTable[LZ4_HASH64K_VALUE(ip-2)] = ip - 2 - base;
590
591 // Test next position
592 ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
593 HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
594 if (A32(ref) == A32(ip)) { token = op++; *token=0; goto _next_match; }
595
596 // Prepare next loop
597 anchor = ip++;
598 forwardH = LZ4_HASH64K_VALUE(ip);
599 }
600
601_last_literals:
602 // Encode Last Literals
603 {
604 int lastRun = iend - anchor;
605 if ((LZ4_COMPRESSMIN>0) && (((op - (BYTE*)dest) + lastRun + 1 + ((lastRun-15)/255)) > isize - LZ4_COMPRESSMIN)) return 0;
606 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
607 else *op++ = (lastRun<<ML_BITS);
608 memcpy(op, anchor, iend - anchor);
609 op += iend-anchor;
610 }
611
612 // End
613 return (int) (((char*)op)-dest);
614}
615
616
617
618int LZ4_compress(const char* source,
619 char* dest,
620 int isize)
621{
622#if HEAPMODE
623 void* ctx = malloc(sizeof(struct refTables));
624 int result;
625 if (isize < LZ4_64KLIMIT)
626 result = LZ4_compress64kCtx(&ctx, source, dest, isize);
627 else result = LZ4_compressCtx(&ctx, source, dest, isize);
628 free(ctx);
629 return result;
630#else
631 if (isize < (int)LZ4_64KLIMIT) return LZ4_compress64kCtx(NULL, source, dest, isize);
632 return LZ4_compressCtx(NULL, source, dest, isize);
633#endif
634}
635
636
637
638
639//****************************
640// Decompression functions
641//****************************
642
643// Note : The decoding functions LZ4_uncompress() and LZ4_uncompress_unknownOutputSize()
644// are safe against "buffer overflow" attack type.
645// They will never write nor read outside of the provided output buffers.
646// LZ4_uncompress_unknownOutputSize() also insures that it will never read outside of the input buffer.
647// A corrupted input will produce an error result, a negative int, indicating the position of the error within input stream.
648
649int LZ4_uncompress(const char* source,
650 char* dest,
651 int osize)
652{
653 // Local Variables
654 const BYTE* restrict ip = (const BYTE*) source;
655 const BYTE* restrict ref;
656
657 BYTE* restrict op = (BYTE*) dest;
658 BYTE* const oend = op + osize;
659 BYTE* cpy;
660
661 BYTE token;
662
663 int len, length;
664 size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};
665
666
667 // Main Loop
668 while (1)
669 {
670 // get runlength
671 token = *ip++;
672 if ((length=(token>>ML_BITS)) == RUN_MASK) { for (;(len=*ip++)==255;length+=255){} length += len; }
673
674 // copy literals
675 cpy = op+length;
676 if unlikely(cpy>oend-COPYLENGTH)
677 {
678 if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
679 memcpy(op, ip, length);
680 ip += length;
681 break; // Necessarily EOF
682 }
683 LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
684
685 // get offset
686 LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
687 if (ref < (BYTE* const)dest) goto _output_error; // Error : offset create reference outside destination buffer
688
689 // get matchlength
690 if ((length=(token&ML_MASK)) == ML_MASK) { for (;*ip==255;length+=255) {ip++;} length += *ip++; }
691
692 // copy repeated sequence
693 if unlikely(op-ref<STEPSIZE)
694 {
695#if LZ4_ARCH64
696 size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
697 size_t dec2 = dec2table[op-ref];
698#else
699 const int dec2 = 0;
700#endif
701 *op++ = *ref++;
702 *op++ = *ref++;
703 *op++ = *ref++;
704 *op++ = *ref++;
705 ref -= dec[op-ref];
706 A32(op)=A32(ref); op += STEPSIZE-4;
707 ref -= dec2;
708 } else { LZ4_COPYSTEP(ref,op); }
709 cpy = op + length - (STEPSIZE-4);
710 if (cpy>oend-COPYLENGTH)
711 {
712 if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
713 LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
714 while(op<cpy) *op++=*ref++;
715 op=cpy;
716 if (op == oend) break; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
717 continue;
718 }
719 LZ4_SECURECOPY(ref, op, cpy);
720 op=cpy; // correction
721 }
722
723 // end of decoding
724 return (int) (((char*)ip)-source);
725
726 // write overflow error detected
727_output_error:
728 return (int) (-(((char*)ip)-source));
729}
730
731
732int LZ4_uncompress_unknownOutputSize(
733 const char* source,
734 char* dest,
735 int isize,
736 int maxOutputSize)
737{
738 // Local Variables
739 const BYTE* restrict ip = (const BYTE*) source;
740 const BYTE* const iend = ip + isize;
741 const BYTE* restrict ref;
742
743 BYTE* restrict op = (BYTE*) dest;
744 BYTE* const oend = op + maxOutputSize;
745 BYTE* cpy;
746
747 size_t dec[] ={0, 3, 2, 3, 0, 0, 0, 0};
748
749
750 // Main Loop
751 while (ip<iend)
752 {
753 BYTE token;
754 int length;
755
756 // get runlength
757 token = *ip++;
758 if ((length=(token>>ML_BITS)) == RUN_MASK) { int s=255; while ((ip<iend) && (s==255)) { s=*ip++; length += s; } }
759
760 // copy literals
761 cpy = op+length;
762 if ((cpy>oend-COPYLENGTH) || (ip+length>iend-COPYLENGTH))
763 {
764 if (cpy > oend) goto _output_error; // Error : request to write beyond destination buffer
765 if (ip+length > iend) goto _output_error; // Error : request to read beyond source buffer
766 memcpy(op, ip, length);
767 op += length;
768 ip += length;
769 if (ip<iend) goto _output_error; // Error : LZ4 format violation
770 break; // Necessarily EOF, due to parsing restrictions
771 }
772 LZ4_WILDCOPY(ip, op, cpy); ip -= (op-cpy); op = cpy;
773
774 // get offset
775 LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
776 if (ref < (BYTE* const)dest) goto _output_error; // Error : offset creates a reference outside of the destination buffer
777
778 // get matchlength
779 if ((length=(token&ML_MASK)) == ML_MASK) { while (ip<iend) { int s = *ip++; length +=s; if (s==255) continue; break; } }
780
781 // copy repeated sequence
782 if unlikely(op-ref<STEPSIZE)
783 {
784#if LZ4_ARCH64
785 size_t dec2table[]={0, 0, 0, -1, 0, 1, 2, 3};
786 size_t dec2 = dec2table[op-ref];
787#else
788 const int dec2 = 0;
789#endif
790 *op++ = *ref++;
791 *op++ = *ref++;
792 *op++ = *ref++;
793 *op++ = *ref++;
794 ref -= dec[op-ref];
795 A32(op)=A32(ref); op += STEPSIZE-4;
796 ref -= dec2;
797 } else { LZ4_COPYSTEP(ref,op); }
798 cpy = op + length - (STEPSIZE-4);
799 if (cpy>oend-COPYLENGTH)
800 {
801 if (cpy > oend) goto _output_error; // Error : request to write outside of destination buffer
802 LZ4_SECURECOPY(ref, op, (oend-COPYLENGTH));
803 while(op<cpy) *op++=*ref++;
804 op=cpy;
805 if (op == oend) break; // Check EOF (should never happen, since last 5 bytes are supposed to be literals)
806 continue;
807 }
808 LZ4_SECURECOPY(ref, op, cpy);
809 op=cpy; // correction
810 }
811
812 // end of decoding
813 return (int) (((char*)op)-dest);
814
815 // write overflow error detected
816_output_error:
817 return (int) (-(((char*)ip)-source));
818}
819
diff --git a/legacy/eet/src/lib/lz4/lz4.h b/legacy/eet/src/lib/lz4/lz4.h
new file mode 100644
index 0000000000..ebd62b69a4
--- /dev/null
+++ b/legacy/eet/src/lib/lz4/lz4.h
@@ -0,0 +1,120 @@
1/*
2 LZ4 - Fast LZ compression algorithm
3 Header File
4 Copyright (C) 2011-2012, Yann Collet.
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10
11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the
16 distribution.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 You can contact the author at :
31 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 - LZ4 source repository : http://code.google.com/p/lz4/
33*/
34#pragma once
35
36#if defined (__cplusplus)
37extern "C" {
38#endif
39
40
41//****************************
42// Simple Functions
43//****************************
44
45int LZ4_compress (const char* source, char* dest, int isize);
46int LZ4_uncompress (const char* source, char* dest, int osize);
47
48/*
49LZ4_compress() :
50 isize : is the input size. Max supported value is ~1.9GB
51 return : the number of bytes written in buffer dest
52 or 0 if the compression fails (if LZ4_COMPRESSMIN is set)
53 note : the destination buffer must already be allocated.
54 The destination buffer must be sized to handle worst-case situations (input data not compressible);
55 the worst-case size evaluation is provided by the function LZ4_compressBound().
56
57LZ4_uncompress() :
58 osize : is the output size, therefore the original size
59 return : the number of bytes read in the source buffer
60 If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction
61 This function never writes beyond dest + osize, and is therefore protected against malicious data packets
62 note : the destination buffer must already be allocated
63*/
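A minimal round-trip sketch of the two calls documented above; the sample string and the fixed 256-byte scratch buffer are illustrative assumptions (a real caller would size the destination with LZ4_compressBound(), documented below):

#include <stdio.h>
#include <string.h>

#include "lz4.h"

int
main(void)
{
   const char src[] = "hello hello hello hello hello hello";
   int isize = (int)sizeof(src);   /* original size, including the terminating NUL */
   char comp[256];                 /* generously oversized for this tiny input */
   char back[sizeof(src)];
   int csize, nread;

   csize = LZ4_compress(src, comp, isize);    /* bytes written to comp (0 = not compressible enough) */
   nread = LZ4_uncompress(comp, back, isize); /* osize is the original size; returns bytes read */
   printf("original=%d compressed=%d consumed=%d match=%d\n",
          isize, csize, nread, memcmp(src, back, isize) == 0);
   return 0;
}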
64
65
66//****************************
67// Advanced Functions
68//****************************
69
70int LZ4_compressBound(int isize);
71
72/*
73LZ4_compressBound() :
74 Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible);
75 primarily useful for sizing the output buffer before allocation.
76
77 isize : is the input size. Max supported value is ~1.9GB
78 return : maximum output size in a "worst case" scenario
79 note : this function is limited by "int" range (2^31-1)
80*/
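In practice the destination buffer for LZ4_compress() is simply allocated from this bound; a short sketch (the helper name is illustrative, error handling is left to the caller):

#include <stdlib.h>

#include "lz4.h"

static char *
alloc_compress_dest(int isize)
{
   /* always large enough for LZ4_compress(), even on incompressible input */
   return malloc(LZ4_compressBound(isize));
}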
81
82
83int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
84
85/*
86LZ4_uncompress_unknownOutputSize() :
87 isize : is the input size, therefore the compressed size
88 maxOutputSize : is the size of the destination buffer (which must be already allocated)
89 return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
90 If the source stream is malformed, the function will stop decoding and return a negative result, indicating the byte position of the faulty instruction
91 This function never writes beyond dest + maxOutputSize, and is therefore protected against malicious data packets
92 note : The destination buffer must already be allocated.
93 This version is slightly slower than LZ4_uncompress()
94*/
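A hedged sketch of the documented use: decode a block whose original size was not stored, bounding writes by the destination capacity (names are illustrative; only the LZ4 call comes from this header):

#include "lz4.h"

static int
decode_bounded(const char *packet, int packet_len, char *out, int out_capacity)
{
   int n = LZ4_uncompress_unknownOutputSize(packet, out, packet_len, out_capacity);

   if (n < 0) return -1; /* malformed input; -n is the offending input offset */
   return n;             /* number of bytes decoded into out */
}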
95
96
97int LZ4_compressCtx(void** ctx, const char* source, char* dest, int isize);
98int LZ4_compress64kCtx(void** ctx, const char* source, char* dest, int isize);
99
100/*
101LZ4_compressCtx() :
102 This function explicitly handles the CTX memory structure.
103 It avoids allocating/deallocating memory between each call, improving performance when malloc is heavily invoked.
104 This function is only useful when memory is allocated on the heap (HASH_LOG value beyond STACK_LIMIT).
105 The performance difference is noticeable only when repeatedly calling the compression function over many small segments.
106 Note : by default, memory is allocated on the stack, therefore "malloc" is not invoked.
107LZ4_compress64kCtx() :
108 Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
109 isize *Must* be <64KB, otherwise the output will be corrupted.
110
111 On the first call : provide *ctx = NULL; it will be allocated automatically.
112 On subsequent calls : reuse the same ctx pointer.
113 Use different ctx pointers for different threads when multi-threading.
114
115*/
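A sketch of the reuse pattern described above, assuming the library is built with heap allocation (HEAPMODE) so the ctx argument is actually used; releasing the context with free() mirrors what LZ4_compress() does internally and is an assumption here, as are the helper name and the pre-sized out[] buffers:

#include <stdlib.h>

#include "lz4.h"

static int
compress_chunks(const char **in, const int *in_len, char **out, int count)
{
   void *ctx = NULL; /* first call allocates it, later calls reuse it */
   int i, total = 0;

   for (i = 0; i < count; i++)
     /* each out[i] is assumed to hold LZ4_compressBound(in_len[i]) bytes;
      * LZ4_compress64kCtx(&ctx, ...) could be used instead when every
      * input is known to be below 64KB */
     total += LZ4_compressCtx(&ctx, in[i], out[i], in_len[i]);

   free(ctx); /* no-op when the library keeps its tables on the stack */
   return total;
}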
116
117
118#if defined (__cplusplus)
119}
120#endif
diff --git a/legacy/eet/src/lib/lz4/lz4hc.c b/legacy/eet/src/lib/lz4/lz4hc.c
new file mode 100644
index 0000000000..cca755c26c
--- /dev/null
+++ b/legacy/eet/src/lib/lz4/lz4hc.c
@@ -0,0 +1,663 @@
1/*
2 LZ4 HC - High Compression Mode of LZ4
3 Copyright (C) 2011-2012, Yann Collet.
4 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are
8 met:
9
10 * Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 * Redistributions in binary form must reproduce the above
13 copyright notice, this list of conditions and the following disclaimer
14 in the documentation and/or other materials provided with the
15 distribution.
16
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 You can contact the author at :
30 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31 - LZ4 source repository : http://code.google.com/p/lz4/
32*/
33
34
35//**************************************
36// CPU Feature Detection
37//**************************************
38// 32 or 64 bits ?
39#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || defined(__LP64__) || defined(_LP64) ) // Detects 64 bits mode
40#define LZ4_ARCH64 1
41#else
42#define LZ4_ARCH64 0
43#endif
44
45// Little Endian or Big Endian ?
46#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || defined(__PPC) || defined(PPC) || defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) )
47#define LZ4_BIG_ENDIAN 1
48#else
49// Little Endian assumed. PDP Endian and other very rare endian formats are unsupported.
50#endif
51
52// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
53// For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected.
54// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
55#if defined(__ARM_FEATURE_UNALIGNED)
56#define LZ4_FORCE_UNALIGNED_ACCESS 1
57#endif
58
59
60//**************************************
61// Compiler Options
62//**************************************
63#if __STDC_VERSION__ >= 199901L // C99
64 /* "restrict" is a known keyword */
65#else
66#define restrict // Disable restrict
67#endif
68
69#ifdef _MSC_VER
70#define inline __forceinline // Visual Studio is not C99, but supports __forceinline
71#endif
72
73#ifdef _MSC_VER // Visual Studio
74#define bswap16(x) _byteswap_ushort(x)
75#else
76#define bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
77#endif
78
79
80//**************************************
81// Includes
82//**************************************
83#include <stdlib.h> // calloc, free
84#include <string.h> // memset, memcpy
85#include "lz4hc.h"
86
87#define ALLOCATOR(s) calloc(1,s)
88#define FREEMEM free
89#define MEM_INIT memset
90
91
92//**************************************
93// Basic Types
94//**************************************
95#if defined(_MSC_VER) // Visual Studio does not support 'stdint' natively
96#define BYTE unsigned __int8
97#define U16 unsigned __int16
98#define U32 unsigned __int32
99#define S32 __int32
100#define U64 unsigned __int64
101#else
102#include <stdint.h>
103#define BYTE uint8_t
104#define U16 uint16_t
105#define U32 uint32_t
106#define S32 int32_t
107#define U64 uint64_t
108#endif
109
110#ifndef LZ4_FORCE_UNALIGNED_ACCESS
111#pragma pack(push, 1)
112#endif
113
114typedef struct _U16_S { U16 v; } U16_S;
115typedef struct _U32_S { U32 v; } U32_S;
116typedef struct _U64_S { U64 v; } U64_S;
117
118#ifndef LZ4_FORCE_UNALIGNED_ACCESS
119#pragma pack(pop)
120#endif
121
122#define A64(x) (((U64_S *)(x))->v)
123#define A32(x) (((U32_S *)(x))->v)
124#define A16(x) (((U16_S *)(x))->v)
125
126
127//**************************************
128// Constants
129//**************************************
130#define MINMATCH 4
131
132#define DICTIONARY_LOGSIZE 16
133#define MAXD (1<<DICTIONARY_LOGSIZE)
134#define MAXD_MASK ((U32)(MAXD - 1))
135#define MAX_DISTANCE (MAXD - 1)
136
137#define HASH_LOG (DICTIONARY_LOGSIZE-1)
138#define HASHTABLESIZE (1 << HASH_LOG)
139#define HASH_MASK (HASHTABLESIZE - 1)
140
141#define MAX_NB_ATTEMPTS 256
142
143#define ML_BITS 4
144#define ML_MASK (size_t)((1U<<ML_BITS)-1)
145#define RUN_BITS (8-ML_BITS)
146#define RUN_MASK ((1U<<RUN_BITS)-1)
147
148#define COPYLENGTH 8
149#define LASTLITERALS 5
150#define MFLIMIT (COPYLENGTH+MINMATCH)
151#define MINLENGTH (MFLIMIT+1)
152#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
153
154
155//**************************************
156// Architecture-specific macros
157//**************************************
158#if LZ4_ARCH64 // 64-bit
159#define STEPSIZE 8
160#define LZ4_COPYSTEP(s,d) A64(d) = A64(s); d+=8; s+=8;
161#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d)
162#define UARCH U64
163#define AARCH A64
164#define HTYPE U32
165#define INITBASE(b,s) const BYTE* const b = s
166#else // 32-bit
167#define STEPSIZE 4
168#define LZ4_COPYSTEP(s,d) A32(d) = A32(s); d+=4; s+=4;
169#define LZ4_COPYPACKET(s,d) LZ4_COPYSTEP(s,d); LZ4_COPYSTEP(s,d);
170#define UARCH U32
171#define AARCH A32
172#define HTYPE const BYTE*
173#define INITBASE(b,s) const int b = 0
174#endif
175
176#if defined(LZ4_BIG_ENDIAN)
177#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = bswap16(v); d = (s) - v; }
178#define LZ4_WRITE_LITTLEENDIAN_16(p,i) { U16 v = (U16)(i); v = bswap16(v); A16(p) = v; p+=2; }
179#else // Little Endian
180#define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
181#define LZ4_WRITE_LITTLEENDIAN_16(p,v) { A16(p) = v; p+=2; }
182#endif
183
184
185//************************************************************
186// Local Types
187//************************************************************
188typedef struct
189{
190 const BYTE* base;
191 HTYPE hashTable[HASHTABLESIZE];
192 U16 chainTable[MAXD];
193 const BYTE* nextToUpdate;
194} LZ4HC_Data_Structure;
195
196
197//**************************************
198// Macros
199//**************************************
200#define LZ4_WILDCOPY(s,d,e) do { LZ4_COPYPACKET(s,d) } while (d<e);
201#define LZ4_BLINDCOPY(s,d,l) { BYTE* e=d+l; LZ4_WILDCOPY(s,d,e); d=e; }
202#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG))
203#define HASH_VALUE(p) HASH_FUNCTION(*(U32*)(p))
204#define HASH_POINTER(p) (HashTable[HASH_VALUE(p)] + base)
205#define DELTANEXT(p) chainTable[(size_t)(p) & MAXD_MASK]
206#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p))
207#define ADD_HASH(p) { size_t delta = (p) - HASH_POINTER(p); if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; DELTANEXT(p) = (U16)delta; HashTable[HASH_VALUE(p)] = (p) - base; }
208
209
210//**************************************
211// Private functions
212//**************************************
213#if LZ4_ARCH64
214
215inline static int LZ4_NbCommonBytes (register U64 val)
216{
217#if defined(LZ4_BIG_ENDIAN)
218 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
219 unsigned long r = 0;
220 _BitScanReverse64( &r, val );
221 return (int)(r>>3);
222 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
223 return (__builtin_clzll(val) >> 3);
224 #else
225 int r;
226 if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
227 if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
228 r += (!val);
229 return r;
230 #endif
231#else
232 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
233 unsigned long r = 0;
234 _BitScanForward64( &r, val );
235 return (int)(r>>3);
236 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
237 return (__builtin_ctzll(val) >> 3);
238 #else
239 static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
240 return DeBruijnBytePos[((U64)((val & -val) * 0x0218A392CDABBD3F)) >> 58];
241 #endif
242#endif
243}
244
245#else
246
247inline static int LZ4_NbCommonBytes (register U32 val)
248{
249#if defined(LZ4_BIG_ENDIAN)
250 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
251 unsigned long r = 0;
252 _BitScanReverse( &r, val );
253 return (int)(r>>3);
254 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
255 return (__builtin_clz(val) >> 3);
256 #else
257 int r;
258 if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
259 r += (!val);
260 return r;
261 #endif
262#else
263 #if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
264 unsigned long r = 0;
265 _BitScanForward( &r, val );
266 return (int)(r>>3);
267 #elif defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
268 return (__builtin_ctz(val) >> 3);
269 #else
270 static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
271 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
272 #endif
273#endif
274}
275
276#endif
277
278
279inline static int LZ4HC_Init (LZ4HC_Data_Structure* hc4, const BYTE* base)
280{
281 MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
282 MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
283 hc4->nextToUpdate = base + LZ4_ARCH64;
284 hc4->base = base;
285 return 1;
286}
287
288
289inline static void* LZ4HC_Create (const BYTE* base)
290{
291 void* hc4 = ALLOCATOR(sizeof(LZ4HC_Data_Structure));
292
293 LZ4HC_Init (hc4, base);
294 return hc4;
295}
296
297
298inline static int LZ4HC_Free (void** LZ4HC_Data)
299{
300 FREEMEM(*LZ4HC_Data);
301 *LZ4HC_Data = NULL;
302 return (1);
303}
304
305
306inline static void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip)
307{
308 U16* chainTable = hc4->chainTable;
309 HTYPE* HashTable = hc4->hashTable;
310 INITBASE(base,hc4->base);
311
312 while(hc4->nextToUpdate < ip)
313 {
314 ADD_HASH(hc4->nextToUpdate);
315 hc4->nextToUpdate++;
316 }
317}
318
319
320inline static int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* const matchlimit, const BYTE** matchpos)
321{
322 U16* const chainTable = hc4->chainTable;
323 HTYPE* const HashTable = hc4->hashTable;
324 const BYTE* ref;
325 INITBASE(base,hc4->base);
326 int nbAttempts=MAX_NB_ATTEMPTS;
327 int ml=0;
328
329 // HC4 match finder
330 LZ4HC_Insert(hc4, ip);
331 ref = HASH_POINTER(ip);
332 while ((ref > (ip-MAX_DISTANCE)) && (nbAttempts))
333 {
334 nbAttempts--;
335 if (*(ref+ml) == *(ip+ml))
336 if (*(U32*)ref == *(U32*)ip)
337 {
338 const BYTE* reft = ref+MINMATCH;
339 const BYTE* ipt = ip+MINMATCH;
340
341 while (ipt<matchlimit-(STEPSIZE-1))
342 {
343 UARCH diff = AARCH(reft) ^ AARCH(ipt);
344 if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }
345 ipt += LZ4_NbCommonBytes(diff);
346 goto _endCount;
347 }
348 if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; }
349 if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; }
350 if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;
351_endCount:
352
353 if (ipt-ip > ml) { ml = ipt-ip; *matchpos = ref; }
354 }
355 ref = GETNEXT(ref);
356 }
357
358 return ml;
359}
360
361
362inline static int LZ4HC_InsertAndGetWiderMatch (LZ4HC_Data_Structure* hc4, const BYTE* ip, const BYTE* startLimit, const BYTE* matchlimit, int longest, const BYTE** matchpos, const BYTE** startpos)
363{
364 U16* const chainTable = hc4->chainTable;
365 HTYPE* const HashTable = hc4->hashTable;
366 INITBASE(base,hc4->base);
367 const BYTE* ref;
368 int nbAttempts = MAX_NB_ATTEMPTS;
369 int delta = ip-startLimit;
370
371 // First Match
372 LZ4HC_Insert(hc4, ip);
373 ref = HASH_POINTER(ip);
374
375 while ((ref > ip-MAX_DISTANCE) && (ref >= hc4->base) && (nbAttempts))
376 {
377 nbAttempts--;
378 if (*(startLimit + longest) == *(ref - delta + longest))
379 if (*(U32*)ref == *(U32*)ip)
380 {
381 const BYTE* reft = ref+MINMATCH;
382 const BYTE* ipt = ip+MINMATCH;
383 const BYTE* startt = ip;
384
385 while (ipt<matchlimit-(STEPSIZE-1))
386 {
387 UARCH diff = AARCH(reft) ^ AARCH(ipt);
388 if (!diff) { ipt+=STEPSIZE; reft+=STEPSIZE; continue; }
389 ipt += LZ4_NbCommonBytes(diff);
390 goto _endCount;
391 }
392 if (LZ4_ARCH64) if ((ipt<(matchlimit-3)) && (A32(reft) == A32(ipt))) { ipt+=4; reft+=4; }
393 if ((ipt<(matchlimit-1)) && (A16(reft) == A16(ipt))) { ipt+=2; reft+=2; }
394 if ((ipt<matchlimit) && (*reft == *ipt)) ipt++;
395_endCount:
396
397 reft = ref;
398 while ((startt>startLimit) && (reft > hc4->base) && (startt[-1] == reft[-1])) {startt--; reft--;}
399
400 if ((ipt-startt) > longest)
401 {
402 longest = ipt-startt;
403 *matchpos = reft;
404 *startpos = startt;
405 }
406 }
407 ref = GETNEXT(ref);
408 }
409
410 return longest;
411}
412
413
414inline static int LZ4_encodeSequence(const BYTE** ip, BYTE** op, const BYTE** anchor, int ml, const BYTE* ref)
415{
416 int length, len;
417 BYTE* token;
418
419 // Encode Literal length
420 length = *ip - *anchor;
421 token = (*op)++;
422 if (length>=(int)RUN_MASK) { *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; }
423 else *token = (length<<ML_BITS);
424
425 // Copy Literals
426 LZ4_BLINDCOPY(*anchor, *op, length);
427
428 // Encode Offset
429 LZ4_WRITE_LITTLEENDIAN_16(*op,*ip-ref);
430
431 // Encode MatchLength
432 len = (int)(ml-MINMATCH);
433 if (len>=(int)ML_MASK) { *token+=ML_MASK; len-=ML_MASK; for(; len > 509 ; len-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (len > 254) { len-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)len; }
434 else *token += len;
435
436 // Prepare next loop
437 *ip += ml;
438 *anchor = *ip;
439
440 return 0;
441}
442
443
444//****************************
445// Compression CODE
446//****************************
447
448int LZ4_compressHCCtx(LZ4HC_Data_Structure* ctx,
449 const char* source,
450 char* dest,
451 int isize)
452{
453 const BYTE* ip = (const BYTE*) source;
454 const BYTE* anchor = ip;
455 const BYTE* const iend = ip + isize;
456 const BYTE* const mflimit = iend - MFLIMIT;
457 const BYTE* const matchlimit = (iend - LASTLITERALS);
458
459 BYTE* op = (BYTE*) dest;
460
461 int ml, ml2, ml3, ml0;
462 const BYTE* ref=NULL;
463 const BYTE* start2=NULL;
464 const BYTE* ref2=NULL;
465 const BYTE* start3=NULL;
466 const BYTE* ref3=NULL;
467 const BYTE* start0;
468 const BYTE* ref0;
469
470 ip++;
471
472 // Main Loop
473 while (ip < mflimit)
474 {
475 ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref));
476 if (!ml) { ip++; continue; }
477
478 // saved, in case we skip too much
479 start0 = ip;
480 ref0 = ref;
481 ml0 = ml;
482
483_Search2:
484 if (ip+ml < mflimit)
485 ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2);
486 else ml2=ml;
487
488 if (ml2 == ml) // No better match
489 {
490 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
491 continue;
492 }
493
494 if (start0 < ip)
495 {
496 if (start2 < ip + ml0) // empirical
497 {
498 ip = start0;
499 ref = ref0;
500 ml = ml0;
501 }
502 }
503
504 // Here, start0==ip
505 if ((start2 - ip) < 3) // First Match too small : removed
506 {
507 ml = ml2;
508 ip = start2;
509 ref =ref2;
510 goto _Search2;
511 }
512
513_Search3:
514 // Currently we have :
515 // ml2 > ml1, and
516 // ip1+3 <= ip2 (usually < ip1+ml1)
517 if ((start2 - ip) < OPTIMAL_ML)
518 {
519 int correction;
520 int new_ml = ml;
521 if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
522 if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = start2 - ip + ml2 - MINMATCH;
523 correction = new_ml - (start2 - ip);
524 if (correction > 0)
525 {
526 start2 += correction;
527 ref2 += correction;
528 ml2 -= correction;
529 }
530 }
531 // Now, we have start2 = ip+new_ml, with new_ml=min(ml, OPTIMAL_ML=18)
532
533 if (start2 + ml2 < mflimit)
534 ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3);
535 else ml3=ml2;
536
537 if (ml3 == ml2) // No better match : 2 sequences to encode
538 {
539 // ip & ref are known; Now for ml
540 if (start2 < ip+ml)
541 {
542 if ((start2 - ip) < OPTIMAL_ML)
543 {
544 int correction;
545 if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
546 if (ip+ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
547 correction = ml - (start2 - ip);
548 if (correction > 0)
549 {
550 start2 += correction;
551 ref2 += correction;
552 ml2 -= correction;
553 }
554 }
555 else
556 {
557 ml = start2 - ip;
558 }
559 }
560 // Now, encode 2 sequences
561 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
562 ip = start2;
563 LZ4_encodeSequence(&ip, &op, &anchor, ml2, ref2);
564 continue;
565 }
566
567 if (start3 < ip+ml+3) // Not enough space for match 2 : remove it
568 {
569 if (start3 >= (ip+ml)) // can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1
570 {
571 if (start2 < ip+ml)
572 {
573 int correction = (ip+ml) - start2;
574 start2 += correction;
575 ref2 += correction;
576 ml2 -= correction;
577 if (ml2 < MINMATCH)
578 {
579 start2 = start3;
580 ref2 = ref3;
581 ml2 = ml3;
582 }
583 }
584
585 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
586 ip = start3;
587 ref = ref3;
588 ml = ml3;
589
590 start0 = start2;
591 ref0 = ref2;
592 ml0 = ml2;
593 goto _Search2;
594 }
595
596 start2 = start3;
597 ref2 = ref3;
598 ml2 = ml3;
599 goto _Search3;
600 }
601
602 // OK, now we have 3 ascending matches; let's write at least the first one
603 // ip & ref are known; Now for ml
604 if (start2 < ip+ml)
605 {
606 if ((start2 - ip) < (int)ML_MASK)
607 {
608 int correction;
609 if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
610 if (ip + ml > start2 + ml2 - MINMATCH) ml = start2 - ip + ml2 - MINMATCH;
611 correction = ml - (start2 - ip);
612 if (correction > 0)
613 {
614 start2 += correction;
615 ref2 += correction;
616 ml2 -= correction;
617 }
618 }
619 else
620 {
621 ml = start2 - ip;
622 }
623 }
624 LZ4_encodeSequence(&ip, &op, &anchor, ml, ref);
625
626 ip = start2;
627 ref = ref2;
628 ml = ml2;
629
630 start2 = start3;
631 ref2 = ref3;
632 ml2 = ml3;
633
634 goto _Search3;
635
636 }
637
638 // Encode Last Literals
639 {
640 int lastRun = iend - anchor;
641 if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
642 else *op++ = (lastRun<<ML_BITS);
643 memcpy(op, anchor, iend - anchor);
644 op += iend-anchor;
645 }
646
647 // End
648 return (int) (((char*)op)-dest);
649}
650
651
652int LZ4_compressHC(const char* source,
653 char* dest,
654 int isize)
655{
656 void* ctx = LZ4HC_Create((const BYTE*)source);
657 int result = LZ4_compressHCCtx(ctx, source, dest, isize);
658 LZ4HC_Free (&ctx);
659
660 return result;
661}
662
663
diff --git a/legacy/eet/src/lib/lz4/lz4hc.h b/legacy/eet/src/lib/lz4/lz4hc.h
new file mode 100644
index 0000000000..cb74689f07
--- /dev/null
+++ b/legacy/eet/src/lib/lz4/lz4hc.h
@@ -0,0 +1,60 @@
1/*
2 LZ4 HC - High Compression Mode of LZ4
3 Header File
4 Copyright (C) 2011-2012, Yann Collet.
5 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are
9 met:
10
11 * Redistributions of source code must retain the above copyright
12 notice, this list of conditions and the following disclaimer.
13 * Redistributions in binary form must reproduce the above
14 copyright notice, this list of conditions and the following disclaimer
15 in the documentation and/or other materials provided with the
16 distribution.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 You can contact the author at :
31 - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 - LZ4 source repository : http://code.google.com/p/lz4/
33*/
34#pragma once
35
36
37#if defined (__cplusplus)
38extern "C" {
39#endif
40
41
42int LZ4_compressHC (const char* source, char* dest, int isize);
43
44/*
45LZ4_compressHC :
46 return : the number of bytes in compressed buffer dest
47 note : the destination buffer must already be allocated.
48 To avoid any problem, size it to handle worst-case situations (input data not compressible);
49 the worst-case size evaluation is provided by the function LZ4_compressBound() (see "lz4.h").
50*/
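A small sketch of the drop-in use this note implies: the same calling convention as LZ4_compress(), more CPU time for a better ratio, and an identical decompression path (the helper name and ownership convention are illustrative):

#include <stdlib.h>

#include "lz4.h"   /* LZ4_compressBound(), LZ4_uncompress() */
#include "lz4hc.h"

static int
pack_hc(const char *src, int isize, char **out_buf)
{
   char *dst = malloc(LZ4_compressBound(isize));
   int n;

   if (!dst) return -1;
   n = LZ4_compressHC(src, dst, isize); /* bytes written to dst */
   *out_buf = dst;                      /* caller frees; decode later with LZ4_uncompress(dst, plain, isize) */
   return n;
}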
51
52
53/* Note :
54Decompression functions are provided within the regular LZ4 source code (see "lz4.h") (BSD license)
55*/
56
57
58#if defined (__cplusplus)
59}
60#endif