Commit c72ac7a1 authored by Chanho Min's avatar Chanho Min Committed by Linus Torvalds

lib: add lz4 compressor module

This patchset is for supporting LZ4 compression and the crypto API using
it.

As shown below, the compressed data size is a little bit bigger, but the
compression speed is faster when unaligned memory access is enabled.  We can use
lz4 de/compression through the crypto API as well.  Also, it will be useful
for other potential users of lz4 compression.

lz4 Compression Benchmark:
Compiler: ARM gcc 4.6.4
ARMv7, 1 GHz based board
   Kernel: linux 3.4
   Uncompressed data Size: 101 MB
         Compressed Size  compression Speed
   LZO   72.1MB		  32.1MB/s, 33.0MB/s(UA)
   LZ4   75.1MB		  30.4MB/s, 35.9MB/s(UA)
   LZ4HC 59.8MB		   2.4MB/s,  2.5MB/s(UA)
- UA: Unaligned memory Access support
- Latest patch set for LZO applied

This patch:

Add support for LZ4 compression in the Linux Kernel.  LZ4 Compression APIs
for kernel are based on LZ4 implementation by Yann Collet and were changed
for kernel coding style.

LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
LZ4 source repository : http://code.google.com/p/lz4/
svn revision : r90

Two APIs are added:

lz4_compress() supports basic lz4 compression, whereas lz4hc_compress()
provides high compression: CPU performance is lower but the compression
ratio is higher.  Also, we require pre-allocated working memory of the
defined size, and the destination buffer must be allocated with the size of
lz4_compressbound.

[akpm@linux-foundation.org: make lz4_compresshcctx() static]
Signed-off-by: default avatarChanho Min <chanho.min@lge.com>
Cc: "Darrick J. Wong" <djwong@us.ibm.com>
Cc: Bob Pearson <rpearson@systemfabricworks.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Herbert Xu <herbert@gondor.hengli.com.au>
Cc: Yann Collet <yann.collet.73@gmail.com>
Cc: Kyungsik Lee <kyungsik.lee@lge.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f9b493ac
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *))
#define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *))
/* /*
* lz4_compressbound() * lz4_compressbound()
...@@ -20,6 +22,40 @@ static inline size_t lz4_compressbound(size_t isize) ...@@ -20,6 +22,40 @@ static inline size_t lz4_compressbound(size_t isize)
return isize + (isize / 255) + 16; return isize + (isize / 255) + 16;
} }
/*
* lz4_compress()
* src : source address of the original data
* src_len : size of the original data
* dst : output buffer address of the compressed data
* This requires 'dst' of size LZ4_COMPRESSBOUND.
* dst_len : is the output size, which is returned after compress done
* workmem : address of the working memory.
* This requires 'workmem' of size LZ4_MEM_COMPRESS.
* return : Success if return 0
* Error if return (< 0)
* note : Destination buffer and workmem must be already allocated with
* the defined size.
*/
int lz4_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
/*
* lz4hc_compress()
* src : source address of the original data
* src_len : size of the original data
* dst : output buffer address of the compressed data
* This requires 'dst' of size LZ4_COMPRESSBOUND.
* dst_len : is the output size, which is returned after compress done
* wrkmem : address of the working memory.
* This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS.
* return : Success if return 0
* Error if return (< 0)
* note : Destination buffer and workmem must be already allocated with
* the defined size.
*/
int lz4hc_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
/* /*
* lz4_decompress() * lz4_decompress()
* src : source address of the compressed data * src : source address of the compressed data
......
...@@ -194,6 +194,12 @@ config LZO_COMPRESS ...@@ -194,6 +194,12 @@ config LZO_COMPRESS
config LZO_DECOMPRESS config LZO_DECOMPRESS
tristate tristate
config LZ4_COMPRESS
tristate
config LZ4HC_COMPRESS
tristate
config LZ4_DECOMPRESS config LZ4_DECOMPRESS
tristate tristate
......
...@@ -75,6 +75,8 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ ...@@ -75,6 +75,8 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o obj-$(CONFIG_BCH) += bch.o
obj-$(CONFIG_LZO_COMPRESS) += lzo/ obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/ obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
obj-$(CONFIG_XZ_DEC) += xz/ obj-$(CONFIG_XZ_DEC) += xz/
obj-$(CONFIG_RAID6_PQ) += raid6/ obj-$(CONFIG_RAID6_PQ) += raid6/
......
obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
This diff is collapsed.
...@@ -22,23 +22,40 @@ ...@@ -22,23 +22,40 @@
* Architecture-specific macros * Architecture-specific macros
*/ */
#define BYTE u8 #define BYTE u8
typedef struct _U16_S { u16 v; } U16_S;
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
|| defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \ || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS) && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;
#define A16(x) (((U16_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v) #define A32(x) (((U32_S *)(x))->v)
#define A64(x) (((U64_S *)(x))->v) #define A64(x) (((U64_S *)(x))->v)
#define PUT4(s, d) (A32(d) = A32(s)) #define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s)) #define PUT8(s, d) (A64(d) = A64(s))
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
do { \
A16(p) = v; \
p += 2; \
} while (0)
#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ #else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))
#define PUT4(s, d) \ #define PUT4(s, d) \
put_unaligned(get_unaligned((const u32 *) s), (u32 *) d) put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
#define PUT8(s, d) \ #define PUT8(s, d) \
put_unaligned(get_unaligned((const u64 *) s), (u64 *) d) put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
do { \
put_unaligned(v, (u16 *)(p)); \
p += 2; \
} while (0)
#endif #endif
#define COPYLENGTH 8 #define COPYLENGTH 8
...@@ -46,6 +63,29 @@ typedef struct _U64_S { u64 v; } U64_S; ...@@ -46,6 +63,29 @@ typedef struct _U64_S { u64 v; } U64_S;
#define ML_MASK ((1U << ML_BITS) - 1) #define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS) #define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1) #define RUN_MASK ((1U << RUN_BITS) - 1)
#define MEMORY_USAGE 14
#define MINMATCH 4
#define SKIPSTRENGTH 6
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH + MINMATCH)
#define MINLENGTH (MFLIMIT + 1)
#define MAXD_LOG 16
#define MAXD (1 << MAXD_LOG)
#define MAXD_MASK (u32)(MAXD - 1)
#define MAX_DISTANCE (MAXD - 1)
#define HASH_LOG (MAXD_LOG - 1)
#define HASHTABLESIZE (1 << HASH_LOG)
#define MAX_NB_ATTEMPTS 256
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1))
#define HASHLOG64K ((MEMORY_USAGE - 2) + 1)
#define HASH64KTABLESIZE (1U << HASHLOG64K)
#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
((MINMATCH * 8) - (MEMORY_USAGE-2)))
#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \
((MINMATCH * 8) - HASHLOG64K))
#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
((MINMATCH * 8) - HASH_LOG))
#if LZ4_ARCH64/* 64-bit */ #if LZ4_ARCH64/* 64-bit */
#define STEPSIZE 8 #define STEPSIZE 8
...@@ -65,6 +105,13 @@ typedef struct _U64_S { u64 v; } U64_S; ...@@ -65,6 +105,13 @@ typedef struct _U64_S { u64 v; } U64_S;
LZ4_WILDCOPY(s, d, e); \ LZ4_WILDCOPY(s, d, e); \
} \ } \
} while (0) } while (0)
#define HTYPE u32
#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
#endif
#else /* 32-bit */ #else /* 32-bit */
#define STEPSIZE 4 #define STEPSIZE 4
...@@ -83,6 +130,14 @@ typedef struct _U64_S { u64 v; } U64_S; ...@@ -83,6 +130,14 @@ typedef struct _U64_S { u64 v; } U64_S;
} while (0) } while (0)
#define LZ4_SECURECOPY LZ4_WILDCOPY #define LZ4_SECURECOPY LZ4_WILDCOPY
#define HTYPE const u8*
#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
#endif
#endif #endif
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \ #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
...@@ -92,3 +147,10 @@ typedef struct _U64_S { u64 v; } U64_S; ...@@ -92,3 +147,10 @@ typedef struct _U64_S { u64 v; } U64_S;
do { \ do { \
LZ4_COPYPACKET(s, d); \ LZ4_COPYPACKET(s, d); \
} while (d < e) } while (d < e)
#define LZ4_BLINDCOPY(s, d, l) \
do { \
u8 *e = (d) + l; \
LZ4_WILDCOPY(s, d, e); \
d = e; \
} while (0)
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment