v4.6
  1/*
  2 * LZ4 - Fast LZ compression algorithm
  3 * Copyright (C) 2011-2012, Yann Collet.
  4 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
  5
  6 * Redistribution and use in source and binary forms, with or without
  7 * modification, are permitted provided that the following conditions are
  8 * met:
  9 *
 10 *     * Redistributions of source code must retain the above copyright
 11 * notice, this list of conditions and the following disclaimer.
 12 *     * Redistributions in binary form must reproduce the above
 13 * copyright notice, this list of conditions and the following disclaimer
 14 * in the documentation and/or other materials provided with the
 15 * distribution.
 16 *
 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 28 *
 29 * You can contact the author at :
 30 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
 31 * - LZ4 source repository : http://code.google.com/p/lz4/
 32 *
 33 *  Changed for kernel use by:
 34 *  Chanho Min <chanho.min@lge.com>
 35 */
 36
 37#include <linux/module.h>
 38#include <linux/kernel.h>
 39#include <linux/lz4.h>
 40#include <asm/unaligned.h>
 41#include "lz4defs.h"
 42
 43/*
 44 * LZ4_compressCtx :
 45 * -----------------
 46 * Compress 'isize' bytes from 'source' into an output buffer 'dest' of
  47 * maximum size 'maxOutputSize'. If it cannot achieve it, compression
  48 * will stop, and the result of the function will be zero.
 49 * return : the number of bytes written in buffer 'dest', or 0 if the
 50 * compression fails
 51 */
 52static inline int lz4_compressctx(void *ctx,
 53		const char *source,
 54		char *dest,
 55		int isize,
 56		int maxoutputsize)
 57{
 58	HTYPE *hashtable = (HTYPE *)ctx;
 59	const u8 *ip = (u8 *)source;
 60#if LZ4_ARCH64
 61	const BYTE * const base = ip;
 62#else
 63	const int base = 0;
 64#endif
 65	const u8 *anchor = ip;
 66	const u8 *const iend = ip + isize;
 67	const u8 *const mflimit = iend - MFLIMIT;
 68	#define MATCHLIMIT (iend - LASTLITERALS)
 69
 70	u8 *op = (u8 *) dest;
 71	u8 *const oend = op + maxoutputsize;
 72	int length;
 73	const int skipstrength = SKIPSTRENGTH;
 74	u32 forwardh;
 75	int lastrun;
 76
 77	/* Init */
 78	if (isize < MINLENGTH)
 79		goto _last_literals;
 80
 81	memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
 82
 83	/* First Byte */
 84	hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
 85	ip++;
 86	forwardh = LZ4_HASH_VALUE(ip);
 87
 88	/* Main Loop */
 89	for (;;) {
 90		int findmatchattempts = (1U << skipstrength) + 3;
 91		const u8 *forwardip = ip;
 92		const u8 *ref;
 93		u8 *token;
 94
 95		/* Find a match */
 96		do {
 97			u32 h = forwardh;
 98			int step = findmatchattempts++ >> skipstrength;
 99			ip = forwardip;
100			forwardip = ip + step;
101
102			if (unlikely(forwardip > mflimit))
103				goto _last_literals;
104
105			forwardh = LZ4_HASH_VALUE(forwardip);
106			ref = base + hashtable[h];
107			hashtable[h] = ip - base;
108		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
109
110		/* Catch up */
111		while ((ip > anchor) && (ref > (u8 *)source) &&
112			unlikely(ip[-1] == ref[-1])) {
113			ip--;
114			ref--;
115		}
116
117		/* Encode Literal length */
118		length = (int)(ip - anchor);
119		token = op++;
120		/* check output limit */
121		if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
122			(length >> 8) > oend))
123			return 0;
124
125		if (length >= (int)RUN_MASK) {
126			int len;
127			*token = (RUN_MASK << ML_BITS);
128			len = length - RUN_MASK;
129			for (; len > 254 ; len -= 255)
130				*op++ = 255;
131			*op++ = (u8)len;
132		} else
133			*token = (length << ML_BITS);
134
135		/* Copy Literals */
136		LZ4_BLINDCOPY(anchor, op, length);
137_next_match:
138		/* Encode Offset */
139		LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
140
141		/* Start Counting */
142		ip += MINMATCH;
143		/* MinMatch verified */
144		ref += MINMATCH;
145		anchor = ip;
146		while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) {
147			#if LZ4_ARCH64
148			u64 diff = A64(ref) ^ A64(ip);
149			#else
150			u32 diff = A32(ref) ^ A32(ip);
151			#endif
152			if (!diff) {
153				ip += STEPSIZE;
154				ref += STEPSIZE;
155				continue;
156			}
157			ip += LZ4_NBCOMMONBYTES(diff);
158			goto _endcount;
159		}
160		#if LZ4_ARCH64
161		if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
162			ip += 4;
163			ref += 4;
164		}
165		#endif
166		if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
167			ip += 2;
168			ref += 2;
169		}
170		if ((ip < MATCHLIMIT) && (*ref == *ip))
171			ip++;
172_endcount:
173		/* Encode MatchLength */
174		length = (int)(ip - anchor);
175		/* Check output limit */
176		if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend))
177			return 0;
178		if (length >= (int)ML_MASK) {
179			*token += ML_MASK;
180			length -= ML_MASK;
181			for (; length > 509 ; length -= 510) {
182				*op++ = 255;
183				*op++ = 255;
184			}
185			if (length > 254) {
186				length -= 255;
187				*op++ = 255;
188			}
189			*op++ = (u8)length;
190		} else
191			*token += length;
192
193		/* Test end of chunk */
194		if (ip > mflimit) {
195			anchor = ip;
196			break;
197		}
198
199		/* Fill table */
200		hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
201
202		/* Test next position */
203		ref = base + hashtable[LZ4_HASH_VALUE(ip)];
204		hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
205		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
206			token = op++;
207			*token = 0;
208			goto _next_match;
209		}
210
211		/* Prepare next loop */
212		anchor = ip++;
213		forwardh = LZ4_HASH_VALUE(ip);
214	}
215
216_last_literals:
217	/* Encode Last Literals */
218	lastrun = (int)(iend - anchor);
219	if (((char *)op - dest) + lastrun + 1
220		+ ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize)
221		return 0;
222
223	if (lastrun >= (int)RUN_MASK) {
224		*op++ = (RUN_MASK << ML_BITS);
225		lastrun -= RUN_MASK;
226		for (; lastrun > 254 ; lastrun -= 255)
227			*op++ = 255;
228		*op++ = (u8)lastrun;
229	} else
230		*op++ = (lastrun << ML_BITS);
231	memcpy(op, anchor, iend - anchor);
232	op += iend - anchor;
233
234	/* End */
235	return (int)(((char *)op) - dest);
236}
237
238static inline int lz4_compress64kctx(void *ctx,
239		const char *source,
240		char *dest,
241		int isize,
242		int maxoutputsize)
243{
244	u16 *hashtable = (u16 *)ctx;
245	const u8 *ip = (u8 *) source;
246	const u8 *anchor = ip;
247	const u8 *const base = ip;
248	const u8 *const iend = ip + isize;
249	const u8 *const mflimit = iend - MFLIMIT;
250	#define MATCHLIMIT (iend - LASTLITERALS)
251
252	u8 *op = (u8 *) dest;
253	u8 *const oend = op + maxoutputsize;
254	int len, length;
255	const int skipstrength = SKIPSTRENGTH;
256	u32 forwardh;
257	int lastrun;
258
259	/* Init */
260	if (isize < MINLENGTH)
261		goto _last_literals;
262
263	memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
264
265	/* First Byte */
266	ip++;
267	forwardh = LZ4_HASH64K_VALUE(ip);
268
269	/* Main Loop */
270	for (;;) {
271		int findmatchattempts = (1U << skipstrength) + 3;
272		const u8 *forwardip = ip;
273		const u8 *ref;
274		u8 *token;
275
276		/* Find a match */
277		do {
278			u32 h = forwardh;
279			int step = findmatchattempts++ >> skipstrength;
280			ip = forwardip;
281			forwardip = ip + step;
282
283			if (forwardip > mflimit)
284				goto _last_literals;
285
286			forwardh = LZ4_HASH64K_VALUE(forwardip);
287			ref = base + hashtable[h];
288			hashtable[h] = (u16)(ip - base);
289		} while (A32(ref) != A32(ip));
290
291		/* Catch up */
292		while ((ip > anchor) && (ref > (u8 *)source)
293			&& (ip[-1] == ref[-1])) {
294			ip--;
295			ref--;
296		}
297
298		/* Encode Literal length */
299		length = (int)(ip - anchor);
300		token = op++;
301		/* Check output limit */
302		if (unlikely(op + length + (2 + 1 + LASTLITERALS)
303			+ (length >> 8) > oend))
304			return 0;
305		if (length >= (int)RUN_MASK) {
306			*token = (RUN_MASK << ML_BITS);
307			len = length - RUN_MASK;
308			for (; len > 254 ; len -= 255)
309				*op++ = 255;
310			*op++ = (u8)len;
311		} else
312			*token = (length << ML_BITS);
313
314		/* Copy Literals */
315		LZ4_BLINDCOPY(anchor, op, length);
316
317_next_match:
318		/* Encode Offset */
319		LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
320
321		/* Start Counting */
322		ip += MINMATCH;
323		/* MinMatch verified */
324		ref += MINMATCH;
325		anchor = ip;
326
327		while (ip < MATCHLIMIT - (STEPSIZE - 1)) {
328			#if LZ4_ARCH64
329			u64 diff = A64(ref) ^ A64(ip);
330			#else
331			u32 diff = A32(ref) ^ A32(ip);
332			#endif
333
334			if (!diff) {
335				ip += STEPSIZE;
336				ref += STEPSIZE;
337				continue;
338			}
339			ip += LZ4_NBCOMMONBYTES(diff);
340			goto _endcount;
341		}
342		#if LZ4_ARCH64
343		if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
344			ip += 4;
345			ref += 4;
346		}
347		#endif
348		if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
349			ip += 2;
350			ref += 2;
351		}
352		if ((ip < MATCHLIMIT) && (*ref == *ip))
353			ip++;
354_endcount:
355
356		/* Encode MatchLength */
357		len = (int)(ip - anchor);
358		/* Check output limit */
359		if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
360			return 0;
361		if (len >= (int)ML_MASK) {
362			*token += ML_MASK;
363			len -= ML_MASK;
364			for (; len > 509 ; len -= 510) {
365				*op++ = 255;
366				*op++ = 255;
367			}
368			if (len > 254) {
369				len -= 255;
370				*op++ = 255;
371			}
372			*op++ = (u8)len;
373		} else
374			*token += len;
375
376		/* Test end of chunk */
377		if (ip > mflimit) {
378			anchor = ip;
379			break;
380		}
381
382		/* Fill table */
383		hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base);
384
385		/* Test next position */
386		ref = base + hashtable[LZ4_HASH64K_VALUE(ip)];
387		hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base);
388		if (A32(ref) == A32(ip)) {
389			token = op++;
390			*token = 0;
391			goto _next_match;
392		}
393
394		/* Prepare next loop */
395		anchor = ip++;
396		forwardh = LZ4_HASH64K_VALUE(ip);
397	}
398
399_last_literals:
400	/* Encode Last Literals */
401	lastrun = (int)(iend - anchor);
402	if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend)
403		return 0;
404	if (lastrun >= (int)RUN_MASK) {
405		*op++ = (RUN_MASK << ML_BITS);
406		lastrun -= RUN_MASK;
407		for (; lastrun > 254 ; lastrun -= 255)
408			*op++ = 255;
409		*op++ = (u8)lastrun;
410	} else
411		*op++ = (lastrun << ML_BITS);
412	memcpy(op, anchor, iend - anchor);
413	op += iend - anchor;
414	/* End */
415	return (int)(((char *)op) - dest);
416}
417
418int lz4_compress(const unsigned char *src, size_t src_len,
419			unsigned char *dst, size_t *dst_len, void *wrkmem)
420{
421	int ret = -1;
422	int out_len = 0;
423
424	if (src_len < LZ4_64KLIMIT)
425		out_len = lz4_compress64kctx(wrkmem, src, dst, src_len,
426				lz4_compressbound(src_len));
427	else
428		out_len = lz4_compressctx(wrkmem, src, dst, src_len,
429				lz4_compressbound(src_len));
430
431	if (out_len < 0)
432		goto exit;
433
434	*dst_len = out_len;
435
436	return 0;
437exit:
438	return ret;
439}
440EXPORT_SYMBOL(lz4_compress);
441
442MODULE_LICENSE("Dual BSD/GPL");
443MODULE_DESCRIPTION("LZ4 compressor");
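For context, here is a minimal caller-side sketch of the v4.6 one-shot API listed above. The wrapper name example_compress, the vmalloc'd scratch buffer and the error mapping are illustrative assumptions; only lz4_compress(), lz4_compressbound() and LZ4_MEM_COMPRESS come from <linux/lz4.h>.

#include <linux/lz4.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Illustrative sketch, not part of the file above: one-shot compression with
 * the legacy lz4_compress() entry point.  dst must be able to hold
 * lz4_compressbound(src_len) bytes, since that is the bound the wrapper
 * passes down as maxoutputsize. */
static int example_compress(const unsigned char *src, size_t src_len,
			    unsigned char *dst, size_t *dst_len)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);	/* hash table scratch */
	int ret;

	if (!wrkmem)
		return -ENOMEM;

	ret = lz4_compress(src, src_len, dst, dst_len, wrkmem);

	vfree(wrkmem);
	return ret;	/* 0 on success, -1 if compression failed */
}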
v6.13.7
  1/*
  2 * LZ4 - Fast LZ compression algorithm
  3 * Copyright (C) 2011 - 2016, Yann Collet.
  4 * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
  5 * Redistribution and use in source and binary forms, with or without
  6 * modification, are permitted provided that the following conditions are
  7 * met:
  8 *	* Redistributions of source code must retain the above copyright
  9 *	  notice, this list of conditions and the following disclaimer.
 10 *	* Redistributions in binary form must reproduce the above
 11 * copyright notice, this list of conditions and the following disclaimer
 12 * in the documentation and/or other materials provided with the
 13 * distribution.
 14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 17 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 18 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 20 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 25 * You can contact the author at :
 26 *	- LZ4 homepage : http://www.lz4.org
 27 *	- LZ4 source repository : https://github.com/lz4/lz4
 28 *
 29 *	Changed for kernel usage by:
 30 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 31 */
 32
 33/*-************************************
 34 *	Dependencies
 35 **************************************/
 36#include <linux/lz4.h>
 37#include "lz4defs.h"
 38#include <linux/module.h>
 39#include <linux/kernel.h>
 40#include <linux/unaligned.h>
 41
 42static const int LZ4_minLength = (MFLIMIT + 1);
 43static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
 44
 45/*-******************************
 46 *	Compression functions
 47 ********************************/
 48static FORCE_INLINE U32 LZ4_hash4(
 49	U32 sequence,
 50	tableType_t const tableType)
 51{
 52	if (tableType == byU16)
 53		return ((sequence * 2654435761U)
 54			>> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
 55	else
 56		return ((sequence * 2654435761U)
 57			>> ((MINMATCH * 8) - LZ4_HASHLOG));
 58}
 59
 60static FORCE_INLINE U32 LZ4_hash5(
 61	U64 sequence,
 62	tableType_t const tableType)
 63{
 64	const U32 hashLog = (tableType == byU16)
 65		? LZ4_HASHLOG + 1
 66		: LZ4_HASHLOG;
 67
 68#if LZ4_LITTLE_ENDIAN
 69	static const U64 prime5bytes = 889523592379ULL;
 70
 71	return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
 72#else
 73	static const U64 prime8bytes = 11400714785074694791ULL;
 74
 75	return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
 76#endif
 77}
 78
 79static FORCE_INLINE U32 LZ4_hashPosition(
 80	const void *p,
 81	tableType_t const tableType)
 82{
 83#if LZ4_ARCH64
 84	if (tableType == byU32)
 85		return LZ4_hash5(LZ4_read_ARCH(p), tableType);
 86#endif
 87
 88	return LZ4_hash4(LZ4_read32(p), tableType);
 89}
 90
 91static void LZ4_putPositionOnHash(
 92	const BYTE *p,
 93	U32 h,
 94	void *tableBase,
 95	tableType_t const tableType,
 96	const BYTE *srcBase)
 97{
 98	switch (tableType) {
 99	case byPtr:
100	{
101		const BYTE **hashTable = (const BYTE **)tableBase;
102
103		hashTable[h] = p;
104		return;
105	}
106	case byU32:
107	{
108		U32 *hashTable = (U32 *) tableBase;
109
110		hashTable[h] = (U32)(p - srcBase);
111		return;
112	}
113	case byU16:
114	{
115		U16 *hashTable = (U16 *) tableBase;
116
117		hashTable[h] = (U16)(p - srcBase);
118		return;
119	}
120	}
121}
122
123static FORCE_INLINE void LZ4_putPosition(
124	const BYTE *p,
125	void *tableBase,
126	tableType_t tableType,
127	const BYTE *srcBase)
128{
129	U32 const h = LZ4_hashPosition(p, tableType);
130
131	LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
132}
133
134static const BYTE *LZ4_getPositionOnHash(
135	U32 h,
136	void *tableBase,
137	tableType_t tableType,
138	const BYTE *srcBase)
139{
140	if (tableType == byPtr) {
141		const BYTE **hashTable = (const BYTE **) tableBase;
142
143		return hashTable[h];
144	}
145
146	if (tableType == byU32) {
147		const U32 * const hashTable = (U32 *) tableBase;
148
149		return hashTable[h] + srcBase;
150	}
151
152	{
153		/* default, to ensure a return */
154		const U16 * const hashTable = (U16 *) tableBase;
155
156		return hashTable[h] + srcBase;
157	}
158}
159
160static FORCE_INLINE const BYTE *LZ4_getPosition(
161	const BYTE *p,
162	void *tableBase,
163	tableType_t tableType,
164	const BYTE *srcBase)
165{
166	U32 const h = LZ4_hashPosition(p, tableType);
167
168	return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
169}
170
171
172/*
173 * LZ4_compress_generic() :
174 * inlined, to ensure branches are decided at compilation time
175 */
176static FORCE_INLINE int LZ4_compress_generic(
177	LZ4_stream_t_internal * const dictPtr,
178	const char * const source,
179	char * const dest,
180	const int inputSize,
181	const int maxOutputSize,
182	const limitedOutput_directive outputLimited,
183	const tableType_t tableType,
184	const dict_directive dict,
185	const dictIssue_directive dictIssue,
186	const U32 acceleration)
187{
188	const BYTE *ip = (const BYTE *) source;
189	const BYTE *base;
190	const BYTE *lowLimit;
191	const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
192	const BYTE * const dictionary = dictPtr->dictionary;
193	const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
194	const size_t dictDelta = dictEnd - (const BYTE *)source;
195	const BYTE *anchor = (const BYTE *) source;
196	const BYTE * const iend = ip + inputSize;
197	const BYTE * const mflimit = iend - MFLIMIT;
198	const BYTE * const matchlimit = iend - LASTLITERALS;
199
200	BYTE *op = (BYTE *) dest;
201	BYTE * const olimit = op + maxOutputSize;
202
203	U32 forwardH;
204	size_t refDelta = 0;
205
206	/* Init conditions */
207	if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
208		/* Unsupported inputSize, too large (or negative) */
209		return 0;
210	}
211
212	switch (dict) {
213	case noDict:
214	default:
215		base = (const BYTE *)source;
216		lowLimit = (const BYTE *)source;
217		break;
218	case withPrefix64k:
219		base = (const BYTE *)source - dictPtr->currentOffset;
220		lowLimit = (const BYTE *)source - dictPtr->dictSize;
221		break;
222	case usingExtDict:
223		base = (const BYTE *)source - dictPtr->currentOffset;
224		lowLimit = (const BYTE *)source;
225		break;
226	}
227
228	if ((tableType == byU16)
229		&& (inputSize >= LZ4_64Klimit)) {
230		/* Size too large (not within 64K limit) */
231		return 0;
232	}
233
234	if (inputSize < LZ4_minLength) {
235		/* Input too small, no compression (all literals) */
236		goto _last_literals;
237	}
238
239	/* First Byte */
240	LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
241	ip++;
242	forwardH = LZ4_hashPosition(ip, tableType);
243
244	/* Main Loop */
245	for ( ; ; ) {
246		const BYTE *match;
247		BYTE *token;
248
249		/* Find a match */
250		{
251			const BYTE *forwardIp = ip;
252			unsigned int step = 1;
253			unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
254
255			do {
256				U32 const h = forwardH;
257
258				ip = forwardIp;
259				forwardIp += step;
260				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
261
262				if (unlikely(forwardIp > mflimit))
263					goto _last_literals;
264
265				match = LZ4_getPositionOnHash(h,
266					dictPtr->hashTable,
267					tableType, base);
268
269				if (dict == usingExtDict) {
270					if (match < (const BYTE *)source) {
271						refDelta = dictDelta;
272						lowLimit = dictionary;
273					} else {
274						refDelta = 0;
275						lowLimit = (const BYTE *)source;
276				}	 }
277
278				forwardH = LZ4_hashPosition(forwardIp,
279					tableType);
280
281				LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
282					tableType, base);
283			} while (((dictIssue == dictSmall)
284					? (match < lowRefLimit)
285					: 0)
286				|| ((tableType == byU16)
287					? 0
288					: (match + MAX_DISTANCE < ip))
289				|| (LZ4_read32(match + refDelta)
290					!= LZ4_read32(ip)));
291		}
292
293		/* Catch up */
294		while (((ip > anchor) & (match + refDelta > lowLimit))
295				&& (unlikely(ip[-1] == match[refDelta - 1]))) {
296			ip--;
297			match--;
298		}
299
300		/* Encode Literals */
301		{
302			unsigned const int litLength = (unsigned int)(ip - anchor);
303
304			token = op++;
305
306			if ((outputLimited) &&
307				/* Check output buffer overflow */
308				(unlikely(op + litLength +
309					(2 + 1 + LASTLITERALS) +
310					(litLength / 255) > olimit)))
311				return 0;
312
313			if (litLength >= RUN_MASK) {
314				int len = (int)litLength - RUN_MASK;
315
316				*token = (RUN_MASK << ML_BITS);
317
318				for (; len >= 255; len -= 255)
319					*op++ = 255;
320				*op++ = (BYTE)len;
321			} else
322				*token = (BYTE)(litLength << ML_BITS);
323
324			/* Copy Literals */
325			LZ4_wildCopy(op, anchor, op + litLength);
326			op += litLength;
327		}
328
329_next_match:
330		/* Encode Offset */
331		LZ4_writeLE16(op, (U16)(ip - match));
332		op += 2;
333
334		/* Encode MatchLength */
335		{
336			unsigned int matchCode;
337
338			if ((dict == usingExtDict)
339				&& (lowLimit == dictionary)) {
340				const BYTE *limit;
341
342				match += refDelta;
343				limit = ip + (dictEnd - match);
344
345				if (limit > matchlimit)
346					limit = matchlimit;
347
348				matchCode = LZ4_count(ip + MINMATCH,
349					match + MINMATCH, limit);
350
351				ip += MINMATCH + matchCode;
352
353				if (ip == limit) {
354					unsigned const int more = LZ4_count(ip,
355						(const BYTE *)source,
356						matchlimit);
357
358					matchCode += more;
359					ip += more;
360				}
361			} else {
362				matchCode = LZ4_count(ip + MINMATCH,
363					match + MINMATCH, matchlimit);
364				ip += MINMATCH + matchCode;
365			}
366
367			if (outputLimited &&
368				/* Check output buffer overflow */
369				(unlikely(op +
370					(1 + LASTLITERALS) +
371					(matchCode >> 8) > olimit)))
372				return 0;
373
374			if (matchCode >= ML_MASK) {
375				*token += ML_MASK;
376				matchCode -= ML_MASK;
377				LZ4_write32(op, 0xFFFFFFFF);
378
379				while (matchCode >= 4 * 255) {
380					op += 4;
381					LZ4_write32(op, 0xFFFFFFFF);
382					matchCode -= 4 * 255;
383				}
384
385				op += matchCode / 255;
386				*op++ = (BYTE)(matchCode % 255);
387			} else
388				*token += (BYTE)(matchCode);
389		}
390
391		anchor = ip;
392
393		/* Test end of chunk */
394		if (ip > mflimit)
395			break;
396
397		/* Fill table */
398		LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
399
400		/* Test next position */
401		match = LZ4_getPosition(ip, dictPtr->hashTable,
402			tableType, base);
403
404		if (dict == usingExtDict) {
405			if (match < (const BYTE *)source) {
406				refDelta = dictDelta;
407				lowLimit = dictionary;
408			} else {
409				refDelta = 0;
410				lowLimit = (const BYTE *)source;
411			}
412		}
413
414		LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
415
416		if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
417			&& (match + MAX_DISTANCE >= ip)
418			&& (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
419			token = op++;
420			*token = 0;
421			goto _next_match;
422		}
423
424		/* Prepare next loop */
425		forwardH = LZ4_hashPosition(++ip, tableType);
426	}
427
428_last_literals:
429	/* Encode Last Literals */
430	{
431		size_t const lastRun = (size_t)(iend - anchor);
432
433		if ((outputLimited) &&
434			/* Check output buffer overflow */
435			((op - (BYTE *)dest) + lastRun + 1 +
436			((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
437			return 0;
438
439		if (lastRun >= RUN_MASK) {
440			size_t accumulator = lastRun - RUN_MASK;
441			*op++ = RUN_MASK << ML_BITS;
442			for (; accumulator >= 255; accumulator -= 255)
443				*op++ = 255;
444			*op++ = (BYTE) accumulator;
445		} else {
446			*op++ = (BYTE)(lastRun << ML_BITS);
447		}
448
449		LZ4_memcpy(op, anchor, lastRun);
450
451		op += lastRun;
452	}
453
454	/* End */
455	return (int) (((char *)op) - dest);
456}
457
458static int LZ4_compress_fast_extState(
459	void *state,
460	const char *source,
461	char *dest,
462	int inputSize,
463	int maxOutputSize,
464	int acceleration)
465{
466	LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
467#if LZ4_ARCH64
468	const tableType_t tableType = byU32;
469#else
470	const tableType_t tableType = byPtr;
471#endif
472
473	LZ4_resetStream((LZ4_stream_t *)state);
474
475	if (acceleration < 1)
476		acceleration = LZ4_ACCELERATION_DEFAULT;
477
478	if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
479		if (inputSize < LZ4_64Klimit)
480			return LZ4_compress_generic(ctx, source,
481				dest, inputSize, 0,
482				noLimit, byU16, noDict,
483				noDictIssue, acceleration);
484		else
485			return LZ4_compress_generic(ctx, source,
486				dest, inputSize, 0,
487				noLimit, tableType, noDict,
488				noDictIssue, acceleration);
489	} else {
490		if (inputSize < LZ4_64Klimit)
491			return LZ4_compress_generic(ctx, source,
492				dest, inputSize,
493				maxOutputSize, limitedOutput, byU16, noDict,
494				noDictIssue, acceleration);
495		else
496			return LZ4_compress_generic(ctx, source,
497				dest, inputSize,
498				maxOutputSize, limitedOutput, tableType, noDict,
499				noDictIssue, acceleration);
500	}
501}
502
503int LZ4_compress_fast(const char *source, char *dest, int inputSize,
504	int maxOutputSize, int acceleration, void *wrkmem)
505{
506	return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
507		maxOutputSize, acceleration);
508}
509EXPORT_SYMBOL(LZ4_compress_fast);
510
511int LZ4_compress_default(const char *source, char *dest, int inputSize,
512	int maxOutputSize, void *wrkmem)
513{
514	return LZ4_compress_fast(source, dest, inputSize,
515		maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
516}
517EXPORT_SYMBOL(LZ4_compress_default);
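A minimal sketch of how a caller might use the one-shot entry points above; the helper name and the error mapping are illustrative assumptions, while LZ4_MEM_COMPRESS, LZ4_COMPRESSBOUND() and LZ4_compress_default() come from <linux/lz4.h>.

#include <linux/lz4.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Illustrative sketch, not part of the file above: sizing dst to
 * LZ4_COMPRESSBOUND(src_len) guarantees success; with a smaller buffer
 * LZ4_compress_default() returns 0 when the data does not fit. */
static int example_compress_default(const char *src, int src_len,
				    char *dst, int dst_capacity)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);	/* per-call state */
	int out_len;

	if (!wrkmem)
		return -ENOMEM;

	out_len = LZ4_compress_default(src, dst, src_len,
				       dst_capacity, wrkmem);
	vfree(wrkmem);

	return out_len ? out_len : -ENOSPC;	/* bytes written, or error */
}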
518
519/*-******************************
520 *	*_destSize() variant
521 ********************************/
522static int LZ4_compress_destSize_generic(
523	LZ4_stream_t_internal * const ctx,
524	const char * const src,
525	char * const dst,
526	int * const srcSizePtr,
527	const int targetDstSize,
528	const tableType_t tableType)
529{
530	const BYTE *ip = (const BYTE *) src;
531	const BYTE *base = (const BYTE *) src;
532	const BYTE *lowLimit = (const BYTE *) src;
533	const BYTE *anchor = ip;
534	const BYTE * const iend = ip + *srcSizePtr;
535	const BYTE * const mflimit = iend - MFLIMIT;
536	const BYTE * const matchlimit = iend - LASTLITERALS;
537
538	BYTE *op = (BYTE *) dst;
539	BYTE * const oend = op + targetDstSize;
540	BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
541		- 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
542	BYTE * const oMaxMatch = op + targetDstSize
543		- (LASTLITERALS + 1 /* token */);
544	BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
545
546	U32 forwardH;
547
548	/* Init conditions */
549	/* Impossible to store anything */
550	if (targetDstSize < 1)
551		return 0;
552	/* Unsupported input size, too large (or negative) */
553	if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
554		return 0;
555	/* Size too large (not within 64K limit) */
556	if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
557		return 0;
558	/* Input too small, no compression (all literals) */
559	if (*srcSizePtr < LZ4_minLength)
560		goto _last_literals;
561
562	/* First Byte */
563	*srcSizePtr = 0;
564	LZ4_putPosition(ip, ctx->hashTable, tableType, base);
565	ip++; forwardH = LZ4_hashPosition(ip, tableType);
566
567	/* Main Loop */
568	for ( ; ; ) {
569		const BYTE *match;
570		BYTE *token;
571
572		/* Find a match */
573		{
574			const BYTE *forwardIp = ip;
575			unsigned int step = 1;
576			unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
577
578			do {
579				U32 h = forwardH;
580
581				ip = forwardIp;
582				forwardIp += step;
583				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
584
585				if (unlikely(forwardIp > mflimit))
586					goto _last_literals;
587
588				match = LZ4_getPositionOnHash(h, ctx->hashTable,
589					tableType, base);
590				forwardH = LZ4_hashPosition(forwardIp,
591					tableType);
592				LZ4_putPositionOnHash(ip, h,
593					ctx->hashTable, tableType,
594					base);
595
596			} while (((tableType == byU16)
597				? 0
598				: (match + MAX_DISTANCE < ip))
599				|| (LZ4_read32(match) != LZ4_read32(ip)));
600		}
601
602		/* Catch up */
603		while ((ip > anchor)
604			&& (match > lowLimit)
605			&& (unlikely(ip[-1] == match[-1]))) {
606			ip--;
607			match--;
608		}
609
610		/* Encode Literal length */
611		{
612			unsigned int litLength = (unsigned int)(ip - anchor);
613
614			token = op++;
615			if (op + ((litLength + 240) / 255)
616				+ litLength > oMaxLit) {
617				/* Not enough space for a last match */
618				op--;
619				goto _last_literals;
620			}
621			if (litLength >= RUN_MASK) {
622				unsigned int len = litLength - RUN_MASK;
623				*token = (RUN_MASK<<ML_BITS);
624				for (; len >= 255; len -= 255)
625					*op++ = 255;
626				*op++ = (BYTE)len;
627			} else
628				*token = (BYTE)(litLength << ML_BITS);
629
630			/* Copy Literals */
631			LZ4_wildCopy(op, anchor, op + litLength);
632			op += litLength;
633		}
634
635_next_match:
636		/* Encode Offset */
637		LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
638
639		/* Encode MatchLength */
640		{
641			size_t matchLength = LZ4_count(ip + MINMATCH,
642			match + MINMATCH, matchlimit);
643
644			if (op + ((matchLength + 240)/255) > oMaxMatch) {
645				/* Match description too long : reduce it */
646				matchLength = (15 - 1) + (oMaxMatch - op) * 255;
647			}
648			ip += MINMATCH + matchLength;
649
650			if (matchLength >= ML_MASK) {
651				*token += ML_MASK;
652				matchLength -= ML_MASK;
653				while (matchLength >= 255) {
654					matchLength -= 255;
655					*op++ = 255;
656				}
657				*op++ = (BYTE)matchLength;
658			} else
659				*token += (BYTE)(matchLength);
660		}
661
662		anchor = ip;
663
664		/* Test end of block */
665		if (ip > mflimit)
666			break;
667		if (op > oMaxSeq)
668			break;
669
670		/* Fill table */
671		LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
672
673		/* Test next position */
674		match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
675		LZ4_putPosition(ip, ctx->hashTable, tableType, base);
676
677		if ((match + MAX_DISTANCE >= ip)
678			&& (LZ4_read32(match) == LZ4_read32(ip))) {
679			token = op++; *token = 0;
680			goto _next_match;
681		}
682
683		/* Prepare next loop */
684		forwardH = LZ4_hashPosition(++ip, tableType);
685	}
686
687_last_literals:
688	/* Encode Last Literals */
689	{
690		size_t lastRunSize = (size_t)(iend - anchor);
691
692		if (op + 1 /* token */
693			+ ((lastRunSize + 240) / 255) /* litLength */
694			+ lastRunSize /* literals */ > oend) {
695			/* adapt lastRunSize to fill 'dst' */
696			lastRunSize	= (oend - op) - 1;
697			lastRunSize -= (lastRunSize + 240) / 255;
698		}
699		ip = anchor + lastRunSize;
700
701		if (lastRunSize >= RUN_MASK) {
702			size_t accumulator = lastRunSize - RUN_MASK;
703
704			*op++ = RUN_MASK << ML_BITS;
705			for (; accumulator >= 255; accumulator -= 255)
706				*op++ = 255;
707			*op++ = (BYTE) accumulator;
708		} else {
709			*op++ = (BYTE)(lastRunSize<<ML_BITS);
710		}
711		LZ4_memcpy(op, anchor, lastRunSize);
712		op += lastRunSize;
713	}
714
715	/* End */
716	*srcSizePtr = (int) (((const char *)ip) - src);
717	return (int) (((char *)op) - dst);
718}
719
720static int LZ4_compress_destSize_extState(
721	LZ4_stream_t *state,
722	const char *src,
723	char *dst,
724	int *srcSizePtr,
725	int targetDstSize)
726{
727#if LZ4_ARCH64
728	const tableType_t tableType = byU32;
729#else
730	const tableType_t tableType = byPtr;
731#endif
732
733	LZ4_resetStream(state);
734
735	if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
736		/* compression success is guaranteed */
737		return LZ4_compress_fast_extState(
738			state, src, dst, *srcSizePtr,
739			targetDstSize, 1);
740	} else {
741		if (*srcSizePtr < LZ4_64Klimit)
742			return LZ4_compress_destSize_generic(
743				&state->internal_donotuse,
744				src, dst, srcSizePtr,
745				targetDstSize, byU16);
746		else
747			return LZ4_compress_destSize_generic(
748				&state->internal_donotuse,
749				src, dst, srcSizePtr,
750				targetDstSize, tableType);
751	}
752}
753
754
755int LZ4_compress_destSize(
756	const char *src,
757	char *dst,
758	int *srcSizePtr,
759	int targetDstSize,
760	void *wrkmem)
761{
762	return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
763		targetDstSize);
764}
765EXPORT_SYMBOL(LZ4_compress_destSize);
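A short sketch of the *_destSize() usage pattern shown above: compress as much of the source as fits into a fixed-size destination, with *srcSizePtr reporting how much input was actually consumed. Everything other than LZ4_compress_destSize() and LZ4_MEM_COMPRESS is an illustrative assumption.

#include <linux/lz4.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Illustrative sketch, not part of the file above. */
static int example_fill_buffer(const char *src, int src_len,
			       char *dst, int dst_size, int *consumed)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
	int out_len;

	if (!wrkmem)
		return -ENOMEM;

	*consumed = src_len;	/* in: bytes available, out: bytes consumed */
	out_len = LZ4_compress_destSize(src, dst, consumed,
					dst_size, wrkmem);
	vfree(wrkmem);

	return out_len;		/* compressed bytes written to dst */
}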
766
767/*-******************************
768 *	Streaming functions
769 ********************************/
770void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
771{
772	memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
773}
774
775int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
776	const char *dictionary, int dictSize)
777{
778	LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
779	const BYTE *p = (const BYTE *)dictionary;
780	const BYTE * const dictEnd = p + dictSize;
781	const BYTE *base;
782
783	if ((dict->initCheck)
784		|| (dict->currentOffset > 1 * GB)) {
785		/* Uninitialized structure, or reuse overflow */
786		LZ4_resetStream(LZ4_dict);
787	}
788
789	if (dictSize < (int)HASH_UNIT) {
790		dict->dictionary = NULL;
791		dict->dictSize = 0;
792		return 0;
793	}
794
795	if ((dictEnd - p) > 64 * KB)
796		p = dictEnd - 64 * KB;
797	dict->currentOffset += 64 * KB;
798	base = p - dict->currentOffset;
799	dict->dictionary = p;
800	dict->dictSize = (U32)(dictEnd - p);
801	dict->currentOffset += dict->dictSize;
802
803	while (p <= dictEnd - HASH_UNIT) {
804		LZ4_putPosition(p, dict->hashTable, byU32, base);
805		p += 3;
806	}
807
808	return dict->dictSize;
809}
810EXPORT_SYMBOL(LZ4_loadDict);
811
812static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
813	const BYTE *src)
814{
815	if ((LZ4_dict->currentOffset > 0x80000000) ||
816		((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
817		/* address space overflow */
818		/* rescale hash table */
819		U32 const delta = LZ4_dict->currentOffset - 64 * KB;
820		const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
821		int i;
822
823		for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
824			if (LZ4_dict->hashTable[i] < delta)
825				LZ4_dict->hashTable[i] = 0;
826			else
827				LZ4_dict->hashTable[i] -= delta;
828		}
829		LZ4_dict->currentOffset = 64 * KB;
830		if (LZ4_dict->dictSize > 64 * KB)
831			LZ4_dict->dictSize = 64 * KB;
832		LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
833	}
834}
835
836int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
837{
838	LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
839	const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
840
841	if ((U32)dictSize > 64 * KB) {
842		/* useless to define a dictionary > 64 * KB */
843		dictSize = 64 * KB;
844	}
845	if ((U32)dictSize > dict->dictSize)
846		dictSize = dict->dictSize;
847
848	memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
849
850	dict->dictionary = (const BYTE *)safeBuffer;
851	dict->dictSize = (U32)dictSize;
852
853	return dictSize;
854}
855EXPORT_SYMBOL(LZ4_saveDict);
856
857int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
858	char *dest, int inputSize, int maxOutputSize, int acceleration)
859{
860	LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
861	const BYTE * const dictEnd = streamPtr->dictionary
862		+ streamPtr->dictSize;
863
864	const BYTE *smallest = (const BYTE *) source;
865
866	if (streamPtr->initCheck) {
867		/* Uninitialized structure detected */
868		return 0;
869	}
870
871	if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
872		smallest = dictEnd;
873
874	LZ4_renormDictT(streamPtr, smallest);
875
876	if (acceleration < 1)
877		acceleration = LZ4_ACCELERATION_DEFAULT;
878
879	/* Check overlapping input/dictionary space */
880	{
881		const BYTE *sourceEnd = (const BYTE *) source + inputSize;
882
883		if ((sourceEnd > streamPtr->dictionary)
884			&& (sourceEnd < dictEnd)) {
885			streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
886			if (streamPtr->dictSize > 64 * KB)
887				streamPtr->dictSize = 64 * KB;
888			if (streamPtr->dictSize < 4)
889				streamPtr->dictSize = 0;
890			streamPtr->dictionary = dictEnd - streamPtr->dictSize;
891		}
892	}
893
894	/* prefix mode : source data follows dictionary */
895	if (dictEnd == (const BYTE *)source) {
896		int result;
897
898		if ((streamPtr->dictSize < 64 * KB) &&
899			(streamPtr->dictSize < streamPtr->currentOffset)) {
900			result = LZ4_compress_generic(
901				streamPtr, source, dest, inputSize,
902				maxOutputSize, limitedOutput, byU32,
903				withPrefix64k, dictSmall, acceleration);
904		} else {
905			result = LZ4_compress_generic(
906				streamPtr, source, dest, inputSize,
907				maxOutputSize, limitedOutput, byU32,
908				withPrefix64k, noDictIssue, acceleration);
909		}
910		streamPtr->dictSize += (U32)inputSize;
911		streamPtr->currentOffset += (U32)inputSize;
912		return result;
913	}
914
915	/* external dictionary mode */
916	{
917		int result;
918
919		if ((streamPtr->dictSize < 64 * KB) &&
920			(streamPtr->dictSize < streamPtr->currentOffset)) {
921			result = LZ4_compress_generic(
922				streamPtr, source, dest, inputSize,
923				maxOutputSize, limitedOutput, byU32,
924				usingExtDict, dictSmall, acceleration);
925		} else {
926			result = LZ4_compress_generic(
927				streamPtr, source, dest, inputSize,
928				maxOutputSize, limitedOutput, byU32,
929				usingExtDict, noDictIssue, acceleration);
930		}
931		streamPtr->dictionary = (const BYTE *)source;
932		streamPtr->dictSize = (U32)inputSize;
933		streamPtr->currentOffset += (U32)inputSize;
934		return result;
935	}
936}
937EXPORT_SYMBOL(LZ4_compress_fast_continue);
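Finally, a hedged sketch of the streaming pattern built from the functions above: reset the stream, then compress successive blocks so that later blocks can reference earlier ones as a dictionary. The block layout, sizes and helper name are assumptions made for illustration.

#include <linux/lz4.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Illustrative sketch, not part of the file above: compress nr_blocks
 * buffers as one chained stream.  Earlier blocks act as the dictionary for
 * later ones, so each source buffer must stay valid and unmodified until
 * the following call returns. */
static int example_stream(const char * const blocks[], const int sizes[],
			  int nr_blocks, char *dst, int dst_capacity)
{
	LZ4_stream_t *stream = vmalloc(sizeof(*stream));
	char *op = dst;
	int i;

	if (!stream)
		return -ENOMEM;
	LZ4_resetStream(stream);

	for (i = 0; i < nr_blocks; i++) {
		int room = dst_capacity - (op - dst);
		int n = LZ4_compress_fast_continue(stream, blocks[i], op,
						   sizes[i], room, 1);
		if (n == 0) {		/* destination too small */
			vfree(stream);
			return -ENOSPC;
		}
		op += n;
	}

	vfree(stream);
	return op - dst;	/* total compressed bytes */
}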
938
939MODULE_LICENSE("Dual BSD/GPL");
940MODULE_DESCRIPTION("LZ4 compressor");