v4.17
 
 
/*
 * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
 *
 * Author: Lasse Collin <lasse.collin@tukaani.org>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

/*
 * Important notes about in-place decompression
 *
 * At least on x86, the kernel is decompressed in place: the compressed data
 * is placed to the end of the output buffer, and the decompressor overwrites
 * most of the compressed data. There must be enough safety margin to
 * guarantee that the write position is always behind the read position.
 *
 * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
 * Note that the margin with XZ is bigger than with Deflate (gzip)!
 *
 * The worst case for in-place decompression is that the beginning of
 * the file is compressed extremely well, and the rest of the file is
 * incompressible. Thus, we must look for worst-case expansion when the
 * compressor is encoding incompressible data.
 *
 * The structure of the .xz file in case of a compressed kernel is as follows.
 * Sizes (in bytes) of the fields are in parentheses.
 *
 *    Stream Header (12)
 *    Block Header:
 *      Block Header (8-12)
 *      Compressed Data (N)
 *      Block Padding (0-3)
 *      CRC32 (4)
 *    Index (8-20)
 *    Stream Footer (12)
 *
 * Normally there is exactly one Block, but let's assume that there are
 * 2-4 Blocks just in case. Because Stream Header and also Block Header
 * of the first Block don't make the decompressor produce any uncompressed
 * data, we can ignore them from our calculations. Block Headers of possible
 * additional Blocks have to be taken into account still. With these
 * assumptions, it is safe to assume that the total header overhead is
 * less than 128 bytes.
 *
 * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
 * doesn't change the size of the data, it is enough to calculate the
 * safety margin for LZMA2.
 *
 * LZMA2 stores the data in chunks. Each chunk has a header whose size is
 * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
 * the maximum chunk header size is 8 bytes. After the chunk header, there
 * may be up to 64 KiB of actual payload in the chunk. Often the payload is
 * quite a bit smaller though; to be safe, let's assume that an average
 * chunk has only 32 KiB of payload.
 *
 * The maximum uncompressed size of the payload is 2 MiB. The minimum
 * uncompressed size of the payload is in practice never less than the
 * payload size itself. The LZMA2 format would allow uncompressed size
 * to be less than the payload size, but no sane compressor creates such
 * files. LZMA2 supports storing incompressible data in uncompressed form,
 * so there's never a need to create payloads whose uncompressed size is
 * smaller than the compressed size.
 *
 * The assumption, that the uncompressed size of the payload is never
 * smaller than the payload itself, is valid only when talking about
 * the payload as a whole. It is possible that the payload has parts where
 * the decompressor consumes more input than it produces output. Calculating
 * the worst case for this would be tricky. Instead of trying to do that,
 * let's simply make sure that the decompressor never overwrites any bytes
 * of the payload which it is currently reading.
 *
 * Now we have enough information to calculate the safety margin. We need
 *   - 128 bytes for the .xz file format headers;
 *   - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
 *     per chunk, each chunk having average payload size of 32 KiB); and
 *   - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
 *     the decompressor never overwrites anything from the LZMA2 chunk
 *     payload it is currently reading.
 *
 * We get the following formula:
 *
 *    safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
 *                  = 128 + (uncompressed_size >> 12) + 65536
 *
 * For comparison, according to arch/x86/boot/compressed/misc.c, the
 * equivalent formula for Deflate is this:
 *
 *    safety_margin = 18 + (uncompressed_size >> 12) + 32768
 *
 * Thus, when updating Deflate-only in-place kernel decompressor to
 * support XZ, the fixed overhead has to be increased from 18+32768 bytes
 * to 128+65536 bytes.
 */
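
/*
 * Worked example (added here for illustration; not part of the original
 * file): for a hypothetical 32 MiB (33554432-byte) uncompressed kernel,
 * the XZ formula above gives
 *
 *    safety_margin = 128 + (33554432 >> 12) + 65536
 *                  = 128 + 8192 + 65536 = 73856 bytes (about 72 KiB)
 *
 * while the Deflate formula gives 18 + 8192 + 32768 = 40978 bytes, so
 * switching to XZ needs roughly 32 KiB of additional safety margin at
 * this size.
 */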

/*
 * STATIC is defined to "static" if we are being built for kernel
 * decompression (pre-boot code). <linux/decompress/mm.h> will define
 * STATIC to empty if it wasn't already defined. Since we will need to
 * know later if we are being used for kernel decompression, we define
 * XZ_PREBOOT here.
 */
#ifdef STATIC
#	define XZ_PREBOOT
#endif
#ifdef __KERNEL__
#	include <linux/decompress/mm.h>
#endif
#define XZ_EXTERN STATIC

#ifndef XZ_PREBOOT
#	include <linux/slab.h>
#	include <linux/xz.h>
#else
/*
 * Use the internal CRC32 code instead of kernel's CRC32 module, which
 * is not available in early phase of booting.
 */
#define XZ_INTERNAL_CRC32 1

/*
 * For boot time use, we enable only the BCJ filter of the current
 * architecture or none if no BCJ filter is available for the architecture.
 */
#ifdef CONFIG_X86
#	define XZ_DEC_X86
#endif
#ifdef CONFIG_PPC
#	define XZ_DEC_POWERPC
#endif
#ifdef CONFIG_ARM
#	define XZ_DEC_ARM
#endif
#ifdef CONFIG_IA64
#	define XZ_DEC_IA64
#endif
#ifdef CONFIG_SPARC
#	define XZ_DEC_SPARC
#endif

/*
 * This will get the basic headers so that memeq() and others
 * can be defined.
 */
#include "xz/xz_private.h"

/*
 * Replace the normal allocation functions with the versions from
 * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
 * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
 * Workaround it here because the other decompressors don't need it.
 */
#undef kmalloc
#undef kfree
#undef vmalloc
#undef vfree
#define kmalloc(size, flags) malloc(size)
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)

/*
 * FIXME: Not all basic memory functions are provided in architecture-specific
 * files (yet). We define our own versions here for now, but this should be
 * only a temporary solution.
 *
 * memeq and memzero are not used much and any remotely sane implementation
 * is fast enough. memcpy/memmove speed matters in multi-call mode, but
 * the kernel image is decompressed in single-call mode, in which only
 * memcpy speed can matter and only if there is a lot of incompressible data
 * (LZMA2 stores incompressible chunks in uncompressed form). Thus, the
 * functions below should just be kept small; it's probably not worth
 * optimizing for speed.
 */

#ifndef memeq
static bool memeq(const void *a, const void *b, size_t size)
{
	const uint8_t *x = a;
	const uint8_t *y = b;
	size_t i;

	for (i = 0; i < size; ++i)
		if (x[i] != y[i])
			return false;

	return true;
}
#endif

#ifndef memzero
static void memzero(void *buf, size_t size)
{
	uint8_t *b = buf;
	uint8_t *e = b + size;

	while (b != e)
		*b++ = '\0';
}
#endif

#ifndef memmove
/* Not static to avoid a conflict with the prototype in the Linux headers. */
void *memmove(void *dest, const void *src, size_t size)
{
	uint8_t *d = dest;
	const uint8_t *s = src;
	size_t i;

	if (d < s) {
		for (i = 0; i < size; ++i)
			d[i] = s[i];
	} else if (d > s) {
		i = size;
		while (i-- > 0)
			d[i] = s[i];
	}

	return dest;
}
#endif

/*
 * Since we need memmove anyway, we could use it as memcpy too.
 * Commented out for now to avoid breaking things.
 */
/*
#ifndef memcpy
#	define memcpy memmove
#endif
*/

#include "xz/xz_crc32.c"
#include "xz/xz_dec_stream.c"
#include "xz/xz_dec_lzma2.c"
#include "xz/xz_dec_bcj.c"

#endif /* XZ_PREBOOT */

/* Size of the input and output buffers in multi-call mode */
#define XZ_IOBUF_SIZE 4096

/*
 * This function implements the API defined in <linux/decompress/generic.h>.
 *
 * This wrapper will automatically choose single-call or multi-call mode
 * of the native XZ decoder API. The single-call mode can be used only when
 * both input and output buffers are available as a single chunk, i.e. when
 * fill() and flush() won't be used.
 */
STATIC int INIT unxz(unsigned char *in, long in_size,
		     long (*fill)(void *dest, unsigned long size),
		     long (*flush)(void *src, unsigned long size),
		     unsigned char *out, long *in_used,
		     void (*error)(char *x))
{
	struct xz_buf b;
	struct xz_dec *s;
	enum xz_ret ret;
	bool must_free_in = false;

#if XZ_INTERNAL_CRC32
	xz_crc32_init();
#endif

	if (in_used != NULL)
		*in_used = 0;

	if (fill == NULL && flush == NULL)
		s = xz_dec_init(XZ_SINGLE, 0);
	else
		s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);

	if (s == NULL)
		goto error_alloc_state;

	if (flush == NULL) {
		b.out = out;
		b.out_size = (size_t)-1;
	} else {
		b.out_size = XZ_IOBUF_SIZE;
		b.out = malloc(XZ_IOBUF_SIZE);
		if (b.out == NULL)
			goto error_alloc_out;
	}

	if (in == NULL) {
		must_free_in = true;
		in = malloc(XZ_IOBUF_SIZE);
		if (in == NULL)
			goto error_alloc_in;
	}

	b.in = in;
	b.in_pos = 0;
	b.in_size = in_size;
	b.out_pos = 0;

	if (fill == NULL && flush == NULL) {
		ret = xz_dec_run(s, &b);
	} else {
		do {
			if (b.in_pos == b.in_size && fill != NULL) {
				if (in_used != NULL)
					*in_used += b.in_pos;

				b.in_pos = 0;

				in_size = fill(in, XZ_IOBUF_SIZE);
				if (in_size < 0) {
					/*
					 * This isn't an optimal error code
					 * but it probably isn't worth making
					 * a new one either.
					 */
					ret = XZ_BUF_ERROR;
					break;
				}

				b.in_size = in_size;
			}

			ret = xz_dec_run(s, &b);

			if (flush != NULL && (b.out_pos == b.out_size
					|| (ret != XZ_OK && b.out_pos > 0))) {
				/*
				 * Setting ret here may hide an error
				 * returned by xz_dec_run(), but probably
				 * it's not too bad.
				 */
				if (flush(b.out, b.out_pos) != (long)b.out_pos)
					ret = XZ_BUF_ERROR;

				b.out_pos = 0;
			}
		} while (ret == XZ_OK);

		if (must_free_in)
			free(in);

		if (flush != NULL)
			free(b.out);
	}

	if (in_used != NULL)
		*in_used += b.in_pos;

	xz_dec_end(s);

	switch (ret) {
	case XZ_STREAM_END:
		return 0;

	case XZ_MEM_ERROR:
		/* This can occur only in multi-call mode. */
		error("XZ decompressor ran out of memory");
		break;

	case XZ_FORMAT_ERROR:
		error("Input is not in the XZ format (wrong magic bytes)");
		break;

	case XZ_OPTIONS_ERROR:
		error("Input was encoded with settings that are not "
				"supported by this XZ decoder");
		break;

	case XZ_DATA_ERROR:
	case XZ_BUF_ERROR:
		error("XZ-compressed data is corrupt");
		break;

	default:
		error("Bug in the XZ decompressor");
		break;
	}

	return -1;

error_alloc_in:
	if (flush != NULL)
		free(b.out);

error_alloc_out:
	xz_dec_end(s);

error_alloc_state:
	error("XZ decompressor ran out of memory");
	return -1;
}
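
#if 0
/*
 * Illustrative sketch only (not part of the original file): how a caller
 * might use unxz() in single-call mode, i.e. with fill() and flush() both
 * NULL so that the whole compressed image and the whole output buffer are
 * each available as one contiguous chunk. Everything here except unxz()
 * itself (example_error, example_decompress, and the buffer arguments)
 * is hypothetical.
 */
static void example_error(char *msg)
{
	/* A real caller would print msg on its boot console. */
}

static int example_decompress(unsigned char *compressed, long compressed_size,
			      unsigned char *out_buf)
{
	long in_used = 0;

	/* Returns 0 on success; on failure example_error() reports why. */
	return unxz(compressed, compressed_size, NULL, NULL,
		    out_buf, &in_used, example_error);
}
#endif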

/*
 * This function is used by architecture-specific files to decompress
 * the kernel image.
 */
#ifdef XZ_PREBOOT
STATIC int INIT __decompress(unsigned char *buf, long len,
			   long (*fill)(void*, unsigned long),
			   long (*flush)(void*, unsigned long),
			   unsigned char *out_buf, long olen,
			   long *pos,
			   void (*error)(char *x))
{
	return unxz(buf, len, fill, flush, out_buf, pos, error);
}
#endif
v6.13.7
// SPDX-License-Identifier: 0BSD

/*
 * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
 *
 * Author: Lasse Collin <lasse.collin@tukaani.org>
 */

/*
 * Important notes about in-place decompression
 *
 * At least on x86, the kernel is decompressed in place: the compressed data
 * is placed to the end of the output buffer, and the decompressor overwrites
 * most of the compressed data. There must be enough safety margin to
 * guarantee that the write position is always behind the read position.
 *
 * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
 * Note that the margin with XZ is bigger than with Deflate (gzip)!
 *
 * The worst case for in-place decompression is that the beginning of
 * the file is compressed extremely well, and the rest of the file is
 * incompressible. Thus, we must look for worst-case expansion when the
 * compressor is encoding incompressible data.
 *
 * The structure of the .xz file in case of a compressed kernel is as follows.
 * Sizes (in bytes) of the fields are in parentheses.
 *
 *    Stream Header (12)
 *    Block Header:
 *      Block Header (8-12)
 *      Compressed Data (N)
 *      Block Padding (0-3)
 *      CRC32 (4)
 *    Index (8-20)
 *    Stream Footer (12)
 *
 * Normally there is exactly one Block, but let's assume that there are
 * 2-4 Blocks just in case. Because Stream Header and also Block Header
 * of the first Block don't make the decompressor produce any uncompressed
 * data, we can ignore them from our calculations. Block Headers of possible
 * additional Blocks have to be taken into account still. With these
 * assumptions, it is safe to assume that the total header overhead is
 * less than 128 bytes.
 *
 * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
 * doesn't change the size of the data, it is enough to calculate the
 * safety margin for LZMA2.
 *
 * LZMA2 stores the data in chunks. Each chunk has a header whose size is
 * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
 * the maximum chunk header size is 8 bytes. After the chunk header, there
 * may be up to 64 KiB of actual payload in the chunk. Often the payload is
 * quite a bit smaller though; to be safe, let's assume that an average
 * chunk has only 32 KiB of payload.
 *
 * The maximum uncompressed size of the payload is 2 MiB. The minimum
 * uncompressed size of the payload is in practice never less than the
 * payload size itself. The LZMA2 format would allow uncompressed size
 * to be less than the payload size, but no sane compressor creates such
 * files. LZMA2 supports storing incompressible data in uncompressed form,
 * so there's never a need to create payloads whose uncompressed size is
 * smaller than the compressed size.
 *
 * The assumption, that the uncompressed size of the payload is never
 * smaller than the payload itself, is valid only when talking about
 * the payload as a whole. It is possible that the payload has parts where
 * the decompressor consumes more input than it produces output. Calculating
 * the worst case for this would be tricky. Instead of trying to do that,
 * let's simply make sure that the decompressor never overwrites any bytes
 * of the payload which it is currently reading.
 *
 * Now we have enough information to calculate the safety margin. We need
 *   - 128 bytes for the .xz file format headers;
 *   - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
 *     per chunk, each chunk having average payload size of 32 KiB); and
 *   - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
 *     the decompressor never overwrites anything from the LZMA2 chunk
 *     payload it is currently reading.
 *
 * We get the following formula:
 *
 *    safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
 *                  = 128 + (uncompressed_size >> 12) + 65536
 *
 * For comparison, according to arch/x86/boot/compressed/misc.c, the
 * equivalent formula for Deflate is this:
 *
 *    safety_margin = 18 + (uncompressed_size >> 12) + 32768
 *
 * Thus, when updating Deflate-only in-place kernel decompressor to
 * support XZ, the fixed overhead has to be increased from 18+32768 bytes
 * to 128+65536 bytes.
 */

/*
 * STATIC is defined to "static" if we are being built for kernel
 * decompression (pre-boot code). <linux/decompress/mm.h> will define
 * STATIC to empty if it wasn't already defined. Since we will need to
 * know later if we are being used for kernel decompression, we define
 * XZ_PREBOOT here.
 */
#ifdef STATIC
#	define XZ_PREBOOT
#else
#	include <linux/decompress/unxz.h>
#endif
#ifdef __KERNEL__
#	include <linux/decompress/mm.h>
#endif

#ifndef XZ_PREBOOT
#	include <linux/slab.h>
#	include <linux/xz.h>
#else
/*
 * Use the internal CRC32 code instead of kernel's CRC32 module, which
 * is not available in early phase of booting.
 */
#define XZ_INTERNAL_CRC32 1

/*
 * For boot time use, we enable only the BCJ filter of the current
 * architecture or none if no BCJ filter is available for the architecture.
 */
#ifdef CONFIG_X86
#	define XZ_DEC_X86
#endif
#if defined(CONFIG_PPC) && defined(CONFIG_CPU_BIG_ENDIAN)
#	define XZ_DEC_POWERPC
#endif
#ifdef CONFIG_ARM
#	ifdef CONFIG_THUMB2_KERNEL
#		define XZ_DEC_ARMTHUMB
#	else
#		define XZ_DEC_ARM
#	endif
#endif
#ifdef CONFIG_ARM64
#	define XZ_DEC_ARM64
#endif
#ifdef CONFIG_RISCV
#	define XZ_DEC_RISCV
#endif
#ifdef CONFIG_SPARC
#	define XZ_DEC_SPARC
#endif

/*
 * This will get the basic headers so that memeq() and others
 * can be defined.
 */
#include "xz/xz_private.h"

/*
 * Replace the normal allocation functions with the versions from
 * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
 * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
 * Workaround it here because the other decompressors don't need it.
 */
#undef kmalloc
#undef kfree
#undef vmalloc
#undef vfree
#define kmalloc(size, flags) malloc(size)
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)

/*
 * FIXME: Not all basic memory functions are provided in architecture-specific
 * files (yet). We define our own versions here for now, but this should be
 * only a temporary solution.
 *
 * memeq and memzero are not used much and any remotely sane implementation
 * is fast enough. memcpy/memmove speed matters in multi-call mode, but
 * the kernel image is decompressed in single-call mode, in which only
 * memmove speed can matter and only if there is a lot of incompressible data
 * (LZMA2 stores incompressible chunks in uncompressed form). Thus, the
 * functions below should just be kept small; it's probably not worth
 * optimizing for speed.
 */

#ifndef memeq
static bool memeq(const void *a, const void *b, size_t size)
{
	const uint8_t *x = a;
	const uint8_t *y = b;
	size_t i;

	for (i = 0; i < size; ++i)
		if (x[i] != y[i])
			return false;

	return true;
}
#endif

#ifndef memzero
static void memzero(void *buf, size_t size)
{
	uint8_t *b = buf;
	uint8_t *e = b + size;

	while (b != e)
		*b++ = '\0';
}
#endif

#ifndef memmove
/* Not static to avoid a conflict with the prototype in the Linux headers. */
void *memmove(void *dest, const void *src, size_t size)
{
	uint8_t *d = dest;
	const uint8_t *s = src;
	size_t i;

	if (d < s) {
		for (i = 0; i < size; ++i)
			d[i] = s[i];
	} else if (d > s) {
		i = size;
		while (i-- > 0)
			d[i] = s[i];
	}

	return dest;
}
#endif

/*
 * Since we need memmove anyway, we could use it as memcpy too.
 * Commented out for now to avoid breaking things.
 */
/*
#ifndef memcpy
#	define memcpy memmove
#endif
*/

#include "xz/xz_crc32.c"
#include "xz/xz_dec_stream.c"
#include "xz/xz_dec_lzma2.c"
#include "xz/xz_dec_bcj.c"

#endif /* XZ_PREBOOT */

/* Size of the input and output buffers in multi-call mode */
#define XZ_IOBUF_SIZE 4096

/*
 * This function implements the API defined in <linux/decompress/generic.h>.
 *
 * This wrapper will automatically choose single-call or multi-call mode
 * of the native XZ decoder API. The single-call mode can be used only when
 * both input and output buffers are available as a single chunk, i.e. when
 * fill() and flush() won't be used.
 */
STATIC int INIT unxz(unsigned char *in, long in_size,
		     long (*fill)(void *dest, unsigned long size),
		     long (*flush)(void *src, unsigned long size),
		     unsigned char *out, long *in_used,
		     void (*error)(char *x))
{
	struct xz_buf b;
	struct xz_dec *s;
	enum xz_ret ret;
	bool must_free_in = false;

#if XZ_INTERNAL_CRC32
	xz_crc32_init();
#endif

	if (in_used != NULL)
		*in_used = 0;

	if (fill == NULL && flush == NULL)
		s = xz_dec_init(XZ_SINGLE, 0);
	else
		s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);

	if (s == NULL)
		goto error_alloc_state;

	if (flush == NULL) {
		b.out = out;
		b.out_size = (size_t)-1;
	} else {
		b.out_size = XZ_IOBUF_SIZE;
		b.out = malloc(XZ_IOBUF_SIZE);
		if (b.out == NULL)
			goto error_alloc_out;
	}

	if (in == NULL) {
		must_free_in = true;
		in = malloc(XZ_IOBUF_SIZE);
		if (in == NULL)
			goto error_alloc_in;
	}

	b.in = in;
	b.in_pos = 0;
	b.in_size = in_size;
	b.out_pos = 0;

	if (fill == NULL && flush == NULL) {
		ret = xz_dec_run(s, &b);
	} else {
		do {
			if (b.in_pos == b.in_size && fill != NULL) {
				if (in_used != NULL)
					*in_used += b.in_pos;

				b.in_pos = 0;

				in_size = fill(in, XZ_IOBUF_SIZE);
				if (in_size < 0) {
					/*
					 * This isn't an optimal error code
					 * but it probably isn't worth making
					 * a new one either.
					 */
					ret = XZ_BUF_ERROR;
					break;
				}

				b.in_size = in_size;
			}

			ret = xz_dec_run(s, &b);

			if (flush != NULL && (b.out_pos == b.out_size
					|| (ret != XZ_OK && b.out_pos > 0))) {
				/*
				 * Setting ret here may hide an error
				 * returned by xz_dec_run(), but probably
				 * it's not too bad.
				 */
				if (flush(b.out, b.out_pos) != (long)b.out_pos)
					ret = XZ_BUF_ERROR;

				b.out_pos = 0;
			}
		} while (ret == XZ_OK);

		if (must_free_in)
			free(in);

		if (flush != NULL)
			free(b.out);
	}

	if (in_used != NULL)
		*in_used += b.in_pos;

	xz_dec_end(s);

	switch (ret) {
	case XZ_STREAM_END:
		return 0;

	case XZ_MEM_ERROR:
		/* This can occur only in multi-call mode. */
		error("XZ decompressor ran out of memory");
		break;

	case XZ_FORMAT_ERROR:
		error("Input is not in the XZ format (wrong magic bytes)");
		break;

	case XZ_OPTIONS_ERROR:
		error("Input was encoded with settings that are not "
				"supported by this XZ decoder");
		break;

	case XZ_DATA_ERROR:
	case XZ_BUF_ERROR:
		error("XZ-compressed data is corrupt");
		break;

	default:
		error("Bug in the XZ decompressor");
		break;
	}

	return -1;

error_alloc_in:
	if (flush != NULL)
		free(b.out);

error_alloc_out:
	xz_dec_end(s);

error_alloc_state:
	error("XZ decompressor ran out of memory");
	return -1;
}
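
#if 0
/*
 * Illustrative sketch only (not part of the original file): using unxz()
 * in multi-call mode by passing fill() and flush() callbacks, so the
 * wrapper allocates XZ_IOBUF_SIZE buffers internally and streams the data
 * through them. example_fill(), example_flush(), and example_error() are
 * hypothetical placeholder stubs; a real caller would read from and write
 * to its own source and destination.
 */
static long example_fill(void *dest, unsigned long size)
{
	/*
	 * Stub: a real fill() reads up to size bytes of compressed input
	 * into dest and returns the number of bytes read, or a negative
	 * value on error.
	 */
	return 0;
}

static long example_flush(void *src, unsigned long size)
{
	/*
	 * Stub: a real flush() consumes size bytes of uncompressed output
	 * from src and returns size on success (anything else is treated
	 * as an error by unxz()).
	 */
	return (long)size;
}

static void example_error(char *msg)
{
	/* Report msg somewhere useful. */
}

static int example_stream_decompress(void)
{
	long in_used = 0;

	/* Passing in == NULL makes unxz() allocate its own input buffer. */
	return unxz(NULL, 0, example_fill, example_flush,
		    NULL, &in_used, example_error);
}
#endif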

/*
 * This function is used by architecture-specific files to decompress
 * the kernel image.
 */
#ifdef XZ_PREBOOT
STATIC int INIT __decompress(unsigned char *in, long in_size,
			     long (*fill)(void *dest, unsigned long size),
			     long (*flush)(void *src, unsigned long size),
			     unsigned char *out, long out_size,
			     long *in_used,
			     void (*error)(char *x))
{
	return unxz(in, in_size, fill, flush, out, in_used, error);
}
#endif