fs/btrfs/zlib.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2008 Oracle.  All rights reserved.
  4 *
  5 * Based on jffs2 zlib code:
  6 * Copyright © 2001-2007 Red Hat, Inc.
  7 * Created by David Woodhouse <dwmw2@infradead.org>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/slab.h>
 12#include <linux/zlib.h>
 13#include <linux/zutil.h>
 14#include <linux/mm.h>
 15#include <linux/init.h>
 16#include <linux/err.h>
 17#include <linux/sched.h>
 18#include <linux/pagemap.h>
 19#include <linux/bio.h>
 20#include <linux/refcount.h>
 21#include "btrfs_inode.h"
 22#include "compression.h"
 23#include "fs.h"
 24#include "subpage.h"
 25
 26/* workspace buffer size for s390 zlib hardware support */
 27#define ZLIB_DFLTCC_BUF_SIZE    (4 * PAGE_SIZE)
 28
 29struct workspace {
 30	z_stream strm;
 31	char *buf;
 32	unsigned int buf_size;
 33	struct list_head list;
 34	int level;
 35};
 36
 37static struct workspace_manager wsm;
 38
 39struct list_head *zlib_get_workspace(unsigned int level)
 40{
 41	struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
 42	struct workspace *workspace = list_entry(ws, struct workspace, list);
 43
 44	workspace->level = level;
 45
 46	return ws;
 47}
 48
 49void zlib_free_workspace(struct list_head *ws)
 50{
 51	struct workspace *workspace = list_entry(ws, struct workspace, list);
 52
 53	kvfree(workspace->strm.workspace);
 54	kfree(workspace->buf);
 55	kfree(workspace);
 56}
 57
 58struct list_head *zlib_alloc_workspace(unsigned int level)
 59{
 60	struct workspace *workspace;
 61	int workspacesize;
 62
 63	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
 64	if (!workspace)
 65		return ERR_PTR(-ENOMEM);
 66
 67	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
 68			zlib_inflate_workspacesize());
 69	workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL | __GFP_NOWARN);
 70	workspace->level = level;
 71	workspace->buf = NULL;
 72	/*
  73	 * In case of s390 zlib hardware support, allocate a larger workspace
  74	 * buffer. If the allocation fails, fall back to a single page buffer.
 75	 */
 76	if (zlib_deflate_dfltcc_enabled()) {
 77		workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
 78					 __GFP_NOMEMALLOC | __GFP_NORETRY |
 79					 __GFP_NOWARN | GFP_NOIO);
 80		workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
 81	}
 82	if (!workspace->buf) {
 83		workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 84		workspace->buf_size = PAGE_SIZE;
 85	}
 86	if (!workspace->strm.workspace || !workspace->buf)
 87		goto fail;
 88
 89	INIT_LIST_HEAD(&workspace->list);
 90
 91	return &workspace->list;
 92fail:
 93	zlib_free_workspace(&workspace->list);
 94	return ERR_PTR(-ENOMEM);
 95}
 96
 97int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
 98			 u64 start, struct folio **folios, unsigned long *out_folios,
 99			 unsigned long *total_in, unsigned long *total_out)
100{
101	struct workspace *workspace = list_entry(ws, struct workspace, list);
102	int ret;
103	char *data_in = NULL;
104	char *cfolio_out;
105	int nr_folios = 0;
106	struct folio *in_folio = NULL;
107	struct folio *out_folio = NULL;
108	unsigned long bytes_left;
109	unsigned int in_buf_folios;
110	unsigned long len = *total_out;
111	unsigned long nr_dest_folios = *out_folios;
112	const unsigned long max_out = nr_dest_folios * PAGE_SIZE;
113	const u64 orig_end = start + len;
114
115	*out_folios = 0;
116	*total_out = 0;
117	*total_in = 0;
118
119	ret = zlib_deflateInit(&workspace->strm, workspace->level);
120	if (unlikely(ret != Z_OK)) {
121		struct btrfs_inode *inode = BTRFS_I(mapping->host);
122
123		btrfs_err(inode->root->fs_info,
124	"zlib compression init failed, error %d root %llu inode %llu offset %llu",
125			  ret, btrfs_root_id(inode->root), btrfs_ino(inode), start);
126		ret = -EIO;
127		goto out;
128	}
129
130	workspace->strm.total_in = 0;
131	workspace->strm.total_out = 0;
132
133	out_folio = btrfs_alloc_compr_folio();
134	if (out_folio == NULL) {
135		ret = -ENOMEM;
136		goto out;
137	}
138	cfolio_out = folio_address(out_folio);
139	folios[0] = out_folio;
140	nr_folios = 1;
141
142	workspace->strm.next_in = workspace->buf;
143	workspace->strm.avail_in = 0;
144	workspace->strm.next_out = cfolio_out;
145	workspace->strm.avail_out = PAGE_SIZE;
146
147	while (workspace->strm.total_in < len) {
148		/*
149		 * Get next input pages and copy the contents to
150		 * the workspace buffer if required.
151		 */
152		if (workspace->strm.avail_in == 0) {
153			bytes_left = len - workspace->strm.total_in;
154			in_buf_folios = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
155					    workspace->buf_size / PAGE_SIZE);
156			if (in_buf_folios > 1) {
157				int i;
158
159				/* S390 hardware acceleration path, not subpage. */
160				ASSERT(!btrfs_is_subpage(
161						inode_to_fs_info(mapping->host),
162						mapping));
163				for (i = 0; i < in_buf_folios; i++) {
164					if (data_in) {
165						kunmap_local(data_in);
166						folio_put(in_folio);
167						data_in = NULL;
168					}
169					ret = btrfs_compress_filemap_get_folio(mapping,
170							start, &in_folio);
171					if (ret < 0)
172						goto out;
173					data_in = kmap_local_folio(in_folio, 0);
174					copy_page(workspace->buf + i * PAGE_SIZE,
175						  data_in);
176					start += PAGE_SIZE;
177				}
178				workspace->strm.next_in = workspace->buf;
179				workspace->strm.avail_in = min(bytes_left,
180							       in_buf_folios << PAGE_SHIFT);
181			} else {
182				unsigned int pg_off;
183				unsigned int cur_len;
184
185				if (data_in) {
186					kunmap_local(data_in);
187					folio_put(in_folio);
188					data_in = NULL;
189				}
190				ret = btrfs_compress_filemap_get_folio(mapping,
191						start, &in_folio);
192				if (ret < 0)
193					goto out;
194				pg_off = offset_in_page(start);
195				cur_len = btrfs_calc_input_length(orig_end, start);
196				data_in = kmap_local_folio(in_folio, pg_off);
197				start += cur_len;
198				workspace->strm.next_in = data_in;
199				workspace->strm.avail_in = cur_len;
200			}
201		}
202
203		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
204		if (unlikely(ret != Z_OK)) {
205			struct btrfs_inode *inode = BTRFS_I(mapping->host);
206
207			btrfs_warn(inode->root->fs_info,
208		"zlib compression failed, error %d root %llu inode %llu offset %llu",
209				   ret, btrfs_root_id(inode->root), btrfs_ino(inode),
210				   start);
211			zlib_deflateEnd(&workspace->strm);
212			ret = -EIO;
213			goto out;
214		}
215
216		/* we're making it bigger, give up */
217		if (workspace->strm.total_in > 8192 &&
218		    workspace->strm.total_in <
219		    workspace->strm.total_out) {
220			ret = -E2BIG;
221			goto out;
222		}
223		/* we need another page for writing out.  Test this
224		 * before the total_in so we will pull in a new page for
225		 * the stream end if required
226		 */
227		if (workspace->strm.avail_out == 0) {
228			if (nr_folios == nr_dest_folios) {
229				ret = -E2BIG;
230				goto out;
231			}
232			out_folio = btrfs_alloc_compr_folio();
233			if (out_folio == NULL) {
234				ret = -ENOMEM;
235				goto out;
236			}
237			cfolio_out = folio_address(out_folio);
238			folios[nr_folios] = out_folio;
239			nr_folios++;
240			workspace->strm.avail_out = PAGE_SIZE;
241			workspace->strm.next_out = cfolio_out;
242		}
243		/* we're all done */
244		if (workspace->strm.total_in >= len)
245			break;
246		if (workspace->strm.total_out > max_out)
247			break;
248	}
249	workspace->strm.avail_in = 0;
250	/*
251	 * Call deflate with Z_FINISH flush parameter providing more output
252	 * space but no more input data, until it returns with Z_STREAM_END.
253	 */
254	while (ret != Z_STREAM_END) {
255		ret = zlib_deflate(&workspace->strm, Z_FINISH);
256		if (ret == Z_STREAM_END)
257			break;
258		if (ret != Z_OK && ret != Z_BUF_ERROR) {
259			zlib_deflateEnd(&workspace->strm);
260			ret = -EIO;
261			goto out;
262		} else if (workspace->strm.avail_out == 0) {
263			/* Get another folio for the stream end. */
264			if (nr_folios == nr_dest_folios) {
265				ret = -E2BIG;
266				goto out;
267			}
268			out_folio = btrfs_alloc_compr_folio();
269			if (out_folio == NULL) {
270				ret = -ENOMEM;
271				goto out;
272			}
273			cfolio_out = folio_address(out_folio);
274			folios[nr_folios] = out_folio;
275			nr_folios++;
276			workspace->strm.avail_out = PAGE_SIZE;
277			workspace->strm.next_out = cfolio_out;
278		}
279	}
280	zlib_deflateEnd(&workspace->strm);
281
282	if (workspace->strm.total_out >= workspace->strm.total_in) {
283		ret = -E2BIG;
284		goto out;
285	}
286
287	ret = 0;
288	*total_out = workspace->strm.total_out;
289	*total_in = workspace->strm.total_in;
290out:
291	*out_folios = nr_folios;
292	if (data_in) {
293		kunmap_local(data_in);
294		folio_put(in_folio);
295	}
296
297	return ret;
298}
299
300int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
301{
302	struct workspace *workspace = list_entry(ws, struct workspace, list);
303	int ret = 0, ret2;
304	int wbits = MAX_WBITS;
305	char *data_in;
306	size_t total_out = 0;
307	unsigned long folio_in_index = 0;
308	size_t srclen = cb->compressed_len;
309	unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
310	unsigned long buf_start;
311	struct folio **folios_in = cb->compressed_folios;
312
313	data_in = kmap_local_folio(folios_in[folio_in_index], 0);
314	workspace->strm.next_in = data_in;
315	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
316	workspace->strm.total_in = 0;
317
318	workspace->strm.total_out = 0;
319	workspace->strm.next_out = workspace->buf;
320	workspace->strm.avail_out = workspace->buf_size;
321
322	/* If it's deflate, and it's got no preset dictionary, then
323	   we can tell zlib to skip the adler32 check. */
324	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
325	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
326	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
327
328		wbits = -((data_in[0] >> 4) + 8);
329		workspace->strm.next_in += 2;
330		workspace->strm.avail_in -= 2;
331	}
332
333	ret = zlib_inflateInit2(&workspace->strm, wbits);
334	if (unlikely(ret != Z_OK)) {
335		struct btrfs_inode *inode = cb->bbio.inode;
336
337		kunmap_local(data_in);
338		btrfs_err(inode->root->fs_info,
339	"zlib decompression init failed, error %d root %llu inode %llu offset %llu",
340			  ret, btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
341		return -EIO;
342	}
343	while (workspace->strm.total_in < srclen) {
344		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
345		if (ret != Z_OK && ret != Z_STREAM_END)
346			break;
347
348		buf_start = total_out;
349		total_out = workspace->strm.total_out;
350
351		/* we didn't make progress in this inflate call, we're done */
352		if (buf_start == total_out)
353			break;
354
355		ret2 = btrfs_decompress_buf2page(workspace->buf,
356				total_out - buf_start, cb, buf_start);
357		if (ret2 == 0) {
358			ret = 0;
359			goto done;
360		}
361
362		workspace->strm.next_out = workspace->buf;
363		workspace->strm.avail_out = workspace->buf_size;
364
365		if (workspace->strm.avail_in == 0) {
366			unsigned long tmp;
367			kunmap_local(data_in);
368			folio_in_index++;
369			if (folio_in_index >= total_folios_in) {
370				data_in = NULL;
371				break;
372			}
373			data_in = kmap_local_folio(folios_in[folio_in_index], 0);
374			workspace->strm.next_in = data_in;
375			tmp = srclen - workspace->strm.total_in;
376			workspace->strm.avail_in = min(tmp, PAGE_SIZE);
377		}
378	}
379	if (unlikely(ret != Z_STREAM_END)) {
380		btrfs_err(cb->bbio.inode->root->fs_info,
381		"zlib decompression failed, error %d root %llu inode %llu offset %llu",
382			  ret, btrfs_root_id(cb->bbio.inode->root),
383			  btrfs_ino(cb->bbio.inode), cb->start);
384		ret = -EIO;
385	} else {
386		ret = 0;
387	}
388done:
389	zlib_inflateEnd(&workspace->strm);
390	if (data_in)
391		kunmap_local(data_in);
392	return ret;
393}
394
395int zlib_decompress(struct list_head *ws, const u8 *data_in,
396		struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
397		size_t destlen)
398{
399	struct workspace *workspace = list_entry(ws, struct workspace, list);
400	int ret = 0;
401	int wbits = MAX_WBITS;
402	unsigned long to_copy;
403
404	workspace->strm.next_in = data_in;
405	workspace->strm.avail_in = srclen;
406	workspace->strm.total_in = 0;
407
408	workspace->strm.next_out = workspace->buf;
409	workspace->strm.avail_out = workspace->buf_size;
410	workspace->strm.total_out = 0;
411	/* If it's deflate, and it's got no preset dictionary, then
412	   we can tell zlib to skip the adler32 check. */
413	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
414	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
415	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
416
417		wbits = -((data_in[0] >> 4) + 8);
418		workspace->strm.next_in += 2;
419		workspace->strm.avail_in -= 2;
420	}
421
422	ret = zlib_inflateInit2(&workspace->strm, wbits);
423	if (unlikely(ret != Z_OK)) {
424		struct btrfs_inode *inode = folio_to_inode(dest_folio);
425
426		btrfs_err(inode->root->fs_info,
427		"zlib decompression init failed, error %d root %llu inode %llu offset %llu",
428			  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
429			  folio_pos(dest_folio));
430		return -EIO;
431	}
432
433	/*
434	 * Everything (in/out buf) should be at most one sector, there should
435	 * be no need to switch any input/output buffer.
436	 */
437	ret = zlib_inflate(&workspace->strm, Z_FINISH);
438	to_copy = min(workspace->strm.total_out, destlen);
439	if (ret != Z_STREAM_END)
440		goto out;
441
442	memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, to_copy);
443
444out:
445	if (unlikely(to_copy != destlen)) {
446		struct btrfs_inode *inode = folio_to_inode(dest_folio);
447
448		btrfs_err(inode->root->fs_info,
449"zlib decompression failed, error %d root %llu inode %llu offset %llu decompressed %lu expected %zu",
450			  ret, btrfs_root_id(inode->root), btrfs_ino(inode),
451			  folio_pos(dest_folio), to_copy, destlen);
452		ret = -EIO;
453	} else {
454		ret = 0;
455	}
456
457	zlib_inflateEnd(&workspace->strm);
458
459	if (unlikely(to_copy < destlen))
460		folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
461	return ret;
462}
463
464const struct btrfs_compress_op btrfs_zlib_compress = {
465	.workspace_manager	= &wsm,
466	.max_level		= 9,
467	.default_level		= BTRFS_ZLIB_DEFAULT_LEVEL,
468};
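Both zlib_decompress_bio() and zlib_decompress() above peek at the first two bytes of the compressed stream: if byte 0 announces the deflate method, byte 1 carries no preset dictionary, and the 16-bit header passes the "% 31" check, the two wrapper bytes are skipped and inflate is set up with negative window bits, which also disables the adler32 verification. Below is a minimal standalone sketch of the same check against the ordinary userspace zlib.h API (an assumption for illustration; the kernel uses its own zlib_inflate interface, and PRESET_DICT is redefined locally because userspace zlib.h does not export it).

/*
 * Sketch only: userspace zlib, not the kernel API. Mirrors the header
 * test used by the btrfs code above to decide between zlib-wrapped and
 * raw deflate decompression.
 */
#include <string.h>
#include <zlib.h>

#define PRESET_DICT 0x20	/* from zlib's zutil.h; not exported by zlib.h */

static long inflate_maybe_raw(const unsigned char *src, size_t srclen,
			      unsigned char *dst, size_t dstlen)
{
	z_stream strm;
	int wbits = MAX_WBITS;	/* default: expect a zlib wrapper */
	int ret;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (unsigned char *)src;
	strm.avail_in = srclen;
	strm.next_out = dst;
	strm.avail_out = dstlen;

	/* Same test as above: skip the 2-byte zlib header when possible. */
	if (srclen > 2 && !(src[1] & PRESET_DICT) &&
	    ((src[0] & 0x0f) == Z_DEFLATED) &&
	    !(((src[0] << 8) + src[1]) % 31)) {
		wbits = -((src[0] >> 4) + 8);	/* e.g. 0x78 -> -15 */
		strm.next_in += 2;
		strm.avail_in -= 2;
	}

	if (inflateInit2(&strm, wbits) != Z_OK)
		return -1;
	ret = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);
	return ret == Z_STREAM_END ? (long)strm.total_out : -1;
}

For the common header byte 0x78, (0x78 >> 4) + 8 is 15, so windowBits becomes -15: raw deflate with a 32 KiB window and no checksum handling.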
fs/btrfs/zlib.c (v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2008 Oracle.  All rights reserved.
  4 *
  5 * Based on jffs2 zlib code:
  6 * Copyright © 2001-2007 Red Hat, Inc.
  7 * Created by David Woodhouse <dwmw2@infradead.org>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/slab.h>
 12#include <linux/zlib.h>
 13#include <linux/zutil.h>
 14#include <linux/mm.h>
 15#include <linux/init.h>
 16#include <linux/err.h>
 17#include <linux/sched.h>
 18#include <linux/pagemap.h>
 19#include <linux/bio.h>
 20#include <linux/refcount.h>
 21#include "compression.h"
 22
 23struct workspace {
 24	z_stream strm;
 25	char *buf;
 26	struct list_head list;
 27	int level;
 28};
 29
 30static void zlib_free_workspace(struct list_head *ws)
 31{
 32	struct workspace *workspace = list_entry(ws, struct workspace, list);
 33
 34	kvfree(workspace->strm.workspace);
 35	kfree(workspace->buf);
 36	kfree(workspace);
 37}
 38
 39static struct list_head *zlib_alloc_workspace(void)
 40{
 41	struct workspace *workspace;
 42	int workspacesize;
 43
 44	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
 45	if (!workspace)
 46		return ERR_PTR(-ENOMEM);
 47
 48	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
 49			zlib_inflate_workspacesize());
 50	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
 51	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 52	if (!workspace->strm.workspace || !workspace->buf)
 53		goto fail;
 54
 55	INIT_LIST_HEAD(&workspace->list);
 56
 57	return &workspace->list;
 58fail:
 59	zlib_free_workspace(&workspace->list);
 60	return ERR_PTR(-ENOMEM);
 61}
 62
 63static int zlib_compress_pages(struct list_head *ws,
 64			       struct address_space *mapping,
 65			       u64 start,
 66			       struct page **pages,
 67			       unsigned long *out_pages,
 68			       unsigned long *total_in,
 69			       unsigned long *total_out)
 70{
 71	struct workspace *workspace = list_entry(ws, struct workspace, list);
 72	int ret;
 73	char *data_in;
 74	char *cpage_out;
 75	int nr_pages = 0;
 76	struct page *in_page = NULL;
 77	struct page *out_page = NULL;
 78	unsigned long bytes_left;
 79	unsigned long len = *total_out;
 80	unsigned long nr_dest_pages = *out_pages;
 81	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
 82
 83	*out_pages = 0;
 84	*total_out = 0;
 85	*total_in = 0;
 86
 87	if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
 88		pr_warn("BTRFS: deflateInit failed\n");
 89		ret = -EIO;
 90		goto out;
 91	}
 92
 93	workspace->strm.total_in = 0;
 94	workspace->strm.total_out = 0;
 95
 96	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
 97	data_in = kmap(in_page);
 98
 99	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
100	if (out_page == NULL) {
101		ret = -ENOMEM;
102		goto out;
103	}
104	cpage_out = kmap(out_page);
105	pages[0] = out_page;
106	nr_pages = 1;
107
108	workspace->strm.next_in = data_in;
109	workspace->strm.next_out = cpage_out;
110	workspace->strm.avail_out = PAGE_SIZE;
111	workspace->strm.avail_in = min(len, PAGE_SIZE);
112
113	while (workspace->strm.total_in < len) {
114		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
115		if (ret != Z_OK) {
116			pr_debug("BTRFS: deflate in loop returned %d\n",
117			       ret);
118			zlib_deflateEnd(&workspace->strm);
119			ret = -EIO;
120			goto out;
121		}
122
123		/* we're making it bigger, give up */
124		if (workspace->strm.total_in > 8192 &&
125		    workspace->strm.total_in <
126		    workspace->strm.total_out) {
127			ret = -E2BIG;
128			goto out;
129		}
130		/* we need another page for writing out.  Test this
131		 * before the total_in so we will pull in a new page for
132		 * the stream end if required
133		 */
134		if (workspace->strm.avail_out == 0) {
135			kunmap(out_page);
136			if (nr_pages == nr_dest_pages) {
137				out_page = NULL;
138				ret = -E2BIG;
139				goto out;
140			}
141			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
142			if (out_page == NULL) {
143				ret = -ENOMEM;
144				goto out;
145			}
146			cpage_out = kmap(out_page);
147			pages[nr_pages] = out_page;
148			nr_pages++;
149			workspace->strm.avail_out = PAGE_SIZE;
150			workspace->strm.next_out = cpage_out;
151		}
152		/* we're all done */
153		if (workspace->strm.total_in >= len)
154			break;
155
156		/* we've read in a full page, get a new one */
157		if (workspace->strm.avail_in == 0) {
158			if (workspace->strm.total_out > max_out)
159				break;
160
161			bytes_left = len - workspace->strm.total_in;
162			kunmap(in_page);
163			put_page(in_page);
164
165			start += PAGE_SIZE;
166			in_page = find_get_page(mapping,
167						start >> PAGE_SHIFT);
168			data_in = kmap(in_page);
169			workspace->strm.avail_in = min(bytes_left,
170							   PAGE_SIZE);
171			workspace->strm.next_in = data_in;
172		}
173	}
174	workspace->strm.avail_in = 0;
175	ret = zlib_deflate(&workspace->strm, Z_FINISH);
176	zlib_deflateEnd(&workspace->strm);
177
178	if (ret != Z_STREAM_END) {
179		ret = -EIO;
180		goto out;
181	}
182
183	if (workspace->strm.total_out >= workspace->strm.total_in) {
184		ret = -E2BIG;
185		goto out;
186	}
187
188	ret = 0;
189	*total_out = workspace->strm.total_out;
190	*total_in = workspace->strm.total_in;
191out:
192	*out_pages = nr_pages;
193	if (out_page)
194		kunmap(out_page);
195
196	if (in_page) {
197		kunmap(in_page);
198		put_page(in_page);
199	}
200	return ret;
201}
202
203static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
204{
205	struct workspace *workspace = list_entry(ws, struct workspace, list);
206	int ret = 0, ret2;
207	int wbits = MAX_WBITS;
208	char *data_in;
209	size_t total_out = 0;
210	unsigned long page_in_index = 0;
211	size_t srclen = cb->compressed_len;
212	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
213	unsigned long buf_start;
214	struct page **pages_in = cb->compressed_pages;
215	u64 disk_start = cb->start;
216	struct bio *orig_bio = cb->orig_bio;
217
218	data_in = kmap(pages_in[page_in_index]);
219	workspace->strm.next_in = data_in;
220	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
221	workspace->strm.total_in = 0;
222
223	workspace->strm.total_out = 0;
224	workspace->strm.next_out = workspace->buf;
225	workspace->strm.avail_out = PAGE_SIZE;
226
227	/* If it's deflate, and it's got no preset dictionary, then
228	   we can tell zlib to skip the adler32 check. */
229	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
230	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
231	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
232
233		wbits = -((data_in[0] >> 4) + 8);
234		workspace->strm.next_in += 2;
235		workspace->strm.avail_in -= 2;
236	}
237
238	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
239		pr_warn("BTRFS: inflateInit failed\n");
240		kunmap(pages_in[page_in_index]);
241		return -EIO;
242	}
243	while (workspace->strm.total_in < srclen) {
244		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
245		if (ret != Z_OK && ret != Z_STREAM_END)
246			break;
247
248		buf_start = total_out;
249		total_out = workspace->strm.total_out;
250
251		/* we didn't make progress in this inflate call, we're done */
252		if (buf_start == total_out)
253			break;
254
255		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
256						 total_out, disk_start,
257						 orig_bio);
258		if (ret2 == 0) {
259			ret = 0;
260			goto done;
261		}
262
263		workspace->strm.next_out = workspace->buf;
264		workspace->strm.avail_out = PAGE_SIZE;
265
266		if (workspace->strm.avail_in == 0) {
267			unsigned long tmp;
268			kunmap(pages_in[page_in_index]);
269			page_in_index++;
270			if (page_in_index >= total_pages_in) {
271				data_in = NULL;
272				break;
273			}
274			data_in = kmap(pages_in[page_in_index]);
275			workspace->strm.next_in = data_in;
276			tmp = srclen - workspace->strm.total_in;
277			workspace->strm.avail_in = min(tmp,
278							   PAGE_SIZE);
279		}
280	}
281	if (ret != Z_STREAM_END)
282		ret = -EIO;
283	else
284		ret = 0;
285done:
286	zlib_inflateEnd(&workspace->strm);
287	if (data_in)
288		kunmap(pages_in[page_in_index]);
289	if (!ret)
290		zero_fill_bio(orig_bio);
291	return ret;
292}
293
294static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
295			   struct page *dest_page,
296			   unsigned long start_byte,
297			   size_t srclen, size_t destlen)
298{
299	struct workspace *workspace = list_entry(ws, struct workspace, list);
300	int ret = 0;
301	int wbits = MAX_WBITS;
302	unsigned long bytes_left;
303	unsigned long total_out = 0;
304	unsigned long pg_offset = 0;
305	char *kaddr;
306
307	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
308	bytes_left = destlen;
309
310	workspace->strm.next_in = data_in;
311	workspace->strm.avail_in = srclen;
312	workspace->strm.total_in = 0;
313
314	workspace->strm.next_out = workspace->buf;
315	workspace->strm.avail_out = PAGE_SIZE;
316	workspace->strm.total_out = 0;
317	/* If it's deflate, and it's got no preset dictionary, then
318	   we can tell zlib to skip the adler32 check. */
319	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
320	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
321	    !(((data_in[0]<<8) + data_in[1]) % 31)) {
322
323		wbits = -((data_in[0] >> 4) + 8);
324		workspace->strm.next_in += 2;
325		workspace->strm.avail_in -= 2;
326	}
327
328	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
329		pr_warn("BTRFS: inflateInit failed\n");
330		return -EIO;
331	}
332
333	while (bytes_left > 0) {
334		unsigned long buf_start;
335		unsigned long buf_offset;
336		unsigned long bytes;
337
338		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
339		if (ret != Z_OK && ret != Z_STREAM_END)
340			break;
341
342		buf_start = total_out;
343		total_out = workspace->strm.total_out;
344
345		if (total_out == buf_start) {
346			ret = -EIO;
347			break;
348		}
349
350		if (total_out <= start_byte)
351			goto next;
352
353		if (total_out > start_byte && buf_start < start_byte)
354			buf_offset = start_byte - buf_start;
355		else
356			buf_offset = 0;
357
358		bytes = min(PAGE_SIZE - pg_offset,
359			    PAGE_SIZE - buf_offset);
360		bytes = min(bytes, bytes_left);
361
362		kaddr = kmap_atomic(dest_page);
363		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
364		kunmap_atomic(kaddr);
365
366		pg_offset += bytes;
367		bytes_left -= bytes;
368next:
369		workspace->strm.next_out = workspace->buf;
370		workspace->strm.avail_out = PAGE_SIZE;
371	}
372
373	if (ret != Z_STREAM_END && bytes_left != 0)
374		ret = -EIO;
375	else
376		ret = 0;
377
378	zlib_inflateEnd(&workspace->strm);
379
380	/*
381	 * this should only happen if zlib returned fewer bytes than we
382	 * expected.  btrfs_get_block is responsible for zeroing from the
383	 * end of the inline extent (destlen) to the end of the page
384	 */
385	if (pg_offset < destlen) {
386		kaddr = kmap_atomic(dest_page);
387		memset(kaddr + pg_offset, 0, destlen - pg_offset);
388		kunmap_atomic(kaddr);
389	}
390	return ret;
391}
392
393static void zlib_set_level(struct list_head *ws, unsigned int type)
394{
395	struct workspace *workspace = list_entry(ws, struct workspace, list);
396	unsigned level = (type & 0xF0) >> 4;
397
398	if (level > 9)
399		level = 9;
400
401	workspace->level = level > 0 ? level : 3;
402}
403
404const struct btrfs_compress_op btrfs_zlib_compress = {
405	.alloc_workspace	= zlib_alloc_workspace,
406	.free_workspace		= zlib_free_workspace,
407	.compress_pages		= zlib_compress_pages,
408	.decompress_bio		= zlib_decompress_bio,
409	.decompress		= zlib_decompress,
410	.set_level              = zlib_set_level,
411};
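On the compression side both versions drive zlib_deflate() with Z_SYNC_FLUSH while input is still being fed, then finish the stream with Z_FINISH; the v6.13.7 code keeps calling deflate with Z_FINISH and a fresh output folio until Z_STREAM_END comes back. Here is a minimal userspace sketch of that finishing loop, again assuming the standard zlib.h API rather than the kernel interface, with a plain FILE * sink standing in for the folio array.

/*
 * Sketch only: userspace zlib. After all input has been handed over,
 * keep calling deflate(Z_FINISH), giving the stream fresh output space
 * whenever avail_out reaches zero, until it reports Z_STREAM_END.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK 4096	/* stand-in for one output folio */

static long compress_all(const unsigned char *src, size_t srclen, FILE *out)
{
	unsigned char buf[CHUNK];
	z_stream strm;
	long total;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
		return -1;

	strm.next_in = (unsigned char *)src;
	strm.avail_in = srclen;

	/* No more input: flush until the stream ends. */
	do {
		strm.next_out = buf;
		strm.avail_out = CHUNK;
		ret = deflate(&strm, Z_FINISH);
		if (ret != Z_OK && ret != Z_BUF_ERROR && ret != Z_STREAM_END) {
			deflateEnd(&strm);
			return -1;
		}
		fwrite(buf, 1, CHUNK - strm.avail_out, out);
	} while (ret != Z_STREAM_END);

	total = (long)strm.total_out;
	deflateEnd(&strm);
	return total;
}

Z_BUF_ERROR is tolerated just as in the kernel loop above: it only means deflate wants the fresh output buffer that the next iteration supplies.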