Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 *
5 * Based on jffs2 zlib code:
6 * Copyright © 2001-2007 Red Hat, Inc.
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 */
9
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/zlib.h>
13#include <linux/zutil.h>
14#include <linux/mm.h>
15#include <linux/init.h>
16#include <linux/err.h>
17#include <linux/sched.h>
18#include <linux/pagemap.h>
19#include <linux/bio.h>
20#include <linux/refcount.h>
21#include "btrfs_inode.h"
22#include "compression.h"
23#include "fs.h"
24#include "subpage.h"
25
26/* workspace buffer size for s390 zlib hardware support */
27#define ZLIB_DFLTCC_BUF_SIZE (4 * PAGE_SIZE)
28
29struct workspace {
30 z_stream strm;
31 char *buf;
32 unsigned int buf_size;
33 struct list_head list;
34 int level;
35};
36
37static struct workspace_manager wsm;
38
39struct list_head *zlib_get_workspace(unsigned int level)
40{
41 struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
42 struct workspace *workspace = list_entry(ws, struct workspace, list);
43
44 workspace->level = level;
45
46 return ws;
47}
48
49void zlib_free_workspace(struct list_head *ws)
50{
51 struct workspace *workspace = list_entry(ws, struct workspace, list);
52
53 kvfree(workspace->strm.workspace);
54 kfree(workspace->buf);
55 kfree(workspace);
56}
57
58struct list_head *zlib_alloc_workspace(unsigned int level)
59{
60 struct workspace *workspace;
61 int workspacesize;
62
63 workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
64 if (!workspace)
65 return ERR_PTR(-ENOMEM);
66
67 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
68 zlib_inflate_workspacesize());
69 workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL | __GFP_NOWARN);
70 workspace->level = level;
71 workspace->buf = NULL;
72 /*
73 * In case of s390 zlib hardware support, allocate lager workspace
74 * buffer. If allocator fails, fall back to a single page buffer.
75 */
76 if (zlib_deflate_dfltcc_enabled()) {
77 workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
78 __GFP_NOMEMALLOC | __GFP_NORETRY |
79 __GFP_NOWARN | GFP_NOIO);
80 workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
81 }
82 if (!workspace->buf) {
83 workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
84 workspace->buf_size = PAGE_SIZE;
85 }
86 if (!workspace->strm.workspace || !workspace->buf)
87 goto fail;
88
89 INIT_LIST_HEAD(&workspace->list);
90
91 return &workspace->list;
92fail:
93 zlib_free_workspace(&workspace->list);
94 return ERR_PTR(-ENOMEM);
95}
96
97int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
98 u64 start, struct folio **folios, unsigned long *out_folios,
99 unsigned long *total_in, unsigned long *total_out)
100{
101 struct workspace *workspace = list_entry(ws, struct workspace, list);
102 int ret;
103 char *data_in = NULL;
104 char *cfolio_out;
105 int nr_folios = 0;
106 struct folio *in_folio = NULL;
107 struct folio *out_folio = NULL;
108 unsigned long bytes_left;
109 unsigned int in_buf_folios;
110 unsigned long len = *total_out;
111 unsigned long nr_dest_folios = *out_folios;
112 const unsigned long max_out = nr_dest_folios * PAGE_SIZE;
113 const u64 orig_end = start + len;
114
115 *out_folios = 0;
116 *total_out = 0;
117 *total_in = 0;
118
119 ret = zlib_deflateInit(&workspace->strm, workspace->level);
120 if (unlikely(ret != Z_OK)) {
121 struct btrfs_inode *inode = BTRFS_I(mapping->host);
122
123 btrfs_err(inode->root->fs_info,
124 "zlib compression init failed, error %d root %llu inode %llu offset %llu",
125 ret, btrfs_root_id(inode->root), btrfs_ino(inode), start);
126 ret = -EIO;
127 goto out;
128 }
129
130 workspace->strm.total_in = 0;
131 workspace->strm.total_out = 0;
132
133 out_folio = btrfs_alloc_compr_folio();
134 if (out_folio == NULL) {
135 ret = -ENOMEM;
136 goto out;
137 }
138 cfolio_out = folio_address(out_folio);
139 folios[0] = out_folio;
140 nr_folios = 1;
141
142 workspace->strm.next_in = workspace->buf;
143 workspace->strm.avail_in = 0;
144 workspace->strm.next_out = cfolio_out;
145 workspace->strm.avail_out = PAGE_SIZE;
146
147 while (workspace->strm.total_in < len) {
148 /*
149 * Get next input pages and copy the contents to
150 * the workspace buffer if required.
151 */
152 if (workspace->strm.avail_in == 0) {
153 bytes_left = len - workspace->strm.total_in;
154 in_buf_folios = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
155 workspace->buf_size / PAGE_SIZE);
156 if (in_buf_folios > 1) {
157 int i;
158
159 /* S390 hardware acceleration path, not subpage. */
160 ASSERT(!btrfs_is_subpage(
161 inode_to_fs_info(mapping->host),
162 mapping));
163 for (i = 0; i < in_buf_folios; i++) {
164 if (data_in) {
165 kunmap_local(data_in);
166 folio_put(in_folio);
167 data_in = NULL;
168 }
169 ret = btrfs_compress_filemap_get_folio(mapping,
170 start, &in_folio);
171 if (ret < 0)
172 goto out;
173 data_in = kmap_local_folio(in_folio, 0);
174 copy_page(workspace->buf + i * PAGE_SIZE,
175 data_in);
176 start += PAGE_SIZE;
177 }
178 workspace->strm.next_in = workspace->buf;
179 workspace->strm.avail_in = min(bytes_left,
180 in_buf_folios << PAGE_SHIFT);
181 } else {
182 unsigned int pg_off;
183 unsigned int cur_len;
184
185 if (data_in) {
186 kunmap_local(data_in);
187 folio_put(in_folio);
188 data_in = NULL;
189 }
190 ret = btrfs_compress_filemap_get_folio(mapping,
191 start, &in_folio);
192 if (ret < 0)
193 goto out;
194 pg_off = offset_in_page(start);
195 cur_len = btrfs_calc_input_length(orig_end, start);
196 data_in = kmap_local_folio(in_folio, pg_off);
197 start += cur_len;
198 workspace->strm.next_in = data_in;
199 workspace->strm.avail_in = cur_len;
200 }
201 }
202
203 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
204 if (unlikely(ret != Z_OK)) {
205 struct btrfs_inode *inode = BTRFS_I(mapping->host);
206
207 btrfs_warn(inode->root->fs_info,
208 "zlib compression failed, error %d root %llu inode %llu offset %llu",
209 ret, btrfs_root_id(inode->root), btrfs_ino(inode),
210 start);
211 zlib_deflateEnd(&workspace->strm);
212 ret = -EIO;
213 goto out;
214 }
215
216 /* we're making it bigger, give up */
217 if (workspace->strm.total_in > 8192 &&
218 workspace->strm.total_in <
219 workspace->strm.total_out) {
220 ret = -E2BIG;
221 goto out;
222 }
223 /* we need another page for writing out. Test this
224 * before the total_in so we will pull in a new page for
225 * the stream end if required
226 */
227 if (workspace->strm.avail_out == 0) {
228 if (nr_folios == nr_dest_folios) {
229 ret = -E2BIG;
230 goto out;
231 }
232 out_folio = btrfs_alloc_compr_folio();
233 if (out_folio == NULL) {
234 ret = -ENOMEM;
235 goto out;
236 }
237 cfolio_out = folio_address(out_folio);
238 folios[nr_folios] = out_folio;
239 nr_folios++;
240 workspace->strm.avail_out = PAGE_SIZE;
241 workspace->strm.next_out = cfolio_out;
242 }
243 /* we're all done */
244 if (workspace->strm.total_in >= len)
245 break;
246 if (workspace->strm.total_out > max_out)
247 break;
248 }
249 workspace->strm.avail_in = 0;
250 /*
251 * Call deflate with Z_FINISH flush parameter providing more output
252 * space but no more input data, until it returns with Z_STREAM_END.
253 */
254 while (ret != Z_STREAM_END) {
255 ret = zlib_deflate(&workspace->strm, Z_FINISH);
256 if (ret == Z_STREAM_END)
257 break;
258 if (ret != Z_OK && ret != Z_BUF_ERROR) {
259 zlib_deflateEnd(&workspace->strm);
260 ret = -EIO;
261 goto out;
262 } else if (workspace->strm.avail_out == 0) {
263 /* Get another folio for the stream end. */
264 if (nr_folios == nr_dest_folios) {
265 ret = -E2BIG;
266 goto out;
267 }
268 out_folio = btrfs_alloc_compr_folio();
269 if (out_folio == NULL) {
270 ret = -ENOMEM;
271 goto out;
272 }
273 cfolio_out = folio_address(out_folio);
274 folios[nr_folios] = out_folio;
275 nr_folios++;
276 workspace->strm.avail_out = PAGE_SIZE;
277 workspace->strm.next_out = cfolio_out;
278 }
279 }
280 zlib_deflateEnd(&workspace->strm);
281
282 if (workspace->strm.total_out >= workspace->strm.total_in) {
283 ret = -E2BIG;
284 goto out;
285 }
286
287 ret = 0;
288 *total_out = workspace->strm.total_out;
289 *total_in = workspace->strm.total_in;
290out:
291 *out_folios = nr_folios;
292 if (data_in) {
293 kunmap_local(data_in);
294 folio_put(in_folio);
295 }
296
297 return ret;
298}
299
300int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
301{
302 struct workspace *workspace = list_entry(ws, struct workspace, list);
303 int ret = 0, ret2;
304 int wbits = MAX_WBITS;
305 char *data_in;
306 size_t total_out = 0;
307 unsigned long folio_in_index = 0;
308 size_t srclen = cb->compressed_len;
309 unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
310 unsigned long buf_start;
311 struct folio **folios_in = cb->compressed_folios;
312
313 data_in = kmap_local_folio(folios_in[folio_in_index], 0);
314 workspace->strm.next_in = data_in;
315 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
316 workspace->strm.total_in = 0;
317
318 workspace->strm.total_out = 0;
319 workspace->strm.next_out = workspace->buf;
320 workspace->strm.avail_out = workspace->buf_size;
321
322 /* If it's deflate, and it's got no preset dictionary, then
323 we can tell zlib to skip the adler32 check. */
324 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
325 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
326 !(((data_in[0]<<8) + data_in[1]) % 31)) {
327
328 wbits = -((data_in[0] >> 4) + 8);
329 workspace->strm.next_in += 2;
330 workspace->strm.avail_in -= 2;
331 }
332
333 ret = zlib_inflateInit2(&workspace->strm, wbits);
334 if (unlikely(ret != Z_OK)) {
335 struct btrfs_inode *inode = cb->bbio.inode;
336
337 kunmap_local(data_in);
338 btrfs_err(inode->root->fs_info,
339 "zlib decompression init failed, error %d root %llu inode %llu offset %llu",
340 ret, btrfs_root_id(inode->root), btrfs_ino(inode), cb->start);
341 return -EIO;
342 }
343 while (workspace->strm.total_in < srclen) {
344 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
345 if (ret != Z_OK && ret != Z_STREAM_END)
346 break;
347
348 buf_start = total_out;
349 total_out = workspace->strm.total_out;
350
351 /* we didn't make progress in this inflate call, we're done */
352 if (buf_start == total_out)
353 break;
354
355 ret2 = btrfs_decompress_buf2page(workspace->buf,
356 total_out - buf_start, cb, buf_start);
357 if (ret2 == 0) {
358 ret = 0;
359 goto done;
360 }
361
362 workspace->strm.next_out = workspace->buf;
363 workspace->strm.avail_out = workspace->buf_size;
364
365 if (workspace->strm.avail_in == 0) {
366 unsigned long tmp;
367 kunmap_local(data_in);
368 folio_in_index++;
369 if (folio_in_index >= total_folios_in) {
370 data_in = NULL;
371 break;
372 }
373 data_in = kmap_local_folio(folios_in[folio_in_index], 0);
374 workspace->strm.next_in = data_in;
375 tmp = srclen - workspace->strm.total_in;
376 workspace->strm.avail_in = min(tmp, PAGE_SIZE);
377 }
378 }
379 if (unlikely(ret != Z_STREAM_END)) {
380 btrfs_err(cb->bbio.inode->root->fs_info,
381 "zlib decompression failed, error %d root %llu inode %llu offset %llu",
382 ret, btrfs_root_id(cb->bbio.inode->root),
383 btrfs_ino(cb->bbio.inode), cb->start);
384 ret = -EIO;
385 } else {
386 ret = 0;
387 }
388done:
389 zlib_inflateEnd(&workspace->strm);
390 if (data_in)
391 kunmap_local(data_in);
392 return ret;
393}
394
395int zlib_decompress(struct list_head *ws, const u8 *data_in,
396 struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
397 size_t destlen)
398{
399 struct workspace *workspace = list_entry(ws, struct workspace, list);
400 int ret = 0;
401 int wbits = MAX_WBITS;
402 unsigned long to_copy;
403
404 workspace->strm.next_in = data_in;
405 workspace->strm.avail_in = srclen;
406 workspace->strm.total_in = 0;
407
408 workspace->strm.next_out = workspace->buf;
409 workspace->strm.avail_out = workspace->buf_size;
410 workspace->strm.total_out = 0;
411 /* If it's deflate, and it's got no preset dictionary, then
412 we can tell zlib to skip the adler32 check. */
413 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
414 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
415 !(((data_in[0]<<8) + data_in[1]) % 31)) {
416
417 wbits = -((data_in[0] >> 4) + 8);
418 workspace->strm.next_in += 2;
419 workspace->strm.avail_in -= 2;
420 }
421
422 ret = zlib_inflateInit2(&workspace->strm, wbits);
423 if (unlikely(ret != Z_OK)) {
424 struct btrfs_inode *inode = folio_to_inode(dest_folio);
425
426 btrfs_err(inode->root->fs_info,
427 "zlib decompression init failed, error %d root %llu inode %llu offset %llu",
428 ret, btrfs_root_id(inode->root), btrfs_ino(inode),
429 folio_pos(dest_folio));
430 return -EIO;
431 }
432
433 /*
434 * Everything (in/out buf) should be at most one sector, there should
435 * be no need to switch any input/output buffer.
436 */
437 ret = zlib_inflate(&workspace->strm, Z_FINISH);
438 to_copy = min(workspace->strm.total_out, destlen);
439 if (ret != Z_STREAM_END)
440 goto out;
441
442 memcpy_to_folio(dest_folio, dest_pgoff, workspace->buf, to_copy);
443
444out:
445 if (unlikely(to_copy != destlen)) {
446 struct btrfs_inode *inode = folio_to_inode(dest_folio);
447
448 btrfs_err(inode->root->fs_info,
449"zlib decompression failed, error %d root %llu inode %llu offset %llu decompressed %lu expected %zu",
450 ret, btrfs_root_id(inode->root), btrfs_ino(inode),
451 folio_pos(dest_folio), to_copy, destlen);
452 ret = -EIO;
453 } else {
454 ret = 0;
455 }
456
457 zlib_inflateEnd(&workspace->strm);
458
459 if (unlikely(to_copy < destlen))
460 folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy);
461 return ret;
462}
463
464const struct btrfs_compress_op btrfs_zlib_compress = {
465 .workspace_manager = &wsm,
466 .max_level = 9,
467 .default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
468};
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 *
5 * Based on jffs2 zlib code:
6 * Copyright © 2001-2007 Red Hat, Inc.
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 */
9
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/zlib.h>
13#include <linux/zutil.h>
14#include <linux/mm.h>
15#include <linux/init.h>
16#include <linux/err.h>
17#include <linux/sched.h>
18#include <linux/pagemap.h>
19#include <linux/bio.h>
20#include <linux/refcount.h>
21#include "compression.h"
22
23struct workspace {
24 z_stream strm;
25 char *buf;
26 struct list_head list;
27 int level;
28};
29
30static struct workspace_manager wsm;
31
32static void zlib_init_workspace_manager(void)
33{
34 btrfs_init_workspace_manager(&wsm, &btrfs_zlib_compress);
35}
36
37static void zlib_cleanup_workspace_manager(void)
38{
39 btrfs_cleanup_workspace_manager(&wsm);
40}
41
42static struct list_head *zlib_get_workspace(unsigned int level)
43{
44 struct list_head *ws = btrfs_get_workspace(&wsm, level);
45 struct workspace *workspace = list_entry(ws, struct workspace, list);
46
47 workspace->level = level;
48
49 return ws;
50}
51
52static void zlib_put_workspace(struct list_head *ws)
53{
54 btrfs_put_workspace(&wsm, ws);
55}
56
57static void zlib_free_workspace(struct list_head *ws)
58{
59 struct workspace *workspace = list_entry(ws, struct workspace, list);
60
61 kvfree(workspace->strm.workspace);
62 kfree(workspace->buf);
63 kfree(workspace);
64}
65
66static struct list_head *zlib_alloc_workspace(unsigned int level)
67{
68 struct workspace *workspace;
69 int workspacesize;
70
71 workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
72 if (!workspace)
73 return ERR_PTR(-ENOMEM);
74
75 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
76 zlib_inflate_workspacesize());
77 workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
78 workspace->level = level;
79 workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
80 if (!workspace->strm.workspace || !workspace->buf)
81 goto fail;
82
83 INIT_LIST_HEAD(&workspace->list);
84
85 return &workspace->list;
86fail:
87 zlib_free_workspace(&workspace->list);
88 return ERR_PTR(-ENOMEM);
89}
90
91static int zlib_compress_pages(struct list_head *ws,
92 struct address_space *mapping,
93 u64 start,
94 struct page **pages,
95 unsigned long *out_pages,
96 unsigned long *total_in,
97 unsigned long *total_out)
98{
99 struct workspace *workspace = list_entry(ws, struct workspace, list);
100 int ret;
101 char *data_in;
102 char *cpage_out;
103 int nr_pages = 0;
104 struct page *in_page = NULL;
105 struct page *out_page = NULL;
106 unsigned long bytes_left;
107 unsigned long len = *total_out;
108 unsigned long nr_dest_pages = *out_pages;
109 const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
110
111 *out_pages = 0;
112 *total_out = 0;
113 *total_in = 0;
114
115 if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
116 pr_warn("BTRFS: deflateInit failed\n");
117 ret = -EIO;
118 goto out;
119 }
120
121 workspace->strm.total_in = 0;
122 workspace->strm.total_out = 0;
123
124 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
125 data_in = kmap(in_page);
126
127 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
128 if (out_page == NULL) {
129 ret = -ENOMEM;
130 goto out;
131 }
132 cpage_out = kmap(out_page);
133 pages[0] = out_page;
134 nr_pages = 1;
135
136 workspace->strm.next_in = data_in;
137 workspace->strm.next_out = cpage_out;
138 workspace->strm.avail_out = PAGE_SIZE;
139 workspace->strm.avail_in = min(len, PAGE_SIZE);
140
141 while (workspace->strm.total_in < len) {
142 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
143 if (ret != Z_OK) {
144 pr_debug("BTRFS: deflate in loop returned %d\n",
145 ret);
146 zlib_deflateEnd(&workspace->strm);
147 ret = -EIO;
148 goto out;
149 }
150
151 /* we're making it bigger, give up */
152 if (workspace->strm.total_in > 8192 &&
153 workspace->strm.total_in <
154 workspace->strm.total_out) {
155 ret = -E2BIG;
156 goto out;
157 }
158 /* we need another page for writing out. Test this
159 * before the total_in so we will pull in a new page for
160 * the stream end if required
161 */
162 if (workspace->strm.avail_out == 0) {
163 kunmap(out_page);
164 if (nr_pages == nr_dest_pages) {
165 out_page = NULL;
166 ret = -E2BIG;
167 goto out;
168 }
169 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
170 if (out_page == NULL) {
171 ret = -ENOMEM;
172 goto out;
173 }
174 cpage_out = kmap(out_page);
175 pages[nr_pages] = out_page;
176 nr_pages++;
177 workspace->strm.avail_out = PAGE_SIZE;
178 workspace->strm.next_out = cpage_out;
179 }
180 /* we're all done */
181 if (workspace->strm.total_in >= len)
182 break;
183
184 /* we've read in a full page, get a new one */
185 if (workspace->strm.avail_in == 0) {
186 if (workspace->strm.total_out > max_out)
187 break;
188
189 bytes_left = len - workspace->strm.total_in;
190 kunmap(in_page);
191 put_page(in_page);
192
193 start += PAGE_SIZE;
194 in_page = find_get_page(mapping,
195 start >> PAGE_SHIFT);
196 data_in = kmap(in_page);
197 workspace->strm.avail_in = min(bytes_left,
198 PAGE_SIZE);
199 workspace->strm.next_in = data_in;
200 }
201 }
202 workspace->strm.avail_in = 0;
203 ret = zlib_deflate(&workspace->strm, Z_FINISH);
204 zlib_deflateEnd(&workspace->strm);
205
206 if (ret != Z_STREAM_END) {
207 ret = -EIO;
208 goto out;
209 }
210
211 if (workspace->strm.total_out >= workspace->strm.total_in) {
212 ret = -E2BIG;
213 goto out;
214 }
215
216 ret = 0;
217 *total_out = workspace->strm.total_out;
218 *total_in = workspace->strm.total_in;
219out:
220 *out_pages = nr_pages;
221 if (out_page)
222 kunmap(out_page);
223
224 if (in_page) {
225 kunmap(in_page);
226 put_page(in_page);
227 }
228 return ret;
229}
230
231static int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
232{
233 struct workspace *workspace = list_entry(ws, struct workspace, list);
234 int ret = 0, ret2;
235 int wbits = MAX_WBITS;
236 char *data_in;
237 size_t total_out = 0;
238 unsigned long page_in_index = 0;
239 size_t srclen = cb->compressed_len;
240 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
241 unsigned long buf_start;
242 struct page **pages_in = cb->compressed_pages;
243 u64 disk_start = cb->start;
244 struct bio *orig_bio = cb->orig_bio;
245
246 data_in = kmap(pages_in[page_in_index]);
247 workspace->strm.next_in = data_in;
248 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
249 workspace->strm.total_in = 0;
250
251 workspace->strm.total_out = 0;
252 workspace->strm.next_out = workspace->buf;
253 workspace->strm.avail_out = PAGE_SIZE;
254
255 /* If it's deflate, and it's got no preset dictionary, then
256 we can tell zlib to skip the adler32 check. */
257 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
258 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
259 !(((data_in[0]<<8) + data_in[1]) % 31)) {
260
261 wbits = -((data_in[0] >> 4) + 8);
262 workspace->strm.next_in += 2;
263 workspace->strm.avail_in -= 2;
264 }
265
266 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
267 pr_warn("BTRFS: inflateInit failed\n");
268 kunmap(pages_in[page_in_index]);
269 return -EIO;
270 }
271 while (workspace->strm.total_in < srclen) {
272 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
273 if (ret != Z_OK && ret != Z_STREAM_END)
274 break;
275
276 buf_start = total_out;
277 total_out = workspace->strm.total_out;
278
279 /* we didn't make progress in this inflate call, we're done */
280 if (buf_start == total_out)
281 break;
282
283 ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
284 total_out, disk_start,
285 orig_bio);
286 if (ret2 == 0) {
287 ret = 0;
288 goto done;
289 }
290
291 workspace->strm.next_out = workspace->buf;
292 workspace->strm.avail_out = PAGE_SIZE;
293
294 if (workspace->strm.avail_in == 0) {
295 unsigned long tmp;
296 kunmap(pages_in[page_in_index]);
297 page_in_index++;
298 if (page_in_index >= total_pages_in) {
299 data_in = NULL;
300 break;
301 }
302 data_in = kmap(pages_in[page_in_index]);
303 workspace->strm.next_in = data_in;
304 tmp = srclen - workspace->strm.total_in;
305 workspace->strm.avail_in = min(tmp,
306 PAGE_SIZE);
307 }
308 }
309 if (ret != Z_STREAM_END)
310 ret = -EIO;
311 else
312 ret = 0;
313done:
314 zlib_inflateEnd(&workspace->strm);
315 if (data_in)
316 kunmap(pages_in[page_in_index]);
317 if (!ret)
318 zero_fill_bio(orig_bio);
319 return ret;
320}
321
322static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
323 struct page *dest_page,
324 unsigned long start_byte,
325 size_t srclen, size_t destlen)
326{
327 struct workspace *workspace = list_entry(ws, struct workspace, list);
328 int ret = 0;
329 int wbits = MAX_WBITS;
330 unsigned long bytes_left;
331 unsigned long total_out = 0;
332 unsigned long pg_offset = 0;
333 char *kaddr;
334
335 destlen = min_t(unsigned long, destlen, PAGE_SIZE);
336 bytes_left = destlen;
337
338 workspace->strm.next_in = data_in;
339 workspace->strm.avail_in = srclen;
340 workspace->strm.total_in = 0;
341
342 workspace->strm.next_out = workspace->buf;
343 workspace->strm.avail_out = PAGE_SIZE;
344 workspace->strm.total_out = 0;
345 /* If it's deflate, and it's got no preset dictionary, then
346 we can tell zlib to skip the adler32 check. */
347 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
348 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
349 !(((data_in[0]<<8) + data_in[1]) % 31)) {
350
351 wbits = -((data_in[0] >> 4) + 8);
352 workspace->strm.next_in += 2;
353 workspace->strm.avail_in -= 2;
354 }
355
356 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
357 pr_warn("BTRFS: inflateInit failed\n");
358 return -EIO;
359 }
360
361 while (bytes_left > 0) {
362 unsigned long buf_start;
363 unsigned long buf_offset;
364 unsigned long bytes;
365
366 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
367 if (ret != Z_OK && ret != Z_STREAM_END)
368 break;
369
370 buf_start = total_out;
371 total_out = workspace->strm.total_out;
372
373 if (total_out == buf_start) {
374 ret = -EIO;
375 break;
376 }
377
378 if (total_out <= start_byte)
379 goto next;
380
381 if (total_out > start_byte && buf_start < start_byte)
382 buf_offset = start_byte - buf_start;
383 else
384 buf_offset = 0;
385
386 bytes = min(PAGE_SIZE - pg_offset,
387 PAGE_SIZE - buf_offset);
388 bytes = min(bytes, bytes_left);
389
390 kaddr = kmap_atomic(dest_page);
391 memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
392 kunmap_atomic(kaddr);
393
394 pg_offset += bytes;
395 bytes_left -= bytes;
396next:
397 workspace->strm.next_out = workspace->buf;
398 workspace->strm.avail_out = PAGE_SIZE;
399 }
400
401 if (ret != Z_STREAM_END && bytes_left != 0)
402 ret = -EIO;
403 else
404 ret = 0;
405
406 zlib_inflateEnd(&workspace->strm);
407
408 /*
409 * this should only happen if zlib returned fewer bytes than we
410 * expected. btrfs_get_block is responsible for zeroing from the
411 * end of the inline extent (destlen) to the end of the page
412 */
413 if (pg_offset < destlen) {
414 kaddr = kmap_atomic(dest_page);
415 memset(kaddr + pg_offset, 0, destlen - pg_offset);
416 kunmap_atomic(kaddr);
417 }
418 return ret;
419}
420
421const struct btrfs_compress_op btrfs_zlib_compress = {
422 .init_workspace_manager = zlib_init_workspace_manager,
423 .cleanup_workspace_manager = zlib_cleanup_workspace_manager,
424 .get_workspace = zlib_get_workspace,
425 .put_workspace = zlib_put_workspace,
426 .alloc_workspace = zlib_alloc_workspace,
427 .free_workspace = zlib_free_workspace,
428 .compress_pages = zlib_compress_pages,
429 .decompress_bio = zlib_decompress_bio,
430 .decompress = zlib_decompress,
431 .max_level = 9,
432 .default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
433};