// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/zlib.h>
#include <linux/zutil.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/refcount.h>
#include "compression.h"

/* workspace buffer size for s390 zlib hardware support */
#define ZLIB_DFLTCC_BUF_SIZE	(4 * PAGE_SIZE)

struct workspace {
	z_stream strm;
	char *buf;
	unsigned int buf_size;
	struct list_head list;
	int level;
};

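/*
 * Workspaces are cached and handed out by the common workspace manager in
 * compression.c (btrfs_get_workspace()), rather than being allocated fresh
 * for every compression request.
 */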
static struct workspace_manager wsm;

struct list_head *zlib_get_workspace(unsigned int level)
{
	struct list_head *ws = btrfs_get_workspace(BTRFS_COMPRESS_ZLIB, level);
	struct workspace *workspace = list_entry(ws, struct workspace, list);

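	/*
	 * The cached workspace may have last been used with a different
	 * level; record the level requested for this call so deflateInit()
	 * in zlib_compress_pages() picks it up.
	 */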
	workspace->level = level;

	return ws;
}

void zlib_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->strm.workspace);
	kfree(workspace->buf);
	kfree(workspace);
}

struct list_head *zlib_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;
	int workspacesize;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

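	/*
	 * The same zlib workspace is reused for both directions, so it must
	 * be sized for whichever of deflate and inflate needs more room.
	 */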
	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			    zlib_inflate_workspacesize());
	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
	workspace->level = level;
	workspace->buf = NULL;
	/*
	 * In case of s390 zlib hardware support, allocate a larger workspace
	 * buffer. If the allocation fails, fall back to a single page buffer.
	 */
	if (zlib_deflate_dfltcc_enabled()) {
		workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
					 __GFP_NOMEMALLOC | __GFP_NORETRY |
					 __GFP_NOWARN | GFP_NOIO);
		workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
	}
	if (!workspace->buf) {
		workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		workspace->buf_size = PAGE_SIZE;
	}
	if (!workspace->strm.workspace || !workspace->buf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	zlib_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret;
	char *data_in;
	char *cpage_out;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned int in_buf_pages;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
		pr_warn("BTRFS: deflateInit failed\n");
		ret = -EIO;
		goto out;
	}

	workspace->strm.total_in = 0;
	workspace->strm.total_out = 0;

	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	pages[0] = out_page;
	nr_pages = 1;

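	/*
	 * Start with an empty input buffer; it is (re)filled at the top of
	 * the loop below, either directly from a mapped page or staged
	 * through the workspace buffer.
	 */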
	workspace->strm.next_in = workspace->buf;
	workspace->strm.avail_in = 0;
	workspace->strm.next_out = cpage_out;
	workspace->strm.avail_out = PAGE_SIZE;

	while (workspace->strm.total_in < len) {
		/*
		 * Get next input pages and copy the contents to
		 * the workspace buffer if required.
		 */
		if (workspace->strm.avail_in == 0) {
			bytes_left = len - workspace->strm.total_in;
			in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
					   workspace->buf_size / PAGE_SIZE);
			if (in_buf_pages > 1) {
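				/*
				 * More than one page fits in the workspace
				 * buffer (only the case with the larger s390
				 * DFLTCC buffer): copy several input pages
				 * into the contiguous buffer so a single
				 * deflate call can consume a bigger chunk.
				 */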
				int i;

				for (i = 0; i < in_buf_pages; i++) {
					if (in_page) {
						kunmap(in_page);
						put_page(in_page);
					}
					in_page = find_get_page(mapping,
								start >> PAGE_SHIFT);
					data_in = kmap(in_page);
					memcpy(workspace->buf + i * PAGE_SIZE,
					       data_in, PAGE_SIZE);
					start += PAGE_SIZE;
				}
				workspace->strm.next_in = workspace->buf;
			} else {
				if (in_page) {
					kunmap(in_page);
					put_page(in_page);
				}
				in_page = find_get_page(mapping,
							start >> PAGE_SHIFT);
				data_in = kmap(in_page);
				start += PAGE_SIZE;
				workspace->strm.next_in = data_in;
			}
			workspace->strm.avail_in = min(bytes_left,
						       (unsigned long) workspace->buf_size);
		}

		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
		if (ret != Z_OK) {
			pr_debug("BTRFS: deflate in loop returned %d\n",
				 ret);
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		}

		/* we're making it bigger, give up */
		if (workspace->strm.total_in > 8192 &&
		    workspace->strm.total_in <
		    workspace->strm.total_out) {
			ret = -E2BIG;
			goto out;
		}
		/* we need another page for writing out. Test this
		 * before the total_in so we will pull in a new page for
		 * the stream end if required
		 */
		if (workspace->strm.avail_out == 0) {
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
		/* we're all done */
		if (workspace->strm.total_in >= len)
			break;
		if (workspace->strm.total_out > max_out)
			break;
	}
	workspace->strm.avail_in = 0;
	/*
	 * Call deflate with Z_FINISH flush parameter providing more output
	 * space but no more input data, until it returns with Z_STREAM_END.
	 */
	while (ret != Z_STREAM_END) {
		ret = zlib_deflate(&workspace->strm, Z_FINISH);
		if (ret == Z_STREAM_END)
			break;
		if (ret != Z_OK && ret != Z_BUF_ERROR) {
			zlib_deflateEnd(&workspace->strm);
			ret = -EIO;
			goto out;
		} else if (workspace->strm.avail_out == 0) {
			/* get another page for the stream end */
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			cpage_out = kmap(out_page);
			pages[nr_pages] = out_page;
			nr_pages++;
			workspace->strm.avail_out = PAGE_SIZE;
			workspace->strm.next_out = cpage_out;
		}
	}
	zlib_deflateEnd(&workspace->strm);

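	/*
	 * No overall savings: report -E2BIG so the caller can fall back to
	 * writing the data uncompressed.
	 */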
	if (workspace->strm.total_out >= workspace->strm.total_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_out = workspace->strm.total_out;
	*total_in = workspace->strm.total_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	return ret;
}

int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0, ret2;
	int wbits = MAX_WBITS;
	char *data_in;
	size_t total_out = 0;
	unsigned long page_in_index = 0;
	size_t srclen = cb->compressed_len;
	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
	unsigned long buf_start;
	struct page **pages_in = cb->compressed_pages;
	u64 disk_start = cb->start;
	struct bio *orig_bio = cb->orig_bio;

	data_in = kmap(pages_in[page_in_index]);
	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
	workspace->strm.total_in = 0;

	workspace->strm.total_out = 0;
	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = workspace->buf_size;

	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		kunmap(pages_in[page_in_index]);
		return -EIO;
	}
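	/*
	 * Inflate until all compressed bytes have been consumed; each chunk
	 * of output produced in the workspace buffer is copied into the
	 * pages of the bio that requested this extent.
	 */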
	while (workspace->strm.total_in < srclen) {
		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		/* we didn't make progress in this inflate call, we're done */
		if (buf_start == total_out)
			break;

		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
						 total_out, disk_start,
						 orig_bio);
		if (ret2 == 0) {
			ret = 0;
			goto done;
		}

		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = workspace->buf_size;

		if (workspace->strm.avail_in == 0) {
			unsigned long tmp;
			kunmap(pages_in[page_in_index]);
			page_in_index++;
			if (page_in_index >= total_pages_in) {
				data_in = NULL;
				break;
			}
			data_in = kmap(pages_in[page_in_index]);
			workspace->strm.next_in = data_in;
			tmp = srclen - workspace->strm.total_in;
			workspace->strm.avail_in = min(tmp,
						       PAGE_SIZE);
		}
	}
	if (ret != Z_STREAM_END)
		ret = -EIO;
	else
		ret = 0;
done:
	zlib_inflateEnd(&workspace->strm);
	if (data_in)
		kunmap(pages_in[page_in_index]);
	if (!ret)
		zero_fill_bio(orig_bio);
	return ret;
}

int zlib_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	int wbits = MAX_WBITS;
	unsigned long bytes_left;
	unsigned long total_out = 0;
	unsigned long pg_offset = 0;
	char *kaddr;

	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes_left = destlen;

	workspace->strm.next_in = data_in;
	workspace->strm.avail_in = srclen;
	workspace->strm.total_in = 0;

	workspace->strm.next_out = workspace->buf;
	workspace->strm.avail_out = workspace->buf_size;
	workspace->strm.total_out = 0;
	/* If it's deflate, and it's got no preset dictionary, then
	   we can tell zlib to skip the adler32 check. */
	if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
	    ((data_in[0] & 0x0f) == Z_DEFLATED) &&
	    !(((data_in[0]<<8) + data_in[1]) % 31)) {

		wbits = -((data_in[0] >> 4) + 8);
		workspace->strm.next_in += 2;
		workspace->strm.avail_in -= 2;
	}

	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
		pr_warn("BTRFS: inflateInit failed\n");
		return -EIO;
	}

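	/*
	 * Inflate into the workspace buffer and copy only the bytes that
	 * fall inside [start_byte, start_byte + destlen) into dest_page.
	 */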
	while (bytes_left > 0) {
		unsigned long buf_start;
		unsigned long buf_offset;
		unsigned long bytes;

		ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END)
			break;

		buf_start = total_out;
		total_out = workspace->strm.total_out;

		if (total_out == buf_start) {
			ret = -EIO;
			break;
		}

		if (total_out <= start_byte)
			goto next;

		if (total_out > start_byte && buf_start < start_byte)
			buf_offset = start_byte - buf_start;
		else
			buf_offset = 0;

		bytes = min(PAGE_SIZE - pg_offset,
			    PAGE_SIZE - (buf_offset % PAGE_SIZE));
		bytes = min(bytes, bytes_left);

		kaddr = kmap_atomic(dest_page);
		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
		kunmap_atomic(kaddr);

		pg_offset += bytes;
		bytes_left -= bytes;
next:
		workspace->strm.next_out = workspace->buf;
		workspace->strm.avail_out = workspace->buf_size;
	}

	if (ret != Z_STREAM_END && bytes_left != 0)
		ret = -EIO;
	else
		ret = 0;

	zlib_inflateEnd(&workspace->strm);

	/*
	 * this should only happen if zlib returned fewer bytes than we
	 * expected. btrfs_get_block is responsible for zeroing from the
	 * end of the inline extent (destlen) to the end of the page
	 */
	if (pg_offset < destlen) {
		kaddr = kmap_atomic(dest_page);
		memset(kaddr + pg_offset, 0, destlen - pg_offset);
		kunmap_atomic(kaddr);
	}
	return ret;
}

const struct btrfs_compress_op btrfs_zlib_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 9,
	.default_level		= BTRFS_ZLIB_DEFAULT_LEVEL,
};