// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "messages.h"
#include "compression.h"
#include "ctree.h"
#include "super.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     the data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For an inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, so there can be at
 *     most 3 padding zeros at the end of a sector.
 *
 * 2.2 Data payload
 *     Variable size. The maximum size is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example with a 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe   0x10
 * 0x0000   |  Header  | SegHdr 01 | Data payload 01 ...              |
 * ...
 * 0x0ff0   | SegHdr N | Data payload N ...                       |00|
 *                                                                  ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                        |
 */

#define WORKSPACE_BUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
#define WORKSPACE_CBUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))

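/*
 * Scratch buffers for one compression/decompression context.  @mem holds the
 * LZO1X_MEM_COMPRESS bytes of state needed by lzo1x_1_compress(), while the
 * workspaces themselves are kept on a list by the generic btrfs workspace
 * code so they can be reused across operations.
 */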
struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

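/*
 * Allocate one workspace; on any allocation failure everything already
 * allocated is freed and -ENOMEM is returned as an ERR_PTR.
 */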
struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL);
	workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

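/*
 * The length fields described in the format comment above are little endian
 * and may be unaligned, hence the memcpy() based helpers below.
 */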
static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}
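
/*
 * Illustrative sketch only (not used by the driver): how a reader could walk
 * the on-disk format described at the top of this file, assuming the whole
 * compressed extent sits in one contiguous buffer.  The real code below works
 * on page arrays instead; the callback and the contiguous-buffer assumption
 * are hypothetical and exist purely to demonstrate the layout.
 */
static inline void lzo_walk_segments_example(const char *buf, u32 sectorsize,
					     void (*cb)(const char *data, u32 len))
{
	/* The first LE32 is the total length, including this header. */
	u32 total = read_compress_length(buf);
	u32 cur = LZO_LEN;

	while (cur < total) {
		u32 seg_len = read_compress_length(buf + cur);

		cur += LZO_LEN;
		if (cb)
			cb(buf + cur, seg_len);
		cur += seg_len;

		/*
		 * Segment headers never cross a sector boundary: if fewer
		 * than LZO_LEN bytes remain in this sector they are padding
		 * zeros, skip them.
		 */
		if (sectorsize - (cur % sectorsize) < LZO_LEN)
			cur = round_up(cur, sectorsize);
	}
}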

/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header
 *   If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header to cross a sector boundary; the
	 * previous iteration should have left enough space inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap_local_page(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap_local(kaddr);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap_local_page(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0,
	       sector_bytes_left);
	*cur_out += sector_bytes_left;

out:
	kunmap_local(kaddr);
	return 0;
}

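/*
 * Compress the range [@start, @start + *total_out) of @mapping into the
 * @pages array, which may hold at most *out_pages pages.
 *
 * Each input sector is compressed into its own segment, following the format
 * described at the top of this file.  On success *total_in and *total_out
 * describe the compressed result and *out_pages is the number of pages used;
 * -E2BIG means the data did not compress well enough or would not fit into
 * the given pages.
 */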
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the total
	 * compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap_local_page(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap_local(data_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Bail out if the compressed data is getting larger than the
		 * input once we are past the first two sectors.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * The payload has no padding; we only need to handle switching between pages.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
				     orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		memcpy_from_page(dest + *cur_in - orig_in, cur_page,
				 offset_in_page(*cur_in), copy_len);

		*cur_in += copy_len;
	}
}

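/*
 * Decompress a whole compressed extent described by @cb.
 *
 * Validate the extent header, then walk the segments: each payload is copied
 * into the workspace bounce buffer, decompressed (producing at most one
 * sector of output) and handed to btrfs_decompress_buf2page() to be copied
 * into the destination pages.
 */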
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap_local_page(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap_local(kaddr);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length must not exceed the maximum extent length, and all
	 * sectors must be used.  If either condition is violated, the
	 * compressed extent is corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside the current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap_local_page(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap_local(kaddr);
		cur_in += LZO_LEN;

		if (seg_len > WORKSPACE_CBUF_LENGTH) {
			/*
			 * seg_len shouldn't be larger than we have allocated
			 * for workspace->cbuf
			 */
			btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
				  seg_len);
			ret = -EIO;
			goto out;
		}

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

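/*
 * Decompress a single segment into @dest_page, typically for inlined extents
 * which, per the format description above, contain exactly one segment.
 *
 * @data_in points at the extent header; decompressed data starting at
 * @start_byte is copied into @dest_page and any shortfall up to @destlen is
 * zero filled.
 */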
int lzo_decompress(struct list_head *ws, const u8 *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's move
	 * this check closer to the memcpy()/memset().
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock is doing a zero on the tail of the page too,
	 * but this will cover anything missing from the decompressed
	 * data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

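/*
 * LZO has no tunable compression levels, hence a single level that is both
 * the maximum and the default.
 */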
const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};