1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6#include <linux/kernel.h>
7#include <linux/bio.h>
8#include <linux/file.h>
9#include <linux/fs.h>
10#include <linux/pagemap.h>
11#include <linux/highmem.h>
12#include <linux/time.h>
13#include <linux/init.h>
14#include <linux/string.h>
15#include <linux/backing-dev.h>
16#include <linux/writeback.h>
17#include <linux/slab.h>
18#include <linux/sched/mm.h>
19#include <linux/log2.h>
20#include <crypto/hash.h>
21#include "misc.h"
22#include "ctree.h"
23#include "disk-io.h"
24#include "transaction.h"
25#include "btrfs_inode.h"
26#include "volumes.h"
27#include "ordered-data.h"
28#include "compression.h"
29#include "extent_io.h"
30#include "extent_map.h"
31
32int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
33 u64 start, struct page **pages, unsigned long *out_pages,
34 unsigned long *total_in, unsigned long *total_out);
35int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
36int zlib_decompress(struct list_head *ws, unsigned char *data_in,
37 struct page *dest_page, unsigned long start_byte, size_t srclen,
38 size_t destlen);
39struct list_head *zlib_alloc_workspace(unsigned int level);
40void zlib_free_workspace(struct list_head *ws);
41struct list_head *zlib_get_workspace(unsigned int level);
42
43int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
44 u64 start, struct page **pages, unsigned long *out_pages,
45 unsigned long *total_in, unsigned long *total_out);
46int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
47int lzo_decompress(struct list_head *ws, unsigned char *data_in,
48 struct page *dest_page, unsigned long start_byte, size_t srclen,
49 size_t destlen);
50struct list_head *lzo_alloc_workspace(unsigned int level);
51void lzo_free_workspace(struct list_head *ws);
52
53int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
54 u64 start, struct page **pages, unsigned long *out_pages,
55 unsigned long *total_in, unsigned long *total_out);
56int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
57int zstd_decompress(struct list_head *ws, unsigned char *data_in,
58 struct page *dest_page, unsigned long start_byte, size_t srclen,
59 size_t destlen);
60void zstd_init_workspace_manager(void);
61void zstd_cleanup_workspace_manager(void);
62struct list_head *zstd_alloc_workspace(unsigned int level);
63void zstd_free_workspace(struct list_head *ws);
64struct list_head *zstd_get_workspace(unsigned int level);
65void zstd_put_workspace(struct list_head *ws);
66
67static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
68
69const char* btrfs_compress_type2str(enum btrfs_compression_type type)
70{
71 switch (type) {
72 case BTRFS_COMPRESS_ZLIB:
73 case BTRFS_COMPRESS_LZO:
74 case BTRFS_COMPRESS_ZSTD:
75 case BTRFS_COMPRESS_NONE:
76 return btrfs_compress_types[type];
77 default:
78 break;
79 }
80
81 return NULL;
82}
83
84bool btrfs_compress_is_valid_type(const char *str, size_t len)
85{
86 int i;
87
88 for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
89 size_t comp_len = strlen(btrfs_compress_types[i]);
90
91 if (len < comp_len)
92 continue;
93
94 if (!strncmp(btrfs_compress_types[i], str, comp_len))
95 return true;
96 }
97 return false;
98}
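/*
 * Informal example of the prefix match above: both "zlib" and "zlib:9" are
 * accepted, because only the first strlen("zlib") characters are compared,
 * while a string like "zl" is rejected as it is shorter than every entry.
 */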
99
100static int compression_compress_pages(int type, struct list_head *ws,
101 struct address_space *mapping, u64 start, struct page **pages,
102 unsigned long *out_pages, unsigned long *total_in,
103 unsigned long *total_out)
104{
105 switch (type) {
106 case BTRFS_COMPRESS_ZLIB:
107 return zlib_compress_pages(ws, mapping, start, pages,
108 out_pages, total_in, total_out);
109 case BTRFS_COMPRESS_LZO:
110 return lzo_compress_pages(ws, mapping, start, pages,
111 out_pages, total_in, total_out);
112 case BTRFS_COMPRESS_ZSTD:
113 return zstd_compress_pages(ws, mapping, start, pages,
114 out_pages, total_in, total_out);
115 case BTRFS_COMPRESS_NONE:
116 default:
117 /*
118 * This can't happen, the type is validated several times
119 * before we get here. As a sane fallback, return what the
120 * callers will understand as 'no compression happened'.
121 */
122 return -E2BIG;
123 }
124}
125
126static int compression_decompress_bio(int type, struct list_head *ws,
127 struct compressed_bio *cb)
128{
129 switch (type) {
130 case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
131 case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
132 case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
133 case BTRFS_COMPRESS_NONE:
134 default:
135 /*
136 * This can't happen, the type is validated several times
137 * before we get here.
138 */
139 BUG();
140 }
141}
142
143static int compression_decompress(int type, struct list_head *ws,
144 unsigned char *data_in, struct page *dest_page,
145 unsigned long start_byte, size_t srclen, size_t destlen)
146{
147 switch (type) {
148 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
149 start_byte, srclen, destlen);
150 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
151 start_byte, srclen, destlen);
152 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
153 start_byte, srclen, destlen);
154 case BTRFS_COMPRESS_NONE:
155 default:
156 /*
157 * This can't happen, the type is validated several times
158 * before we get here.
159 */
160 BUG();
161 }
162}
163
164static int btrfs_decompress_bio(struct compressed_bio *cb);
165
166static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
167 unsigned long disk_size)
168{
169 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
170
171 return sizeof(struct compressed_bio) +
172 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
173}
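/*
 * Worked example (assuming a 4KiB sectorsize and 4-byte crc32c checksums):
 * a 128KiB compressed extent yields sizeof(struct compressed_bio) plus
 * 32 * 4 bytes, i.e. room for one checksum per on-disk sector is allocated
 * together with the control structure.
 */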
174
175static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
176 u64 disk_start)
177{
178 struct btrfs_fs_info *fs_info = inode->root->fs_info;
179 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
180 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
181 struct page *page;
182 unsigned long i;
183 char *kaddr;
184 u8 csum[BTRFS_CSUM_SIZE];
185 struct compressed_bio *cb = bio->bi_private;
186 u8 *cb_sum = cb->sums;
187
188 if (inode->flags & BTRFS_INODE_NODATASUM)
189 return 0;
190
191 shash->tfm = fs_info->csum_shash;
192
193 for (i = 0; i < cb->nr_pages; i++) {
194 page = cb->compressed_pages[i];
195
196 kaddr = kmap_atomic(page);
197 crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
198 kunmap_atomic(kaddr);
199
200 if (memcmp(&csum, cb_sum, csum_size)) {
201 btrfs_print_data_csum_error(inode, disk_start,
202 csum, cb_sum, cb->mirror_num);
203 if (btrfs_io_bio(bio)->device)
204 btrfs_dev_stat_inc_and_print(
205 btrfs_io_bio(bio)->device,
206 BTRFS_DEV_STAT_CORRUPTION_ERRS);
207 return -EIO;
208 }
209 cb_sum += csum_size;
210 }
211 return 0;
212}
213
214/* when we finish reading compressed pages from the disk, we
215 * decompress them and then run the bio end_io routines on the
216 * decompressed pages (in the inode address space).
217 *
218 * This allows the checksumming and other IO error handling routines
219 * to work normally
220 *
221 * The compressed pages are freed here, and it must be run
222 * in process context
223 */
224static void end_compressed_bio_read(struct bio *bio)
225{
226 struct compressed_bio *cb = bio->bi_private;
227 struct inode *inode;
228 struct page *page;
229 unsigned long index;
230 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
231 int ret = 0;
232
233 if (bio->bi_status)
234 cb->errors = 1;
235
236 /* if there are more bios still pending for this compressed
237 * extent, just exit
238 */
239 if (!refcount_dec_and_test(&cb->pending_bios))
240 goto out;
241
242 /*
243 * Record the correct mirror_num in cb->orig_bio so that
244 * read-repair can work properly.
245 */
246 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
247 cb->mirror_num = mirror;
248
249 /*
250	 * Some IO in this cb has failed, just skip the checksum as there
251	 * is no way it could be correct.
252 */
253 if (cb->errors == 1)
254 goto csum_failed;
255
256 inode = cb->inode;
257 ret = check_compressed_csum(BTRFS_I(inode), bio,
258 (u64)bio->bi_iter.bi_sector << 9);
259 if (ret)
260 goto csum_failed;
261
262	/* ok, we're the last bio for this extent, let's start
263 * the decompression.
264 */
265 ret = btrfs_decompress_bio(cb);
266
267csum_failed:
268 if (ret)
269 cb->errors = 1;
270
271 /* release the compressed pages */
272 index = 0;
273 for (index = 0; index < cb->nr_pages; index++) {
274 page = cb->compressed_pages[index];
275 page->mapping = NULL;
276 put_page(page);
277 }
278
279 /* do io completion on the original bio */
280 if (cb->errors) {
281 bio_io_error(cb->orig_bio);
282 } else {
283 struct bio_vec *bvec;
284 struct bvec_iter_all iter_all;
285
286 /*
287 * we have verified the checksum already, set page
288 * checked so the end_io handlers know about it
289 */
290 ASSERT(!bio_flagged(bio, BIO_CLONED));
291 bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
292 SetPageChecked(bvec->bv_page);
293
294 bio_endio(cb->orig_bio);
295 }
296
297 /* finally free the cb struct */
298 kfree(cb->compressed_pages);
299 kfree(cb);
300out:
301 bio_put(bio);
302}
303
304/*
305 * Clear the writeback bits on all of the file
306 * pages for a compressed write
307 */
308static noinline void end_compressed_writeback(struct inode *inode,
309 const struct compressed_bio *cb)
310{
311 unsigned long index = cb->start >> PAGE_SHIFT;
312 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
313 struct page *pages[16];
314 unsigned long nr_pages = end_index - index + 1;
315 int i;
316 int ret;
317
318 if (cb->errors)
319 mapping_set_error(inode->i_mapping, -EIO);
320
321 while (nr_pages > 0) {
322 ret = find_get_pages_contig(inode->i_mapping, index,
323 min_t(unsigned long,
324 nr_pages, ARRAY_SIZE(pages)), pages);
325 if (ret == 0) {
326 nr_pages -= 1;
327 index += 1;
328 continue;
329 }
330 for (i = 0; i < ret; i++) {
331 if (cb->errors)
332 SetPageError(pages[i]);
333 end_page_writeback(pages[i]);
334 put_page(pages[i]);
335 }
336 nr_pages -= ret;
337 index += ret;
338 }
339 /* the inode may be gone now */
340}
341
342/*
343 * do the cleanup once all the compressed pages hit the disk.
344 * This will clear writeback on the file pages and free the compressed
345 * pages.
346 *
347 * This also calls the writeback end hooks for the file pages so that
348 * metadata and checksums can be updated in the file.
349 */
350static void end_compressed_bio_write(struct bio *bio)
351{
352 struct compressed_bio *cb = bio->bi_private;
353 struct inode *inode;
354 struct page *page;
355 unsigned long index;
356
357 if (bio->bi_status)
358 cb->errors = 1;
359
360 /* if there are more bios still pending for this compressed
361 * extent, just exit
362 */
363 if (!refcount_dec_and_test(&cb->pending_bios))
364 goto out;
365
366 /* ok, we're the last bio for this extent, step one is to
367 * call back into the FS and do all the end_io operations
368 */
369 inode = cb->inode;
370 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
371 btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
372 cb->start, cb->start + cb->len - 1,
373 bio->bi_status == BLK_STS_OK);
374 cb->compressed_pages[0]->mapping = NULL;
375
376 end_compressed_writeback(inode, cb);
377 /* note, our inode could be gone now */
378
379 /*
380 * release the compressed pages, these came from alloc_page and
381 * are not attached to the inode at all
382 */
383 index = 0;
384 for (index = 0; index < cb->nr_pages; index++) {
385 page = cb->compressed_pages[index];
386 page->mapping = NULL;
387 put_page(page);
388 }
389
390 /* finally free the cb struct */
391 kfree(cb->compressed_pages);
392 kfree(cb);
393out:
394 bio_put(bio);
395}
396
397/*
398 * worker function to build and submit bios for previously compressed pages.
399 * The corresponding pages in the inode should be marked for writeback
400 * and the compressed pages should have a reference on them for dropping
401 * when the IO is complete.
402 *
403 * This also checksums the file bytes and gets things ready for
404 * the end io hooks.
405 */
406blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
407 unsigned long len, u64 disk_start,
408 unsigned long compressed_len,
409 struct page **compressed_pages,
410 unsigned long nr_pages,
411 unsigned int write_flags,
412 struct cgroup_subsys_state *blkcg_css)
413{
414 struct btrfs_fs_info *fs_info = inode->root->fs_info;
415 struct bio *bio = NULL;
416 struct compressed_bio *cb;
417 unsigned long bytes_left;
418 int pg_index = 0;
419 struct page *page;
420 u64 first_byte = disk_start;
421 blk_status_t ret;
422 int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
423
424 WARN_ON(!PAGE_ALIGNED(start));
425 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
426 if (!cb)
427 return BLK_STS_RESOURCE;
428 refcount_set(&cb->pending_bios, 0);
429 cb->errors = 0;
430 cb->inode = &inode->vfs_inode;
431 cb->start = start;
432 cb->len = len;
433 cb->mirror_num = 0;
434 cb->compressed_pages = compressed_pages;
435 cb->compressed_len = compressed_len;
436 cb->orig_bio = NULL;
437 cb->nr_pages = nr_pages;
438
439 bio = btrfs_bio_alloc(first_byte);
440 bio->bi_opf = REQ_OP_WRITE | write_flags;
441 bio->bi_private = cb;
442 bio->bi_end_io = end_compressed_bio_write;
443
444 if (blkcg_css) {
445 bio->bi_opf |= REQ_CGROUP_PUNT;
446 kthread_associate_blkcg(blkcg_css);
447 }
448 refcount_set(&cb->pending_bios, 1);
449
450 /* create and submit bios for the compressed pages */
451 bytes_left = compressed_len;
452 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
453 int submit = 0;
454
455 page = compressed_pages[pg_index];
456 page->mapping = inode->vfs_inode.i_mapping;
457 if (bio->bi_iter.bi_size)
458 submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
459 0);
460
461 page->mapping = NULL;
462 if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
463 PAGE_SIZE) {
464 /*
465 * inc the count before we submit the bio so
466 * we know the end IO handler won't happen before
467 * we inc the count. Otherwise, the cb might get
468 * freed before we're done setting it up
469 */
470 refcount_inc(&cb->pending_bios);
471 ret = btrfs_bio_wq_end_io(fs_info, bio,
472 BTRFS_WQ_ENDIO_DATA);
473 BUG_ON(ret); /* -ENOMEM */
474
475 if (!skip_sum) {
476 ret = btrfs_csum_one_bio(inode, bio, start, 1);
477 BUG_ON(ret); /* -ENOMEM */
478 }
479
480 ret = btrfs_map_bio(fs_info, bio, 0);
481 if (ret) {
482 bio->bi_status = ret;
483 bio_endio(bio);
484 }
485
486 bio = btrfs_bio_alloc(first_byte);
487 bio->bi_opf = REQ_OP_WRITE | write_flags;
488 bio->bi_private = cb;
489 bio->bi_end_io = end_compressed_bio_write;
490 if (blkcg_css)
491 bio->bi_opf |= REQ_CGROUP_PUNT;
492 bio_add_page(bio, page, PAGE_SIZE, 0);
493 }
494 if (bytes_left < PAGE_SIZE) {
495 btrfs_info(fs_info,
496 "bytes left %lu compress len %lu nr %lu",
497 bytes_left, cb->compressed_len, cb->nr_pages);
498 }
499 bytes_left -= PAGE_SIZE;
500 first_byte += PAGE_SIZE;
501 cond_resched();
502 }
503
504 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
505 BUG_ON(ret); /* -ENOMEM */
506
507 if (!skip_sum) {
508 ret = btrfs_csum_one_bio(inode, bio, start, 1);
509 BUG_ON(ret); /* -ENOMEM */
510 }
511
512 ret = btrfs_map_bio(fs_info, bio, 0);
513 if (ret) {
514 bio->bi_status = ret;
515 bio_endio(bio);
516 }
517
518 if (blkcg_css)
519 kthread_associate_blkcg(NULL);
520
521 return 0;
522}
523
524static u64 bio_end_offset(struct bio *bio)
525{
526 struct bio_vec *last = bio_last_bvec_all(bio);
527
528 return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
529}
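/*
 * For example, if the last bvec covers a whole page of the file at offset
 * 12288 (bv_offset 0, bv_len PAGE_SIZE), this returns 12288 + PAGE_SIZE,
 * ie. the file offset just past the data already added to the bio.
 */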
530
531static noinline int add_ra_bio_pages(struct inode *inode,
532 u64 compressed_end,
533 struct compressed_bio *cb)
534{
535 unsigned long end_index;
536 unsigned long pg_index;
537 u64 last_offset;
538 u64 isize = i_size_read(inode);
539 int ret;
540 struct page *page;
541 unsigned long nr_pages = 0;
542 struct extent_map *em;
543 struct address_space *mapping = inode->i_mapping;
544 struct extent_map_tree *em_tree;
545 struct extent_io_tree *tree;
546 u64 end;
547 int misses = 0;
548
549 last_offset = bio_end_offset(cb->orig_bio);
550 em_tree = &BTRFS_I(inode)->extent_tree;
551 tree = &BTRFS_I(inode)->io_tree;
552
553 if (isize == 0)
554 return 0;
555
556 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
557
558 while (last_offset < compressed_end) {
559 pg_index = last_offset >> PAGE_SHIFT;
560
561 if (pg_index > end_index)
562 break;
563
564 page = xa_load(&mapping->i_pages, pg_index);
565 if (page && !xa_is_value(page)) {
566 misses++;
567 if (misses > 4)
568 break;
569 goto next;
570 }
571
572 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
573 ~__GFP_FS));
574 if (!page)
575 break;
576
577 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
578 put_page(page);
579 goto next;
580 }
581
582 end = last_offset + PAGE_SIZE - 1;
583 /*
584 * at this point, we have a locked page in the page cache
585 * for these bytes in the file. But, we have to make
586 * sure they map to this compressed extent on disk.
587 */
588 set_page_extent_mapped(page);
589 lock_extent(tree, last_offset, end);
590 read_lock(&em_tree->lock);
591 em = lookup_extent_mapping(em_tree, last_offset,
592 PAGE_SIZE);
593 read_unlock(&em_tree->lock);
594
595 if (!em || last_offset < em->start ||
596 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
597 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
598 free_extent_map(em);
599 unlock_extent(tree, last_offset, end);
600 unlock_page(page);
601 put_page(page);
602 break;
603 }
604 free_extent_map(em);
605
606 if (page->index == end_index) {
607 char *userpage;
608 size_t zero_offset = offset_in_page(isize);
609
610 if (zero_offset) {
611 int zeros;
612 zeros = PAGE_SIZE - zero_offset;
613 userpage = kmap_atomic(page);
614 memset(userpage + zero_offset, 0, zeros);
615 flush_dcache_page(page);
616 kunmap_atomic(userpage);
617 }
618 }
619
620 ret = bio_add_page(cb->orig_bio, page,
621 PAGE_SIZE, 0);
622
623 if (ret == PAGE_SIZE) {
624 nr_pages++;
625 put_page(page);
626 } else {
627 unlock_extent(tree, last_offset, end);
628 unlock_page(page);
629 put_page(page);
630 break;
631 }
632next:
633 last_offset += PAGE_SIZE;
634 }
635 return 0;
636}
637
638/*
639 * for a compressed read, the bio we get passed has all the inode pages
640 * in it. We don't actually do IO on those pages but allocate new ones
641 * to hold the compressed pages on disk.
642 *
643 * bio->bi_iter.bi_sector points to the compressed extent on disk
644 * bio->bi_io_vec points to all of the inode pages
645 *
646 * After the compressed pages are read, we copy the bytes into the
647 * bio we were passed and then call the bio end_io calls
648 */
649blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
650 int mirror_num, unsigned long bio_flags)
651{
652 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
653 struct extent_map_tree *em_tree;
654 struct compressed_bio *cb;
655 unsigned long compressed_len;
656 unsigned long nr_pages;
657 unsigned long pg_index;
658 struct page *page;
659 struct bio *comp_bio;
660 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
661 u64 em_len;
662 u64 em_start;
663 struct extent_map *em;
664 blk_status_t ret = BLK_STS_RESOURCE;
665 int faili = 0;
666 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
667 u8 *sums;
668
669 em_tree = &BTRFS_I(inode)->extent_tree;
670
671 /* we need the actual starting offset of this extent in the file */
672 read_lock(&em_tree->lock);
673 em = lookup_extent_mapping(em_tree,
674 page_offset(bio_first_page_all(bio)),
675 PAGE_SIZE);
676 read_unlock(&em_tree->lock);
677 if (!em)
678 return BLK_STS_IOERR;
679
680 compressed_len = em->block_len;
681 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
682 if (!cb)
683 goto out;
684
685 refcount_set(&cb->pending_bios, 0);
686 cb->errors = 0;
687 cb->inode = inode;
688 cb->mirror_num = mirror_num;
689 sums = cb->sums;
690
691 cb->start = em->orig_start;
692 em_len = em->len;
693 em_start = em->start;
694
695 free_extent_map(em);
696 em = NULL;
697
698 cb->len = bio->bi_iter.bi_size;
699 cb->compressed_len = compressed_len;
700 cb->compress_type = extent_compress_type(bio_flags);
701 cb->orig_bio = bio;
702
703 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
704 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
705 GFP_NOFS);
706 if (!cb->compressed_pages)
707 goto fail1;
708
709 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
710 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
711 __GFP_HIGHMEM);
712 if (!cb->compressed_pages[pg_index]) {
713 faili = pg_index - 1;
714 ret = BLK_STS_RESOURCE;
715 goto fail2;
716 }
717 }
718 faili = nr_pages - 1;
719 cb->nr_pages = nr_pages;
720
721 add_ra_bio_pages(inode, em_start + em_len, cb);
722
723	/* include any pages we added in add_ra_bio_pages */
724 cb->len = bio->bi_iter.bi_size;
725
726 comp_bio = btrfs_bio_alloc(cur_disk_byte);
727 comp_bio->bi_opf = REQ_OP_READ;
728 comp_bio->bi_private = cb;
729 comp_bio->bi_end_io = end_compressed_bio_read;
730 refcount_set(&cb->pending_bios, 1);
731
732 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
733 int submit = 0;
734
735 page = cb->compressed_pages[pg_index];
736 page->mapping = inode->i_mapping;
737 page->index = em_start >> PAGE_SHIFT;
738
739 if (comp_bio->bi_iter.bi_size)
740 submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
741 comp_bio, 0);
742
743 page->mapping = NULL;
744 if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
745 PAGE_SIZE) {
746 unsigned int nr_sectors;
747
748 ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
749 BTRFS_WQ_ENDIO_DATA);
750 BUG_ON(ret); /* -ENOMEM */
751
752 /*
753 * inc the count before we submit the bio so
754 * we know the end IO handler won't happen before
755 * we inc the count. Otherwise, the cb might get
756 * freed before we're done setting it up
757 */
758 refcount_inc(&cb->pending_bios);
759
760 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
761 ret = btrfs_lookup_bio_sums(inode, comp_bio,
762 (u64)-1, sums);
763 BUG_ON(ret); /* -ENOMEM */
764 }
765
766 nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
767 fs_info->sectorsize);
768 sums += csum_size * nr_sectors;
769
770 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
771 if (ret) {
772 comp_bio->bi_status = ret;
773 bio_endio(comp_bio);
774 }
775
776 comp_bio = btrfs_bio_alloc(cur_disk_byte);
777 comp_bio->bi_opf = REQ_OP_READ;
778 comp_bio->bi_private = cb;
779 comp_bio->bi_end_io = end_compressed_bio_read;
780
781 bio_add_page(comp_bio, page, PAGE_SIZE, 0);
782 }
783 cur_disk_byte += PAGE_SIZE;
784 }
785
786 ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
787 BUG_ON(ret); /* -ENOMEM */
788
789 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
790 ret = btrfs_lookup_bio_sums(inode, comp_bio, (u64)-1, sums);
791 BUG_ON(ret); /* -ENOMEM */
792 }
793
794 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
795 if (ret) {
796 comp_bio->bi_status = ret;
797 bio_endio(comp_bio);
798 }
799
800 return 0;
801
802fail2:
803 while (faili >= 0) {
804 __free_page(cb->compressed_pages[faili]);
805 faili--;
806 }
807
808 kfree(cb->compressed_pages);
809fail1:
810 kfree(cb);
811out:
812 free_extent_map(em);
813 return ret;
814}
815
816/*
817 * Heuristic uses systematic sampling to collect data from the input data
818 * range, the logic can be tuned by the following constants:
819 *
820 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
821 * @SAMPLING_INTERVAL - range from which the sampled data can be collected
822 */
823#define SAMPLING_READ_SIZE (16)
824#define SAMPLING_INTERVAL (256)
825
826/*
827 * For statistical analysis of the input data we consider bytes that form a
828 * Galois Field of 256 objects. Each object has an attribute count, ie. how
829 * many times the object appeared in the sample.
830 */
831#define BUCKET_SIZE (256)
832
833/*
834 * The size of the sample is based on a statistical sampling rule of thumb.
835 * The common way is to perform sampling tests as long as the number of
836 * elements in each cell is at least 5.
837 *
838 * Instead of 5, we choose 32 to obtain more accurate results.
839 * If the data contain the maximum number of symbols, which is 256, we obtain a
840 * sample size bound by 8192.
841 *
842 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
843 * from up to 512 locations.
844 */
845#define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
846 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
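/*
 * With BTRFS_MAX_UNCOMPRESSED at 128KiB, as assumed above, this works out to
 * 131072 * 16 / 256 = 8192 bytes, matching the "at most 8KB of data per data
 * range" bound described in the comment above.
 */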
847
848struct bucket_item {
849 u32 count;
850};
851
852struct heuristic_ws {
853 /* Partial copy of input data */
854 u8 *sample;
855 u32 sample_size;
856 /* Buckets store counters for each byte value */
857 struct bucket_item *bucket;
858 /* Sorting buffer */
859 struct bucket_item *bucket_b;
860 struct list_head list;
861};
862
863static struct workspace_manager heuristic_wsm;
864
865static void free_heuristic_ws(struct list_head *ws)
866{
867 struct heuristic_ws *workspace;
868
869 workspace = list_entry(ws, struct heuristic_ws, list);
870
871 kvfree(workspace->sample);
872 kfree(workspace->bucket);
873 kfree(workspace->bucket_b);
874 kfree(workspace);
875}
876
877static struct list_head *alloc_heuristic_ws(unsigned int level)
878{
879 struct heuristic_ws *ws;
880
881 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
882 if (!ws)
883 return ERR_PTR(-ENOMEM);
884
885 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
886 if (!ws->sample)
887 goto fail;
888
889 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
890 if (!ws->bucket)
891 goto fail;
892
893 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
894 if (!ws->bucket_b)
895 goto fail;
896
897 INIT_LIST_HEAD(&ws->list);
898 return &ws->list;
899fail:
900 free_heuristic_ws(&ws->list);
901 return ERR_PTR(-ENOMEM);
902}
903
904const struct btrfs_compress_op btrfs_heuristic_compress = {
905 .workspace_manager = &heuristic_wsm,
906};
907
908static const struct btrfs_compress_op * const btrfs_compress_op[] = {
909 /* The heuristic is represented as compression type 0 */
910 &btrfs_heuristic_compress,
911 &btrfs_zlib_compress,
912 &btrfs_lzo_compress,
913 &btrfs_zstd_compress,
914};
915
916static struct list_head *alloc_workspace(int type, unsigned int level)
917{
918 switch (type) {
919 case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
920 case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
921 case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
922 case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
923 default:
924 /*
925 * This can't happen, the type is validated several times
926 * before we get here.
927 */
928 BUG();
929 }
930}
931
932static void free_workspace(int type, struct list_head *ws)
933{
934 switch (type) {
935 case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
936 case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
937 case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
938 case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
939 default:
940 /*
941 * This can't happen, the type is validated several times
942 * before we get here.
943 */
944 BUG();
945 }
946}
947
948static void btrfs_init_workspace_manager(int type)
949{
950 struct workspace_manager *wsm;
951 struct list_head *workspace;
952
953 wsm = btrfs_compress_op[type]->workspace_manager;
954 INIT_LIST_HEAD(&wsm->idle_ws);
955 spin_lock_init(&wsm->ws_lock);
956 atomic_set(&wsm->total_ws, 0);
957 init_waitqueue_head(&wsm->ws_wait);
958
959 /*
960 * Preallocate one workspace for each compression type so we can
961 * guarantee forward progress in the worst case
962 */
963 workspace = alloc_workspace(type, 0);
964 if (IS_ERR(workspace)) {
965 pr_warn(
966 "BTRFS: cannot preallocate compression workspace, will try later\n");
967 } else {
968 atomic_set(&wsm->total_ws, 1);
969 wsm->free_ws = 1;
970 list_add(workspace, &wsm->idle_ws);
971 }
972}
973
974static void btrfs_cleanup_workspace_manager(int type)
975{
976 struct workspace_manager *wsman;
977 struct list_head *ws;
978
979 wsman = btrfs_compress_op[type]->workspace_manager;
980 while (!list_empty(&wsman->idle_ws)) {
981 ws = wsman->idle_ws.next;
982 list_del(ws);
983 free_workspace(type, ws);
984 atomic_dec(&wsman->total_ws);
985 }
986}
987
988/*
989 * This finds an available workspace or allocates a new one.
990 * If it's not possible to allocate a new one, wait until one becomes
991 * available. Preallocation provides a forward progress guarantee and we do
992 * not return errors.
993 */
994struct list_head *btrfs_get_workspace(int type, unsigned int level)
995{
996 struct workspace_manager *wsm;
997 struct list_head *workspace;
998 int cpus = num_online_cpus();
999 unsigned nofs_flag;
1000 struct list_head *idle_ws;
1001 spinlock_t *ws_lock;
1002 atomic_t *total_ws;
1003 wait_queue_head_t *ws_wait;
1004 int *free_ws;
1005
1006 wsm = btrfs_compress_op[type]->workspace_manager;
1007 idle_ws = &wsm->idle_ws;
1008 ws_lock = &wsm->ws_lock;
1009 total_ws = &wsm->total_ws;
1010 ws_wait = &wsm->ws_wait;
1011 free_ws = &wsm->free_ws;
1012
1013again:
1014 spin_lock(ws_lock);
1015 if (!list_empty(idle_ws)) {
1016 workspace = idle_ws->next;
1017 list_del(workspace);
1018 (*free_ws)--;
1019 spin_unlock(ws_lock);
1020 return workspace;
1021
1022 }
1023 if (atomic_read(total_ws) > cpus) {
1024 DEFINE_WAIT(wait);
1025
1026 spin_unlock(ws_lock);
1027 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1028 if (atomic_read(total_ws) > cpus && !*free_ws)
1029 schedule();
1030 finish_wait(ws_wait, &wait);
1031 goto again;
1032 }
1033 atomic_inc(total_ws);
1034 spin_unlock(ws_lock);
1035
1036 /*
1037 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1038 * to turn it off here because we might get called from the restricted
1039 * context of btrfs_compress_bio/btrfs_compress_pages
1040 */
1041 nofs_flag = memalloc_nofs_save();
1042 workspace = alloc_workspace(type, level);
1043 memalloc_nofs_restore(nofs_flag);
1044
1045 if (IS_ERR(workspace)) {
1046 atomic_dec(total_ws);
1047 wake_up(ws_wait);
1048
1049 /*
1050 * Do not return the error but go back to waiting. There's a
1051 * workspace preallocated for each type and the compression
1052 * time is bounded so we get to a workspace eventually. This
1053 * makes our caller's life easier.
1054 *
1055 * To prevent silent and low-probability deadlocks (when the
1056 * initial preallocation fails), check if there are any
1057 * workspaces at all.
1058 */
1059 if (atomic_read(total_ws) == 0) {
1060 static DEFINE_RATELIMIT_STATE(_rs,
1061 /* once per minute */ 60 * HZ,
1062 /* no burst */ 1);
1063
1064 if (__ratelimit(&_rs)) {
1065 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1066 }
1067 }
1068 goto again;
1069 }
1070 return workspace;
1071}
1072
1073static struct list_head *get_workspace(int type, int level)
1074{
1075 switch (type) {
1076 case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1077 case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1078 case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
1079 case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1080 default:
1081 /*
1082 * This can't happen, the type is validated several times
1083 * before we get here.
1084 */
1085 BUG();
1086 }
1087}
1088
1089/*
1090 * put a workspace struct back on the list or free it if we have enough
1091 * idle ones sitting around
1092 */
1093void btrfs_put_workspace(int type, struct list_head *ws)
1094{
1095 struct workspace_manager *wsm;
1096 struct list_head *idle_ws;
1097 spinlock_t *ws_lock;
1098 atomic_t *total_ws;
1099 wait_queue_head_t *ws_wait;
1100 int *free_ws;
1101
1102 wsm = btrfs_compress_op[type]->workspace_manager;
1103 idle_ws = &wsm->idle_ws;
1104 ws_lock = &wsm->ws_lock;
1105 total_ws = &wsm->total_ws;
1106 ws_wait = &wsm->ws_wait;
1107 free_ws = &wsm->free_ws;
1108
1109 spin_lock(ws_lock);
1110 if (*free_ws <= num_online_cpus()) {
1111 list_add(ws, idle_ws);
1112 (*free_ws)++;
1113 spin_unlock(ws_lock);
1114 goto wake;
1115 }
1116 spin_unlock(ws_lock);
1117
1118 free_workspace(type, ws);
1119 atomic_dec(total_ws);
1120wake:
1121 cond_wake_up(ws_wait);
1122}
1123
1124static void put_workspace(int type, struct list_head *ws)
1125{
1126 switch (type) {
1127 case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1128 case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1129 case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
1130 case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1131 default:
1132 /*
1133 * This can't happen, the type is validated several times
1134 * before we get here.
1135 */
1136 BUG();
1137 }
1138}
1139
1140/*
1141 * Adjust @level according to the limits of the compression algorithm or
1142 * fallback to default
1143 */
1144static unsigned int btrfs_compress_set_level(int type, unsigned level)
1145{
1146 const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1147
1148 if (level == 0)
1149 level = ops->default_level;
1150 else
1151 level = min(level, ops->max_level);
1152
1153 return level;
1154}
1155
1156/*
1157 * Given an address space and start and length, compress the bytes into @pages
1158 * that are allocated on demand.
1159 *
1160 * @type_level is encoded algorithm and level, where level 0 means whatever
1161 * default the algorithm chooses and is opaque here;
1162 * - the compression algorithm is encoded in bits 0-3
1163 * - the level is encoded in bits 4-7
1164 *
1165 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1166 * and returns number of actually allocated pages
1167 *
1168 * @total_in is used to return the number of bytes actually read. It
1169 * may be smaller than the input length if we had to exit early because we
1170 * ran out of room in the pages array or because we cross the
1171 * max_out threshold.
1172 *
1173 * @total_out is an in/out parameter, must be set to the input length and will
1174 * be also used to return the total number of compressed bytes
1175 *
1176 * @max_out tells us the max number of bytes that we're allowed to
1177 * stuff into pages
1178 */
1179int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1180 u64 start, struct page **pages,
1181 unsigned long *out_pages,
1182 unsigned long *total_in,
1183 unsigned long *total_out)
1184{
1185 int type = btrfs_compress_type(type_level);
1186 int level = btrfs_compress_level(type_level);
1187 struct list_head *workspace;
1188 int ret;
1189
1190 level = btrfs_compress_set_level(type, level);
1191 workspace = get_workspace(type, level);
1192 ret = compression_compress_pages(type, workspace, mapping, start, pages,
1193 out_pages, total_in, total_out);
1194 put_workspace(type, workspace);
1195 return ret;
1196}
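/*
 * Hypothetical caller sketch (not taken from this file, local variable names
 * are made up): encode zlib at level 3 in the low/high nibbles, tell the
 * function how many pages may be allocated and how long the input is, then
 * read back what was actually produced:
 *
 *	unsigned long nr_pages = ARRAY_SIZE(pages);	// max pages in, used pages out
 *	unsigned long total_in = 0;			// bytes consumed, returned
 *	unsigned long total_out = len;			// input length in, compressed size out
 *
 *	ret = btrfs_compress_pages(BTRFS_COMPRESS_ZLIB | (3 << 4), mapping,
 *				   start, pages, &nr_pages, &total_in,
 *				   &total_out);
 */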
1197
1198/*
1199 * pages_in is an array of pages with compressed data.
1200 *
1201 * disk_start is the starting logical offset of this array in the file
1202 *
1203 * orig_bio contains the pages from the file that we want to decompress into
1204 *
1205 * srclen is the number of bytes in pages_in
1206 *
1207 * The basic idea is that we have a bio that was created by readpages.
1208 * The pages in the bio are for the uncompressed data, and they may not
1209 * be contiguous. They all correspond to the range of bytes covered by
1210 * the compressed extent.
1211 */
1212static int btrfs_decompress_bio(struct compressed_bio *cb)
1213{
1214 struct list_head *workspace;
1215 int ret;
1216 int type = cb->compress_type;
1217
1218 workspace = get_workspace(type, 0);
1219 ret = compression_decompress_bio(type, workspace, cb);
1220 put_workspace(type, workspace);
1221
1222 return ret;
1223}
1224
1225/*
1226 * a less complex decompression routine. Our compressed data fits in a
1227 * single page, and we want to read a single page out of it.
1228 * start_byte tells us the offset into the compressed data we're interested in
1229 */
1230int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1231 unsigned long start_byte, size_t srclen, size_t destlen)
1232{
1233 struct list_head *workspace;
1234 int ret;
1235
1236 workspace = get_workspace(type, 0);
1237 ret = compression_decompress(type, workspace, data_in, dest_page,
1238 start_byte, srclen, destlen);
1239 put_workspace(type, workspace);
1240
1241 return ret;
1242}
1243
1244void __init btrfs_init_compress(void)
1245{
1246 btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1247 btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1248 btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1249 zstd_init_workspace_manager();
1250}
1251
1252void __cold btrfs_exit_compress(void)
1253{
1254 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1255 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1256 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1257 zstd_cleanup_workspace_manager();
1258}
1259
1260/*
1261 * Copy uncompressed data from working buffer to pages.
1262 *
1263 * buf_start is the byte offset of the start of our working buffer, relative to the start of the uncompressed data.
1264 *
1265 * total_out is the last byte of the buffer
1266 */
1267int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1268 unsigned long total_out, u64 disk_start,
1269 struct bio *bio)
1270{
1271 unsigned long buf_offset;
1272 unsigned long current_buf_start;
1273 unsigned long start_byte;
1274 unsigned long prev_start_byte;
1275 unsigned long working_bytes = total_out - buf_start;
1276 unsigned long bytes;
1277 char *kaddr;
1278 struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1279
1280 /*
1281 * start byte is the first byte of the page we're currently
1282 * copying into relative to the start of the compressed data.
1283 */
1284 start_byte = page_offset(bvec.bv_page) - disk_start;
1285
1286 /* we haven't yet hit data corresponding to this page */
1287 if (total_out <= start_byte)
1288 return 1;
1289
1290 /*
1291 * the start of the data we care about is offset into
1292 * the middle of our working buffer
1293 */
1294 if (total_out > start_byte && buf_start < start_byte) {
1295 buf_offset = start_byte - buf_start;
1296 working_bytes -= buf_offset;
1297 } else {
1298 buf_offset = 0;
1299 }
1300 current_buf_start = buf_start;
1301
1302 /* copy bytes from the working buffer into the pages */
1303 while (working_bytes > 0) {
1304 bytes = min_t(unsigned long, bvec.bv_len,
1305 PAGE_SIZE - (buf_offset % PAGE_SIZE));
1306 bytes = min(bytes, working_bytes);
1307
1308 kaddr = kmap_atomic(bvec.bv_page);
1309 memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1310 kunmap_atomic(kaddr);
1311 flush_dcache_page(bvec.bv_page);
1312
1313 buf_offset += bytes;
1314 working_bytes -= bytes;
1315 current_buf_start += bytes;
1316
1317 /* check if we need to pick another page */
1318 bio_advance(bio, bytes);
1319 if (!bio->bi_iter.bi_size)
1320 return 0;
1321 bvec = bio_iter_iovec(bio, bio->bi_iter);
1322 prev_start_byte = start_byte;
1323 start_byte = page_offset(bvec.bv_page) - disk_start;
1324
1325 /*
1326 * We need to make sure we're only adjusting
1327 * our offset into compression working buffer when
1328 * we're switching pages. Otherwise we can incorrectly
1329 * keep copying when we were actually done.
1330 */
1331 if (start_byte != prev_start_byte) {
1332 /*
1333 * make sure our new page is covered by this
1334 * working buffer
1335 */
1336 if (total_out <= start_byte)
1337 return 1;
1338
1339 /*
1340 * the next page in the biovec might not be adjacent
1341 * to the last page, but it might still be found
1342 * inside this working buffer. bump our offset pointer
1343 */
1344 if (total_out > start_byte &&
1345 current_buf_start < start_byte) {
1346 buf_offset = start_byte - buf_start;
1347 working_bytes = total_out - start_byte;
1348 current_buf_start = buf_start + buf_offset;
1349 }
1350 }
1351 }
1352
1353 return 1;
1354}
1355
1356/*
1357 * Shannon Entropy calculation
1358 *
1359 * Pure byte distribution analysis fails to determine compressibility of data.
1360 * Try calculating entropy to estimate the average minimum number of bits
1361 * needed to encode the sampled data.
1362 *
1363 * For convenience, return the percentage of needed bits, instead of amount of
1364 * bits directly.
1365 *
1366 * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte entropy
1367 * and is likely compressible
1368 *
1369 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1370 *
1371 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1372 */
1373#define ENTROPY_LVL_ACEPTABLE (65)
1374#define ENTROPY_LVL_HIGH (80)
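/*
 * Intuition for the thresholds above (informal): a sample dominated by a
 * single byte value needs close to 0 bits per byte and scores near 0%, while
 * uniformly random bytes need the full 8 bits and score near 100%. Anything
 * below ENTROPY_LVL_ACEPTABLE is therefore treated as likely compressible.
 */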
1375
1376/*
1377 * For increased precision in the shannon_entropy calculation,
1378 * let's do pow(n, M) to keep more digits after the decimal point:
1379 *
1380 * - maximum int bit length is 64
1381 * - ilog2(MAX_SAMPLE_SIZE) -> 13
1382 * - 13 * 4 = 52 < 64 -> M = 4
1383 *
1384 * So use pow(n, 4).
1385 */
1386static inline u32 ilog2_w(u64 n)
1387{
1388 return ilog2(n * n * n * n);
1389}
1390
1391static u32 shannon_entropy(struct heuristic_ws *ws)
1392{
1393 const u32 entropy_max = 8 * ilog2_w(2);
1394 u32 entropy_sum = 0;
1395 u32 p, p_base, sz_base;
1396 u32 i;
1397
1398 sz_base = ilog2_w(ws->sample_size);
1399 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1400 p = ws->bucket[i].count;
1401 p_base = ilog2_w(p);
1402 entropy_sum += p * (sz_base - p_base);
1403 }
1404
1405 entropy_sum /= ws->sample_size;
1406 return entropy_sum * 100 / entropy_max;
1407}
1408
1409#define RADIX_BASE 4U
1410#define COUNTERS_SIZE (1U << RADIX_BASE)
1411
1412static u8 get4bits(u64 num, int shift) {
1413 u8 low4bits;
1414
1415 num >>= shift;
1416 /* Reverse order */
1417 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1418 return low4bits;
1419}
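/*
 * Example of the reversed digit mapping above: for num = 0x35 and shift = 4
 * the extracted digit is 0x3, which maps to 15 - 3 = 12. Larger digits get
 * smaller keys, so the radix sort below effectively sorts the buckets in
 * descending order of count.
 */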
1420
1421/*
1422 * Use 4 bits as radix base
1423 * Use 16 u32 counters for calculating new position in buf array
1424 *
1425 * @array - array that will be sorted
1426 * @array_buf - buffer array to store sorting results
1427 * must be equal in size to @array
1428 * @num - array size
1429 */
1430static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1431 int num)
1432{
1433 u64 max_num;
1434 u64 buf_num;
1435 u32 counters[COUNTERS_SIZE];
1436 u32 new_addr;
1437 u32 addr;
1438 int bitlen;
1439 int shift;
1440 int i;
1441
1442 /*
1443 * Try to avoid useless loop iterations for small numbers stored in big
1444 * counters. Example: 48 33 4 ... in 64bit array
1445 */
1446 max_num = array[0].count;
1447 for (i = 1; i < num; i++) {
1448 buf_num = array[i].count;
1449 if (buf_num > max_num)
1450 max_num = buf_num;
1451 }
1452
1453 buf_num = ilog2(max_num);
1454 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1455
1456 shift = 0;
1457 while (shift < bitlen) {
1458 memset(counters, 0, sizeof(counters));
1459
1460 for (i = 0; i < num; i++) {
1461 buf_num = array[i].count;
1462 addr = get4bits(buf_num, shift);
1463 counters[addr]++;
1464 }
1465
1466 for (i = 1; i < COUNTERS_SIZE; i++)
1467 counters[i] += counters[i - 1];
1468
1469 for (i = num - 1; i >= 0; i--) {
1470 buf_num = array[i].count;
1471 addr = get4bits(buf_num, shift);
1472 counters[addr]--;
1473 new_addr = counters[addr];
1474 array_buf[new_addr] = array[i];
1475 }
1476
1477 shift += RADIX_BASE;
1478
1479 /*
1480 * A normal radix sort expects to move data from a temporary array to
1481 * the main one, but that requires some CPU time. Avoid that by doing
1482 * another sort iteration into the original array instead of a
1483 * memcpy()
1484 */
1485 memset(counters, 0, sizeof(counters));
1486
1487 for (i = 0; i < num; i ++) {
1488 buf_num = array_buf[i].count;
1489 addr = get4bits(buf_num, shift);
1490 counters[addr]++;
1491 }
1492
1493 for (i = 1; i < COUNTERS_SIZE; i++)
1494 counters[i] += counters[i - 1];
1495
1496 for (i = num - 1; i >= 0; i--) {
1497 buf_num = array_buf[i].count;
1498 addr = get4bits(buf_num, shift);
1499 counters[addr]--;
1500 new_addr = counters[addr];
1501 array[new_addr] = array_buf[i];
1502 }
1503
1504 shift += RADIX_BASE;
1505 }
1506}
1507
1508/*
1509 * Size of the core byte set - how many bytes cover 90% of the sample
1510 *
1511 * There are several types of structured binary data that use nearly all byte
1512 * values. The distribution can be uniform and counts in all buckets will be
1513 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1514 *
1515 * Another possibility is a normal (Gaussian) distribution, where the data
1516 * could potentially be compressible, but we have to take a few more steps
1517 * to decide how much.
1518 *
1519 * @BYTE_CORE_SET_LOW - the main part of the byte values repeats frequently,
1520 * compression algorithms can easily fix that
1521 * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high
1522 * probability is not compressible
1523 */
1524#define BYTE_CORE_SET_LOW (64)
1525#define BYTE_CORE_SET_HIGH (200)
1526
1527static int byte_core_set_size(struct heuristic_ws *ws)
1528{
1529 u32 i;
1530 u32 coreset_sum = 0;
1531 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1532 struct bucket_item *bucket = ws->bucket;
1533
1534 /* Sort in reverse order */
1535 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1536
1537 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1538 coreset_sum += bucket[i].count;
1539
1540 if (coreset_sum > core_set_threshold)
1541 return i;
1542
1543 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1544 coreset_sum += bucket[i].count;
1545 if (coreset_sum > core_set_threshold)
1546 break;
1547 }
1548
1549 return i;
1550}
1551
1552/*
1553 * Count byte values in buckets.
1554 * This heuristic can detect textual data (configs, xml, json, html, etc),
1555 * because in most text-like data the byte set is restricted to a limited
1556 * number of possible characters, and that restriction in most cases makes
1557 * the data easy to compress.
1558 *
1559 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1560 * less - compressible
1561 * more - need additional analysis
1562 */
1563#define BYTE_SET_THRESHOLD (64)
1564
1565static u32 byte_set_size(const struct heuristic_ws *ws)
1566{
1567 u32 i;
1568 u32 byte_set_size = 0;
1569
1570 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1571 if (ws->bucket[i].count > 0)
1572 byte_set_size++;
1573 }
1574
1575 /*
1576 * Continue collecting count of byte values in buckets. If the byte
1577 * set size is bigger than the threshold, it's pointless to continue,
1578 * the detection technique would fail for this type of data.
1579 */
1580 for (; i < BUCKET_SIZE; i++) {
1581 if (ws->bucket[i].count > 0) {
1582 byte_set_size++;
1583 if (byte_set_size > BYTE_SET_THRESHOLD)
1584 return byte_set_size;
1585 }
1586 }
1587
1588 return byte_set_size;
1589}
1590
1591static bool sample_repeated_patterns(struct heuristic_ws *ws)
1592{
1593 const u32 half_of_sample = ws->sample_size / 2;
1594 const u8 *data = ws->sample;
1595
1596 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1597}
1598
1599static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1600 struct heuristic_ws *ws)
1601{
1602 struct page *page;
1603 u64 index, index_end;
1604 u32 i, curr_sample_pos;
1605 u8 *in_data;
1606
1607 /*
1608 * Compression handles the input data by chunks of 128KiB
1609 * (defined by BTRFS_MAX_UNCOMPRESSED)
1610 *
1611 * We do the same for the heuristic and loop over the whole range.
1612 *
1613 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1614 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1615 */
1616 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1617 end = start + BTRFS_MAX_UNCOMPRESSED;
1618
1619 index = start >> PAGE_SHIFT;
1620 index_end = end >> PAGE_SHIFT;
1621
1622 /* Don't miss unaligned end */
1623 if (!IS_ALIGNED(end, PAGE_SIZE))
1624 index_end++;
1625
1626 curr_sample_pos = 0;
1627 while (index < index_end) {
1628 page = find_get_page(inode->i_mapping, index);
1629 in_data = kmap(page);
1630 /* Handle case where the start is not aligned to PAGE_SIZE */
1631 i = start % PAGE_SIZE;
1632 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1633 /* Don't sample any garbage from the last page */
1634 if (start > end - SAMPLING_READ_SIZE)
1635 break;
1636 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1637 SAMPLING_READ_SIZE);
1638 i += SAMPLING_INTERVAL;
1639 start += SAMPLING_INTERVAL;
1640 curr_sample_pos += SAMPLING_READ_SIZE;
1641 }
1642 kunmap(page);
1643 put_page(page);
1644
1645 index++;
1646 }
1647
1648 ws->sample_size = curr_sample_pos;
1649}
1650
1651/*
1652 * Compression heuristic.
1653 *
1654 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1655 * quickly (compared to direct compression) detect data characteristics
1656 * (compressible/incompressible) to avoid wasting CPU time on incompressible
1657 * data.
1658 *
1659 * The following types of analysis can be performed:
1660 * - detect mostly zero data
1661 * - detect data with low "byte set" size (text, etc)
1662 * - detect data with low/high "core byte" set
1663 *
1664 * Return non-zero if the compression should be done, 0 otherwise.
1665 */
1666int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1667{
1668 struct list_head *ws_list = get_workspace(0, 0);
1669 struct heuristic_ws *ws;
1670 u32 i;
1671 u8 byte;
1672 int ret = 0;
1673
1674 ws = list_entry(ws_list, struct heuristic_ws, list);
1675
1676 heuristic_collect_sample(inode, start, end, ws);
1677
1678 if (sample_repeated_patterns(ws)) {
1679 ret = 1;
1680 goto out;
1681 }
1682
1683 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1684
1685 for (i = 0; i < ws->sample_size; i++) {
1686 byte = ws->sample[i];
1687 ws->bucket[byte].count++;
1688 }
1689
1690 i = byte_set_size(ws);
1691 if (i < BYTE_SET_THRESHOLD) {
1692 ret = 2;
1693 goto out;
1694 }
1695
1696 i = byte_core_set_size(ws);
1697 if (i <= BYTE_CORE_SET_LOW) {
1698 ret = 3;
1699 goto out;
1700 }
1701
1702 if (i >= BYTE_CORE_SET_HIGH) {
1703 ret = 0;
1704 goto out;
1705 }
1706
1707 i = shannon_entropy(ws);
1708 if (i <= ENTROPY_LVL_ACEPTABLE) {
1709 ret = 4;
1710 goto out;
1711 }
1712
1713 /*
1714 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1715 * needed to give green light to compression.
1716 *
1717 * For now just assume that compression at that level is not worth the
1718 * resources because:
1719 *
1720 * 1. it is possible to defrag the data later
1721 *
1722 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1723 * values, every bucket has counter at level ~54. The heuristic would
1724 * be confused. This can happen when data have some internal repeated
1725 * patterns like "abbacbbc...". This can be detected by analyzing
1726 * pairs of bytes, which is too costly.
1727 */
1728 if (i < ENTROPY_LVL_HIGH) {
1729 ret = 5;
1730 goto out;
1731 } else {
1732 ret = 0;
1733 goto out;
1734 }
1735
1736out:
1737 put_workspace(0, ws_list);
1738 return ret;
1739}
1740
1741/*
1742 * Convert the compression suffix (eg. after "zlib" starting with ":") to
1743 * level; an unrecognized string sets the default level
1744 */
1745unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1746{
1747 unsigned int level = 0;
1748 int ret;
1749
1750 if (!type)
1751 return 0;
1752
1753 if (str[0] == ':') {
1754 ret = kstrtouint(str + 1, 10, &level);
1755 if (ret)
1756 level = 0;
1757 }
1758
1759 level = btrfs_compress_set_level(type, level);
1760
1761 return level;
1762}
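/*
 * Informal example, assuming the caller passes the suffix of the mount
 * option as @str: for "compress=zstd:3" the string seen here is ":3" and the
 * result is level 3 (clamped to the algorithm's maximum), while a missing or
 * unparsable suffix falls back to the algorithm's default level.
 */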
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6#include <linux/kernel.h>
7#include <linux/bio.h>
8#include <linux/buffer_head.h>
9#include <linux/file.h>
10#include <linux/fs.h>
11#include <linux/pagemap.h>
12#include <linux/highmem.h>
13#include <linux/time.h>
14#include <linux/init.h>
15#include <linux/string.h>
16#include <linux/backing-dev.h>
17#include <linux/mpage.h>
18#include <linux/swap.h>
19#include <linux/writeback.h>
20#include <linux/bit_spinlock.h>
21#include <linux/slab.h>
22#include <linux/sched/mm.h>
23#include <linux/log2.h>
24#include "ctree.h"
25#include "disk-io.h"
26#include "transaction.h"
27#include "btrfs_inode.h"
28#include "volumes.h"
29#include "ordered-data.h"
30#include "compression.h"
31#include "extent_io.h"
32#include "extent_map.h"
33
34static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
35
36const char* btrfs_compress_type2str(enum btrfs_compression_type type)
37{
38 switch (type) {
39 case BTRFS_COMPRESS_ZLIB:
40 case BTRFS_COMPRESS_LZO:
41 case BTRFS_COMPRESS_ZSTD:
42 case BTRFS_COMPRESS_NONE:
43 return btrfs_compress_types[type];
44 }
45
46 return NULL;
47}
48
49static int btrfs_decompress_bio(struct compressed_bio *cb);
50
51static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
52 unsigned long disk_size)
53{
54 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
55
56 return sizeof(struct compressed_bio) +
57 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
58}
59
60static int check_compressed_csum(struct btrfs_inode *inode,
61 struct compressed_bio *cb,
62 u64 disk_start)
63{
64 int ret;
65 struct page *page;
66 unsigned long i;
67 char *kaddr;
68 u32 csum;
69 u32 *cb_sum = &cb->sums;
70
71 if (inode->flags & BTRFS_INODE_NODATASUM)
72 return 0;
73
74 for (i = 0; i < cb->nr_pages; i++) {
75 page = cb->compressed_pages[i];
76 csum = ~(u32)0;
77
78 kaddr = kmap_atomic(page);
79 csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
80 btrfs_csum_final(csum, (u8 *)&csum);
81 kunmap_atomic(kaddr);
82
83 if (csum != *cb_sum) {
84 btrfs_print_data_csum_error(inode, disk_start, csum,
85 *cb_sum, cb->mirror_num);
86 ret = -EIO;
87 goto fail;
88 }
89 cb_sum++;
90
91 }
92 ret = 0;
93fail:
94 return ret;
95}
96
97/* when we finish reading compressed pages from the disk, we
98 * decompress them and then run the bio end_io routines on the
99 * decompressed pages (in the inode address space).
100 *
101 * This allows the checksumming and other IO error handling routines
102 * to work normally
103 *
104 * The compressed pages are freed here, and it must be run
105 * in process context
106 */
107static void end_compressed_bio_read(struct bio *bio)
108{
109 struct compressed_bio *cb = bio->bi_private;
110 struct inode *inode;
111 struct page *page;
112 unsigned long index;
113 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
114 int ret = 0;
115
116 if (bio->bi_status)
117 cb->errors = 1;
118
119 /* if there are more bios still pending for this compressed
120 * extent, just exit
121 */
122 if (!refcount_dec_and_test(&cb->pending_bios))
123 goto out;
124
125 /*
126 * Record the correct mirror_num in cb->orig_bio so that
127 * read-repair can work properly.
128 */
129 ASSERT(btrfs_io_bio(cb->orig_bio));
130 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
131 cb->mirror_num = mirror;
132
133 /*
134 * Some IO in this cb have failed, just skip checksum as there
135 * is no way it could be correct.
136 */
137 if (cb->errors == 1)
138 goto csum_failed;
139
140 inode = cb->inode;
141 ret = check_compressed_csum(BTRFS_I(inode), cb,
142 (u64)bio->bi_iter.bi_sector << 9);
143 if (ret)
144 goto csum_failed;
145
146 /* ok, we're the last bio for this extent, lets start
147 * the decompression.
148 */
149 ret = btrfs_decompress_bio(cb);
150
151csum_failed:
152 if (ret)
153 cb->errors = 1;
154
155 /* release the compressed pages */
156 index = 0;
157 for (index = 0; index < cb->nr_pages; index++) {
158 page = cb->compressed_pages[index];
159 page->mapping = NULL;
160 put_page(page);
161 }
162
163 /* do io completion on the original bio */
164 if (cb->errors) {
165 bio_io_error(cb->orig_bio);
166 } else {
167 int i;
168 struct bio_vec *bvec;
169
170 /*
171 * we have verified the checksum already, set page
172 * checked so the end_io handlers know about it
173 */
174 ASSERT(!bio_flagged(bio, BIO_CLONED));
175 bio_for_each_segment_all(bvec, cb->orig_bio, i)
176 SetPageChecked(bvec->bv_page);
177
178 bio_endio(cb->orig_bio);
179 }
180
181 /* finally free the cb struct */
182 kfree(cb->compressed_pages);
183 kfree(cb);
184out:
185 bio_put(bio);
186}
187
188/*
189 * Clear the writeback bits on all of the file
190 * pages for a compressed write
191 */
192static noinline void end_compressed_writeback(struct inode *inode,
193 const struct compressed_bio *cb)
194{
195 unsigned long index = cb->start >> PAGE_SHIFT;
196 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
197 struct page *pages[16];
198 unsigned long nr_pages = end_index - index + 1;
199 int i;
200 int ret;
201
202 if (cb->errors)
203 mapping_set_error(inode->i_mapping, -EIO);
204
205 while (nr_pages > 0) {
206 ret = find_get_pages_contig(inode->i_mapping, index,
207 min_t(unsigned long,
208 nr_pages, ARRAY_SIZE(pages)), pages);
209 if (ret == 0) {
210 nr_pages -= 1;
211 index += 1;
212 continue;
213 }
214 for (i = 0; i < ret; i++) {
215 if (cb->errors)
216 SetPageError(pages[i]);
217 end_page_writeback(pages[i]);
218 put_page(pages[i]);
219 }
220 nr_pages -= ret;
221 index += ret;
222 }
223 /* the inode may be gone now */
224}
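
/*
 * Example of the batching above: with 4KiB pages a 128KiB compressed write
 * spans 32 file pages, so the loop typically makes two
 * find_get_pages_contig() calls of up to 16 pages each (ARRAY_SIZE(pages));
 * pages already dropped from the page cache are skipped by advancing the
 * index one page at a time.
 */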
225
226/*
227 * do the cleanup once all the compressed pages hit the disk.
228 * This will clear writeback on the file pages and free the compressed
229 * pages.
230 *
231 * This also calls the writeback end hooks for the file pages so that
232 * metadata and checksums can be updated in the file.
233 */
234static void end_compressed_bio_write(struct bio *bio)
235{
236 struct extent_io_tree *tree;
237 struct compressed_bio *cb = bio->bi_private;
238 struct inode *inode;
239 struct page *page;
240 unsigned long index;
241
242 if (bio->bi_status)
243 cb->errors = 1;
244
245 /* if there are more bios still pending for this compressed
246 * extent, just exit
247 */
248 if (!refcount_dec_and_test(&cb->pending_bios))
249 goto out;
250
251 /* ok, we're the last bio for this extent, step one is to
252 * call back into the FS and do all the end_io operations
253 */
254 inode = cb->inode;
255 tree = &BTRFS_I(inode)->io_tree;
256 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
257 tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
258 cb->start,
259 cb->start + cb->len - 1,
260 NULL,
261 bio->bi_status ?
262 BLK_STS_OK : BLK_STS_NOTSUPP);
263 cb->compressed_pages[0]->mapping = NULL;
264
265 end_compressed_writeback(inode, cb);
266 /* note, our inode could be gone now */
267
268 /*
269 * release the compressed pages, these came from alloc_page and
270 * are not attached to the inode at all
271 */
272 index = 0;
273 for (index = 0; index < cb->nr_pages; index++) {
274 page = cb->compressed_pages[index];
275 page->mapping = NULL;
276 put_page(page);
277 }
278
279 /* finally free the cb struct */
280 kfree(cb->compressed_pages);
281 kfree(cb);
282out:
283 bio_put(bio);
284}
285
286/*
287 * worker function to build and submit bios for previously compressed pages.
288 * The corresponding pages in the inode should be marked for writeback
289 * and the compressed pages should have a reference on them for dropping
290 * when the IO is complete.
291 *
292 * This also checksums the file bytes and gets things ready for
293 * the end io hooks.
294 */
295blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
296 unsigned long len, u64 disk_start,
297 unsigned long compressed_len,
298 struct page **compressed_pages,
299 unsigned long nr_pages,
300 unsigned int write_flags)
301{
302 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
303 struct bio *bio = NULL;
304 struct compressed_bio *cb;
305 unsigned long bytes_left;
306 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
307 int pg_index = 0;
308 struct page *page;
309 u64 first_byte = disk_start;
310 struct block_device *bdev;
311 blk_status_t ret;
312 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
313
314 WARN_ON(start & ((u64)PAGE_SIZE - 1));
315 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
316 if (!cb)
317 return BLK_STS_RESOURCE;
318 refcount_set(&cb->pending_bios, 0);
319 cb->errors = 0;
320 cb->inode = inode;
321 cb->start = start;
322 cb->len = len;
323 cb->mirror_num = 0;
324 cb->compressed_pages = compressed_pages;
325 cb->compressed_len = compressed_len;
326 cb->orig_bio = NULL;
327 cb->nr_pages = nr_pages;
328
329 bdev = fs_info->fs_devices->latest_bdev;
330
331 bio = btrfs_bio_alloc(bdev, first_byte);
332 bio->bi_opf = REQ_OP_WRITE | write_flags;
333 bio->bi_private = cb;
334 bio->bi_end_io = end_compressed_bio_write;
335 refcount_set(&cb->pending_bios, 1);
336
337 /* create and submit bios for the compressed pages */
338 bytes_left = compressed_len;
339 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
340 int submit = 0;
341
342 page = compressed_pages[pg_index];
343 page->mapping = inode->i_mapping;
344 if (bio->bi_iter.bi_size)
345 submit = io_tree->ops->merge_bio_hook(page, 0,
346 PAGE_SIZE,
347 bio, 0);
348
349 page->mapping = NULL;
350 if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
351 PAGE_SIZE) {
352 /*
353 * inc the count before we submit the bio so
354 * we know the end IO handler won't happen before
355 * we inc the count. Otherwise, the cb might get
356 * freed before we're done setting it up
357 */
358 refcount_inc(&cb->pending_bios);
359 ret = btrfs_bio_wq_end_io(fs_info, bio,
360 BTRFS_WQ_ENDIO_DATA);
361 BUG_ON(ret); /* -ENOMEM */
362
363 if (!skip_sum) {
364 ret = btrfs_csum_one_bio(inode, bio, start, 1);
365 BUG_ON(ret); /* -ENOMEM */
366 }
367
368 ret = btrfs_map_bio(fs_info, bio, 0, 1);
369 if (ret) {
370 bio->bi_status = ret;
371 bio_endio(bio);
372 }
373
374 bio = btrfs_bio_alloc(bdev, first_byte);
375 bio->bi_opf = REQ_OP_WRITE | write_flags;
376 bio->bi_private = cb;
377 bio->bi_end_io = end_compressed_bio_write;
378 bio_add_page(bio, page, PAGE_SIZE, 0);
379 }
380 if (bytes_left < PAGE_SIZE) {
381 btrfs_info(fs_info,
382 "bytes left %lu compress len %lu nr %lu",
383 bytes_left, cb->compressed_len, cb->nr_pages);
384 }
385 bytes_left -= PAGE_SIZE;
386 first_byte += PAGE_SIZE;
387 cond_resched();
388 }
389
390 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
391 BUG_ON(ret); /* -ENOMEM */
392
393 if (!skip_sum) {
394 ret = btrfs_csum_one_bio(inode, bio, start, 1);
395 BUG_ON(ret); /* -ENOMEM */
396 }
397
398 ret = btrfs_map_bio(fs_info, bio, 0, 1);
399 if (ret) {
400 bio->bi_status = ret;
401 bio_endio(bio);
402 }
403
404 return 0;
405}
406
407static u64 bio_end_offset(struct bio *bio)
408{
409 struct bio_vec *last = bio_last_bvec_all(bio);
410
411 return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
412}
413
414static noinline int add_ra_bio_pages(struct inode *inode,
415 u64 compressed_end,
416 struct compressed_bio *cb)
417{
418 unsigned long end_index;
419 unsigned long pg_index;
420 u64 last_offset;
421 u64 isize = i_size_read(inode);
422 int ret;
423 struct page *page;
424 unsigned long nr_pages = 0;
425 struct extent_map *em;
426 struct address_space *mapping = inode->i_mapping;
427 struct extent_map_tree *em_tree;
428 struct extent_io_tree *tree;
429 u64 end;
430 int misses = 0;
431
432 last_offset = bio_end_offset(cb->orig_bio);
433 em_tree = &BTRFS_I(inode)->extent_tree;
434 tree = &BTRFS_I(inode)->io_tree;
435
436 if (isize == 0)
437 return 0;
438
439 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
440
441 while (last_offset < compressed_end) {
442 pg_index = last_offset >> PAGE_SHIFT;
443
444 if (pg_index > end_index)
445 break;
446
447 rcu_read_lock();
448 page = radix_tree_lookup(&mapping->i_pages, pg_index);
449 rcu_read_unlock();
450 if (page && !radix_tree_exceptional_entry(page)) {
451 misses++;
452 if (misses > 4)
453 break;
454 goto next;
455 }
456
457 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
458 ~__GFP_FS));
459 if (!page)
460 break;
461
462 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
463 put_page(page);
464 goto next;
465 }
466
467 end = last_offset + PAGE_SIZE - 1;
468 /*
469 * at this point, we have a locked page in the page cache
470 * for these bytes in the file. But, we have to make
471 * sure they map to this compressed extent on disk.
472 */
473 set_page_extent_mapped(page);
474 lock_extent(tree, last_offset, end);
475 read_lock(&em_tree->lock);
476 em = lookup_extent_mapping(em_tree, last_offset,
477 PAGE_SIZE);
478 read_unlock(&em_tree->lock);
479
480 if (!em || last_offset < em->start ||
481 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
482 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
483 free_extent_map(em);
484 unlock_extent(tree, last_offset, end);
485 unlock_page(page);
486 put_page(page);
487 break;
488 }
489 free_extent_map(em);
490
491 if (page->index == end_index) {
492 char *userpage;
493 size_t zero_offset = isize & (PAGE_SIZE - 1);
494
495 if (zero_offset) {
496 int zeros;
497 zeros = PAGE_SIZE - zero_offset;
498 userpage = kmap_atomic(page);
499 memset(userpage + zero_offset, 0, zeros);
500 flush_dcache_page(page);
501 kunmap_atomic(userpage);
502 }
503 }
504
505 ret = bio_add_page(cb->orig_bio, page,
506 PAGE_SIZE, 0);
507
508 if (ret == PAGE_SIZE) {
509 nr_pages++;
510 put_page(page);
511 } else {
512 unlock_extent(tree, last_offset, end);
513 unlock_page(page);
514 put_page(page);
515 break;
516 }
517next:
518 last_offset += PAGE_SIZE;
519 }
520 return 0;
521}
522
523/*
524 * for a compressed read, the bio we get passed has all the inode pages
525 * in it. We don't actually do IO on those pages but allocate new ones
526 * to hold the compressed pages on disk.
527 *
528 * bio->bi_iter.bi_sector points to the compressed extent on disk
529 * bio->bi_io_vec points to all of the inode pages
530 *
531 * After the compressed pages are read, we copy the bytes into the
532 * bio we were passed and then call the bio end_io calls
533 */
534blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
535 int mirror_num, unsigned long bio_flags)
536{
537 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
538 struct extent_io_tree *tree;
539 struct extent_map_tree *em_tree;
540 struct compressed_bio *cb;
541 unsigned long compressed_len;
542 unsigned long nr_pages;
543 unsigned long pg_index;
544 struct page *page;
545 struct block_device *bdev;
546 struct bio *comp_bio;
547 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
548 u64 em_len;
549 u64 em_start;
550 struct extent_map *em;
551 blk_status_t ret = BLK_STS_RESOURCE;
552 int faili = 0;
553 u32 *sums;
554
555 tree = &BTRFS_I(inode)->io_tree;
556 em_tree = &BTRFS_I(inode)->extent_tree;
557
558 /* we need the actual starting offset of this extent in the file */
559 read_lock(&em_tree->lock);
560 em = lookup_extent_mapping(em_tree,
561 page_offset(bio_first_page_all(bio)),
562 PAGE_SIZE);
563 read_unlock(&em_tree->lock);
564 if (!em)
565 return BLK_STS_IOERR;
566
567 compressed_len = em->block_len;
568 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
569 if (!cb)
570 goto out;
571
572 refcount_set(&cb->pending_bios, 0);
573 cb->errors = 0;
574 cb->inode = inode;
575 cb->mirror_num = mirror_num;
576 sums = &cb->sums;
577
578 cb->start = em->orig_start;
579 em_len = em->len;
580 em_start = em->start;
581
582 free_extent_map(em);
583 em = NULL;
584
585 cb->len = bio->bi_iter.bi_size;
586 cb->compressed_len = compressed_len;
587 cb->compress_type = extent_compress_type(bio_flags);
588 cb->orig_bio = bio;
589
590 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
591 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
592 GFP_NOFS);
593 if (!cb->compressed_pages)
594 goto fail1;
595
596 bdev = fs_info->fs_devices->latest_bdev;
597
598 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
599 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
600 __GFP_HIGHMEM);
601 if (!cb->compressed_pages[pg_index]) {
602 faili = pg_index - 1;
603 ret = BLK_STS_RESOURCE;
604 goto fail2;
605 }
606 }
607 faili = nr_pages - 1;
608 cb->nr_pages = nr_pages;
609
610 add_ra_bio_pages(inode, em_start + em_len, cb);
611
612 /* include any pages we added in add_ra_bio_pages */
613 cb->len = bio->bi_iter.bi_size;
614
615 comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
616 bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
617 comp_bio->bi_private = cb;
618 comp_bio->bi_end_io = end_compressed_bio_read;
619 refcount_set(&cb->pending_bios, 1);
620
621 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
622 int submit = 0;
623
624 page = cb->compressed_pages[pg_index];
625 page->mapping = inode->i_mapping;
626 page->index = em_start >> PAGE_SHIFT;
627
628 if (comp_bio->bi_iter.bi_size)
629 submit = tree->ops->merge_bio_hook(page, 0,
630 PAGE_SIZE,
631 comp_bio, 0);
632
633 page->mapping = NULL;
634 if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
635 PAGE_SIZE) {
636 ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
637 BTRFS_WQ_ENDIO_DATA);
638 BUG_ON(ret); /* -ENOMEM */
639
640 /*
641 * inc the count before we submit the bio so
642 * we know the end IO handler won't happen before
643 * we inc the count. Otherwise, the cb might get
644 * freed before we're done setting it up
645 */
646 refcount_inc(&cb->pending_bios);
647
648 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
649 ret = btrfs_lookup_bio_sums(inode, comp_bio,
650 sums);
651 BUG_ON(ret); /* -ENOMEM */
652 }
653 sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
654 fs_info->sectorsize);
655
656 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
657 if (ret) {
658 comp_bio->bi_status = ret;
659 bio_endio(comp_bio);
660 }
661
662 comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
663 bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
664 comp_bio->bi_private = cb;
665 comp_bio->bi_end_io = end_compressed_bio_read;
666
667 bio_add_page(comp_bio, page, PAGE_SIZE, 0);
668 }
669 cur_disk_byte += PAGE_SIZE;
670 }
671
672 ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
673 BUG_ON(ret); /* -ENOMEM */
674
675 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
676 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
677 BUG_ON(ret); /* -ENOMEM */
678 }
679
680 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
681 if (ret) {
682 comp_bio->bi_status = ret;
683 bio_endio(comp_bio);
684 }
685
686 return 0;
687
688fail2:
689 while (faili >= 0) {
690 __free_page(cb->compressed_pages[faili]);
691 faili--;
692 }
693
694 kfree(cb->compressed_pages);
695fail1:
696 kfree(cb);
697out:
698 free_extent_map(em);
699 return ret;
700}
701
702/*
703 * Heuristic uses systematic sampling to collect data from the input data
704 * range, the logic can be tuned by the following constants:
705 *
706 * @SAMPLING_READ_SIZE - how many bytes will be copied from the range for each sample
707 * @SAMPLING_INTERVAL  - stride between the starting offsets of consecutive samples
708 */
709#define SAMPLING_READ_SIZE (16)
710#define SAMPLING_INTERVAL (256)
711
712/*
713 * For statistical analysis of the input data we consider bytes that form a
714 * Galois Field of 256 objects. Each object has an attribute count, ie. how
715 * many times the object appeared in the sample.
716 */
717#define BUCKET_SIZE (256)
718
719/*
720 * The size of the sample is based on a statistical sampling rule of thumb.
721 * The common way is to perform sampling tests as long as the number of
722 * elements in each cell is at least 5.
723 *
724 * Instead of 5, we choose 32 to obtain more accurate results.
725 * If the data contain the maximum number of symbols, which is 256, we obtain a
726 * sample size bound by 8192.
727 *
728 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
729 * from up to 512 locations.
730 */
731#define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
732 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
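
/*
 * Worked example of the bound above with the current constants: one input
 * range is at most BTRFS_MAX_UNCOMPRESSED = 128KiB, samples are taken every
 * SAMPLING_INTERVAL = 256 bytes, so there are at most 131072 / 256 = 512
 * sample locations, and with SAMPLING_READ_SIZE = 16 bytes per location the
 * sample buffer needs at most 512 * 16 = 8192 bytes, which is exactly what
 * MAX_SAMPLE_SIZE evaluates to.
 */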
733
734struct bucket_item {
735 u32 count;
736};
737
738struct heuristic_ws {
739 /* Partial copy of input data */
740 u8 *sample;
741 u32 sample_size;
742 /* Buckets store counters for each byte value */
743 struct bucket_item *bucket;
744 /* Sorting buffer */
745 struct bucket_item *bucket_b;
746 struct list_head list;
747};
748
749static void free_heuristic_ws(struct list_head *ws)
750{
751 struct heuristic_ws *workspace;
752
753 workspace = list_entry(ws, struct heuristic_ws, list);
754
755 kvfree(workspace->sample);
756 kfree(workspace->bucket);
757 kfree(workspace->bucket_b);
758 kfree(workspace);
759}
760
761static struct list_head *alloc_heuristic_ws(void)
762{
763 struct heuristic_ws *ws;
764
765 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
766 if (!ws)
767 return ERR_PTR(-ENOMEM);
768
769 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
770 if (!ws->sample)
771 goto fail;
772
773 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
774 if (!ws->bucket)
775 goto fail;
776
777 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
778 if (!ws->bucket_b)
779 goto fail;
780
781 INIT_LIST_HEAD(&ws->list);
782 return &ws->list;
783fail:
784 free_heuristic_ws(&ws->list);
785 return ERR_PTR(-ENOMEM);
786}
787
788struct workspaces_list {
789 struct list_head idle_ws;
790 spinlock_t ws_lock;
791 /* Number of free workspaces */
792 int free_ws;
793 /* Total number of allocated workspaces */
794 atomic_t total_ws;
795 /* Waiters for a free workspace */
796 wait_queue_head_t ws_wait;
797};
798
799static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
800
801static struct workspaces_list btrfs_heuristic_ws;
802
803static const struct btrfs_compress_op * const btrfs_compress_op[] = {
804 &btrfs_zlib_compress,
805 &btrfs_lzo_compress,
806 &btrfs_zstd_compress,
807};
808
809void __init btrfs_init_compress(void)
810{
811 struct list_head *workspace;
812 int i;
813
814 INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
815 spin_lock_init(&btrfs_heuristic_ws.ws_lock);
816 atomic_set(&btrfs_heuristic_ws.total_ws, 0);
817 init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
818
819 workspace = alloc_heuristic_ws();
820 if (IS_ERR(workspace)) {
821 pr_warn(
822 "BTRFS: cannot preallocate heuristic workspace, will try later\n");
823 } else {
824 atomic_set(&btrfs_heuristic_ws.total_ws, 1);
825 btrfs_heuristic_ws.free_ws = 1;
826 list_add(workspace, &btrfs_heuristic_ws.idle_ws);
827 }
828
829 for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
830 INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
831 spin_lock_init(&btrfs_comp_ws[i].ws_lock);
832 atomic_set(&btrfs_comp_ws[i].total_ws, 0);
833 init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
834
835 /*
836 * Preallocate one workspace for each compression type so
837 * we can guarantee forward progress in the worst case
838 */
839 workspace = btrfs_compress_op[i]->alloc_workspace();
840 if (IS_ERR(workspace)) {
841 pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
842 } else {
843 atomic_set(&btrfs_comp_ws[i].total_ws, 1);
844 btrfs_comp_ws[i].free_ws = 1;
845 list_add(workspace, &btrfs_comp_ws[i].idle_ws);
846 }
847 }
848}
849
850/*
851 * This finds an available workspace or allocates a new one.
852 * If it's not possible to allocate a new one, waits until there's one.
853 * Preallocation guarantees forward progress and we do not return
854 * errors.
855 */
856static struct list_head *__find_workspace(int type, bool heuristic)
857{
858 struct list_head *workspace;
859 int cpus = num_online_cpus();
860 int idx = type - 1;
861 unsigned nofs_flag;
862 struct list_head *idle_ws;
863 spinlock_t *ws_lock;
864 atomic_t *total_ws;
865 wait_queue_head_t *ws_wait;
866 int *free_ws;
867
868 if (heuristic) {
869 idle_ws = &btrfs_heuristic_ws.idle_ws;
870 ws_lock = &btrfs_heuristic_ws.ws_lock;
871 total_ws = &btrfs_heuristic_ws.total_ws;
872 ws_wait = &btrfs_heuristic_ws.ws_wait;
873 free_ws = &btrfs_heuristic_ws.free_ws;
874 } else {
875 idle_ws = &btrfs_comp_ws[idx].idle_ws;
876 ws_lock = &btrfs_comp_ws[idx].ws_lock;
877 total_ws = &btrfs_comp_ws[idx].total_ws;
878 ws_wait = &btrfs_comp_ws[idx].ws_wait;
879 free_ws = &btrfs_comp_ws[idx].free_ws;
880 }
881
882again:
883 spin_lock(ws_lock);
884 if (!list_empty(idle_ws)) {
885 workspace = idle_ws->next;
886 list_del(workspace);
887 (*free_ws)--;
888 spin_unlock(ws_lock);
889 return workspace;
890
891 }
892 if (atomic_read(total_ws) > cpus) {
893 DEFINE_WAIT(wait);
894
895 spin_unlock(ws_lock);
896 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
897 if (atomic_read(total_ws) > cpus && !*free_ws)
898 schedule();
899 finish_wait(ws_wait, &wait);
900 goto again;
901 }
902 atomic_inc(total_ws);
903 spin_unlock(ws_lock);
904
905 /*
906 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
907 * to turn it off here because we might get called from the restricted
908 * context of btrfs_compress_bio/btrfs_compress_pages
909 */
910 nofs_flag = memalloc_nofs_save();
911 if (heuristic)
912 workspace = alloc_heuristic_ws();
913 else
914 workspace = btrfs_compress_op[idx]->alloc_workspace();
915 memalloc_nofs_restore(nofs_flag);
916
917 if (IS_ERR(workspace)) {
918 atomic_dec(total_ws);
919 wake_up(ws_wait);
920
921 /*
922 * Do not return the error but go back to waiting. There's a
923 * workspace preallocated for each type and the compression
924 * time is bounded so we get to a workspace eventually. This
925 * makes our caller's life easier.
926 *
927 * To prevent silent and low-probability deadlocks (when the
928 * initial preallocation fails), check if there are any
929 * workspaces at all.
930 */
931 if (atomic_read(total_ws) == 0) {
932 static DEFINE_RATELIMIT_STATE(_rs,
933 /* once per minute */ 60 * HZ,
934 /* no burst */ 1);
935
936 if (__ratelimit(&_rs)) {
937 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
938 }
939 }
940 goto again;
941 }
942 return workspace;
943}
944
945static struct list_head *find_workspace(int type)
946{
947 return __find_workspace(type, false);
948}
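
/*
 * Typical caller pattern for the helpers above, as a minimal sketch (the
 * real callers are btrfs_compress_pages() and btrfs_decompress_bio()
 * further below):
 *
 *	struct list_head *ws = find_workspace(type);
 *	... hand ws to the per-type compress/decompress callback ...
 *	free_workspace(type, ws);
 *
 * find_workspace() never returns an error: it either reuses an idle
 * workspace, allocates a new one, or sleeps until another user puts one
 * back.
 */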
949
950/*
951 * put a workspace struct back on the list or free it if we have enough
952 * idle ones sitting around
953 */
954static void __free_workspace(int type, struct list_head *workspace,
955 bool heuristic)
956{
957 int idx = type - 1;
958 struct list_head *idle_ws;
959 spinlock_t *ws_lock;
960 atomic_t *total_ws;
961 wait_queue_head_t *ws_wait;
962 int *free_ws;
963
964 if (heuristic) {
965 idle_ws = &btrfs_heuristic_ws.idle_ws;
966 ws_lock = &btrfs_heuristic_ws.ws_lock;
967 total_ws = &btrfs_heuristic_ws.total_ws;
968 ws_wait = &btrfs_heuristic_ws.ws_wait;
969 free_ws = &btrfs_heuristic_ws.free_ws;
970 } else {
971 idle_ws = &btrfs_comp_ws[idx].idle_ws;
972 ws_lock = &btrfs_comp_ws[idx].ws_lock;
973 total_ws = &btrfs_comp_ws[idx].total_ws;
974 ws_wait = &btrfs_comp_ws[idx].ws_wait;
975 free_ws = &btrfs_comp_ws[idx].free_ws;
976 }
977
978 spin_lock(ws_lock);
979 if (*free_ws <= num_online_cpus()) {
980 list_add(workspace, idle_ws);
981 (*free_ws)++;
982 spin_unlock(ws_lock);
983 goto wake;
984 }
985 spin_unlock(ws_lock);
986
987 if (heuristic)
988 free_heuristic_ws(workspace);
989 else
990 btrfs_compress_op[idx]->free_workspace(workspace);
991 atomic_dec(total_ws);
992wake:
993 /*
994 * Make sure counter is updated before we wake up waiters.
995 */
996 smp_mb();
997 if (waitqueue_active(ws_wait))
998 wake_up(ws_wait);
999}
1000
1001static void free_workspace(int type, struct list_head *ws)
1002{
1003 return __free_workspace(type, ws, false);
1004}
1005
1006/*
1007 * cleanup function for module exit
1008 */
1009static void free_workspaces(void)
1010{
1011 struct list_head *workspace;
1012 int i;
1013
1014 while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
1015 workspace = btrfs_heuristic_ws.idle_ws.next;
1016 list_del(workspace);
1017 free_heuristic_ws(workspace);
1018 atomic_dec(&btrfs_heuristic_ws.total_ws);
1019 }
1020
1021 for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
1022 while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
1023 workspace = btrfs_comp_ws[i].idle_ws.next;
1024 list_del(workspace);
1025 btrfs_compress_op[i]->free_workspace(workspace);
1026 atomic_dec(&btrfs_comp_ws[i].total_ws);
1027 }
1028 }
1029}
1030
1031/*
1032 * Given an address space and start and length, compress the bytes into @pages
1033 * that are allocated on demand.
1034 *
1035 * @type_level is encoded algorithm and level, where level 0 means whatever
1036 * default the algorithm chooses and is opaque here;
1037 * - the compression algorithm is stored in bits 0-3
1038 * - the level is stored in bits 4-7
1039 *
1040 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1041 * and returns number of actually allocated pages
1042 *
1043 * @total_in is used to return the number of bytes actually read. It
1044 * may be smaller than the input length if we had to exit early because we
1045 * ran out of room in the pages array or because we crossed the
1046 * max_out threshold.
1047 *
1048 * @total_out is an in/out parameter, must be set to the input length and will
1049 * be also used to return the total number of compressed bytes
1050 *
1051 * @max_out tells us the max number of bytes that we're allowed to
1052 * stuff into pages
1053 */
1054int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1055 u64 start, struct page **pages,
1056 unsigned long *out_pages,
1057 unsigned long *total_in,
1058 unsigned long *total_out)
1059{
1060 struct list_head *workspace;
1061 int ret;
1062 int type = type_level & 0xF;
1063
1064 workspace = find_workspace(type);
1065
1066 btrfs_compress_op[type - 1]->set_level(workspace, type_level);
1067 ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
1068 start, pages,
1069 out_pages,
1070 total_in, total_out);
1071 free_workspace(type, workspace);
1072 return ret;
1073}
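
/*
 * Illustration of the @type_level packing described above
 * btrfs_compress_pages() (a sketch only; the helper name is hypothetical,
 * callers normally build the value from the mount options):
 *
 *	unsigned int make_type_level(unsigned int type, unsigned int level)
 *	{
 *		return (type & 0xF) | ((level & 0xF) << 4);
 *	}
 *
 * e.g. BTRFS_COMPRESS_ZLIB (1) with level 7 gives 0x71; a level of 0 in
 * bits 4-7 means "use the algorithm's default".
 */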
1074
1075/*
1076 * pages_in is an array of pages with compressed data.
1077 *
1078 * disk_start is the starting logical offset of this array in the file
1079 *
1080 * orig_bio contains the pages from the file that we want to decompress into
1081 *
1082 * srclen is the number of bytes in pages_in
1083 *
1084 * The basic idea is that we have a bio that was created by readpages.
1085 * The pages in the bio are for the uncompressed data, and they may not
1086 * be contiguous. They all correspond to the range of bytes covered by
1087 * the compressed extent.
1088 */
1089static int btrfs_decompress_bio(struct compressed_bio *cb)
1090{
1091 struct list_head *workspace;
1092 int ret;
1093 int type = cb->compress_type;
1094
1095 workspace = find_workspace(type);
1096 ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
1097 free_workspace(type, workspace);
1098
1099 return ret;
1100}
1101
1102/*
1103 * a less complex decompression routine. Our compressed data fits in a
1104 * single page, and we want to read a single page out of it.
1105 * start_byte tells us the offset into the compressed data we're interested in
1106 */
1107int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1108 unsigned long start_byte, size_t srclen, size_t destlen)
1109{
1110 struct list_head *workspace;
1111 int ret;
1112
1113 workspace = find_workspace(type);
1114
1115 ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
1116 dest_page, start_byte,
1117 srclen, destlen);
1118
1119 free_workspace(type, workspace);
1120 return ret;
1121}
1122
1123void __cold btrfs_exit_compress(void)
1124{
1125 free_workspaces();
1126}
1127
1128/*
1129 * Copy uncompressed data from working buffer to pages.
1130 *
1131 * buf_start is the byte offset, within the decompressed data, of the
1132 * start of our working buffer.
1133 * total_out is the decompressed offset just past the last byte of the working buffer.
1134 */
1135int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1136 unsigned long total_out, u64 disk_start,
1137 struct bio *bio)
1138{
1139 unsigned long buf_offset;
1140 unsigned long current_buf_start;
1141 unsigned long start_byte;
1142 unsigned long prev_start_byte;
1143 unsigned long working_bytes = total_out - buf_start;
1144 unsigned long bytes;
1145 char *kaddr;
1146 struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1147
1148 /*
1149 * start byte is the first byte of the page we're currently
1150 * copying into relative to the start of the compressed data.
1151 */
1152 start_byte = page_offset(bvec.bv_page) - disk_start;
1153
1154 /* we haven't yet hit data corresponding to this page */
1155 if (total_out <= start_byte)
1156 return 1;
1157
1158 /*
1159 * the start of the data we care about is offset into
1160 * the middle of our working buffer
1161 */
1162 if (total_out > start_byte && buf_start < start_byte) {
1163 buf_offset = start_byte - buf_start;
1164 working_bytes -= buf_offset;
1165 } else {
1166 buf_offset = 0;
1167 }
1168 current_buf_start = buf_start;
1169
1170 /* copy bytes from the working buffer into the pages */
1171 while (working_bytes > 0) {
1172 bytes = min_t(unsigned long, bvec.bv_len,
1173 PAGE_SIZE - buf_offset);
1174 bytes = min(bytes, working_bytes);
1175
1176 kaddr = kmap_atomic(bvec.bv_page);
1177 memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1178 kunmap_atomic(kaddr);
1179 flush_dcache_page(bvec.bv_page);
1180
1181 buf_offset += bytes;
1182 working_bytes -= bytes;
1183 current_buf_start += bytes;
1184
1185 /* check if we need to pick another page */
1186 bio_advance(bio, bytes);
1187 if (!bio->bi_iter.bi_size)
1188 return 0;
1189 bvec = bio_iter_iovec(bio, bio->bi_iter);
1190 prev_start_byte = start_byte;
1191 start_byte = page_offset(bvec.bv_page) - disk_start;
1192
1193 /*
1194 * We need to make sure we're only adjusting
1195 * our offset into the compression working buffer when
1196 * we're switching pages. Otherwise we can incorrectly
1197 * keep copying when we were actually done.
1198 */
1199 if (start_byte != prev_start_byte) {
1200 /*
1201 * make sure our new page is covered by this
1202 * working buffer
1203 */
1204 if (total_out <= start_byte)
1205 return 1;
1206
1207 /*
1208 * the next page in the biovec might not be adjacent
1209 * to the last page, but it might still be found
1210 * inside this working buffer. bump our offset pointer
1211 */
1212 if (total_out > start_byte &&
1213 current_buf_start < start_byte) {
1214 buf_offset = start_byte - buf_start;
1215 working_bytes = total_out - start_byte;
1216 current_buf_start = buf_start + buf_offset;
1217 }
1218 }
1219 }
1220
1221 return 1;
1222}
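
/*
 * Worked example of the offset arithmetic above, with illustrative values:
 * say the compressed extent starts at disk_start = 1MiB and the working
 * buffer currently holds decompressed bytes [buf_start, total_out) =
 * [8192, 12288).  For a bvec page at file offset 1MiB + 8192 we get
 * start_byte = 8192, so buf_offset starts at 0 and up to 4096 bytes are
 * copied into that page before we either exhaust working_bytes or advance
 * to a bvec on a different page.
 */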
1223
1224/*
1225 * Shannon Entropy calculation
1226 *
1227 * Pure byte distribution analysis fails to determine compressibility of data.
1228 * Try calculating entropy to estimate the average minimum number of bits
1229 * needed to encode the sampled data.
1230 *
1231 * For convenience, return the percentage of needed bits, instead of the
1232 * number of bits directly.
1233 *
1234 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1235 * and can be compressible with high probability
1236 *
1237 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1238 *
1239 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1240 */
1241#define ENTROPY_LVL_ACEPTABLE (65)
1242#define ENTROPY_LVL_HIGH (80)
1243
1244/*
1245 * For increased precision in the shannon_entropy calculation,
1246 * let's do pow(n, M) to save more digits after comma:
1247 *
1248 * - maximum int bit length is 64
1249 * - ilog2(MAX_SAMPLE_SIZE) -> 13
1250 * - 13 * 4 = 52 < 64 -> M = 4
1251 *
1252 * So use pow(n, 4).
1253 */
1254static inline u32 ilog2_w(u64 n)
1255{
1256 return ilog2(n * n * n * n);
1257}
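
/*
 * Example of the precision gain (a back-of-the-envelope check): for
 * n = 8192, plain ilog2(8192) = 13 while ilog2_w(8192) = ilog2(8192^4) =
 * ilog2(2^52) = 52, i.e. the same logarithm scaled by 4, so fractional
 * bits that ilog2() alone would drop still influence the integer math in
 * shannon_entropy() below.
 */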
1258
1259static u32 shannon_entropy(struct heuristic_ws *ws)
1260{
1261 const u32 entropy_max = 8 * ilog2_w(2);
1262 u32 entropy_sum = 0;
1263 u32 p, p_base, sz_base;
1264 u32 i;
1265
1266 sz_base = ilog2_w(ws->sample_size);
1267 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1268 p = ws->bucket[i].count;
1269 p_base = ilog2_w(p);
1270 entropy_sum += p * (sz_base - p_base);
1271 }
1272
1273 entropy_sum /= ws->sample_size;
1274 return entropy_sum * 100 / entropy_max;
1275}
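
/*
 * Sanity check of the scaling in shannon_entropy() with two extreme inputs,
 * assuming a full 8192 byte sample: if every byte has the same value, one
 * bucket holds all 8192 counts, p_base == sz_base and the result is 0%.
 * If all 256 values occur equally often (32 counts each), entropy_sum is
 * 256 * 32 * (ilog2_w(8192) - ilog2_w(32)) = 8192 * (52 - 20), which after
 * dividing by the sample size gives 32, i.e. 100% of
 * entropy_max = 8 * ilog2_w(2) = 32.
 */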
1276
1277#define RADIX_BASE 4U
1278#define COUNTERS_SIZE (1U << RADIX_BASE)
1279
1280static u8 get4bits(u64 num, int shift) {
1281 u8 low4bits;
1282
1283 num >>= shift;
1284 /* Reverse order */
1285 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1286 return low4bits;
1287}
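
/*
 * Example of the digit extraction above: for num = 0x2A7 and shift = 4 the
 * selected 4-bit digit is 0xA, so the value returned is 15 - 10 = 5.
 * Reversing each digit this way makes the radix sort below produce a
 * descending order, which is what byte_core_set_size() relies on.
 */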
1288
1289/*
1290 * Use 4 bits as radix base
1291 * Use 16 u32 counters for calculating the new position in the buf array
1292 *
1293 * @array - array that will be sorted
1294 * @array_buf - buffer array to store sorting results
1295 * must be equal in size to @array
1296 * @num - array size
1297 */
1298static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1299 int num)
1300{
1301 u64 max_num;
1302 u64 buf_num;
1303 u32 counters[COUNTERS_SIZE];
1304 u32 new_addr;
1305 u32 addr;
1306 int bitlen;
1307 int shift;
1308 int i;
1309
1310 /*
1311 * Try to avoid useless loop iterations for small numbers stored in big
1312 * counters. Example: 48 33 4 ... in a 64bit array
1313 */
1314 max_num = array[0].count;
1315 for (i = 1; i < num; i++) {
1316 buf_num = array[i].count;
1317 if (buf_num > max_num)
1318 max_num = buf_num;
1319 }
1320
1321 buf_num = ilog2(max_num);
1322 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1323
1324 shift = 0;
1325 while (shift < bitlen) {
1326 memset(counters, 0, sizeof(counters));
1327
1328 for (i = 0; i < num; i++) {
1329 buf_num = array[i].count;
1330 addr = get4bits(buf_num, shift);
1331 counters[addr]++;
1332 }
1333
1334 for (i = 1; i < COUNTERS_SIZE; i++)
1335 counters[i] += counters[i - 1];
1336
1337 for (i = num - 1; i >= 0; i--) {
1338 buf_num = array[i].count;
1339 addr = get4bits(buf_num, shift);
1340 counters[addr]--;
1341 new_addr = counters[addr];
1342 array_buf[new_addr] = array[i];
1343 }
1344
1345 shift += RADIX_BASE;
1346
1347 /*
1348 * A normal radix sort expects to move data from a temporary array to
1349 * the main one, but that requires some CPU time. Avoid that
1350 * by doing another sort iteration into the original array instead of
1351 * a memcpy().
1352 */
1353 memset(counters, 0, sizeof(counters));
1354
1355 for (i = 0; i < num; i++) {
1356 buf_num = array_buf[i].count;
1357 addr = get4bits(buf_num, shift);
1358 counters[addr]++;
1359 }
1360
1361 for (i = 1; i < COUNTERS_SIZE; i++)
1362 counters[i] += counters[i - 1];
1363
1364 for (i = num - 1; i >= 0; i--) {
1365 buf_num = array_buf[i].count;
1366 addr = get4bits(buf_num, shift);
1367 counters[addr]--;
1368 new_addr = counters[addr];
1369 array[new_addr] = array_buf[i];
1370 }
1371
1372 shift += RADIX_BASE;
1373 }
1374}
1375
1376/*
1377 * Size of the core byte set - how many bytes cover 90% of the sample
1378 *
1379 * There are several types of structured binary data that use nearly all byte
1380 * values. The distribution can be uniform and counts in all buckets will be
1381 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1382 *
1383 * Another possibility is a normal (Gaussian) distribution, where the data
1384 * could potentially be compressible, but we have to take a few more steps
1385 * to decide how much.
1386 *
1387 * @BYTE_CORE_SET_LOW  - the main part of byte values is repeated frequently,
1388 * a compression algorithm can easily fix that
1389 * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high
1390 * probability is not compressible
1391 */
1392#define BYTE_CORE_SET_LOW (64)
1393#define BYTE_CORE_SET_HIGH (200)
1394
1395static int byte_core_set_size(struct heuristic_ws *ws)
1396{
1397 u32 i;
1398 u32 coreset_sum = 0;
1399 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1400 struct bucket_item *bucket = ws->bucket;
1401
1402 /* Sort in reverse order */
1403 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1404
1405 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1406 coreset_sum += bucket[i].count;
1407
1408 if (coreset_sum > core_set_threshold)
1409 return i;
1410
1411 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1412 coreset_sum += bucket[i].count;
1413 if (coreset_sum > core_set_threshold)
1414 break;
1415 }
1416
1417 return i;
1418}
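
/*
 * Worked example for the thresholds above, assuming a full 8192 byte sample
 * (so core_set_threshold = 8192 * 90 / 100 = 7372): if the 64 most frequent
 * byte values already account for more than 7372 samples the data is
 * considered well compressible, while needing 200 or more distinct values
 * to reach 90% indicates a near-uniform distribution that is likely not
 * worth compressing (see the caller in btrfs_compress_heuristic()).
 */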
1419
1420/*
1421 * Count byte values in buckets.
1422 * This heuristic can detect textual data (configs, xml, json, html, etc),
1423 * because in most text-like data the byte set is restricted to a limited
1424 * number of possible characters, and that restriction in most cases makes
1425 * the data easy to compress.
1426 *
1427 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1428 * less - compressible
1429 * more - need additional analysis
1430 */
1431#define BYTE_SET_THRESHOLD (64)
1432
1433static u32 byte_set_size(const struct heuristic_ws *ws)
1434{
1435 u32 i;
1436 u32 byte_set_size = 0;
1437
1438 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1439 if (ws->bucket[i].count > 0)
1440 byte_set_size++;
1441 }
1442
1443 /*
1444 * Continue collecting the count of byte values in buckets. If the byte
1445 * set size is bigger than the threshold, it's pointless to continue,
1446 * the detection technique would fail for this type of data.
1447 */
1448 for (; i < BUCKET_SIZE; i++) {
1449 if (ws->bucket[i].count > 0) {
1450 byte_set_size++;
1451 if (byte_set_size > BYTE_SET_THRESHOLD)
1452 return byte_set_size;
1453 }
1454 }
1455
1456 return byte_set_size;
1457}
1458
1459static bool sample_repeated_patterns(struct heuristic_ws *ws)
1460{
1461 const u32 half_of_sample = ws->sample_size / 2;
1462 const u8 *data = ws->sample;
1463
1464 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1465}
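
/*
 * This catches trivially repetitive data cheaply: for example a run of
 * identical bytes, or any pattern whose period divides half the sample
 * size, makes the two halves compare equal, so the heuristic can approve
 * compression without collecting byte statistics at all.
 */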
1466
1467static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1468 struct heuristic_ws *ws)
1469{
1470 struct page *page;
1471 u64 index, index_end;
1472 u32 i, curr_sample_pos;
1473 u8 *in_data;
1474
1475 /*
1476 * Compression handles the input data by chunks of 128KiB
1477 * (defined by BTRFS_MAX_UNCOMPRESSED)
1478 *
1479 * We do the same for the heuristic and loop over the whole range.
1480 *
1481 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1482 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1483 */
1484 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1485 end = start + BTRFS_MAX_UNCOMPRESSED;
1486
1487 index = start >> PAGE_SHIFT;
1488 index_end = end >> PAGE_SHIFT;
1489
1490 /* Don't miss unaligned end */
1491 if (!IS_ALIGNED(end, PAGE_SIZE))
1492 index_end++;
1493
1494 curr_sample_pos = 0;
1495 while (index < index_end) {
1496 page = find_get_page(inode->i_mapping, index);
1497 in_data = kmap(page);
1498 /* Handle case where the start is not aligned to PAGE_SIZE */
1499 i = start % PAGE_SIZE;
1500 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1501 /* Don't sample any garbage from the last page */
1502 if (start > end - SAMPLING_READ_SIZE)
1503 break;
1504 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1505 SAMPLING_READ_SIZE);
1506 i += SAMPLING_INTERVAL;
1507 start += SAMPLING_INTERVAL;
1508 curr_sample_pos += SAMPLING_READ_SIZE;
1509 }
1510 kunmap(page);
1511 put_page(page);
1512
1513 index++;
1514 }
1515
1516 ws->sample_size = curr_sample_pos;
1517}
1518
1519/*
1520 * Compression heuristic.
1521 *
1522 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1523 * quickly (compared to direct compression) detect data characteristics
1524 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1525 * data.
1526 *
1527 * The following types of analysis can be performed:
1528 * - detect mostly zero data
1529 * - detect data with low "byte set" size (text, etc)
1530 * - detect data with low/high "core byte" set
1531 *
1532 * Return non-zero if the compression should be done, 0 otherwise.
1533 */
1534int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1535{
1536 struct list_head *ws_list = __find_workspace(0, true);
1537 struct heuristic_ws *ws;
1538 u32 i;
1539 u8 byte;
1540 int ret = 0;
1541
1542 ws = list_entry(ws_list, struct heuristic_ws, list);
1543
1544 heuristic_collect_sample(inode, start, end, ws);
1545
1546 if (sample_repeated_patterns(ws)) {
1547 ret = 1;
1548 goto out;
1549 }
1550
1551 memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);
1552
1553 for (i = 0; i < ws->sample_size; i++) {
1554 byte = ws->sample[i];
1555 ws->bucket[byte].count++;
1556 }
1557
1558 i = byte_set_size(ws);
1559 if (i < BYTE_SET_THRESHOLD) {
1560 ret = 2;
1561 goto out;
1562 }
1563
1564 i = byte_core_set_size(ws);
1565 if (i <= BYTE_CORE_SET_LOW) {
1566 ret = 3;
1567 goto out;
1568 }
1569
1570 if (i >= BYTE_CORE_SET_HIGH) {
1571 ret = 0;
1572 goto out;
1573 }
1574
1575 i = shannon_entropy(ws);
1576 if (i <= ENTROPY_LVL_ACEPTABLE) {
1577 ret = 4;
1578 goto out;
1579 }
1580
1581 /*
1582 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1583 * needed to give green light to compression.
1584 *
1585 * For now just assume that compression at that level is not worth the
1586 * resources because:
1587 *
1588 * 1. it is possible to defrag the data later
1589 *
1590 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1591 * values, every bucket has counter at level ~54. The heuristic would
1592 * be confused. This can happen when data have some internal repeated
1593 * patterns like "abbacbbc...". This can be detected by analyzing
1594 * pairs of bytes, which is too costly.
1595 */
1596 if (i < ENTROPY_LVL_HIGH) {
1597 ret = 5;
1598 goto out;
1599 } else {
1600 ret = 0;
1601 goto out;
1602 }
1603
1604out:
1605 __free_workspace(0, ws_list, true);
1606 return ret;
1607}
1608
1609unsigned int btrfs_compress_str2level(const char *str)
1610{
1611 if (strncmp(str, "zlib", 4) != 0)
1612 return 0;
1613
1614 /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
1615 if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
1616 return str[5] - '0';
1617
1618 return BTRFS_ZLIB_DEFAULT_LEVEL;
1619}
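
/*
 * A few illustrative inputs for the parser above (assuming
 * BTRFS_ZLIB_DEFAULT_LEVEL is 3, as defined in compression.h):
 *
 *	btrfs_compress_str2level("zlib:7")  -> 7
 *	btrfs_compress_str2level("zlib")    -> BTRFS_ZLIB_DEFAULT_LEVEL
 *	btrfs_compress_str2level("zlib:12") -> BTRFS_ZLIB_DEFAULT_LEVEL
 *	btrfs_compress_str2level("zstd:3")  -> 0 (only zlib accepts a level here)
 */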