// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
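
/*
 * Worked example (assuming the usual 4KiB sectorsize and the 4-byte crc32c
 * checksum): a 128KiB compressed extent spans 32 sectors, so this allocates
 * sizeof(struct compressed_bio) plus 32 * 4 bytes of room for one checksum
 * per compressed sector after the struct itself.
 */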

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, so skip the checksum: there is no
	 * way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
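	/*
	 * The hook's last argument is an int "uptodate" flag; the blk_status_t
	 * constants below are used purely for their numeric values: BLK_STS_OK
	 * (0, i.e. not uptodate) when the write failed, and the nonzero
	 * BLK_STS_NOTSUPP when it succeeded.
	 */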
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
					   unsigned long len, u64 disk_start,
					   unsigned long compressed_len,
					   struct page **compressed_pages,
					   unsigned long nr_pages,
					   unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							      PAGE_SIZE,
							      bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->i_pages, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file. But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it. We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
					  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * The heuristic uses systematic sampling to collect data from the input data
 * range; the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - distance between the start of consecutive samples
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
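
/*
 * Sanity check of the arithmetic above, assuming the usual 128KiB
 * BTRFS_MAX_UNCOMPRESSED: 131072 * 16 / 256 = 8192, i.e. the 8KiB bound
 * mentioned above, or 512 samples of 16 bytes each.
 */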

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, it waits until there is one.
 * Preallocation provides a forward progress guarantee, so we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is in bits 0-3
 * - the level is in bits 4-7
 *
 * @out_pages is an in/out parameter, holds the maximum number of pages to
 * allocate and returns the number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
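/*
 * Worked example of the encoding described above: zlib is compression type
 * 1, so requesting zlib at level 3 encodes as type_level = (3 << 4) | 1 =
 * 0x31; type_level & 0xF recovers the type and the high bits carry the
 * level (0 there keeps the algorithm's default).
 */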
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous. They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine. Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void __cold btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from the working buffer to pages.
 *
 * buf_start is the offset of the start of the working buffer within the
 * uncompressed data produced so far.
 *
 * total_out is the offset of the end of the working buffer in that stream,
 * i.e. the total number of uncompressed bytes produced so far.
 */
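/*
 * For example (hypothetical numbers): if the decompressor has produced 8KiB
 * so far and the working buffer holds the most recent 4KiB of it, then
 * buf_start is 4096, total_out is 8192 and working_bytes starts out as 4096.
 */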
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start_byte is the offset of the first byte of the page we're
	 * currently copying into, relative to the start of the uncompressed
	 * data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
			      PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages. Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
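
/*
 * Quick worked example of the trick above: ilog2_w(2) = ilog2(16) = 4, so
 * every factor of two contributes 4 instead of 1. That is why
 * shannon_entropy() below uses 8 * ilog2_w(2) = 32 as the maximum, i.e. the
 * scaled equivalent of 8 bits per byte.
 */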

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
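
/*
 * Sanity check of the formula on the two extremes (hypothetical samples): if
 * every sampled byte is identical, a single bucket holds sample_size, so
 * sz_base - p_base = 0 and the result is 0%. If all 256 byte values occur
 * equally often, each bucket holds sample_size / 256, every term contributes
 * ilog2_w(256) = 32, and the result is 32 * 100 / 32 = 100%.
 */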

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
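
/*
 * Note on the "Reverse order" complement above: it maps the largest 4-bit
 * digit to counter 0, so the radix sort below produces buckets sorted by
 * descending count, which is the "reverse order" byte_core_set_size()
 * relies on.
 */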

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters. Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array to
		 * the main one. But that requires some CPU time. Avoid that
		 * by doing another sort iteration on the original array
		 * instead of a memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}
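
/*
 * Quick example of the pass count (hypothetical input): with a maximum
 * bucket count of 8192, ilog2(8192) = 13 and ALIGN(13, 8) = 16, so the
 * while loop above runs twice, i.e. four 4-bit passes covering 16 bits.
 */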

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *			 compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *			 probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting the count of byte values in buckets. If the
	 * byte set size is bigger than the threshold, it's pointless to
	 * continue, the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic
 * to quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has a counter at level ~54. The heuristic
	 *    would be confused. This can happen when data have some internal
	 *    repeated patterns like "abbacbbc...". This can be detected by
	 *    analyzing pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}

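/*
 * For example, per the accepted form documented below: "zlib:7" yields
 * level 7, while a plain "zlib" or a malformed or out-of-range level falls
 * back to BTRFS_ZLIB_DEFAULT_LEVEL.
 */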
unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6#include <linux/kernel.h>
7#include <linux/bio.h>
8#include <linux/file.h>
9#include <linux/fs.h>
10#include <linux/pagemap.h>
11#include <linux/highmem.h>
12#include <linux/time.h>
13#include <linux/init.h>
14#include <linux/string.h>
15#include <linux/backing-dev.h>
16#include <linux/writeback.h>
17#include <linux/slab.h>
18#include <linux/sched/mm.h>
19#include <linux/log2.h>
20#include <crypto/hash.h>
21#include "misc.h"
22#include "ctree.h"
23#include "disk-io.h"
24#include "transaction.h"
25#include "btrfs_inode.h"
26#include "volumes.h"
27#include "ordered-data.h"
28#include "compression.h"
29#include "extent_io.h"
30#include "extent_map.h"
31
32static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
33
34const char* btrfs_compress_type2str(enum btrfs_compression_type type)
35{
36 switch (type) {
37 case BTRFS_COMPRESS_ZLIB:
38 case BTRFS_COMPRESS_LZO:
39 case BTRFS_COMPRESS_ZSTD:
40 case BTRFS_COMPRESS_NONE:
41 return btrfs_compress_types[type];
42 }
43
44 return NULL;
45}
46
47bool btrfs_compress_is_valid_type(const char *str, size_t len)
48{
49 int i;
50
51 for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
52 size_t comp_len = strlen(btrfs_compress_types[i]);
53
54 if (len < comp_len)
55 continue;
56
57 if (!strncmp(btrfs_compress_types[i], str, comp_len))
58 return true;
59 }
60 return false;
61}
62
63static int btrfs_decompress_bio(struct compressed_bio *cb);
64
65static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
66 unsigned long disk_size)
67{
68 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
69
70 return sizeof(struct compressed_bio) +
71 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
72}
73
74static int check_compressed_csum(struct btrfs_inode *inode,
75 struct compressed_bio *cb,
76 u64 disk_start)
77{
78 struct btrfs_fs_info *fs_info = inode->root->fs_info;
79 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
80 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
81 int ret;
82 struct page *page;
83 unsigned long i;
84 char *kaddr;
85 u8 csum[BTRFS_CSUM_SIZE];
86 u8 *cb_sum = cb->sums;
87
88 if (inode->flags & BTRFS_INODE_NODATASUM)
89 return 0;
90
91 shash->tfm = fs_info->csum_shash;
92
93 for (i = 0; i < cb->nr_pages; i++) {
94 page = cb->compressed_pages[i];
95
96 crypto_shash_init(shash);
97 kaddr = kmap_atomic(page);
98 crypto_shash_update(shash, kaddr, PAGE_SIZE);
99 kunmap_atomic(kaddr);
100 crypto_shash_final(shash, (u8 *)&csum);
101
102 if (memcmp(&csum, cb_sum, csum_size)) {
103 btrfs_print_data_csum_error(inode, disk_start,
104 csum, cb_sum, cb->mirror_num);
105 ret = -EIO;
106 goto fail;
107 }
108 cb_sum += csum_size;
109
110 }
111 ret = 0;
112fail:
113 return ret;
114}
115
116/* when we finish reading compressed pages from the disk, we
117 * decompress them and then run the bio end_io routines on the
118 * decompressed pages (in the inode address space).
119 *
120 * This allows the checksumming and other IO error handling routines
121 * to work normally
122 *
123 * The compressed pages are freed here, and it must be run
124 * in process context
125 */
126static void end_compressed_bio_read(struct bio *bio)
127{
128 struct compressed_bio *cb = bio->bi_private;
129 struct inode *inode;
130 struct page *page;
131 unsigned long index;
132 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
133 int ret = 0;
134
135 if (bio->bi_status)
136 cb->errors = 1;
137
138 /* if there are more bios still pending for this compressed
139 * extent, just exit
140 */
141 if (!refcount_dec_and_test(&cb->pending_bios))
142 goto out;
143
144 /*
145 * Record the correct mirror_num in cb->orig_bio so that
146 * read-repair can work properly.
147 */
148 ASSERT(btrfs_io_bio(cb->orig_bio));
149 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
150 cb->mirror_num = mirror;
151
152 /*
153 * Some IO in this cb have failed, just skip checksum as there
154 * is no way it could be correct.
155 */
156 if (cb->errors == 1)
157 goto csum_failed;
158
159 inode = cb->inode;
160 ret = check_compressed_csum(BTRFS_I(inode), cb,
161 (u64)bio->bi_iter.bi_sector << 9);
162 if (ret)
163 goto csum_failed;
164
165 /* ok, we're the last bio for this extent, lets start
166 * the decompression.
167 */
168 ret = btrfs_decompress_bio(cb);
169
170csum_failed:
171 if (ret)
172 cb->errors = 1;
173
174 /* release the compressed pages */
175 index = 0;
176 for (index = 0; index < cb->nr_pages; index++) {
177 page = cb->compressed_pages[index];
178 page->mapping = NULL;
179 put_page(page);
180 }
181
182 /* do io completion on the original bio */
183 if (cb->errors) {
184 bio_io_error(cb->orig_bio);
185 } else {
186 struct bio_vec *bvec;
187 struct bvec_iter_all iter_all;
188
189 /*
190 * we have verified the checksum already, set page
191 * checked so the end_io handlers know about it
192 */
193 ASSERT(!bio_flagged(bio, BIO_CLONED));
194 bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
195 SetPageChecked(bvec->bv_page);
196
197 bio_endio(cb->orig_bio);
198 }
199
200 /* finally free the cb struct */
201 kfree(cb->compressed_pages);
202 kfree(cb);
203out:
204 bio_put(bio);
205}
206
207/*
208 * Clear the writeback bits on all of the file
209 * pages for a compressed write
210 */
211static noinline void end_compressed_writeback(struct inode *inode,
212 const struct compressed_bio *cb)
213{
214 unsigned long index = cb->start >> PAGE_SHIFT;
215 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
216 struct page *pages[16];
217 unsigned long nr_pages = end_index - index + 1;
218 int i;
219 int ret;
220
221 if (cb->errors)
222 mapping_set_error(inode->i_mapping, -EIO);
223
224 while (nr_pages > 0) {
225 ret = find_get_pages_contig(inode->i_mapping, index,
226 min_t(unsigned long,
227 nr_pages, ARRAY_SIZE(pages)), pages);
228 if (ret == 0) {
229 nr_pages -= 1;
230 index += 1;
231 continue;
232 }
233 for (i = 0; i < ret; i++) {
234 if (cb->errors)
235 SetPageError(pages[i]);
236 end_page_writeback(pages[i]);
237 put_page(pages[i]);
238 }
239 nr_pages -= ret;
240 index += ret;
241 }
242 /* the inode may be gone now */
243}
244
245/*
246 * do the cleanup once all the compressed pages hit the disk.
247 * This will clear writeback on the file pages and free the compressed
248 * pages.
249 *
250 * This also calls the writeback end hooks for the file pages so that
251 * metadata and checksums can be updated in the file.
252 */
253static void end_compressed_bio_write(struct bio *bio)
254{
255 struct compressed_bio *cb = bio->bi_private;
256 struct inode *inode;
257 struct page *page;
258 unsigned long index;
259
260 if (bio->bi_status)
261 cb->errors = 1;
262
263 /* if there are more bios still pending for this compressed
264 * extent, just exit
265 */
266 if (!refcount_dec_and_test(&cb->pending_bios))
267 goto out;
268
269 /* ok, we're the last bio for this extent, step one is to
270 * call back into the FS and do all the end_io operations
271 */
272 inode = cb->inode;
273 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
274 btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
275 cb->start, cb->start + cb->len - 1,
276 bio->bi_status == BLK_STS_OK);
277 cb->compressed_pages[0]->mapping = NULL;
278
279 end_compressed_writeback(inode, cb);
280 /* note, our inode could be gone now */
281
282 /*
283 * release the compressed pages, these came from alloc_page and
284 * are not attached to the inode at all
285 */
286 index = 0;
287 for (index = 0; index < cb->nr_pages; index++) {
288 page = cb->compressed_pages[index];
289 page->mapping = NULL;
290 put_page(page);
291 }
292
293 /* finally free the cb struct */
294 kfree(cb->compressed_pages);
295 kfree(cb);
296out:
297 bio_put(bio);
298}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                           unsigned long len, u64 disk_start,
                                           unsigned long compressed_len,
                                           struct page **compressed_pages,
                                           unsigned long nr_pages,
                                           unsigned int write_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio *bio = NULL;
        struct compressed_bio *cb;
        unsigned long bytes_left;
        int pg_index = 0;
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
        blk_status_t ret;
        int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

        WARN_ON(!PAGE_ALIGNED(start));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->start = start;
        cb->len = len;
        cb->mirror_num = 0;
        cb->compressed_pages = compressed_pages;
        cb->compressed_len = compressed_len;
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;

        bdev = fs_info->fs_devices->latest_bdev;

        bio = btrfs_bio_alloc(first_byte);
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;
        refcount_set(&cb->pending_bios, 1);

        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;

                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);

                page->mapping = NULL;
                if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        /*
                         * Increment the count before we submit the bio so we
                         * know the end IO handler won't happen before we
                         * increment it. Otherwise, the cb might get freed
                         * before we're done setting it up.
                         */
                        refcount_inc(&cb->pending_bios);
                        ret = btrfs_bio_wq_end_io(fs_info, bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        if (!skip_sum) {
                                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                                BUG_ON(ret); /* -ENOMEM */
                        }

                        ret = btrfs_map_bio(fs_info, bio, 0, 1);
                        if (ret) {
                                bio->bi_status = ret;
                                bio_endio(bio);
                        }

                        bio = btrfs_bio_alloc(first_byte);
                        bio_set_dev(bio, bdev);
                        bio->bi_opf = REQ_OP_WRITE | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
                        btrfs_info(fs_info,
                                   "bytes left %lu compress len %lu nr %lu",
                                   bytes_left, cb->compressed_len, cb->nr_pages);
                }
                bytes_left -= PAGE_SIZE;
                first_byte += PAGE_SIZE;
                cond_resched();
        }

        ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        if (!skip_sum) {
                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                BUG_ON(ret); /* -ENOMEM */
        }

        ret = btrfs_map_bio(fs_info, bio, 0, 1);
        if (ret) {
                bio->bi_status = ret;
                bio_endio(bio);
        }

        return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
        struct bio_vec *last = bio_last_bvec_all(bio);

        return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
                                     u64 compressed_end,
                                     struct compressed_bio *cb)
{
        unsigned long end_index;
        unsigned long pg_index;
        u64 last_offset;
        u64 isize = i_size_read(inode);
        int ret;
        struct page *page;
        unsigned long nr_pages = 0;
        struct extent_map *em;
        struct address_space *mapping = inode->i_mapping;
        struct extent_map_tree *em_tree;
        struct extent_io_tree *tree;
        u64 end;
        int misses = 0;

        last_offset = bio_end_offset(cb->orig_bio);
        em_tree = &BTRFS_I(inode)->extent_tree;
        tree = &BTRFS_I(inode)->io_tree;

        if (isize == 0)
                return 0;

        end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

        while (last_offset < compressed_end) {
                pg_index = last_offset >> PAGE_SHIFT;

                if (pg_index > end_index)
                        break;

                page = xa_load(&mapping->i_pages, pg_index);
                if (page && !xa_is_value(page)) {
                        misses++;
                        if (misses > 4)
                                break;
                        goto next;
                }

                page = __page_cache_alloc(mapping_gfp_constraint(mapping,
                                                                 ~__GFP_FS));
                if (!page)
                        break;

                if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
                        put_page(page);
                        goto next;
                }

                end = last_offset + PAGE_SIZE - 1;
                /*
                 * at this point, we have a locked page in the page cache
                 * for these bytes in the file. But, we have to make
                 * sure they map to this compressed extent on disk.
                 */
                set_page_extent_mapped(page);
                lock_extent(tree, last_offset, end);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, last_offset,
                                           PAGE_SIZE);
                read_unlock(&em_tree->lock);

                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
                    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
                free_extent_map(em);

                if (page->index == end_index) {
                        char *userpage;
                        size_t zero_offset = offset_in_page(isize);

                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_SIZE - zero_offset;
                                userpage = kmap_atomic(page);
                                memset(userpage + zero_offset, 0, zeros);
                                flush_dcache_page(page);
                                kunmap_atomic(userpage);
                        }
                }

                ret = bio_add_page(cb->orig_bio, page,
                                   PAGE_SIZE, 0);

                if (ret == PAGE_SIZE) {
                        nr_pages++;
                        put_page(page);
                } else {
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
next:
                last_offset += PAGE_SIZE;
        }
        return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it. We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                          int mirror_num, unsigned long bio_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_map_tree *em_tree;
        struct compressed_bio *cb;
        unsigned long compressed_len;
        unsigned long nr_pages;
        unsigned long pg_index;
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
        u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
        blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        u8 *sums;

        em_tree = &BTRFS_I(inode)->extent_tree;

        /* we need the actual starting offset of this extent in the file */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree,
                                   page_offset(bio_first_page_all(bio)),
                                   PAGE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em)
                return BLK_STS_IOERR;

        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                goto out;

        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->mirror_num = mirror_num;
        sums = cb->sums;

        cb->start = em->orig_start;
        em_len = em->len;
        em_start = em->start;

        free_extent_map(em);
        em = NULL;

        cb->len = bio->bi_iter.bi_size;
        cb->compressed_len = compressed_len;
        cb->compress_type = extent_compress_type(bio_flags);
        cb->orig_bio = bio;

        nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
        cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
        if (!cb->compressed_pages)
                goto fail1;

        bdev = fs_info->fs_devices->latest_bdev;

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
                                                            __GFP_HIGHMEM);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
                        ret = BLK_STS_RESOURCE;
                        goto fail2;
                }
        }
        faili = nr_pages - 1;
        cb->nr_pages = nr_pages;

        add_ra_bio_pages(inode, em_start + em_len, cb);

        /* include any pages we added in add_ra_bio_pages */
        cb->len = bio->bi_iter.bi_size;

        comp_bio = btrfs_bio_alloc(cur_disk_byte);
        bio_set_dev(comp_bio, bdev);
        comp_bio->bi_opf = REQ_OP_READ;
        comp_bio->bi_private = cb;
        comp_bio->bi_end_io = end_compressed_bio_read;
        refcount_set(&cb->pending_bios, 1);

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                int submit = 0;

                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;

                if (comp_bio->bi_iter.bi_size)
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
                                                          comp_bio, 0);

                page->mapping = NULL;
                if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        unsigned int nr_sectors;

                        ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        /*
                         * Increment the count before we submit the bio so we
                         * know the end IO handler won't happen before we
                         * increment it. Otherwise, the cb might get freed
                         * before we're done setting it up.
                         */
                        refcount_inc(&cb->pending_bios);

                        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                                ret = btrfs_lookup_bio_sums(inode, comp_bio,
                                                            sums);
                                BUG_ON(ret); /* -ENOMEM */
                        }

                        nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
                                                  fs_info->sectorsize);
                        sums += csum_size * nr_sectors;

                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
                        if (ret) {
                                comp_bio->bi_status = ret;
                                bio_endio(comp_bio);
                        }

                        comp_bio = btrfs_bio_alloc(cur_disk_byte);
                        bio_set_dev(comp_bio, bdev);
                        comp_bio->bi_opf = REQ_OP_READ;
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;

                        bio_add_page(comp_bio, page, PAGE_SIZE, 0);
                }
                cur_disk_byte += PAGE_SIZE;
        }

        ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
                ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
                BUG_ON(ret); /* -ENOMEM */
        }

        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
        if (ret) {
                comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }

        return 0;

fail2:
        while (faili >= 0) {
                __free_page(cb->compressed_pages[faili]);
                faili--;
        }

        kfree(cb->compressed_pages);
fail1:
        kfree(cb);
out:
        free_extent_map(em);
        return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - distance, in bytes, between the starts of two
 *                       consecutive samples
 */
#define SAMPLING_READ_SIZE (16)
#define SAMPLING_INTERVAL (256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE (256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
                         SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
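
/*
 * Worked example (assuming BTRFS_MAX_UNCOMPRESSED is 128KiB, as defined in
 * compression.h): 131072 / SAMPLING_INTERVAL = 512 sample positions, each
 * contributing SAMPLING_READ_SIZE = 16 bytes, so MAX_SAMPLE_SIZE evaluates
 * to 512 * 16 = 8192 bytes, matching the 8KB bound described above.
 */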

struct bucket_item {
        u32 count;
};

struct heuristic_ws {
        /* Partial copy of input data */
        u8 *sample;
        u32 sample_size;
        /* Buckets store counters for each byte value */
        struct bucket_item *bucket;
        /* Sorting buffer */
        struct bucket_item *bucket_b;
        struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void heuristic_init_workspace_manager(void)
{
        btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
}

static void heuristic_cleanup_workspace_manager(void)
{
        btrfs_cleanup_workspace_manager(&heuristic_wsm);
}

static struct list_head *heuristic_get_workspace(unsigned int level)
{
        return btrfs_get_workspace(&heuristic_wsm, level);
}

static void heuristic_put_workspace(struct list_head *ws)
{
        btrfs_put_workspace(&heuristic_wsm, ws);
}

static void free_heuristic_ws(struct list_head *ws)
{
        struct heuristic_ws *workspace;

        workspace = list_entry(ws, struct heuristic_ws, list);

        kvfree(workspace->sample);
        kfree(workspace->bucket);
        kfree(workspace->bucket_b);
        kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
        struct heuristic_ws *ws;

        ws = kzalloc(sizeof(*ws), GFP_KERNEL);
        if (!ws)
                return ERR_PTR(-ENOMEM);

        ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
        if (!ws->sample)
                goto fail;

        ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
        if (!ws->bucket)
                goto fail;

        ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
        if (!ws->bucket_b)
                goto fail;

        INIT_LIST_HEAD(&ws->list);
        return &ws->list;
fail:
        free_heuristic_ws(&ws->list);
        return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
        .init_workspace_manager = heuristic_init_workspace_manager,
        .cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
        .get_workspace = heuristic_get_workspace,
        .put_workspace = heuristic_put_workspace,
        .alloc_workspace = alloc_heuristic_ws,
        .free_workspace = free_heuristic_ws,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
        /* The heuristic is represented as compression type 0 */
        &btrfs_heuristic_compress,
        &btrfs_zlib_compress,
        &btrfs_lzo_compress,
        &btrfs_zstd_compress,
};

void btrfs_init_workspace_manager(struct workspace_manager *wsm,
                                  const struct btrfs_compress_op *ops)
{
        struct list_head *workspace;

        wsm->ops = ops;

        INIT_LIST_HEAD(&wsm->idle_ws);
        spin_lock_init(&wsm->ws_lock);
        atomic_set(&wsm->total_ws, 0);
        init_waitqueue_head(&wsm->ws_wait);

        /*
         * Preallocate one workspace for each compression type so we can
         * guarantee forward progress in the worst case
         */
        workspace = wsm->ops->alloc_workspace(0);
        if (IS_ERR(workspace)) {
                pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
        } else {
                atomic_set(&wsm->total_ws, 1);
                wsm->free_ws = 1;
                list_add(workspace, &wsm->idle_ws);
        }
}

void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
{
        struct list_head *ws;

        while (!list_empty(&wsman->idle_ws)) {
                ws = wsman->idle_ws.next;
                list_del(ws);
                wsman->ops->free_workspace(ws);
                atomic_dec(&wsman->total_ws);
        }
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
                                      unsigned int level)
{
        struct list_head *workspace;
        int cpus = num_online_cpus();
        unsigned nofs_flag;
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        idle_ws = &wsm->idle_ws;
        ws_lock = &wsm->ws_lock;
        total_ws = &wsm->total_ws;
        ws_wait = &wsm->ws_wait;
        free_ws = &wsm->free_ws;

again:
        spin_lock(ws_lock);
        if (!list_empty(idle_ws)) {
                workspace = idle_ws->next;
                list_del(workspace);
                (*free_ws)--;
                spin_unlock(ws_lock);
                return workspace;
        }
        if (atomic_read(total_ws) > cpus) {
                DEFINE_WAIT(wait);

                spin_unlock(ws_lock);
                prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
                if (atomic_read(total_ws) > cpus && !*free_ws)
                        schedule();
                finish_wait(ws_wait, &wait);
                goto again;
        }
        atomic_inc(total_ws);
        spin_unlock(ws_lock);

        /*
         * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
         * to turn it off here because we might get called from the restricted
         * context of btrfs_compress_bio/btrfs_compress_pages
         */
        nofs_flag = memalloc_nofs_save();
        workspace = wsm->ops->alloc_workspace(level);
        memalloc_nofs_restore(nofs_flag);

        if (IS_ERR(workspace)) {
                atomic_dec(total_ws);
                wake_up(ws_wait);

                /*
                 * Do not return the error but go back to waiting. There's a
                 * workspace preallocated for each type and the compression
                 * time is bounded so we get to a workspace eventually. This
                 * makes our caller's life easier.
                 *
                 * To prevent silent and low-probability deadlocks (when the
                 * initial preallocation fails), check if there are any
                 * workspaces at all.
                 */
                if (atomic_read(total_ws) == 0) {
                        static DEFINE_RATELIMIT_STATE(_rs,
                                        /* once per minute */ 60 * HZ,
                                        /* no burst */ 1);

                        if (__ratelimit(&_rs)) {
                                pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
                        }
                }
                goto again;
        }
        return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
        return btrfs_compress_op[type]->get_workspace(level);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
{
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        idle_ws = &wsm->idle_ws;
        ws_lock = &wsm->ws_lock;
        total_ws = &wsm->total_ws;
        ws_wait = &wsm->ws_wait;
        free_ws = &wsm->free_ws;

        spin_lock(ws_lock);
        if (*free_ws <= num_online_cpus()) {
                list_add(ws, idle_ws);
                (*free_ws)++;
                spin_unlock(ws_lock);
                goto wake;
        }
        spin_unlock(ws_lock);

        wsm->ops->free_workspace(ws);
        atomic_dec(total_ws);
wake:
        cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
        return btrfs_compress_op[type]->put_workspace(ws);
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
                         u64 start, struct page **pages,
                         unsigned long *out_pages,
                         unsigned long *total_in,
                         unsigned long *total_out)
{
        int type = btrfs_compress_type(type_level);
        int level = btrfs_compress_level(type_level);
        struct list_head *workspace;
        int ret;

        level = btrfs_compress_set_level(type, level);
        workspace = get_workspace(type, level);
        ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
                                                      start, pages,
                                                      out_pages,
                                                      total_in, total_out);
        put_workspace(type, workspace);
        return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous. They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
        struct list_head *workspace;
        int ret;
        int type = cb->compress_type;

        workspace = get_workspace(type, 0);
        ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
        put_workspace(type, workspace);

        return ret;
}

/*
 * a less complex decompression routine. Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the uncompressed data we're
 * interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
                     unsigned long start_byte, size_t srclen, size_t destlen)
{
        struct list_head *workspace;
        int ret;

        workspace = get_workspace(type, 0);
        ret = btrfs_compress_op[type]->decompress(workspace, data_in,
                                                  dest_page, start_byte,
                                                  srclen, destlen);
        put_workspace(type, workspace);

        return ret;
}

void __init btrfs_init_compress(void)
{
        int i;

        for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
                btrfs_compress_op[i]->init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
        int i;

        for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
                btrfs_compress_op[i]->cleanup_workspace_manager();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our working buffer within
 * the uncompressed data.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
                              unsigned long total_out, u64 disk_start,
                              struct bio *bio)
{
        unsigned long buf_offset;
        unsigned long current_buf_start;
        unsigned long start_byte;
        unsigned long prev_start_byte;
        unsigned long working_bytes = total_out - buf_start;
        unsigned long bytes;
        char *kaddr;
        struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

        /*
         * start byte is the first byte of the page we're currently
         * copying into relative to the start of the compressed data.
         */
        start_byte = page_offset(bvec.bv_page) - disk_start;

        /* we haven't yet hit data corresponding to this page */
        if (total_out <= start_byte)
                return 1;

        /*
         * the start of the data we care about is offset into
         * the middle of our working buffer
         */
        if (total_out > start_byte && buf_start < start_byte) {
                buf_offset = start_byte - buf_start;
                working_bytes -= buf_offset;
        } else {
                buf_offset = 0;
        }
        current_buf_start = buf_start;

        /* copy bytes from the working buffer into the pages */
        while (working_bytes > 0) {
                bytes = min_t(unsigned long, bvec.bv_len,
                              PAGE_SIZE - buf_offset);
                bytes = min(bytes, working_bytes);

                kaddr = kmap_atomic(bvec.bv_page);
                memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
                kunmap_atomic(kaddr);
                flush_dcache_page(bvec.bv_page);

                buf_offset += bytes;
                working_bytes -= bytes;
                current_buf_start += bytes;

                /* check if we need to pick another page */
                bio_advance(bio, bytes);
                if (!bio->bi_iter.bi_size)
                        return 0;
                bvec = bio_iter_iovec(bio, bio->bi_iter);
                prev_start_byte = start_byte;
                start_byte = page_offset(bvec.bv_page) - disk_start;

                /*
                 * We need to make sure we're only adjusting
                 * our offset into compression working buffer when
                 * we're switching pages. Otherwise we can incorrectly
                 * keep copying when we were actually done.
                 */
                if (start_byte != prev_start_byte) {
                        /*
                         * make sure our new page is covered by this
                         * working buffer
                         */
                        if (total_out <= start_byte)
                                return 1;

                        /*
                         * the next page in the biovec might not be adjacent
                         * to the last page, but it might still be found
                         * inside this working buffer. bump our offset pointer
                         */
                        if (total_out > start_byte &&
                            current_buf_start < start_byte) {
                                buf_offset = start_byte - buf_start;
                                working_bytes = total_out - start_byte;
                                current_buf_start = buf_start + buf_offset;
                        }
                }
        }

        return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *                          and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE (65)
#define ENTROPY_LVL_HIGH (80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64 -> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
        return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
        const u32 entropy_max = 8 * ilog2_w(2);
        u32 entropy_sum = 0;
        u32 p, p_base, sz_base;
        u32 i;

        sz_base = ilog2_w(ws->sample_size);
        for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
                p = ws->bucket[i].count;
                p_base = ilog2_w(p);
                entropy_sum += p * (sz_base - p_base);
        }

        entropy_sum /= ws->sample_size;
        return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE 4U
#define COUNTERS_SIZE (1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
        u8 low4bits;

        num >>= shift;
        /* Reverse order */
        low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
        return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
                       int num)
{
        u64 max_num;
        u64 buf_num;
        u32 counters[COUNTERS_SIZE];
        u32 new_addr;
        u32 addr;
        int bitlen;
        int shift;
        int i;

        /*
         * Try to avoid useless loop iterations for small numbers stored in
         * big counters. Example: 48 33 4 ... in 64bit array
         */
        max_num = array[0].count;
        for (i = 1; i < num; i++) {
                buf_num = array[i].count;
                if (buf_num > max_num)
                        max_num = buf_num;
        }

        buf_num = ilog2(max_num);
        bitlen = ALIGN(buf_num, RADIX_BASE * 2);

        shift = 0;
        while (shift < bitlen) {
                memset(counters, 0, sizeof(counters));

                for (i = 0; i < num; i++) {
                        buf_num = array[i].count;
                        addr = get4bits(buf_num, shift);
                        counters[addr]++;
                }

                for (i = 1; i < COUNTERS_SIZE; i++)
                        counters[i] += counters[i - 1];

                for (i = num - 1; i >= 0; i--) {
                        buf_num = array[i].count;
                        addr = get4bits(buf_num, shift);
                        counters[addr]--;
                        new_addr = counters[addr];
                        array_buf[new_addr] = array[i];
                }

                shift += RADIX_BASE;

                /*
                 * Normal radix expects to move data from a temporary array, to
                 * the main one. But that requires some CPU time. Avoid that
                 * by doing another sort iteration to original array instead of
                 * memcpy()
                 */
                memset(counters, 0, sizeof(counters));

                for (i = 0; i < num; i++) {
                        buf_num = array_buf[i].count;
                        addr = get4bits(buf_num, shift);
                        counters[addr]++;
                }

                for (i = 1; i < COUNTERS_SIZE; i++)
                        counters[i] += counters[i - 1];

                for (i = num - 1; i >= 0; i--) {
                        buf_num = array_buf[i].count;
                        addr = get4bits(buf_num, shift);
                        counters[addr]--;
                        new_addr = counters[addr];
                        array[new_addr] = array_buf[i];
                }

                shift += RADIX_BASE;
        }
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Another possibility is a normal (Gaussian) distribution, where the data
 * could be potentially compressible, but we have to take a few more steps to
 * decide how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW (64)
#define BYTE_CORE_SET_HIGH (200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
        u32 i;
        u32 coreset_sum = 0;
        const u32 core_set_threshold = ws->sample_size * 90 / 100;
        struct bucket_item *bucket = ws->bucket;

        /* Sort in reverse order */
        radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

        for (i = 0; i < BYTE_CORE_SET_LOW; i++)
                coreset_sum += bucket[i].count;

        if (coreset_sum > core_set_threshold)
                return i;

        for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
                coreset_sum += bucket[i].count;
                if (coreset_sum > core_set_threshold)
                        break;
        }

        return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD (64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
        u32 i;
        u32 byte_set_size = 0;

        for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
                if (ws->bucket[i].count > 0)
                        byte_set_size++;
        }

        /*
         * Continue collecting count of byte values in buckets. If the byte
         * set size is bigger than the threshold, it's pointless to continue,
         * the detection technique would fail for this type of data.
         */
        for (; i < BUCKET_SIZE; i++) {
                if (ws->bucket[i].count > 0) {
                        byte_set_size++;
                        if (byte_set_size > BYTE_SET_THRESHOLD)
                                return byte_set_size;
                }
        }

        return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
        const u32 half_of_sample = ws->sample_size / 2;
        const u8 *data = ws->sample;

        return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
                                     struct heuristic_ws *ws)
{
        struct page *page;
        u64 index, index_end;
        u32 i, curr_sample_pos;
        u8 *in_data;

        /*
         * Compression handles the input data by chunks of 128KiB
         * (defined by BTRFS_MAX_UNCOMPRESSED)
         *
         * We do the same for the heuristic and loop over the whole range.
         *
         * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
         * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
         */
        if (end - start > BTRFS_MAX_UNCOMPRESSED)
                end = start + BTRFS_MAX_UNCOMPRESSED;

        index = start >> PAGE_SHIFT;
        index_end = end >> PAGE_SHIFT;

        /* Don't miss unaligned end */
        if (!IS_ALIGNED(end, PAGE_SIZE))
                index_end++;

        curr_sample_pos = 0;
        while (index < index_end) {
                page = find_get_page(inode->i_mapping, index);
                in_data = kmap(page);
                /* Handle case where the start is not aligned to PAGE_SIZE */
                i = start % PAGE_SIZE;
                while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
                        /* Don't sample any garbage from the last page */
                        if (start > end - SAMPLING_READ_SIZE)
                                break;
                        memcpy(&ws->sample[curr_sample_pos], &in_data[i],
                               SAMPLING_READ_SIZE);
                        i += SAMPLING_INTERVAL;
                        start += SAMPLING_INTERVAL;
                        curr_sample_pos += SAMPLING_READ_SIZE;
                }
                kunmap(page);
                put_page(page);

                index++;
        }

        ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
        struct list_head *ws_list = get_workspace(0, 0);
        struct heuristic_ws *ws;
        u32 i;
        u8 byte;
        int ret = 0;

        ws = list_entry(ws_list, struct heuristic_ws, list);

        heuristic_collect_sample(inode, start, end, ws);

        if (sample_repeated_patterns(ws)) {
                ret = 1;
                goto out;
        }

        memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

        for (i = 0; i < ws->sample_size; i++) {
                byte = ws->sample[i];
                ws->bucket[byte].count++;
        }

        i = byte_set_size(ws);
        if (i < BYTE_SET_THRESHOLD) {
                ret = 2;
                goto out;
        }

        i = byte_core_set_size(ws);
        if (i <= BYTE_CORE_SET_LOW) {
                ret = 3;
                goto out;
        }

        if (i >= BYTE_CORE_SET_HIGH) {
                ret = 0;
                goto out;
        }

        i = shannon_entropy(ws);
        if (i <= ENTROPY_LVL_ACEPTABLE) {
                ret = 4;
                goto out;
        }

        /*
         * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
         * needed to give green light to compression.
         *
         * For now just assume that compression at that level is not worth the
         * resources because:
         *
         * 1. it is possible to defrag the data later
         *
         * 2. the data would turn out to be hardly compressible, eg. 150 byte
         * values, every bucket has counter at level ~54. The heuristic would
         * be confused. This can happen when data have some internal repeated
         * patterns like "abbacbbc...". This can be detected by analyzing
         * pairs of bytes, which is too costly.
         */
        if (i < ENTROPY_LVL_HIGH) {
                ret = 5;
                goto out;
        } else {
                ret = 0;
                goto out;
        }

out:
        put_workspace(0, ws_list);
        return ret;
}

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level. An unrecognized string will set the default level.
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
        unsigned int level = 0;
        int ret;

        if (!type)
                return 0;

        if (str[0] == ':') {
                ret = kstrtouint(str + 1, 10, &level);
                if (ret)
                        level = 0;
        }

        level = btrfs_compress_set_level(type, level);

        return level;
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
unsigned int btrfs_compress_set_level(int type, unsigned level)
{
        const struct btrfs_compress_op *ops = btrfs_compress_op[type];

        if (level == 0)
                level = ops->default_level;
        else
                level = min(level, ops->max_level);

        return level;
}