v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
   8#include <linux/buffer_head.h>
   9#include <linux/file.h>
  10#include <linux/fs.h>
  11#include <linux/pagemap.h>
  12#include <linux/highmem.h>
  13#include <linux/time.h>
  14#include <linux/init.h>
  15#include <linux/string.h>
  16#include <linux/backing-dev.h>
  17#include <linux/mpage.h>
  18#include <linux/swap.h>
  19#include <linux/writeback.h>
  20#include <linux/bit_spinlock.h>
  21#include <linux/slab.h>
  22#include <linux/sched/mm.h>
  23#include <linux/log2.h>
  24#include "ctree.h"
  25#include "disk-io.h"
  26#include "transaction.h"
  27#include "btrfs_inode.h"
  28#include "volumes.h"
  29#include "ordered-data.h"
  30#include "compression.h"
  31#include "extent_io.h"
  32#include "extent_map.h"
  33
  34static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  35
  36const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  37{
  38	switch (type) {
  39	case BTRFS_COMPRESS_ZLIB:
  40	case BTRFS_COMPRESS_LZO:
  41	case BTRFS_COMPRESS_ZSTD:
  42	case BTRFS_COMPRESS_NONE:
  43		return btrfs_compress_types[type];
  44	}
  45
  46	return NULL;
  47}
  48
  49static int btrfs_decompress_bio(struct compressed_bio *cb);
  50
  51static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
  52				      unsigned long disk_size)
  53{
  54	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  55
  56	return sizeof(struct compressed_bio) +
  57		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
  58}
  59
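/*
 * Worked example (editor's illustration, not part of the kernel file):
 * with crc32c checksums csum_size is 4 bytes; for a 4KiB sectorsize and
 * a 128KiB compressed extent this allocates
 * sizeof(struct compressed_bio) + DIV_ROUND_UP(131072, 4096) * 4
 * = sizeof(struct compressed_bio) + 32 * 4 bytes of checksum space.
 */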
  60static int check_compressed_csum(struct btrfs_inode *inode,
  61				 struct compressed_bio *cb,
  62				 u64 disk_start)
  63{
  64	int ret;
  65	struct page *page;
  66	unsigned long i;
  67	char *kaddr;
  68	u32 csum;
  69	u32 *cb_sum = &cb->sums;
  70
  71	if (inode->flags & BTRFS_INODE_NODATASUM)
  72		return 0;
  73
  74	for (i = 0; i < cb->nr_pages; i++) {
  75		page = cb->compressed_pages[i];
  76		csum = ~(u32)0;
  77
  78		kaddr = kmap_atomic(page);
  79		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
  80		btrfs_csum_final(csum, (u8 *)&csum);
  81		kunmap_atomic(kaddr);
  82
  83		if (csum != *cb_sum) {
  84			btrfs_print_data_csum_error(inode, disk_start, csum,
  85					*cb_sum, cb->mirror_num);
  86			ret = -EIO;
  87			goto fail;
  88		}
  89		cb_sum++;
  90
  91	}
  92	ret = 0;
  93fail:
  94	return ret;
  95}
  96
  97/* when we finish reading compressed pages from the disk, we
  98 * decompress them and then run the bio end_io routines on the
  99 * decompressed pages (in the inode address space).
 100 *
 101 * This allows the checksumming and other IO error handling routines
 102 * to work normally
 103 *
 104 * The compressed pages are freed here, and it must be run
 105 * in process context
 106 */
 107static void end_compressed_bio_read(struct bio *bio)
 108{
 109	struct compressed_bio *cb = bio->bi_private;
 110	struct inode *inode;
 111	struct page *page;
 112	unsigned long index;
 113	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
 114	int ret = 0;
 115
 116	if (bio->bi_status)
 117		cb->errors = 1;
 118
 119	/* if there are more bios still pending for this compressed
 120	 * extent, just exit
 121	 */
 122	if (!refcount_dec_and_test(&cb->pending_bios))
 123		goto out;
 124
 125	/*
 126	 * Record the correct mirror_num in cb->orig_bio so that
 127	 * read-repair can work properly.
 128	 */
 129	ASSERT(btrfs_io_bio(cb->orig_bio));
 130	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
 131	cb->mirror_num = mirror;
 132
 133	/*
 134	 * Some IO in this cb have failed, just skip checksum as there
 135	 * is no way it could be correct.
 136	 */
 137	if (cb->errors == 1)
 138		goto csum_failed;
 139
 140	inode = cb->inode;
 141	ret = check_compressed_csum(BTRFS_I(inode), cb,
 142				    (u64)bio->bi_iter.bi_sector << 9);
 143	if (ret)
 144		goto csum_failed;
 145
 146	/* ok, we're the last bio for this extent, lets start
 147	 * the decompression.
 148	 */
 149	ret = btrfs_decompress_bio(cb);
 150
 151csum_failed:
 152	if (ret)
 153		cb->errors = 1;
 154
 155	/* release the compressed pages */
 156	index = 0;
 157	for (index = 0; index < cb->nr_pages; index++) {
 158		page = cb->compressed_pages[index];
 159		page->mapping = NULL;
 160		put_page(page);
 161	}
 162
 163	/* do io completion on the original bio */
 164	if (cb->errors) {
 165		bio_io_error(cb->orig_bio);
 166	} else {
 167		int i;
 168		struct bio_vec *bvec;
 169
 170		/*
 171		 * we have verified the checksum already, set page
 172		 * checked so the end_io handlers know about it
 173		 */
 174		ASSERT(!bio_flagged(bio, BIO_CLONED));
 175		bio_for_each_segment_all(bvec, cb->orig_bio, i)
 176			SetPageChecked(bvec->bv_page);
 177
 178		bio_endio(cb->orig_bio);
 179	}
 180
 181	/* finally free the cb struct */
 182	kfree(cb->compressed_pages);
 183	kfree(cb);
 184out:
 185	bio_put(bio);
 186}
 187
 188/*
 189 * Clear the writeback bits on all of the file
 190 * pages for a compressed write
 191 */
 192static noinline void end_compressed_writeback(struct inode *inode,
 193					      const struct compressed_bio *cb)
 194{
 195	unsigned long index = cb->start >> PAGE_SHIFT;
 196	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 197	struct page *pages[16];
 198	unsigned long nr_pages = end_index - index + 1;
 199	int i;
 200	int ret;
 201
 202	if (cb->errors)
 203		mapping_set_error(inode->i_mapping, -EIO);
 204
 205	while (nr_pages > 0) {
 206		ret = find_get_pages_contig(inode->i_mapping, index,
 207				     min_t(unsigned long,
 208				     nr_pages, ARRAY_SIZE(pages)), pages);
 209		if (ret == 0) {
 210			nr_pages -= 1;
 211			index += 1;
 212			continue;
 213		}
 214		for (i = 0; i < ret; i++) {
 215			if (cb->errors)
 216				SetPageError(pages[i]);
 217			end_page_writeback(pages[i]);
 218			put_page(pages[i]);
 219		}
 220		nr_pages -= ret;
 221		index += ret;
 222	}
 223	/* the inode may be gone now */
 224}
 225
 226/*
 227 * do the cleanup once all the compressed pages hit the disk.
 228 * This will clear writeback on the file pages and free the compressed
 229 * pages.
 230 *
 231 * This also calls the writeback end hooks for the file pages so that
 232 * metadata and checksums can be updated in the file.
 233 */
 234static void end_compressed_bio_write(struct bio *bio)
 235{
 236	struct extent_io_tree *tree;
 237	struct compressed_bio *cb = bio->bi_private;
 238	struct inode *inode;
 239	struct page *page;
 240	unsigned long index;
 241
 242	if (bio->bi_status)
 243		cb->errors = 1;
 244
 245	/* if there are more bios still pending for this compressed
 246	 * extent, just exit
 247	 */
 248	if (!refcount_dec_and_test(&cb->pending_bios))
 249		goto out;
 250
 251	/* ok, we're the last bio for this extent, step one is to
 252	 * call back into the FS and do all the end_io operations
 253	 */
 254	inode = cb->inode;
 255	tree = &BTRFS_I(inode)->io_tree;
 256	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 257	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
 258					 cb->start,
 259					 cb->start + cb->len - 1,
 260					 NULL,
 261					 bio->bi_status ?
 262					 BLK_STS_OK : BLK_STS_NOTSUPP);
 263	cb->compressed_pages[0]->mapping = NULL;
 264
 265	end_compressed_writeback(inode, cb);
 266	/* note, our inode could be gone now */
 267
 268	/*
 269	 * release the compressed pages, these came from alloc_page and
 270	 * are not attached to the inode at all
 271	 */
 272	index = 0;
 273	for (index = 0; index < cb->nr_pages; index++) {
 274		page = cb->compressed_pages[index];
 275		page->mapping = NULL;
 276		put_page(page);
 277	}
 278
 279	/* finally free the cb struct */
 280	kfree(cb->compressed_pages);
 281	kfree(cb);
 282out:
 283	bio_put(bio);
 284}
 285
 286/*
 287 * worker function to build and submit bios for previously compressed pages.
 288 * The corresponding pages in the inode should be marked for writeback
 289 * and the compressed pages should have a reference on them for dropping
 290 * when the IO is complete.
 291 *
 292 * This also checksums the file bytes and gets things ready for
 293 * the end io hooks.
 294 */
 295blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 296				 unsigned long len, u64 disk_start,
 297				 unsigned long compressed_len,
 298				 struct page **compressed_pages,
 299				 unsigned long nr_pages,
 300				 unsigned int write_flags)
 301{
 302	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 303	struct bio *bio = NULL;
 304	struct compressed_bio *cb;
 305	unsigned long bytes_left;
 306	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 307	int pg_index = 0;
 308	struct page *page;
 309	u64 first_byte = disk_start;
 310	struct block_device *bdev;
 311	blk_status_t ret;
 312	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 313
 314	WARN_ON(start & ((u64)PAGE_SIZE - 1));
 315	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 316	if (!cb)
 317		return BLK_STS_RESOURCE;
 318	refcount_set(&cb->pending_bios, 0);
 319	cb->errors = 0;
 320	cb->inode = inode;
 321	cb->start = start;
 322	cb->len = len;
 323	cb->mirror_num = 0;
 324	cb->compressed_pages = compressed_pages;
 325	cb->compressed_len = compressed_len;
 326	cb->orig_bio = NULL;
 327	cb->nr_pages = nr_pages;
 328
 329	bdev = fs_info->fs_devices->latest_bdev;
 330
 331	bio = btrfs_bio_alloc(bdev, first_byte);
 332	bio->bi_opf = REQ_OP_WRITE | write_flags;
 333	bio->bi_private = cb;
 334	bio->bi_end_io = end_compressed_bio_write;
 335	refcount_set(&cb->pending_bios, 1);
 336
 337	/* create and submit bios for the compressed pages */
 338	bytes_left = compressed_len;
 339	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 340		int submit = 0;
 341
 342		page = compressed_pages[pg_index];
 343		page->mapping = inode->i_mapping;
 344		if (bio->bi_iter.bi_size)
 345			submit = io_tree->ops->merge_bio_hook(page, 0,
 346							   PAGE_SIZE,
 347							   bio, 0);
 348
 349		page->mapping = NULL;
 350		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
 351		    PAGE_SIZE) {
 352			/*
 353			 * inc the count before we submit the bio so
 354			 * we know the end IO handler won't happen before
 355			 * we inc the count.  Otherwise, the cb might get
 356			 * freed before we're done setting it up
 357			 */
 358			refcount_inc(&cb->pending_bios);
 359			ret = btrfs_bio_wq_end_io(fs_info, bio,
 360						  BTRFS_WQ_ENDIO_DATA);
 361			BUG_ON(ret); /* -ENOMEM */
 362
 363			if (!skip_sum) {
 364				ret = btrfs_csum_one_bio(inode, bio, start, 1);
 365				BUG_ON(ret); /* -ENOMEM */
 366			}
 367
 368			ret = btrfs_map_bio(fs_info, bio, 0, 1);
 369			if (ret) {
 370				bio->bi_status = ret;
 371				bio_endio(bio);
 372			}
 373
 374			bio = btrfs_bio_alloc(bdev, first_byte);
 375			bio->bi_opf = REQ_OP_WRITE | write_flags;
 376			bio->bi_private = cb;
 377			bio->bi_end_io = end_compressed_bio_write;
 378			bio_add_page(bio, page, PAGE_SIZE, 0);
 379		}
 380		if (bytes_left < PAGE_SIZE) {
 381			btrfs_info(fs_info,
 382					"bytes left %lu compress len %lu nr %lu",
 383			       bytes_left, cb->compressed_len, cb->nr_pages);
 384		}
 385		bytes_left -= PAGE_SIZE;
 386		first_byte += PAGE_SIZE;
 387		cond_resched();
 388	}
 389
 390	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 391	BUG_ON(ret); /* -ENOMEM */
 392
 393	if (!skip_sum) {
 394		ret = btrfs_csum_one_bio(inode, bio, start, 1);
 395		BUG_ON(ret); /* -ENOMEM */
 396	}
 397
 398	ret = btrfs_map_bio(fs_info, bio, 0, 1);
 399	if (ret) {
 400		bio->bi_status = ret;
 401		bio_endio(bio);
 402	}
 403
 404	return 0;
 405}
 406
 407static u64 bio_end_offset(struct bio *bio)
 408{
 409	struct bio_vec *last = bio_last_bvec_all(bio);
 410
 411	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
 412}
 413
 414static noinline int add_ra_bio_pages(struct inode *inode,
 415				     u64 compressed_end,
 416				     struct compressed_bio *cb)
 417{
 418	unsigned long end_index;
 419	unsigned long pg_index;
 420	u64 last_offset;
 421	u64 isize = i_size_read(inode);
 422	int ret;
 423	struct page *page;
 424	unsigned long nr_pages = 0;
 425	struct extent_map *em;
 426	struct address_space *mapping = inode->i_mapping;
 427	struct extent_map_tree *em_tree;
 428	struct extent_io_tree *tree;
 429	u64 end;
 430	int misses = 0;
 431
 432	last_offset = bio_end_offset(cb->orig_bio);
 433	em_tree = &BTRFS_I(inode)->extent_tree;
 434	tree = &BTRFS_I(inode)->io_tree;
 435
 436	if (isize == 0)
 437		return 0;
 438
 439	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 440
 441	while (last_offset < compressed_end) {
 442		pg_index = last_offset >> PAGE_SHIFT;
 443
 444		if (pg_index > end_index)
 445			break;
 446
 447		rcu_read_lock();
 448		page = radix_tree_lookup(&mapping->i_pages, pg_index);
 449		rcu_read_unlock();
 450		if (page && !radix_tree_exceptional_entry(page)) {
 451			misses++;
 452			if (misses > 4)
 453				break;
 454			goto next;
 455		}
 456
 457		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 458								 ~__GFP_FS));
 459		if (!page)
 460			break;
 461
 462		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 463			put_page(page);
 464			goto next;
 465		}
 466
 467		end = last_offset + PAGE_SIZE - 1;
 468		/*
 469		 * at this point, we have a locked page in the page cache
 470		 * for these bytes in the file.  But, we have to make
 471		 * sure they map to this compressed extent on disk.
 472		 */
 473		set_page_extent_mapped(page);
 474		lock_extent(tree, last_offset, end);
 475		read_lock(&em_tree->lock);
 476		em = lookup_extent_mapping(em_tree, last_offset,
 477					   PAGE_SIZE);
 478		read_unlock(&em_tree->lock);
 479
 480		if (!em || last_offset < em->start ||
 481		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 482		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 483			free_extent_map(em);
 484			unlock_extent(tree, last_offset, end);
 485			unlock_page(page);
 486			put_page(page);
 487			break;
 488		}
 489		free_extent_map(em);
 490
 491		if (page->index == end_index) {
 492			char *userpage;
 493			size_t zero_offset = isize & (PAGE_SIZE - 1);
 494
 495			if (zero_offset) {
 496				int zeros;
 497				zeros = PAGE_SIZE - zero_offset;
 498				userpage = kmap_atomic(page);
 499				memset(userpage + zero_offset, 0, zeros);
 500				flush_dcache_page(page);
 501				kunmap_atomic(userpage);
 502			}
 503		}
 504
 505		ret = bio_add_page(cb->orig_bio, page,
 506				   PAGE_SIZE, 0);
 507
 508		if (ret == PAGE_SIZE) {
 509			nr_pages++;
 510			put_page(page);
 511		} else {
 512			unlock_extent(tree, last_offset, end);
 513			unlock_page(page);
 514			put_page(page);
 515			break;
 516		}
 517next:
 518		last_offset += PAGE_SIZE;
 519	}
 520	return 0;
 521}
 522
 523/*
 524 * for a compressed read, the bio we get passed has all the inode pages
 525 * in it.  We don't actually do IO on those pages but allocate new ones
 526 * to hold the compressed pages on disk.
 527 *
 528 * bio->bi_iter.bi_sector points to the compressed extent on disk
 529 * bio->bi_io_vec points to all of the inode pages
 530 *
 531 * After the compressed pages are read, we copy the bytes into the
 532 * bio we were passed and then call the bio end_io calls
 533 */
 534blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 535				 int mirror_num, unsigned long bio_flags)
 536{
 537	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 538	struct extent_io_tree *tree;
 539	struct extent_map_tree *em_tree;
 540	struct compressed_bio *cb;
 541	unsigned long compressed_len;
 542	unsigned long nr_pages;
 543	unsigned long pg_index;
 544	struct page *page;
 545	struct block_device *bdev;
 546	struct bio *comp_bio;
 547	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
 548	u64 em_len;
 549	u64 em_start;
 550	struct extent_map *em;
 551	blk_status_t ret = BLK_STS_RESOURCE;
 552	int faili = 0;
 553	u32 *sums;
 554
 555	tree = &BTRFS_I(inode)->io_tree;
 556	em_tree = &BTRFS_I(inode)->extent_tree;
 557
 558	/* we need the actual starting offset of this extent in the file */
 559	read_lock(&em_tree->lock);
 560	em = lookup_extent_mapping(em_tree,
 561				   page_offset(bio_first_page_all(bio)),
 562				   PAGE_SIZE);
 563	read_unlock(&em_tree->lock);
 564	if (!em)
 565		return BLK_STS_IOERR;
 566
 567	compressed_len = em->block_len;
 568	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 569	if (!cb)
 570		goto out;
 571
 572	refcount_set(&cb->pending_bios, 0);
 573	cb->errors = 0;
 574	cb->inode = inode;
 575	cb->mirror_num = mirror_num;
 576	sums = &cb->sums;
 577
 578	cb->start = em->orig_start;
 579	em_len = em->len;
 580	em_start = em->start;
 581
 582	free_extent_map(em);
 583	em = NULL;
 584
 585	cb->len = bio->bi_iter.bi_size;
 586	cb->compressed_len = compressed_len;
 587	cb->compress_type = extent_compress_type(bio_flags);
 588	cb->orig_bio = bio;
 589
 590	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 591	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 592				       GFP_NOFS);
 593	if (!cb->compressed_pages)
 594		goto fail1;
 595
 596	bdev = fs_info->fs_devices->latest_bdev;
 597
 598	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 599		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 600							      __GFP_HIGHMEM);
 601		if (!cb->compressed_pages[pg_index]) {
 602			faili = pg_index - 1;
 603			ret = BLK_STS_RESOURCE;
 604			goto fail2;
 605		}
 606	}
 607	faili = nr_pages - 1;
 608	cb->nr_pages = nr_pages;
 609
 610	add_ra_bio_pages(inode, em_start + em_len, cb);
 611
  612	/* include any pages we added in add_ra_bio_pages */
 613	cb->len = bio->bi_iter.bi_size;
 614
 615	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 616	bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
 617	comp_bio->bi_private = cb;
 618	comp_bio->bi_end_io = end_compressed_bio_read;
 619	refcount_set(&cb->pending_bios, 1);
 620
 621	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 622		int submit = 0;
 623
 624		page = cb->compressed_pages[pg_index];
 625		page->mapping = inode->i_mapping;
 626		page->index = em_start >> PAGE_SHIFT;
 627
 628		if (comp_bio->bi_iter.bi_size)
 629			submit = tree->ops->merge_bio_hook(page, 0,
 630							PAGE_SIZE,
 631							comp_bio, 0);
 632
 633		page->mapping = NULL;
 634		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
 635		    PAGE_SIZE) {
 636			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
 637						  BTRFS_WQ_ENDIO_DATA);
 638			BUG_ON(ret); /* -ENOMEM */
 639
 640			/*
 641			 * inc the count before we submit the bio so
 642			 * we know the end IO handler won't happen before
 643			 * we inc the count.  Otherwise, the cb might get
 644			 * freed before we're done setting it up
 645			 */
 646			refcount_inc(&cb->pending_bios);
 647
 648			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 649				ret = btrfs_lookup_bio_sums(inode, comp_bio,
 650							    sums);
 651				BUG_ON(ret); /* -ENOMEM */
 652			}
 653			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 654					     fs_info->sectorsize);
 655
 656			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 657			if (ret) {
 658				comp_bio->bi_status = ret;
 659				bio_endio(comp_bio);
 660			}
 661
 662			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 663			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
 664			comp_bio->bi_private = cb;
 665			comp_bio->bi_end_io = end_compressed_bio_read;
 666
 667			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
 668		}
 669		cur_disk_byte += PAGE_SIZE;
 670	}
 671
 672	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 673	BUG_ON(ret); /* -ENOMEM */
 674
 675	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 676		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 677		BUG_ON(ret); /* -ENOMEM */
 678	}
 679
 680	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 681	if (ret) {
 682		comp_bio->bi_status = ret;
 683		bio_endio(comp_bio);
 684	}
 685
 686	return 0;
 687
 688fail2:
 689	while (faili >= 0) {
 690		__free_page(cb->compressed_pages[faili]);
 691		faili--;
 692	}
 693
 694	kfree(cb->compressed_pages);
 695fail1:
 696	kfree(cb);
 697out:
 698	free_extent_map(em);
 699	return ret;
 700}
 701
 702/*
 703 * Heuristic uses systematic sampling to collect data from the input data
 704 * range, the logic can be tuned by the following constants:
 705 *
 706 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 707 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 708 */
 709#define SAMPLING_READ_SIZE	(16)
 710#define SAMPLING_INTERVAL	(256)
 711
 712/*
 713 * For statistical analysis of the input data we consider bytes that form a
 714 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 715 * many times the object appeared in the sample.
 716 */
 717#define BUCKET_SIZE		(256)
 718
 719/*
 720 * The size of the sample is based on a statistical sampling rule of thumb.
 721 * The common way is to perform sampling tests as long as the number of
 722 * elements in each cell is at least 5.
 723 *
 724 * Instead of 5, we choose 32 to obtain more accurate results.
 725 * If the data contain the maximum number of symbols, which is 256, we obtain a
 726 * sample size bound by 8192.
 727 *
 728 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 729 * from up to 512 locations.
 730 */
 731#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 732				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
 733
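/*
 * Worked arithmetic (editor's illustration, not part of the kernel file):
 * BTRFS_MAX_UNCOMPRESSED is 128KiB, so
 * MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes,
 * i.e. at most 512 samples of 16 consecutive bytes each, matching the
 * "8KB ... from up to 512 locations" bound described above.
 */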
 734struct bucket_item {
 735	u32 count;
 736};
 737
 738struct heuristic_ws {
 739	/* Partial copy of input data */
 740	u8 *sample;
 741	u32 sample_size;
 742	/* Buckets store counters for each byte value */
 743	struct bucket_item *bucket;
 744	/* Sorting buffer */
 745	struct bucket_item *bucket_b;
 746	struct list_head list;
 747};
 748
 749static void free_heuristic_ws(struct list_head *ws)
 750{
 751	struct heuristic_ws *workspace;
 752
 753	workspace = list_entry(ws, struct heuristic_ws, list);
 754
 755	kvfree(workspace->sample);
 756	kfree(workspace->bucket);
 757	kfree(workspace->bucket_b);
 758	kfree(workspace);
 759}
 760
 761static struct list_head *alloc_heuristic_ws(void)
 762{
 763	struct heuristic_ws *ws;
 764
 765	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 766	if (!ws)
 767		return ERR_PTR(-ENOMEM);
 768
 769	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 770	if (!ws->sample)
 771		goto fail;
 772
 773	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 774	if (!ws->bucket)
 775		goto fail;
 776
 777	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 778	if (!ws->bucket_b)
 779		goto fail;
 780
 781	INIT_LIST_HEAD(&ws->list);
 782	return &ws->list;
 783fail:
 784	free_heuristic_ws(&ws->list);
 785	return ERR_PTR(-ENOMEM);
 786}
 787
 788struct workspaces_list {
 789	struct list_head idle_ws;
 790	spinlock_t ws_lock;
 791	/* Number of free workspaces */
 792	int free_ws;
 793	/* Total number of allocated workspaces */
 794	atomic_t total_ws;
 795	/* Waiters for a free workspace */
 796	wait_queue_head_t ws_wait;
 797};
 798
 799static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
 800
 801static struct workspaces_list btrfs_heuristic_ws;
 802
 803static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 804	&btrfs_zlib_compress,
 805	&btrfs_lzo_compress,
 806	&btrfs_zstd_compress,
 807};
 808
 809void __init btrfs_init_compress(void)
 810{
 811	struct list_head *workspace;
 812	int i;
 813
 814	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
 815	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
 816	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
 817	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
 818
 819	workspace = alloc_heuristic_ws();
 820	if (IS_ERR(workspace)) {
 821		pr_warn(
 822	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
 823	} else {
 824		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
 825		btrfs_heuristic_ws.free_ws = 1;
 826		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
 827	}
 828
 829	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
 830		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
 831		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
 832		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
 833		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
 834
 835		/*
 836		 * Preallocate one workspace for each compression type so
 837		 * we can guarantee forward progress in the worst case
 838		 */
 839		workspace = btrfs_compress_op[i]->alloc_workspace();
 840		if (IS_ERR(workspace)) {
 841			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
 842		} else {
 843			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
 844			btrfs_comp_ws[i].free_ws = 1;
 845			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
 846		}
 847	}
 848}
 849
 850/*
 851 * This finds an available workspace or allocates a new one.
 852 * If it's not possible to allocate a new one, waits until there's one.
 853 * Preallocation makes a forward progress guarantees and we do not return
 854 * errors.
 855 */
 856static struct list_head *__find_workspace(int type, bool heuristic)
 857{
 858	struct list_head *workspace;
 859	int cpus = num_online_cpus();
 860	int idx = type - 1;
 861	unsigned nofs_flag;
 862	struct list_head *idle_ws;
 863	spinlock_t *ws_lock;
 864	atomic_t *total_ws;
 865	wait_queue_head_t *ws_wait;
 866	int *free_ws;
 867
 868	if (heuristic) {
 869		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
 870		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
 871		total_ws = &btrfs_heuristic_ws.total_ws;
 872		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
 873		free_ws	 = &btrfs_heuristic_ws.free_ws;
 874	} else {
 875		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
 876		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
 877		total_ws = &btrfs_comp_ws[idx].total_ws;
 878		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
 879		free_ws	 = &btrfs_comp_ws[idx].free_ws;
 880	}
 881
 882again:
 883	spin_lock(ws_lock);
 884	if (!list_empty(idle_ws)) {
 885		workspace = idle_ws->next;
 886		list_del(workspace);
 887		(*free_ws)--;
 888		spin_unlock(ws_lock);
 889		return workspace;
 890
 891	}
 892	if (atomic_read(total_ws) > cpus) {
 893		DEFINE_WAIT(wait);
 894
 895		spin_unlock(ws_lock);
 896		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 897		if (atomic_read(total_ws) > cpus && !*free_ws)
 898			schedule();
 899		finish_wait(ws_wait, &wait);
 900		goto again;
 901	}
 902	atomic_inc(total_ws);
 903	spin_unlock(ws_lock);
 904
 905	/*
 906	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
 907	 * to turn it off here because we might get called from the restricted
 908	 * context of btrfs_compress_bio/btrfs_compress_pages
 909	 */
 910	nofs_flag = memalloc_nofs_save();
 911	if (heuristic)
 912		workspace = alloc_heuristic_ws();
 913	else
 914		workspace = btrfs_compress_op[idx]->alloc_workspace();
 915	memalloc_nofs_restore(nofs_flag);
 916
 917	if (IS_ERR(workspace)) {
 918		atomic_dec(total_ws);
 919		wake_up(ws_wait);
 920
 921		/*
 922		 * Do not return the error but go back to waiting. There's a
 923		 * workspace preallocated for each type and the compression
 924		 * time is bounded so we get to a workspace eventually. This
 925		 * makes our caller's life easier.
 926		 *
 927		 * To prevent silent and low-probability deadlocks (when the
 928		 * initial preallocation fails), check if there are any
 929		 * workspaces at all.
 930		 */
 931		if (atomic_read(total_ws) == 0) {
 932			static DEFINE_RATELIMIT_STATE(_rs,
 933					/* once per minute */ 60 * HZ,
 934					/* no burst */ 1);
 935
 936			if (__ratelimit(&_rs)) {
 937				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 938			}
 939		}
 940		goto again;
 941	}
 942	return workspace;
 943}
 944
 945static struct list_head *find_workspace(int type)
 946{
 947	return __find_workspace(type, false);
 948}
 949
 950/*
 951 * put a workspace struct back on the list or free it if we have enough
 952 * idle ones sitting around
 953 */
 954static void __free_workspace(int type, struct list_head *workspace,
 955			     bool heuristic)
 956{
 957	int idx = type - 1;
 958	struct list_head *idle_ws;
 959	spinlock_t *ws_lock;
 960	atomic_t *total_ws;
 961	wait_queue_head_t *ws_wait;
 962	int *free_ws;
 963
 964	if (heuristic) {
 965		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
 966		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
 967		total_ws = &btrfs_heuristic_ws.total_ws;
 968		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
 969		free_ws	 = &btrfs_heuristic_ws.free_ws;
 970	} else {
 971		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
 972		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
 973		total_ws = &btrfs_comp_ws[idx].total_ws;
 974		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
 975		free_ws	 = &btrfs_comp_ws[idx].free_ws;
 976	}
 977
 978	spin_lock(ws_lock);
 979	if (*free_ws <= num_online_cpus()) {
 980		list_add(workspace, idle_ws);
 981		(*free_ws)++;
 982		spin_unlock(ws_lock);
 983		goto wake;
 984	}
 985	spin_unlock(ws_lock);
 986
 987	if (heuristic)
 988		free_heuristic_ws(workspace);
 989	else
 990		btrfs_compress_op[idx]->free_workspace(workspace);
 991	atomic_dec(total_ws);
 992wake:
 993	/*
 994	 * Make sure counter is updated before we wake up waiters.
 995	 */
 996	smp_mb();
 997	if (waitqueue_active(ws_wait))
 998		wake_up(ws_wait);
 999}
1000
1001static void free_workspace(int type, struct list_head *ws)
1002{
1003	return __free_workspace(type, ws, false);
1004}
1005
1006/*
1007 * cleanup function for module exit
1008 */
1009static void free_workspaces(void)
1010{
1011	struct list_head *workspace;
1012	int i;
1013
1014	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
1015		workspace = btrfs_heuristic_ws.idle_ws.next;
1016		list_del(workspace);
1017		free_heuristic_ws(workspace);
1018		atomic_dec(&btrfs_heuristic_ws.total_ws);
1019	}
1020
1021	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
1022		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
1023			workspace = btrfs_comp_ws[i].idle_ws.next;
1024			list_del(workspace);
1025			btrfs_compress_op[i]->free_workspace(workspace);
1026			atomic_dec(&btrfs_comp_ws[i].total_ws);
1027		}
1028	}
1029}
1030
1031/*
1032 * Given an address space and start and length, compress the bytes into @pages
1033 * that are allocated on demand.
1034 *
1035 * @type_level is encoded algorithm and level, where level 0 means whatever
1036 * default the algorithm chooses and is opaque here;
1037 * - compression algo are 0-3
1038 * - the level are bits 4-7
1039 *
1040 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1041 * and returns number of actually allocated pages
1042 *
1043 * @total_in is used to return the number of bytes actually read.  It
1044 * may be smaller than the input length if we had to exit early because we
1045 * ran out of room in the pages array or because we cross the
1046 * max_out threshold.
1047 *
1048 * @total_out is an in/out parameter, must be set to the input length and will
1049 * be also used to return the total number of compressed bytes
1050 *
1051 * @max_out tells us the max number of bytes that we're allowed to
1052 * stuff into pages
1053 */
1054int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1055			 u64 start, struct page **pages,
1056			 unsigned long *out_pages,
1057			 unsigned long *total_in,
1058			 unsigned long *total_out)
1059{
1060	struct list_head *workspace;
1061	int ret;
1062	int type = type_level & 0xF;
1063
1064	workspace = find_workspace(type);
1065
1066	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
1067	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
1068						      start, pages,
1069						      out_pages,
1070						      total_in, total_out);
1071	free_workspace(type, workspace);
1072	return ret;
1073}
1074
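/*
 * Encoding example (editor's illustration, not part of the kernel file):
 * for zlib (BTRFS_COMPRESS_ZLIB == 1) at level 3 the caller passes
 * type_level = (3 << 4) | 1 = 0x31; "type_level & 0xF" above recovers
 * the algorithm (1), and the per-algorithm set_level() callback reads
 * the level from bits 4-7, with 0 meaning that algorithm's default.
 */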
1075/*
1076 * pages_in is an array of pages with compressed data.
1077 *
1078 * disk_start is the starting logical offset of this array in the file
1079 *
1080 * orig_bio contains the pages from the file that we want to decompress into
1081 *
1082 * srclen is the number of bytes in pages_in
1083 *
1084 * The basic idea is that we have a bio that was created by readpages.
1085 * The pages in the bio are for the uncompressed data, and they may not
1086 * be contiguous.  They all correspond to the range of bytes covered by
1087 * the compressed extent.
1088 */
1089static int btrfs_decompress_bio(struct compressed_bio *cb)
1090{
1091	struct list_head *workspace;
1092	int ret;
1093	int type = cb->compress_type;
1094
1095	workspace = find_workspace(type);
1096	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
1097	free_workspace(type, workspace);
1098
1099	return ret;
1100}
1101
1102/*
1103 * a less complex decompression routine.  Our compressed data fits in a
1104 * single page, and we want to read a single page out of it.
1105 * start_byte tells us the offset into the compressed data we're interested in
1106 */
1107int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1108		     unsigned long start_byte, size_t srclen, size_t destlen)
1109{
1110	struct list_head *workspace;
1111	int ret;
1112
1113	workspace = find_workspace(type);
1114
1115	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
1116						  dest_page, start_byte,
1117						  srclen, destlen);
1118
1119	free_workspace(type, workspace);
1120	return ret;
1121}
1122
1123void __cold btrfs_exit_compress(void)
1124{
1125	free_workspaces();
1126}
1127
1128/*
1129 * Copy uncompressed data from working buffer to pages.
1130 *
 1131 * buf_start is the byte offset (in decompressed bytes) of the start of our working buffer.
 1132 *
 1133 * total_out is the offset one past the last byte currently in the working buffer
1134 */
1135int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1136			      unsigned long total_out, u64 disk_start,
1137			      struct bio *bio)
1138{
1139	unsigned long buf_offset;
1140	unsigned long current_buf_start;
1141	unsigned long start_byte;
1142	unsigned long prev_start_byte;
1143	unsigned long working_bytes = total_out - buf_start;
1144	unsigned long bytes;
1145	char *kaddr;
1146	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1147
1148	/*
1149	 * start byte is the first byte of the page we're currently
1150	 * copying into relative to the start of the compressed data.
1151	 */
1152	start_byte = page_offset(bvec.bv_page) - disk_start;
1153
1154	/* we haven't yet hit data corresponding to this page */
1155	if (total_out <= start_byte)
1156		return 1;
1157
1158	/*
1159	 * the start of the data we care about is offset into
1160	 * the middle of our working buffer
1161	 */
1162	if (total_out > start_byte && buf_start < start_byte) {
1163		buf_offset = start_byte - buf_start;
1164		working_bytes -= buf_offset;
1165	} else {
1166		buf_offset = 0;
1167	}
1168	current_buf_start = buf_start;
1169
1170	/* copy bytes from the working buffer into the pages */
1171	while (working_bytes > 0) {
1172		bytes = min_t(unsigned long, bvec.bv_len,
1173				PAGE_SIZE - buf_offset);
1174		bytes = min(bytes, working_bytes);
1175
1176		kaddr = kmap_atomic(bvec.bv_page);
1177		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1178		kunmap_atomic(kaddr);
1179		flush_dcache_page(bvec.bv_page);
1180
1181		buf_offset += bytes;
1182		working_bytes -= bytes;
1183		current_buf_start += bytes;
1184
1185		/* check if we need to pick another page */
1186		bio_advance(bio, bytes);
1187		if (!bio->bi_iter.bi_size)
1188			return 0;
1189		bvec = bio_iter_iovec(bio, bio->bi_iter);
1190		prev_start_byte = start_byte;
1191		start_byte = page_offset(bvec.bv_page) - disk_start;
1192
1193		/*
1194		 * We need to make sure we're only adjusting
1195		 * our offset into compression working buffer when
1196		 * we're switching pages.  Otherwise we can incorrectly
1197		 * keep copying when we were actually done.
1198		 */
1199		if (start_byte != prev_start_byte) {
1200			/*
1201			 * make sure our new page is covered by this
1202			 * working buffer
1203			 */
1204			if (total_out <= start_byte)
1205				return 1;
1206
1207			/*
1208			 * the next page in the biovec might not be adjacent
1209			 * to the last page, but it might still be found
1210			 * inside this working buffer. bump our offset pointer
1211			 */
1212			if (total_out > start_byte &&
1213			    current_buf_start < start_byte) {
1214				buf_offset = start_byte - buf_start;
1215				working_bytes = total_out - start_byte;
1216				current_buf_start = buf_start + buf_offset;
1217			}
1218		}
1219	}
1220
1221	return 1;
1222}
1223
1224/*
1225 * Shannon Entropy calculation
1226 *
 1227 * Pure byte distribution analysis fails to determine compressibility of data.
1228 * Try calculating entropy to estimate the average minimum number of bits
1229 * needed to encode the sampled data.
1230 *
1231 * For convenience, return the percentage of needed bits, instead of amount of
1232 * bits directly.
1233 *
1234 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1235 *			    and can be compressible with high probability
1236 *
1237 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1238 *
1239 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1240 */
1241#define ENTROPY_LVL_ACEPTABLE		(65)
1242#define ENTROPY_LVL_HIGH		(80)
1243
1244/*
 1245 * For increased precision in the shannon_entropy calculation,
1246 * let's do pow(n, M) to save more digits after comma:
1247 *
1248 * - maximum int bit length is 64
1249 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1250 * - 13 * 4 = 52 < 64		-> M = 4
1251 *
1252 * So use pow(n, 4).
1253 */
1254static inline u32 ilog2_w(u64 n)
1255{
1256	return ilog2(n * n * n * n);
1257}
1258
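/*
 * Precision example (editor's illustration, not part of the kernel file):
 * plain ilog2(1000) = 9 discards the fractional ~0.97 bits, while
 * ilog2_w(1000) = ilog2(1000^4) = 39, close to 4 * 9.97, so the result
 * effectively carries two extra binary digits of the logarithm.  With a
 * full 8KiB sample the largest argument is 8192 and 8192^4 = 2^52 still
 * fits in the 64-bit intermediate, as the comment above notes.
 */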
1259static u32 shannon_entropy(struct heuristic_ws *ws)
1260{
1261	const u32 entropy_max = 8 * ilog2_w(2);
1262	u32 entropy_sum = 0;
1263	u32 p, p_base, sz_base;
1264	u32 i;
1265
1266	sz_base = ilog2_w(ws->sample_size);
1267	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1268		p = ws->bucket[i].count;
1269		p_base = ilog2_w(p);
1270		entropy_sum += p * (sz_base - p_base);
1271	}
1272
1273	entropy_sum /= ws->sample_size;
1274	return entropy_sum * 100 / entropy_max;
1275}
1276
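/*
 * Worked example (editor's illustration, not part of the kernel file):
 * entropy_max = 8 * ilog2_w(2) = 8 * ilog2(16) = 32.  For a uniformly
 * random 8KiB sample every bucket holds about 8192 / 256 = 32 counts, so
 * each byte contributes sz_base - p_base = ilog2_w(8192) - ilog2_w(32)
 * = 52 - 20 = 32, and the function returns 32 * 100 / 32 = 100, i.e.
 * the sample needs close to the full 8 bits per byte to encode.
 */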
1277#define RADIX_BASE		4U
1278#define COUNTERS_SIZE		(1U << RADIX_BASE)
1279
1280static u8 get4bits(u64 num, int shift) {
1281	u8 low4bits;
1282
1283	num >>= shift;
1284	/* Reverse order */
1285	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1286	return low4bits;
1287}
1288
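/*
 * Example (editor's illustration, not part of the kernel file):
 * get4bits(5, 0) = 15 - (5 % 16) = 10.  Because every 4-bit digit is
 * mirrored this way, the radix sort below orders the buckets by
 * descending count, which is the "reverse order" that
 * byte_core_set_size() relies on.
 */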
1289/*
1290 * Use 4 bits as radix base
 1291 * Use 16 u32 counters for calculating new position in buf array
1292 *
1293 * @array     - array that will be sorted
1294 * @array_buf - buffer array to store sorting results
1295 *              must be equal in size to @array
1296 * @num       - array size
1297 */
1298static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1299		       int num)
1300{
1301	u64 max_num;
1302	u64 buf_num;
1303	u32 counters[COUNTERS_SIZE];
1304	u32 new_addr;
1305	u32 addr;
1306	int bitlen;
1307	int shift;
1308	int i;
1309
1310	/*
 1311	 * Try to avoid useless loop iterations for small numbers stored in big
1312	 * counters.  Example: 48 33 4 ... in 64bit array
1313	 */
1314	max_num = array[0].count;
1315	for (i = 1; i < num; i++) {
1316		buf_num = array[i].count;
1317		if (buf_num > max_num)
1318			max_num = buf_num;
1319	}
1320
1321	buf_num = ilog2(max_num);
1322	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1323
1324	shift = 0;
1325	while (shift < bitlen) {
1326		memset(counters, 0, sizeof(counters));
1327
1328		for (i = 0; i < num; i++) {
1329			buf_num = array[i].count;
1330			addr = get4bits(buf_num, shift);
1331			counters[addr]++;
1332		}
1333
1334		for (i = 1; i < COUNTERS_SIZE; i++)
1335			counters[i] += counters[i - 1];
1336
1337		for (i = num - 1; i >= 0; i--) {
1338			buf_num = array[i].count;
1339			addr = get4bits(buf_num, shift);
1340			counters[addr]--;
1341			new_addr = counters[addr];
1342			array_buf[new_addr] = array[i];
1343		}
1344
1345		shift += RADIX_BASE;
1346
1347		/*
1348		 * Normal radix expects to move data from a temporary array, to
1349		 * the main one.  But that requires some CPU time. Avoid that
1350		 * by doing another sort iteration to original array instead of
1351		 * memcpy()
1352		 */
1353		memset(counters, 0, sizeof(counters));
1354
1355		for (i = 0; i < num; i ++) {
1356			buf_num = array_buf[i].count;
1357			addr = get4bits(buf_num, shift);
1358			counters[addr]++;
1359		}
1360
1361		for (i = 1; i < COUNTERS_SIZE; i++)
1362			counters[i] += counters[i - 1];
1363
1364		for (i = num - 1; i >= 0; i--) {
1365			buf_num = array_buf[i].count;
1366			addr = get4bits(buf_num, shift);
1367			counters[addr]--;
1368			new_addr = counters[addr];
1369			array[new_addr] = array_buf[i];
1370		}
1371
1372		shift += RADIX_BASE;
1373	}
1374}
1375
1376/*
1377 * Size of the core byte set - how many bytes cover 90% of the sample
1378 *
1379 * There are several types of structured binary data that use nearly all byte
1380 * values. The distribution can be uniform and counts in all buckets will be
1381 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1382 *
1383 * Other possibility is normal (Gaussian) distribution, where the data could
1384 * be potentially compressible, but we have to take a few more steps to decide
1385 * how much.
1386 *
1387 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
1388 *                       compression algo can easy fix that
1389 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
1390 *                       probability is not compressible
1391 */
1392#define BYTE_CORE_SET_LOW		(64)
1393#define BYTE_CORE_SET_HIGH		(200)
1394
1395static int byte_core_set_size(struct heuristic_ws *ws)
1396{
1397	u32 i;
1398	u32 coreset_sum = 0;
1399	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1400	struct bucket_item *bucket = ws->bucket;
1401
1402	/* Sort in reverse order */
1403	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1404
1405	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1406		coreset_sum += bucket[i].count;
1407
1408	if (coreset_sum > core_set_threshold)
1409		return i;
1410
1411	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1412		coreset_sum += bucket[i].count;
1413		if (coreset_sum > core_set_threshold)
1414			break;
1415	}
1416
1417	return i;
1418}
1419
1420/*
1421 * Count byte values in buckets.
1422 * This heuristic can detect textual data (configs, xml, json, html, etc).
1423 * Because in most text-like data byte set is restricted to limited number of
1424 * possible characters, and that restriction in most cases makes data easy to
1425 * compress.
1426 *
1427 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1428 *	less - compressible
1429 *	more - need additional analysis
1430 */
1431#define BYTE_SET_THRESHOLD		(64)
1432
1433static u32 byte_set_size(const struct heuristic_ws *ws)
1434{
1435	u32 i;
1436	u32 byte_set_size = 0;
1437
1438	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1439		if (ws->bucket[i].count > 0)
1440			byte_set_size++;
1441	}
1442
1443	/*
1444	 * Continue collecting count of byte values in buckets.  If the byte
 1445	 * set size is bigger than the threshold, it's pointless to continue,
1446	 * the detection technique would fail for this type of data.
1447	 */
1448	for (; i < BUCKET_SIZE; i++) {
1449		if (ws->bucket[i].count > 0) {
1450			byte_set_size++;
1451			if (byte_set_size > BYTE_SET_THRESHOLD)
1452				return byte_set_size;
1453		}
1454	}
1455
1456	return byte_set_size;
1457}
1458
1459static bool sample_repeated_patterns(struct heuristic_ws *ws)
1460{
1461	const u32 half_of_sample = ws->sample_size / 2;
1462	const u8 *data = ws->sample;
1463
1464	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1465}
1466
1467static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1468				     struct heuristic_ws *ws)
1469{
1470	struct page *page;
1471	u64 index, index_end;
1472	u32 i, curr_sample_pos;
1473	u8 *in_data;
1474
1475	/*
1476	 * Compression handles the input data by chunks of 128KiB
1477	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1478	 *
1479	 * We do the same for the heuristic and loop over the whole range.
1480	 *
1481	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1482	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1483	 */
1484	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1485		end = start + BTRFS_MAX_UNCOMPRESSED;
1486
1487	index = start >> PAGE_SHIFT;
1488	index_end = end >> PAGE_SHIFT;
1489
1490	/* Don't miss unaligned end */
1491	if (!IS_ALIGNED(end, PAGE_SIZE))
1492		index_end++;
1493
1494	curr_sample_pos = 0;
1495	while (index < index_end) {
1496		page = find_get_page(inode->i_mapping, index);
1497		in_data = kmap(page);
1498		/* Handle case where the start is not aligned to PAGE_SIZE */
1499		i = start % PAGE_SIZE;
1500		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1501			/* Don't sample any garbage from the last page */
1502			if (start > end - SAMPLING_READ_SIZE)
1503				break;
1504			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1505					SAMPLING_READ_SIZE);
1506			i += SAMPLING_INTERVAL;
1507			start += SAMPLING_INTERVAL;
1508			curr_sample_pos += SAMPLING_READ_SIZE;
1509		}
1510		kunmap(page);
1511		put_page(page);
1512
1513		index++;
1514	}
1515
1516	ws->sample_size = curr_sample_pos;
1517}
1518
1519/*
1520 * Compression heuristic.
1521 *
 1522 * For now it's a naive and optimistic 'return true', we'll extend the logic to
1523 * quickly (compared to direct compression) detect data characteristics
1524 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1525 * data.
1526 *
1527 * The following types of analysis can be performed:
1528 * - detect mostly zero data
1529 * - detect data with low "byte set" size (text, etc)
1530 * - detect data with low/high "core byte" set
1531 *
1532 * Return non-zero if the compression should be done, 0 otherwise.
1533 */
1534int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1535{
1536	struct list_head *ws_list = __find_workspace(0, true);
1537	struct heuristic_ws *ws;
1538	u32 i;
1539	u8 byte;
1540	int ret = 0;
1541
1542	ws = list_entry(ws_list, struct heuristic_ws, list);
1543
1544	heuristic_collect_sample(inode, start, end, ws);
1545
1546	if (sample_repeated_patterns(ws)) {
1547		ret = 1;
1548		goto out;
1549	}
1550
1551	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1552
1553	for (i = 0; i < ws->sample_size; i++) {
1554		byte = ws->sample[i];
1555		ws->bucket[byte].count++;
1556	}
1557
1558	i = byte_set_size(ws);
1559	if (i < BYTE_SET_THRESHOLD) {
1560		ret = 2;
1561		goto out;
1562	}
1563
1564	i = byte_core_set_size(ws);
1565	if (i <= BYTE_CORE_SET_LOW) {
1566		ret = 3;
1567		goto out;
1568	}
1569
1570	if (i >= BYTE_CORE_SET_HIGH) {
1571		ret = 0;
1572		goto out;
1573	}
1574
1575	i = shannon_entropy(ws);
1576	if (i <= ENTROPY_LVL_ACEPTABLE) {
1577		ret = 4;
1578		goto out;
1579	}
1580
1581	/*
1582	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1583	 * needed to give green light to compression.
1584	 *
1585	 * For now just assume that compression at that level is not worth the
1586	 * resources because:
1587	 *
1588	 * 1. it is possible to defrag the data later
1589	 *
1590	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1591	 * values, every bucket has counter at level ~54. The heuristic would
1592	 * be confused. This can happen when data have some internal repeated
1593	 * patterns like "abbacbbc...". This can be detected by analyzing
1594	 * pairs of bytes, which is too costly.
1595	 */
1596	if (i < ENTROPY_LVL_HIGH) {
1597		ret = 5;
1598		goto out;
1599	} else {
1600		ret = 0;
1601		goto out;
1602	}
1603
1604out:
1605	__free_workspace(0, ws_list, true);
1606	return ret;
1607}
1608
1609unsigned int btrfs_compress_str2level(const char *str)
1610{
1611	if (strncmp(str, "zlib", 4) != 0)
1612		return 0;
1613
1614	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
1615	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
1616		return str[5] - '0';
1617
1618	return BTRFS_ZLIB_DEFAULT_LEVEL;
1619}
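/*
 * Behaviour sketch (editor's illustration, not part of the kernel file):
 * "zlib:3" returns 3; plain "zlib" or a malformed suffix such as
 * "zlib:0" falls through to BTRFS_ZLIB_DEFAULT_LEVEL; any string not
 * starting with "zlib" (e.g. "lzo", "zstd") returns 0, i.e. the
 * algorithm's own default level.
 */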
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
   8#include <linux/file.h>
   9#include <linux/fs.h>
  10#include <linux/pagemap.h>
  11#include <linux/highmem.h>
  12#include <linux/time.h>
  13#include <linux/init.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/writeback.h>
  17#include <linux/slab.h>
  18#include <linux/sched/mm.h>
  19#include <linux/log2.h>
  20#include <crypto/hash.h>
  21#include "misc.h"
  22#include "ctree.h"
  23#include "disk-io.h"
  24#include "transaction.h"
  25#include "btrfs_inode.h"
  26#include "volumes.h"
  27#include "ordered-data.h"
  28#include "compression.h"
  29#include "extent_io.h"
  30#include "extent_map.h"
  31#include "zoned.h"
  32
  33static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  34
  35const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  36{
  37	switch (type) {
  38	case BTRFS_COMPRESS_ZLIB:
  39	case BTRFS_COMPRESS_LZO:
  40	case BTRFS_COMPRESS_ZSTD:
  41	case BTRFS_COMPRESS_NONE:
  42		return btrfs_compress_types[type];
  43	default:
  44		break;
  45	}
  46
  47	return NULL;
  48}
  49
  50bool btrfs_compress_is_valid_type(const char *str, size_t len)
  51{
  52	int i;
  53
  54	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
  55		size_t comp_len = strlen(btrfs_compress_types[i]);
  56
  57		if (len < comp_len)
  58			continue;
  59
  60		if (!strncmp(btrfs_compress_types[i], str, comp_len))
  61			return true;
  62	}
  63	return false;
  64}
  65
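/*
 * Example (editor's illustration, not part of the kernel file):
 * because only the first strlen("zlib") characters are compared,
 * btrfs_compress_is_valid_type("zlib:9", 6) returns true just like
 * btrfs_compress_is_valid_type("zstd", 4); an unknown name such as
 * "gzip" matches none of the table entries and returns false.
 */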
  66static int compression_compress_pages(int type, struct list_head *ws,
  67               struct address_space *mapping, u64 start, struct page **pages,
  68               unsigned long *out_pages, unsigned long *total_in,
  69               unsigned long *total_out)
  70{
  71	switch (type) {
  72	case BTRFS_COMPRESS_ZLIB:
  73		return zlib_compress_pages(ws, mapping, start, pages,
  74				out_pages, total_in, total_out);
  75	case BTRFS_COMPRESS_LZO:
  76		return lzo_compress_pages(ws, mapping, start, pages,
  77				out_pages, total_in, total_out);
  78	case BTRFS_COMPRESS_ZSTD:
  79		return zstd_compress_pages(ws, mapping, start, pages,
  80				out_pages, total_in, total_out);
  81	case BTRFS_COMPRESS_NONE:
  82	default:
  83		/*
  84		 * This can happen when compression races with remount setting
  85		 * it to 'no compress', while caller doesn't call
  86		 * inode_need_compress() to check if we really need to
  87		 * compress.
  88		 *
  89		 * Not a big deal, just need to inform caller that we
  90		 * haven't allocated any pages yet.
  91		 */
  92		*out_pages = 0;
  93		return -E2BIG;
  94	}
  95}
  96
  97static int compression_decompress_bio(int type, struct list_head *ws,
  98		struct compressed_bio *cb)
  99{
 100	switch (type) {
 101	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
 102	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
 103	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
 104	case BTRFS_COMPRESS_NONE:
 105	default:
 106		/*
 107		 * This can't happen, the type is validated several times
 108		 * before we get here.
 109		 */
 110		BUG();
 111	}
 112}
 113
 114static int compression_decompress(int type, struct list_head *ws,
 115               unsigned char *data_in, struct page *dest_page,
 116               unsigned long start_byte, size_t srclen, size_t destlen)
 117{
 118	switch (type) {
 119	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
 120						start_byte, srclen, destlen);
 121	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
 122						start_byte, srclen, destlen);
 123	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
 124						start_byte, srclen, destlen);
 125	case BTRFS_COMPRESS_NONE:
 126	default:
 127		/*
 128		 * This can't happen, the type is validated several times
 129		 * before we get here.
 130		 */
 131		BUG();
 132	}
 133}
 134
 135static int btrfs_decompress_bio(struct compressed_bio *cb);
 136
 137static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
 138				      unsigned long disk_size)
 139{
 140	return sizeof(struct compressed_bio) +
 141		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
 142}
 143
 144static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 145				 u64 disk_start)
 146{
 147	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 148	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 149	const u32 csum_size = fs_info->csum_size;
 150	const u32 sectorsize = fs_info->sectorsize;
 151	struct page *page;
 152	unsigned int i;
 153	char *kaddr;
 154	u8 csum[BTRFS_CSUM_SIZE];
 155	struct compressed_bio *cb = bio->bi_private;
 156	u8 *cb_sum = cb->sums;
 157
 158	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
 159		return 0;
 160
 161	shash->tfm = fs_info->csum_shash;
 162
 163	for (i = 0; i < cb->nr_pages; i++) {
 164		u32 pg_offset;
 165		u32 bytes_left = PAGE_SIZE;
 166		page = cb->compressed_pages[i];
 167
 168		/* Determine the remaining bytes inside the page first */
 169		if (i == cb->nr_pages - 1)
 170			bytes_left = cb->compressed_len - i * PAGE_SIZE;
 171
 172		/* Hash through the page sector by sector */
 173		for (pg_offset = 0; pg_offset < bytes_left;
 174		     pg_offset += sectorsize) {
 175			kaddr = kmap_atomic(page);
 176			crypto_shash_digest(shash, kaddr + pg_offset,
 177					    sectorsize, csum);
 178			kunmap_atomic(kaddr);
 179
 180			if (memcmp(&csum, cb_sum, csum_size) != 0) {
 181				btrfs_print_data_csum_error(inode, disk_start,
 182						csum, cb_sum, cb->mirror_num);
 183				if (btrfs_io_bio(bio)->device)
 184					btrfs_dev_stat_inc_and_print(
 185						btrfs_io_bio(bio)->device,
 186						BTRFS_DEV_STAT_CORRUPTION_ERRS);
 187				return -EIO;
 188			}
 189			cb_sum += csum_size;
 190			disk_start += sectorsize;
 191		}
 192	}
 193	return 0;
 194}
 195
 196/* when we finish reading compressed pages from the disk, we
 197 * decompress them and then run the bio end_io routines on the
 198 * decompressed pages (in the inode address space).
 199 *
 200 * This allows the checksumming and other IO error handling routines
 201 * to work normally
 202 *
 203 * The compressed pages are freed here, and it must be run
 204 * in process context
 205 */
 206static void end_compressed_bio_read(struct bio *bio)
 207{
 208	struct compressed_bio *cb = bio->bi_private;
 209	struct inode *inode;
 210	struct page *page;
 211	unsigned int index;
 212	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
 213	int ret = 0;
 214
 215	if (bio->bi_status)
 216		cb->errors = 1;
 217
 218	/* if there are more bios still pending for this compressed
 219	 * extent, just exit
 220	 */
 221	if (!refcount_dec_and_test(&cb->pending_bios))
 222		goto out;
 223
 224	/*
 225	 * Record the correct mirror_num in cb->orig_bio so that
 226	 * read-repair can work properly.
 227	 */
 228	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
 229	cb->mirror_num = mirror;
 230
 231	/*
 232	 * Some IO in this cb have failed, just skip checksum as there
 233	 * is no way it could be correct.
 234	 */
 235	if (cb->errors == 1)
 236		goto csum_failed;
 237
 238	inode = cb->inode;
 239	ret = check_compressed_csum(BTRFS_I(inode), bio,
 240				    bio->bi_iter.bi_sector << 9);
 241	if (ret)
 242		goto csum_failed;
 243
 244	/* ok, we're the last bio for this extent, lets start
 245	 * the decompression.
 246	 */
 247	ret = btrfs_decompress_bio(cb);
 248
 249csum_failed:
 250	if (ret)
 251		cb->errors = 1;
 252
 253	/* release the compressed pages */
 254	index = 0;
 255	for (index = 0; index < cb->nr_pages; index++) {
 256		page = cb->compressed_pages[index];
 257		page->mapping = NULL;
 258		put_page(page);
 259	}
 260
 261	/* do io completion on the original bio */
 262	if (cb->errors) {
 263		bio_io_error(cb->orig_bio);
 264	} else {
 265		struct bio_vec *bvec;
 266		struct bvec_iter_all iter_all;
 267
 268		/*
 269		 * we have verified the checksum already, set page
 270		 * checked so the end_io handlers know about it
 271		 */
 272		ASSERT(!bio_flagged(bio, BIO_CLONED));
 273		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
 274			SetPageChecked(bvec->bv_page);
 275
 276		bio_endio(cb->orig_bio);
 277	}
 278
 279	/* finally free the cb struct */
 280	kfree(cb->compressed_pages);
 281	kfree(cb);
 282out:
 283	bio_put(bio);
 284}
 285
 286/*
 287 * Clear the writeback bits on all of the file
 288 * pages for a compressed write
 289 */
 290static noinline void end_compressed_writeback(struct inode *inode,
 291					      const struct compressed_bio *cb)
 292{
 293	unsigned long index = cb->start >> PAGE_SHIFT;
 294	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 295	struct page *pages[16];
 296	unsigned long nr_pages = end_index - index + 1;
 297	int i;
 298	int ret;
 299
 300	if (cb->errors)
 301		mapping_set_error(inode->i_mapping, -EIO);
 302
 303	while (nr_pages > 0) {
 304		ret = find_get_pages_contig(inode->i_mapping, index,
 305				     min_t(unsigned long,
 306				     nr_pages, ARRAY_SIZE(pages)), pages);
 307		if (ret == 0) {
 308			nr_pages -= 1;
 309			index += 1;
 310			continue;
 311		}
 312		for (i = 0; i < ret; i++) {
 313			if (cb->errors)
 314				SetPageError(pages[i]);
 315			end_page_writeback(pages[i]);
 316			put_page(pages[i]);
 317		}
 318		nr_pages -= ret;
 319		index += ret;
 320	}
 321	/* the inode may be gone now */
 322}
 323
 324/*
 325 * do the cleanup once all the compressed pages hit the disk.
 326 * This will clear writeback on the file pages and free the compressed
 327 * pages.
 328 *
 329 * This also calls the writeback end hooks for the file pages so that
 330 * metadata and checksums can be updated in the file.
 331 */
 332static void end_compressed_bio_write(struct bio *bio)
 333{
 334	struct compressed_bio *cb = bio->bi_private;
 335	struct inode *inode;
 336	struct page *page;
 337	unsigned int index;
 338
 339	if (bio->bi_status)
 340		cb->errors = 1;
 341
 342	/* if there are more bios still pending for this compressed
 343	 * extent, just exit
 344	 */
 345	if (!refcount_dec_and_test(&cb->pending_bios))
 346		goto out;
 347
 348	/* ok, we're the last bio for this extent, step one is to
 349	 * call back into the FS and do all the end_io operations
 350	 */
 351	inode = cb->inode;
 352	btrfs_record_physical_zoned(inode, cb->start, bio);
 353	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
 354			cb->start, cb->start + cb->len - 1,
 355			!cb->errors);
 356
 357	end_compressed_writeback(inode, cb);
 358	/* note, our inode could be gone now */
 359
 360	/*
 361	 * release the compressed pages, these came from alloc_page and
 362	 * are not attached to the inode at all
 363	 */
 364	index = 0;
 365	for (index = 0; index < cb->nr_pages; index++) {
 366		page = cb->compressed_pages[index];
 367		page->mapping = NULL;
 368		put_page(page);
 369	}
 370
 371	/* finally free the cb struct */
 372	kfree(cb->compressed_pages);
 373	kfree(cb);
 374out:
 375	bio_put(bio);
 376}
 377
 378/*
 379 * worker function to build and submit bios for previously compressed pages.
 380 * The corresponding pages in the inode should be marked for writeback
 381 * and the compressed pages should have a reference on them for dropping
 382 * when the IO is complete.
 383 *
 384 * This also checksums the file bytes and gets things ready for
 385 * the end io hooks.
 386 */
 387blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 388				 unsigned int len, u64 disk_start,
 389				 unsigned int compressed_len,
 390				 struct page **compressed_pages,
 391				 unsigned int nr_pages,
 392				 unsigned int write_flags,
 393				 struct cgroup_subsys_state *blkcg_css)
 394{
 395	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 396	struct bio *bio = NULL;
 397	struct compressed_bio *cb;
 398	unsigned long bytes_left;
 399	int pg_index = 0;
 400	struct page *page;
 401	u64 first_byte = disk_start;
 402	blk_status_t ret;
 403	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
 404	const bool use_append = btrfs_use_zone_append(inode, disk_start);
 405	const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
 406
 407	WARN_ON(!PAGE_ALIGNED(start));
 408	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 409	if (!cb)
 410		return BLK_STS_RESOURCE;
 411	refcount_set(&cb->pending_bios, 0);
 412	cb->errors = 0;
 413	cb->inode = &inode->vfs_inode;
 414	cb->start = start;
 415	cb->len = len;
 416	cb->mirror_num = 0;
 417	cb->compressed_pages = compressed_pages;
 418	cb->compressed_len = compressed_len;
 419	cb->orig_bio = NULL;
 420	cb->nr_pages = nr_pages;
 421
 422	bio = btrfs_bio_alloc(first_byte);
 423	bio->bi_opf = bio_op | write_flags;
 424	bio->bi_private = cb;
 425	bio->bi_end_io = end_compressed_bio_write;
 426
 427	if (use_append) {
 428		struct btrfs_device *device;
 429
 430		device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
 431		if (IS_ERR(device)) {
 432			kfree(cb);
 433			bio_put(bio);
 434			return BLK_STS_NOTSUPP;
 435		}
 436
 437		bio_set_dev(bio, device->bdev);
 438	}
 439
 440	if (blkcg_css) {
 441		bio->bi_opf |= REQ_CGROUP_PUNT;
 442		kthread_associate_blkcg(blkcg_css);
 443	}
 444	refcount_set(&cb->pending_bios, 1);
 445
 446	/* create and submit bios for the compressed pages */
 447	bytes_left = compressed_len;
 448	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 449		int submit = 0;
 450		int len = 0;
 451
 452		page = compressed_pages[pg_index];
 453		page->mapping = inode->vfs_inode.i_mapping;
 454		if (bio->bi_iter.bi_size)
 455			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
 456							  0);
 457
 458		/*
 459		 * The page can only be added to the bio if the current bio
 460		 * fits in the stripe.
 461		 */
 462		if (!submit) {
 463			if (pg_index == 0 && use_append)
 464				len = bio_add_zone_append_page(bio, page,
 465							       PAGE_SIZE, 0);
 466			else
 467				len = bio_add_page(bio, page, PAGE_SIZE, 0);
 468		}
 469
 470		page->mapping = NULL;
 471		if (submit || len < PAGE_SIZE) {
 472			/*
 473			 * Increment the count before we submit the bio so
 474			 * we know the end IO handler won't fire before the
 475			 * increment.  Otherwise, the cb might get freed
 476			 * before we're done setting it up.
 477			 */
 478			refcount_inc(&cb->pending_bios);
 479			ret = btrfs_bio_wq_end_io(fs_info, bio,
 480						  BTRFS_WQ_ENDIO_DATA);
 481			BUG_ON(ret); /* -ENOMEM */
 482
 483			if (!skip_sum) {
 484				ret = btrfs_csum_one_bio(inode, bio, start, 1);
 485				BUG_ON(ret); /* -ENOMEM */
 486			}
 487
 488			ret = btrfs_map_bio(fs_info, bio, 0);
 489			if (ret) {
 490				bio->bi_status = ret;
 491				bio_endio(bio);
 492			}
 493
 494			bio = btrfs_bio_alloc(first_byte);
 495			bio->bi_opf = bio_op | write_flags;
 496			bio->bi_private = cb;
 497			bio->bi_end_io = end_compressed_bio_write;
 498			if (blkcg_css)
 499				bio->bi_opf |= REQ_CGROUP_PUNT;
 500			/*
 501			 * Use bio_add_page() to ensure the bio has at least one
 502			 * page.
 503			 */
 504			bio_add_page(bio, page, PAGE_SIZE, 0);
 505		}
 506		if (bytes_left < PAGE_SIZE) {
 507			btrfs_info(fs_info,
 508					"bytes left %lu compress len %u nr %u",
 509			       bytes_left, cb->compressed_len, cb->nr_pages);
 510		}
 511		bytes_left -= PAGE_SIZE;
 512		first_byte += PAGE_SIZE;
 513		cond_resched();
 514	}
 515
 516	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 517	BUG_ON(ret); /* -ENOMEM */
 518
 519	if (!skip_sum) {
 520		ret = btrfs_csum_one_bio(inode, bio, start, 1);
 521		BUG_ON(ret); /* -ENOMEM */
 522	}
 523
 524	ret = btrfs_map_bio(fs_info, bio, 0);
 525	if (ret) {
 526		bio->bi_status = ret;
 527		bio_endio(bio);
 528	}
 529
 530	if (blkcg_css)
 531		kthread_associate_blkcg(NULL);
 532
 533	return 0;
 534}
 535
 536static u64 bio_end_offset(struct bio *bio)
 537{
 538	struct bio_vec *last = bio_last_bvec_all(bio);
 539
 540	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
 541}
 542
 543static noinline int add_ra_bio_pages(struct inode *inode,
 544				     u64 compressed_end,
 545				     struct compressed_bio *cb)
 546{
 547	unsigned long end_index;
 548	unsigned long pg_index;
 549	u64 last_offset;
 550	u64 isize = i_size_read(inode);
 551	int ret;
 552	struct page *page;
 553	unsigned long nr_pages = 0;
 554	struct extent_map *em;
 555	struct address_space *mapping = inode->i_mapping;
 556	struct extent_map_tree *em_tree;
 557	struct extent_io_tree *tree;
 558	u64 end;
 559	int misses = 0;
 560
 561	last_offset = bio_end_offset(cb->orig_bio);
 562	em_tree = &BTRFS_I(inode)->extent_tree;
 563	tree = &BTRFS_I(inode)->io_tree;
 564
 565	if (isize == 0)
 566		return 0;
 567
 568	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 569
 570	while (last_offset < compressed_end) {
 571		pg_index = last_offset >> PAGE_SHIFT;
 572
 573		if (pg_index > end_index)
 574			break;
 575
 576		page = xa_load(&mapping->i_pages, pg_index);
 577		if (page && !xa_is_value(page)) {
 578			misses++;
 579			if (misses > 4)
 580				break;
 581			goto next;
 582		}
 583
 584		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 585								 ~__GFP_FS));
 586		if (!page)
 587			break;
 588
 589		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 590			put_page(page);
 591			goto next;
 592		}
 593
 594		/*
 595		 * At this point, we have a locked page in the page cache
 596		 * for these bytes in the file.  But we have to make
 597		 * sure they map to this compressed extent on disk.
 598		 */
 599		ret = set_page_extent_mapped(page);
 600		if (ret < 0) {
 601			unlock_page(page);
 602			put_page(page);
 603			break;
 604		}
 605
 606		end = last_offset + PAGE_SIZE - 1;
 607		lock_extent(tree, last_offset, end);
 608		read_lock(&em_tree->lock);
 609		em = lookup_extent_mapping(em_tree, last_offset,
 610					   PAGE_SIZE);
 611		read_unlock(&em_tree->lock);
 612
 613		if (!em || last_offset < em->start ||
 614		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 615		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 616			free_extent_map(em);
 617			unlock_extent(tree, last_offset, end);
 618			unlock_page(page);
 619			put_page(page);
 620			break;
 621		}
 622		free_extent_map(em);
 623
 624		if (page->index == end_index) {
 625			size_t zero_offset = offset_in_page(isize);
 626
 627			if (zero_offset) {
 628				int zeros;
 629				zeros = PAGE_SIZE - zero_offset;
 630				memzero_page(page, zero_offset, zeros);
 631				flush_dcache_page(page);
 632			}
 633		}
 634
 635		ret = bio_add_page(cb->orig_bio, page,
 636				   PAGE_SIZE, 0);
 637
 638		if (ret == PAGE_SIZE) {
 639			nr_pages++;
 640			put_page(page);
 641		} else {
 642			unlock_extent(tree, last_offset, end);
 643			unlock_page(page);
 644			put_page(page);
 645			break;
 646		}
 647next:
 648		last_offset += PAGE_SIZE;
 649	}
 650	return 0;
 651}
 652
 653/*
 654 * For a compressed read, the bio we get passed has all the inode pages
 655 * in it.  We don't actually do IO on those pages but allocate new ones
 656 * to hold the compressed pages on disk.
 657 *
 658 * bio->bi_iter.bi_sector points to the compressed extent on disk
 659 * bio->bi_io_vec points to all of the inode pages
 660 *
 661 * After the compressed pages are read, we copy the bytes into the
 662 * bio we were passed and then run the bio end_io handlers
 663 */
 664blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 665				 int mirror_num, unsigned long bio_flags)
 666{
 667	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 668	struct extent_map_tree *em_tree;
 669	struct compressed_bio *cb;
 670	unsigned int compressed_len;
 671	unsigned int nr_pages;
 672	unsigned int pg_index;
 673	struct page *page;
 674	struct bio *comp_bio;
 675	u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
 676	u64 em_len;
 677	u64 em_start;
 678	struct extent_map *em;
 679	blk_status_t ret = BLK_STS_RESOURCE;
 680	int faili = 0;
 681	u8 *sums;
 682
 683	em_tree = &BTRFS_I(inode)->extent_tree;
 684
 685	/* we need the actual starting offset of this extent in the file */
 686	read_lock(&em_tree->lock);
 687	em = lookup_extent_mapping(em_tree,
 688				   page_offset(bio_first_page_all(bio)),
 689				   fs_info->sectorsize);
 690	read_unlock(&em_tree->lock);
 691	if (!em)
 692		return BLK_STS_IOERR;
 693
 694	compressed_len = em->block_len;
 695	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 696	if (!cb)
 697		goto out;
 698
 699	refcount_set(&cb->pending_bios, 0);
 700	cb->errors = 0;
 701	cb->inode = inode;
 702	cb->mirror_num = mirror_num;
 703	sums = cb->sums;
 704
 705	cb->start = em->orig_start;
 706	em_len = em->len;
 707	em_start = em->start;
 708
 709	free_extent_map(em);
 710	em = NULL;
 711
 712	cb->len = bio->bi_iter.bi_size;
 713	cb->compressed_len = compressed_len;
 714	cb->compress_type = extent_compress_type(bio_flags);
 715	cb->orig_bio = bio;
 716
 717	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 718	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 719				       GFP_NOFS);
 720	if (!cb->compressed_pages)
 721		goto fail1;
 722
 723	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 724		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 725							      __GFP_HIGHMEM);
 726		if (!cb->compressed_pages[pg_index]) {
 727			faili = pg_index - 1;
 728			ret = BLK_STS_RESOURCE;
 729			goto fail2;
 730		}
 731	}
 732	faili = nr_pages - 1;
 733	cb->nr_pages = nr_pages;
 734
 735	add_ra_bio_pages(inode, em_start + em_len, cb);
 736
 737	/* include any pages we added in add_ra_bio_pages */
 738	cb->len = bio->bi_iter.bi_size;
 739
 740	comp_bio = btrfs_bio_alloc(cur_disk_byte);
 741	comp_bio->bi_opf = REQ_OP_READ;
 742	comp_bio->bi_private = cb;
 743	comp_bio->bi_end_io = end_compressed_bio_read;
 744	refcount_set(&cb->pending_bios, 1);
 745
 746	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 747		u32 pg_len = PAGE_SIZE;
 748		int submit = 0;
 749
 750		/*
 751		 * To handle subpage case, we need to make sure the bio only
 752		 * covers the range we need.
 753		 *
 754		 * If we're at the last page, truncate the length to only cover
 755		 * the remaining part.
 756		 */
 757		if (pg_index == nr_pages - 1)
 758			pg_len = min_t(u32, PAGE_SIZE,
 759					compressed_len - pg_index * PAGE_SIZE);
 760
 761		page = cb->compressed_pages[pg_index];
 762		page->mapping = inode->i_mapping;
 763		page->index = em_start >> PAGE_SHIFT;
 764
 765		if (comp_bio->bi_iter.bi_size)
 766			submit = btrfs_bio_fits_in_stripe(page, pg_len,
 767							  comp_bio, 0);
 768
 769		page->mapping = NULL;
 770		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
 771			unsigned int nr_sectors;
 772
 773			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
 774						  BTRFS_WQ_ENDIO_DATA);
 775			BUG_ON(ret); /* -ENOMEM */
 776
 777			/*
 778			 * Increment the count before we submit the bio so
 779			 * we know the end IO handler won't fire before the
 780			 * increment.  Otherwise, the cb might get freed
 781			 * before we're done setting it up.
 782			 */
 783			refcount_inc(&cb->pending_bios);
 784
 785			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 786			BUG_ON(ret); /* -ENOMEM */
 787
 788			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 789						  fs_info->sectorsize);
 790			sums += fs_info->csum_size * nr_sectors;
 791
 792			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
 793			if (ret) {
 794				comp_bio->bi_status = ret;
 795				bio_endio(comp_bio);
 796			}
 797
 798			comp_bio = btrfs_bio_alloc(cur_disk_byte);
 799			comp_bio->bi_opf = REQ_OP_READ;
 800			comp_bio->bi_private = cb;
 801			comp_bio->bi_end_io = end_compressed_bio_read;
 802
 803			bio_add_page(comp_bio, page, pg_len, 0);
 804		}
 805		cur_disk_byte += pg_len;
 806	}
 807
 808	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 809	BUG_ON(ret); /* -ENOMEM */
 810
 811	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 812	BUG_ON(ret); /* -ENOMEM */
 813
 814	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
 815	if (ret) {
 816		comp_bio->bi_status = ret;
 817		bio_endio(comp_bio);
 818	}
 819
 820	return 0;
 821
 822fail2:
 823	while (faili >= 0) {
 824		__free_page(cb->compressed_pages[faili]);
 825		faili--;
 826	}
 827
 828	kfree(cb->compressed_pages);
 829fail1:
 830	kfree(cb);
 831out:
 832	free_extent_map(em);
 833	return ret;
 834}
 835
 836/*
 837 * The heuristic uses systematic sampling to collect data from the input
 838 * range; the logic can be tuned by the following constants:
 839 *
 840 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 841 * @SAMPLING_INTERVAL  - the interval (in bytes) at which samples are taken
 842 */
 843#define SAMPLING_READ_SIZE	(16)
 844#define SAMPLING_INTERVAL	(256)
 845
 846/*
 847 * For statistical analysis of the input data we consider bytes that form a
 848 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 849 * many times the object appeared in the sample.
 850 */
 851#define BUCKET_SIZE		(256)
 852
 853/*
 854 * The size of the sample is based on a statistical sampling rule of thumb.
 855 * The common way is to perform sampling tests as long as the number of
 856 * elements in each cell is at least 5.
 857 *
 858 * Instead of 5, we choose 32 to obtain more accurate results.
 859 * If the data contain the maximum number of symbols, which is 256, we obtain a
 860 * sample size bound by 8192.
 861 *
 862 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 863 * from up to 512 locations.
 864 */
 865#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 866				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
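
/*
 * Worked arithmetic for the bound above (assuming BTRFS_MAX_UNCOMPRESSED
 * is 128KiB, as defined elsewhere in btrfs):
 * 131072 * 16 / 256 = 8192, i.e. at most 8KiB of sample data per range,
 * matching the 8192 bound mentioned in the comment above.
 */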
 867
 868struct bucket_item {
 869	u32 count;
 870};
 871
 872struct heuristic_ws {
 873	/* Partial copy of input data */
 874	u8 *sample;
 875	u32 sample_size;
 876	/* Buckets store counters for each byte value */
 877	struct bucket_item *bucket;
 878	/* Sorting buffer */
 879	struct bucket_item *bucket_b;
 880	struct list_head list;
 881};
 882
 883static struct workspace_manager heuristic_wsm;
 884
 885static void free_heuristic_ws(struct list_head *ws)
 886{
 887	struct heuristic_ws *workspace;
 888
 889	workspace = list_entry(ws, struct heuristic_ws, list);
 890
 891	kvfree(workspace->sample);
 892	kfree(workspace->bucket);
 893	kfree(workspace->bucket_b);
 894	kfree(workspace);
 895}
 896
 897static struct list_head *alloc_heuristic_ws(unsigned int level)
 898{
 899	struct heuristic_ws *ws;
 900
 901	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 902	if (!ws)
 903		return ERR_PTR(-ENOMEM);
 904
 905	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 906	if (!ws->sample)
 907		goto fail;
 908
 909	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 910	if (!ws->bucket)
 911		goto fail;
 912
 913	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 914	if (!ws->bucket_b)
 915		goto fail;
 916
 917	INIT_LIST_HEAD(&ws->list);
 918	return &ws->list;
 919fail:
 920	free_heuristic_ws(&ws->list);
 921	return ERR_PTR(-ENOMEM);
 922}
 923
 924const struct btrfs_compress_op btrfs_heuristic_compress = {
 925	.workspace_manager = &heuristic_wsm,
 926};
 927
 928static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 929	/* The heuristic is represented as compression type 0 */
 930	&btrfs_heuristic_compress,
 931	&btrfs_zlib_compress,
 932	&btrfs_lzo_compress,
 933	&btrfs_zstd_compress,
 934};
 935
 936static struct list_head *alloc_workspace(int type, unsigned int level)
 937{
 938	switch (type) {
 939	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
 940	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
 941	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
 942	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
 943	default:
 944		/*
 945		 * This can't happen, the type is validated several times
 946		 * before we get here.
 947		 */
 948		BUG();
 949	}
 950}
 951
 952static void free_workspace(int type, struct list_head *ws)
 953{
 954	switch (type) {
 955	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
 956	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
 957	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
 958	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
 959	default:
 960		/*
 961		 * This can't happen, the type is validated several times
 962		 * before we get here.
 963		 */
 964		BUG();
 965	}
 966}
 967
 968static void btrfs_init_workspace_manager(int type)
 969{
 970	struct workspace_manager *wsm;
 971	struct list_head *workspace;
 972
 973	wsm = btrfs_compress_op[type]->workspace_manager;
 974	INIT_LIST_HEAD(&wsm->idle_ws);
 975	spin_lock_init(&wsm->ws_lock);
 976	atomic_set(&wsm->total_ws, 0);
 977	init_waitqueue_head(&wsm->ws_wait);
 978
 979	/*
 980	 * Preallocate one workspace for each compression type so we can
 981	 * guarantee forward progress in the worst case
 982	 */
 983	workspace = alloc_workspace(type, 0);
 984	if (IS_ERR(workspace)) {
 985		pr_warn(
 986	"BTRFS: cannot preallocate compression workspace, will try later\n");
 987	} else {
 988		atomic_set(&wsm->total_ws, 1);
 989		wsm->free_ws = 1;
 990		list_add(workspace, &wsm->idle_ws);
 991	}
 992}
 993
 994static void btrfs_cleanup_workspace_manager(int type)
 995{
 996	struct workspace_manager *wsman;
 997	struct list_head *ws;
 998
 999	wsman = btrfs_compress_op[type]->workspace_manager;
1000	while (!list_empty(&wsman->idle_ws)) {
1001		ws = wsman->idle_ws.next;
1002		list_del(ws);
1003		free_workspace(type, ws);
1004		atomic_dec(&wsman->total_ws);
1005	}
1006}
1007
1008/*
1009 * This finds an available workspace or allocates a new one.
1010 * If it's not possible to allocate a new one, wait until one is free.
1011 * Preallocation guarantees forward progress and we do not return
1012 * errors.
1013 */
1014struct list_head *btrfs_get_workspace(int type, unsigned int level)
1015{
1016	struct workspace_manager *wsm;
1017	struct list_head *workspace;
1018	int cpus = num_online_cpus();
1019	unsigned nofs_flag;
1020	struct list_head *idle_ws;
1021	spinlock_t *ws_lock;
1022	atomic_t *total_ws;
1023	wait_queue_head_t *ws_wait;
1024	int *free_ws;
1025
1026	wsm = btrfs_compress_op[type]->workspace_manager;
1027	idle_ws	 = &wsm->idle_ws;
1028	ws_lock	 = &wsm->ws_lock;
1029	total_ws = &wsm->total_ws;
1030	ws_wait	 = &wsm->ws_wait;
1031	free_ws	 = &wsm->free_ws;
1032
1033again:
1034	spin_lock(ws_lock);
1035	if (!list_empty(idle_ws)) {
1036		workspace = idle_ws->next;
1037		list_del(workspace);
1038		(*free_ws)--;
1039		spin_unlock(ws_lock);
1040		return workspace;
1041
1042	}
1043	if (atomic_read(total_ws) > cpus) {
1044		DEFINE_WAIT(wait);
1045
1046		spin_unlock(ws_lock);
1047		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1048		if (atomic_read(total_ws) > cpus && !*free_ws)
1049			schedule();
1050		finish_wait(ws_wait, &wait);
1051		goto again;
1052	}
1053	atomic_inc(total_ws);
1054	spin_unlock(ws_lock);
1055
1056	/*
1057	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1058	 * to turn it off here because we might get called from the restricted
1059	 * context of btrfs_compress_bio/btrfs_compress_pages
1060	 */
1061	nofs_flag = memalloc_nofs_save();
1062	workspace = alloc_workspace(type, level);
1063	memalloc_nofs_restore(nofs_flag);
1064
1065	if (IS_ERR(workspace)) {
1066		atomic_dec(total_ws);
1067		wake_up(ws_wait);
1068
1069		/*
1070		 * Do not return the error but go back to waiting. There's a
1071		 * workspace preallocated for each type and the compression
1072		 * time is bounded so we get to a workspace eventually. This
1073		 * makes our caller's life easier.
1074		 *
1075		 * To prevent silent and low-probability deadlocks (when the
1076		 * initial preallocation fails), check if there are any
1077		 * workspaces at all.
1078		 */
1079		if (atomic_read(total_ws) == 0) {
1080			static DEFINE_RATELIMIT_STATE(_rs,
1081					/* once per minute */ 60 * HZ,
1082					/* no burst */ 1);
1083
1084			if (__ratelimit(&_rs)) {
1085				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1086			}
1087		}
1088		goto again;
1089	}
1090	return workspace;
1091}
1092
1093static struct list_head *get_workspace(int type, int level)
1094{
1095	switch (type) {
1096	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1097	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1098	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
1099	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1100	default:
1101		/*
1102		 * This can't happen, the type is validated several times
1103		 * before we get here.
1104		 */
1105		BUG();
1106	}
1107}
1108
1109/*
1110 * Put a workspace struct back on the list or free it if we have enough
1111 * idle ones sitting around.
1112 */
1113void btrfs_put_workspace(int type, struct list_head *ws)
1114{
1115	struct workspace_manager *wsm;
1116	struct list_head *idle_ws;
1117	spinlock_t *ws_lock;
1118	atomic_t *total_ws;
1119	wait_queue_head_t *ws_wait;
1120	int *free_ws;
1121
1122	wsm = btrfs_compress_op[type]->workspace_manager;
1123	idle_ws	 = &wsm->idle_ws;
1124	ws_lock	 = &wsm->ws_lock;
1125	total_ws = &wsm->total_ws;
1126	ws_wait	 = &wsm->ws_wait;
1127	free_ws	 = &wsm->free_ws;
1128
1129	spin_lock(ws_lock);
1130	if (*free_ws <= num_online_cpus()) {
1131		list_add(ws, idle_ws);
1132		(*free_ws)++;
1133		spin_unlock(ws_lock);
1134		goto wake;
1135	}
1136	spin_unlock(ws_lock);
1137
1138	free_workspace(type, ws);
1139	atomic_dec(total_ws);
1140wake:
1141	cond_wake_up(ws_wait);
1142}
1143
1144static void put_workspace(int type, struct list_head *ws)
1145{
1146	switch (type) {
1147	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1148	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1149	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
1150	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1151	default:
1152		/*
1153		 * This can't happen, the type is validated several times
1154		 * before we get here.
1155		 */
1156		BUG();
1157	}
1158}
1159
1160/*
1161 * Adjust @level according to the limits of the compression algorithm or
1162 * fall back to the default
1163 */
1164static unsigned int btrfs_compress_set_level(int type, unsigned level)
1165{
1166	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1167
1168	if (level == 0)
1169		level = ops->default_level;
1170	else
1171		level = min(level, ops->max_level);
1172
1173	return level;
1174}
1175
1176/*
1177 * Given an address space and start and length, compress the bytes into @pages
1178 * that are allocated on demand.
1179 *
1180 * @type_level is the encoded algorithm and level, where level 0 means
1181 * whatever default the algorithm chooses and is opaque here;
1182 * - the compression algorithm is in bits 0-3
1183 * - the level is in bits 4-7
1184 *
1185 * @out_pages is an in/out parameter, holds the maximum number of pages to
1186 * allocate and returns the number of pages actually allocated
1187 *
1188 * @total_in is used to return the number of bytes actually read.  It
1189 * may be smaller than the input length if we had to exit early because we
1190 * ran out of room in the pages array or because we crossed the
1191 * max_out threshold.
1192 *
1193 * @total_out is an in/out parameter, must be set to the input length and
1194 * will also be used to return the total number of compressed bytes
1195 */
1196int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1197			 u64 start, struct page **pages,
1198			 unsigned long *out_pages,
1199			 unsigned long *total_in,
1200			 unsigned long *total_out)
1201{
1202	int type = btrfs_compress_type(type_level);
1203	int level = btrfs_compress_level(type_level);
1204	struct list_head *workspace;
1205	int ret;
1206
1207	level = btrfs_compress_set_level(type, level);
1208	workspace = get_workspace(type, level);
1209	ret = compression_compress_pages(type, workspace, mapping, start, pages,
1210					 out_pages, total_in, total_out);
1211	put_workspace(type, workspace);
1212	return ret;
1213}
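
/*
 * Example of the @type_level encoding documented above (an illustrative
 * sketch; callers may build the value differently): zstd at level 3 would
 * be passed as
 *
 *	type_level = BTRFS_COMPRESS_ZSTD | (3 << 4);
 *
 * btrfs_compress_type() then recovers the algorithm from bits 0-3 and
 * btrfs_compress_level() recovers the level from bits 4-7.
 */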
1214
1215static int btrfs_decompress_bio(struct compressed_bio *cb)
1216{
1217	struct list_head *workspace;
1218	int ret;
1219	int type = cb->compress_type;
1220
1221	workspace = get_workspace(type, 0);
1222	ret = compression_decompress_bio(type, workspace, cb);
1223	put_workspace(type, workspace);
1224
1225	return ret;
1226}
1227
1228/*
1229 * A less complex decompression routine.  Our compressed data fits in a
1230 * single page, and we want to read a single page out of it.
1231 * start_byte tells us the offset into the compressed data we're interested in.
1232 */
1233int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1234		     unsigned long start_byte, size_t srclen, size_t destlen)
1235{
1236	struct list_head *workspace;
1237	int ret;
1238
1239	workspace = get_workspace(type, 0);
1240	ret = compression_decompress(type, workspace, data_in, dest_page,
1241				     start_byte, srclen, destlen);
1242	put_workspace(type, workspace);
1243
1244	return ret;
1245}
1246
1247void __init btrfs_init_compress(void)
1248{
1249	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1250	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1251	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1252	zstd_init_workspace_manager();
1253}
1254
1255void __cold btrfs_exit_compress(void)
1256{
1257	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1258	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1259	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1260	zstd_cleanup_workspace_manager();
1261}
1262
1263/*
1264 * Copy uncompressed data from the working buffer into the pages.
1265 *
1266 * buf_start is the byte offset of the start of the working buffer within
1267 * the uncompressed data.
1268 * total_out is the offset of the end of the working buffer.
1269 */
1270int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1271			      unsigned long total_out, u64 disk_start,
1272			      struct bio *bio)
1273{
1274	unsigned long buf_offset;
1275	unsigned long current_buf_start;
1276	unsigned long start_byte;
1277	unsigned long prev_start_byte;
1278	unsigned long working_bytes = total_out - buf_start;
1279	unsigned long bytes;
1280	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1281
1282	/*
1283	 * start byte is the first byte of the page we're currently
1284	 * copying into relative to the start of the compressed data.
1285	 */
1286	start_byte = page_offset(bvec.bv_page) - disk_start;
1287
1288	/* we haven't yet hit data corresponding to this page */
1289	if (total_out <= start_byte)
1290		return 1;
1291
1292	/*
1293	 * the start of the data we care about is offset into
1294	 * the middle of our working buffer
1295	 */
1296	if (total_out > start_byte && buf_start < start_byte) {
1297		buf_offset = start_byte - buf_start;
1298		working_bytes -= buf_offset;
1299	} else {
1300		buf_offset = 0;
1301	}
1302	current_buf_start = buf_start;
1303
1304	/* copy bytes from the working buffer into the pages */
1305	while (working_bytes > 0) {
1306		bytes = min_t(unsigned long, bvec.bv_len,
1307				PAGE_SIZE - (buf_offset % PAGE_SIZE));
1308		bytes = min(bytes, working_bytes);
1309
1310		memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
1311			       bytes);
1312		flush_dcache_page(bvec.bv_page);
1313
1314		buf_offset += bytes;
1315		working_bytes -= bytes;
1316		current_buf_start += bytes;
1317
1318		/* check if we need to pick another page */
1319		bio_advance(bio, bytes);
1320		if (!bio->bi_iter.bi_size)
1321			return 0;
1322		bvec = bio_iter_iovec(bio, bio->bi_iter);
1323		prev_start_byte = start_byte;
1324		start_byte = page_offset(bvec.bv_page) - disk_start;
1325
1326		/*
1327		 * We need to make sure we're only adjusting
1328		 * our offset into the compression working buffer when
1329		 * we're switching pages.  Otherwise we can incorrectly
1330		 * keep copying when we were actually done.
1331		 */
1332		if (start_byte != prev_start_byte) {
1333			/*
1334			 * make sure our new page is covered by this
1335			 * working buffer
1336			 */
1337			if (total_out <= start_byte)
1338				return 1;
1339
1340			/*
1341			 * the next page in the biovec might not be adjacent
1342			 * to the last page, but it might still be found
1343			 * inside this working buffer. bump our offset pointer
1344			 */
1345			if (total_out > start_byte &&
1346			    current_buf_start < start_byte) {
1347				buf_offset = start_byte - buf_start;
1348				working_bytes = total_out - start_byte;
1349				current_buf_start = buf_start + buf_offset;
1350			}
1351		}
1352	}
1353
1354	return 1;
1355}
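
/*
 * Worked example of the offset bookkeeping above (illustrative numbers):
 * with disk_start = 0 and the first bvec page at file offset 8192,
 * start_byte is 8192.  If the decompressor has produced output bytes
 * [4096, 12288) so far (buf_start = 4096, total_out = 12288), then
 * buf_offset becomes 8192 - 4096 = 4096 and working_bytes shrinks from
 * 8192 to 4096, i.e. only the tail of the working buffer that overlaps
 * this page gets copied.
 */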
1356
1357/*
1358 * Shannon Entropy calculation
1359 *
1360 * Pure byte distribution analysis fails to determine compressibility of data.
1361 * Try calculating entropy to estimate the average minimum number of bits
1362 * needed to encode the sampled data.
1363 *
1364 * For convenience, return the percentage of needed bits instead of the
1365 * number of bits directly.
1366 *
1367 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1368 *			    and can be compressible with high probability
1369 *
1370 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1371 *
1372 * Use of ilog2() decreases precision; we lower the LVL to 5 to compensate.
1373 */
1374#define ENTROPY_LVL_ACEPTABLE		(65)
1375#define ENTROPY_LVL_HIGH		(80)
1376
1377/*
1378 * For increased precision in the shannon_entropy calculation,
1379 * let's do pow(n, M) to save more digits after the decimal point:
1380 *
1381 * - maximum int bit length is 64
1382 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1383 * - 13 * 4 = 52 < 64		-> M = 4
1384 *
1385 * So use pow(n, 4).
1386 */
1387static inline u32 ilog2_w(u64 n)
1388{
1389	return ilog2(n * n * n * n);
1390}
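
/*
 * Worked example of the fixed-point trick above: ilog2_w(n) = ilog2(n^4),
 * so for the maximum sample size of 8192 bytes sz_base = ilog2(8192^4) =
 * ilog2(2^52) = 52, and entropy_max below is 8 * ilog2_w(2) = 8 * ilog2(16)
 * = 32.  Every logarithm is scaled by a factor of four, which preserves two
 * extra fractional bits compared to a plain ilog2().
 */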
1391
1392static u32 shannon_entropy(struct heuristic_ws *ws)
1393{
1394	const u32 entropy_max = 8 * ilog2_w(2);
1395	u32 entropy_sum = 0;
1396	u32 p, p_base, sz_base;
1397	u32 i;
1398
1399	sz_base = ilog2_w(ws->sample_size);
1400	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1401		p = ws->bucket[i].count;
1402		p_base = ilog2_w(p);
1403		entropy_sum += p * (sz_base - p_base);
1404	}
1405
1406	entropy_sum /= ws->sample_size;
1407	return entropy_sum * 100 / entropy_max;
1408}
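
/*
 * In conventional notation the function above computes, in fixed point,
 *
 *	H = sum_i p_i * log2(1 / p_i),  where p_i = count_i / sample_size
 *
 * Each term p * (sz_base - p_base) approximates count * log2(sample_size /
 * count); the factor of four introduced by ilog2_w() appears in both the
 * sum and in entropy_max, so it cancels when the result is reported as a
 * percentage of the 8 bits-per-byte maximum.
 */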
1409
1410#define RADIX_BASE		4U
1411#define COUNTERS_SIZE		(1U << RADIX_BASE)
1412
1413static u8 get4bits(u64 num, int shift) {
1414	u8 low4bits;
1415
1416	num >>= shift;
1417	/* Reverse order */
1418	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1419	return low4bits;
1420}
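
/*
 * Worked example of the reverse-order mapping above: with COUNTERS_SIZE
 * equal to 16, get4bits(0x5A, 4) shifts the value to 0x5 and returns
 * 15 - 5 = 10, while get4bits(0x5A, 0) returns 15 - 0xA = 5.  Larger digits
 * map to smaller indices, which is what makes radix_sort() below produce a
 * descending order.
 */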
1421
1422/*
1423 * Use 4 bits as radix base
1424 * Use 16 u32 counters for calculating new position in buf array
1425 *
1426 * @array     - array that will be sorted
1427 * @array_buf - buffer array to store sorting results
1428 *              must be equal in size to @array
1429 * @num       - array size
1430 */
1431static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1432		       int num)
1433{
1434	u64 max_num;
1435	u64 buf_num;
1436	u32 counters[COUNTERS_SIZE];
1437	u32 new_addr;
1438	u32 addr;
1439	int bitlen;
1440	int shift;
1441	int i;
1442
1443	/*
1444	 * Try to avoid useless loop iterations for small numbers stored in big
1445	 * counters.  Example: 48 33 4 ... in a 64bit array
1446	 */
1447	max_num = array[0].count;
1448	for (i = 1; i < num; i++) {
1449		buf_num = array[i].count;
1450		if (buf_num > max_num)
1451			max_num = buf_num;
1452	}
1453
1454	buf_num = ilog2(max_num);
1455	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1456
1457	shift = 0;
1458	while (shift < bitlen) {
1459		memset(counters, 0, sizeof(counters));
1460
1461		for (i = 0; i < num; i++) {
1462			buf_num = array[i].count;
1463			addr = get4bits(buf_num, shift);
1464			counters[addr]++;
1465		}
1466
1467		for (i = 1; i < COUNTERS_SIZE; i++)
1468			counters[i] += counters[i - 1];
1469
1470		for (i = num - 1; i >= 0; i--) {
1471			buf_num = array[i].count;
1472			addr = get4bits(buf_num, shift);
1473			counters[addr]--;
1474			new_addr = counters[addr];
1475			array_buf[new_addr] = array[i];
1476		}
1477
1478		shift += RADIX_BASE;
1479
1480		/*
1481		 * Normal radix sort expects to move data from a temporary array
1482		 * to the main one.  But that requires some CPU time.  Avoid that
1483		 * by doing another sort iteration into the original array instead
1484		 * of a memcpy().
1485		 */
1486		memset(counters, 0, sizeof(counters));
1487
1488		for (i = 0; i < num; i++) {
1489			buf_num = array_buf[i].count;
1490			addr = get4bits(buf_num, shift);
1491			counters[addr]++;
1492		}
1493
1494		for (i = 1; i < COUNTERS_SIZE; i++)
1495			counters[i] += counters[i - 1];
1496
1497		for (i = num - 1; i >= 0; i--) {
1498			buf_num = array_buf[i].count;
1499			addr = get4bits(buf_num, shift);
1500			counters[addr]--;
1501			new_addr = counters[addr];
1502			array[new_addr] = array_buf[i];
1503		}
1504
1505		shift += RADIX_BASE;
1506	}
1507}
1508
1509/*
1510 * Size of the core byte set - how many bytes cover 90% of the sample
1511 *
1512 * There are several types of structured binary data that use nearly all byte
1513 * values. The distribution can be uniform and counts in all buckets will be
1514 * nearly the same (e.g. encrypted data). Unlikely to be compressible.
1515 *
1516 * Another possibility is a normal (Gaussian) distribution, where the data
1517 * could be potentially compressible, but we have to take a few more steps
1518 * to decide how much.
1519 *
1520 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
1521 *                       a compression algorithm can easily handle that
1522 * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high
1523 *                       probability is not compressible
1524 */
1525#define BYTE_CORE_SET_LOW		(64)
1526#define BYTE_CORE_SET_HIGH		(200)
1527
1528static int byte_core_set_size(struct heuristic_ws *ws)
1529{
1530	u32 i;
1531	u32 coreset_sum = 0;
1532	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1533	struct bucket_item *bucket = ws->bucket;
1534
1535	/* Sort in reverse order */
1536	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1537
1538	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1539		coreset_sum += bucket[i].count;
1540
1541	if (coreset_sum > core_set_threshold)
1542		return i;
1543
1544	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1545		coreset_sum += bucket[i].count;
1546		if (coreset_sum > core_set_threshold)
1547			break;
1548	}
1549
1550	return i;
1551}
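
/*
 * Worked example for the thresholds above (illustrative numbers): with a
 * full 8192-byte sample the 90% cutoff is 7372.  If, say, the 50 most
 * frequent byte values already cover more than 7372 samples, the function
 * returns a value below BYTE_CORE_SET_LOW (64) and the data is treated as
 * easily compressible; if more than BYTE_CORE_SET_HIGH (200) values are
 * needed, the distribution is close to uniform and compression is skipped.
 */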
1552
1553/*
1554 * Count byte values in buckets.
1555 * This heuristic can detect textual data (configs, xml, json, html, etc).
1556 * In most text-like data the byte set is restricted to a limited number of
1557 * possible characters, and that restriction in most cases makes the data
1558 * easy to compress.
1559 *
1560 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1561 *	less - compressible
1562 *	more - need additional analysis
1563 */
1564#define BYTE_SET_THRESHOLD		(64)
1565
1566static u32 byte_set_size(const struct heuristic_ws *ws)
1567{
1568	u32 i;
1569	u32 byte_set_size = 0;
1570
1571	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1572		if (ws->bucket[i].count > 0)
1573			byte_set_size++;
1574	}
1575
1576	/*
1577	 * Continue collecting the count of byte values in buckets.  If the byte
1578	 * set size is bigger than the threshold, it's pointless to continue;
1579	 * the detection technique would fail for this type of data.
1580	 */
1581	for (; i < BUCKET_SIZE; i++) {
1582		if (ws->bucket[i].count > 0) {
1583			byte_set_size++;
1584			if (byte_set_size > BYTE_SET_THRESHOLD)
1585				return byte_set_size;
1586		}
1587	}
1588
1589	return byte_set_size;
1590}
1591
1592static bool sample_repeated_patterns(struct heuristic_ws *ws)
1593{
1594	const u32 half_of_sample = ws->sample_size / 2;
1595	const u8 *data = ws->sample;
1596
1597	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1598}
1599
1600static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1601				     struct heuristic_ws *ws)
1602{
1603	struct page *page;
1604	u64 index, index_end;
1605	u32 i, curr_sample_pos;
1606	u8 *in_data;
1607
1608	/*
1609	 * Compression handles the input data in chunks of 128KiB
1610	 * (defined by BTRFS_MAX_UNCOMPRESSED).
1611	 *
1612	 * We do the same for the heuristic and loop over the whole range.
1613	 *
1614	 * MAX_SAMPLE_SIZE - calculated under the assumption that the heuristic will
1615	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1616	 */
1617	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1618		end = start + BTRFS_MAX_UNCOMPRESSED;
1619
1620	index = start >> PAGE_SHIFT;
1621	index_end = end >> PAGE_SHIFT;
1622
1623	/* Don't miss unaligned end */
1624	if (!IS_ALIGNED(end, PAGE_SIZE))
1625		index_end++;
1626
1627	curr_sample_pos = 0;
1628	while (index < index_end) {
1629		page = find_get_page(inode->i_mapping, index);
1630		in_data = kmap_local_page(page);
1631		/* Handle case where the start is not aligned to PAGE_SIZE */
1632		i = start % PAGE_SIZE;
1633		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1634			/* Don't sample any garbage from the last page */
1635			if (start > end - SAMPLING_READ_SIZE)
1636				break;
1637			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1638					SAMPLING_READ_SIZE);
1639			i += SAMPLING_INTERVAL;
1640			start += SAMPLING_INTERVAL;
1641			curr_sample_pos += SAMPLING_READ_SIZE;
1642		}
1643		kunmap_local(in_data);
1644		put_page(page);
1645
1646		index++;
1647	}
1648
1649	ws->sample_size = curr_sample_pos;
1650}
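
/*
 * Worked example of the sampling pattern above (assuming a page-aligned
 * start and 4KiB pages): 16 bytes are copied at offsets 0, 256, 512, ...
 * within each page, i.e. 16 samples of 16 bytes per page, so a full 128KiB
 * range contributes 32 * 16 * 16 = 8192 bytes of sample data.
 */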
1651
1652/*
1653 * Compression heuristic.
1654 *
1655 * For now it's a naive and optimistic 'return true'; we'll extend the logic
1656 * to quickly (compared to direct compression) detect data characteristics
1657 * (compressible/incompressible) to avoid wasting CPU time on incompressible
1658 * data.
1659 *
1660 * The following types of analysis can be performed:
1661 * - detect mostly zero data
1662 * - detect data with low "byte set" size (text, etc)
1663 * - detect data with low/high "core byte" set
1664 *
1665 * Return non-zero if the compression should be done, 0 otherwise.
1666 */
1667int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1668{
1669	struct list_head *ws_list = get_workspace(0, 0);
1670	struct heuristic_ws *ws;
1671	u32 i;
1672	u8 byte;
1673	int ret = 0;
1674
1675	ws = list_entry(ws_list, struct heuristic_ws, list);
1676
1677	heuristic_collect_sample(inode, start, end, ws);
1678
1679	if (sample_repeated_patterns(ws)) {
1680		ret = 1;
1681		goto out;
1682	}
1683
1684	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1685
1686	for (i = 0; i < ws->sample_size; i++) {
1687		byte = ws->sample[i];
1688		ws->bucket[byte].count++;
1689	}
1690
1691	i = byte_set_size(ws);
1692	if (i < BYTE_SET_THRESHOLD) {
1693		ret = 2;
1694		goto out;
1695	}
1696
1697	i = byte_core_set_size(ws);
1698	if (i <= BYTE_CORE_SET_LOW) {
1699		ret = 3;
1700		goto out;
1701	}
1702
1703	if (i >= BYTE_CORE_SET_HIGH) {
1704		ret = 0;
1705		goto out;
1706	}
1707
1708	i = shannon_entropy(ws);
1709	if (i <= ENTROPY_LVL_ACEPTABLE) {
1710		ret = 4;
1711		goto out;
1712	}
1713
1714	/*
1715	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1716	 * needed to give green light to compression.
1717	 *
1718	 * For now just assume that compression at that level is not worth the
1719	 * resources because:
1720	 *
1721	 * 1. it is possible to defrag the data later
1722	 *
1723	 * 2. the data would turn out to be hardly compressible, e.g. 150 byte
1724	 * values with every bucket counter at around 54. The heuristic would
1725	 * be confused. This can happen when the data has some internal repeated
1726	 * patterns like "abbacbbc...". This can be detected by analyzing
1727	 * pairs of bytes, which is too costly.
1728	 */
1729	if (i < ENTROPY_LVL_HIGH) {
1730		ret = 5;
1731		goto out;
1732	} else {
1733		ret = 0;
1734		goto out;
1735	}
1736
1737out:
1738	put_workspace(0, ws_list);
1739	return ret;
1740}
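
/*
 * A sketch of how the result above is typically consumed (do_compress is a
 * hypothetical flag; real callers only check whether the value is non-zero,
 * the specific codes 1-5 merely record which test made the decision):
 *
 *	if (btrfs_compress_heuristic(inode, start, end))
 *		do_compress = true;	// repeated pattern, small byte set,
 *					// small core set or low entropy
 *	else
 *		do_compress = false;	// likely incompressible, skip it
 */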
1741
1742/*
1743 * Convert the compression suffix (e.g. after "zlib", starting with ":") to
1744 * a level; an unrecognized string selects the default level
1745 */
1746unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1747{
1748	unsigned int level = 0;
1749	int ret;
1750
1751	if (!type)
1752		return 0;
1753
1754	if (str[0] == ':') {
1755		ret = kstrtouint(str + 1, 10, &level);
1756		if (ret)
1757			level = 0;
1758	}
1759
1760	level = btrfs_compress_set_level(type, level);
1761
1762	return level;
1763}
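
/*
 * Example (assuming the usual mount option syntax): for "compress=zlib:9"
 * the caller passes str = ":9", so kstrtouint() parses 9 and
 * btrfs_compress_set_level() clamps it to the algorithm's maximum if
 * needed; a malformed suffix such as ":abc" falls back to level 0, which
 * btrfs_compress_set_level() turns into the default level.
 */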