v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
   8#include <linux/file.h>
   9#include <linux/fs.h>
  10#include <linux/pagemap.h>
  11#include <linux/pagevec.h>
  12#include <linux/highmem.h>
  13#include <linux/kthread.h>
  14#include <linux/time.h>
  15#include <linux/init.h>
  16#include <linux/string.h>
  17#include <linux/backing-dev.h>
  18#include <linux/writeback.h>
  19#include <linux/psi.h>
  20#include <linux/slab.h>
  21#include <linux/sched/mm.h>
  22#include <linux/log2.h>
  23#include <linux/shrinker.h>
  24#include <crypto/hash.h>
  25#include "misc.h"
  26#include "ctree.h"
  27#include "fs.h"
  28#include "btrfs_inode.h"
  29#include "bio.h"
  30#include "ordered-data.h"
  31#include "compression.h"
  32#include "extent_io.h"
  33#include "extent_map.h"
  34#include "subpage.h"
  35#include "messages.h"
  36#include "super.h"
  37
  38static struct bio_set btrfs_compressed_bioset;
  39
  40static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  41
  42const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  43{
  44	switch (type) {
  45	case BTRFS_COMPRESS_ZLIB:
  46	case BTRFS_COMPRESS_LZO:
  47	case BTRFS_COMPRESS_ZSTD:
  48	case BTRFS_COMPRESS_NONE:
  49		return btrfs_compress_types[type];
  50	default:
  51		break;
  52	}
  53
  54	return NULL;
  55}
  56
  57static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
  58{
  59	return container_of(bbio, struct compressed_bio, bbio);
  60}
  61
  62static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
  63						   u64 start, blk_opf_t op,
  64						   btrfs_bio_end_io_t end_io)
  65{
  66	struct btrfs_bio *bbio;
  67
  68	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
  69					  GFP_NOFS, &btrfs_compressed_bioset));
  70	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
  71	bbio->inode = inode;
  72	bbio->file_offset = start;
  73	return to_compressed_bio(bbio);
  74}
  75
  76bool btrfs_compress_is_valid_type(const char *str, size_t len)
  77{
  78	int i;
  79
  80	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
  81		size_t comp_len = strlen(btrfs_compress_types[i]);
  82
  83		if (len < comp_len)
  84			continue;
  85
  86		if (!strncmp(btrfs_compress_types[i], str, comp_len))
  87			return true;
  88	}
  89	return false;
  90}
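/*
 * Note that only the algorithm name prefix is compared, so a string carrying
 * a level suffix such as "zstd:3" is also accepted here; the level part is
 * parsed separately by btrfs_compress_str2level().
 */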
  91
  92static int compression_compress_pages(int type, struct list_head *ws,
  93				      struct address_space *mapping, u64 start,
  94				      struct folio **folios, unsigned long *out_folios,
  95				      unsigned long *total_in, unsigned long *total_out)
  96{
  97	switch (type) {
  98	case BTRFS_COMPRESS_ZLIB:
  99		return zlib_compress_folios(ws, mapping, start, folios,
 100					    out_folios, total_in, total_out);
 101	case BTRFS_COMPRESS_LZO:
 102		return lzo_compress_folios(ws, mapping, start, folios,
 103					   out_folios, total_in, total_out);
 104	case BTRFS_COMPRESS_ZSTD:
 105		return zstd_compress_folios(ws, mapping, start, folios,
 106					    out_folios, total_in, total_out);
 107	case BTRFS_COMPRESS_NONE:
 108	default:
 109		/*
 110		 * This can happen when compression races with remount setting
 111		 * it to 'no compress', while caller doesn't call
 112		 * inode_need_compress() to check if we really need to
 113		 * compress.
 114		 *
 115		 * Not a big deal, just need to inform caller that we
 116		 * haven't allocated any pages yet.
 117		 */
 118		*out_folios = 0;
 119		return -E2BIG;
 120	}
 121}
 122
 123static int compression_decompress_bio(struct list_head *ws,
 124				      struct compressed_bio *cb)
 125{
 126	switch (cb->compress_type) {
 127	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
 128	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
 129	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
 130	case BTRFS_COMPRESS_NONE:
 131	default:
 132		/*
 133		 * This can't happen, the type is validated several times
 134		 * before we get here.
 135		 */
 136		BUG();
 137	}
 138}
 139
 140static int compression_decompress(int type, struct list_head *ws,
 141		const u8 *data_in, struct folio *dest_folio,
 142		unsigned long dest_pgoff, size_t srclen, size_t destlen)
 143{
 144	switch (type) {
 145	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
 146						dest_pgoff, srclen, destlen);
 147	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_folio,
 148						dest_pgoff, srclen, destlen);
 149	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
 150						dest_pgoff, srclen, destlen);
 151	case BTRFS_COMPRESS_NONE:
 152	default:
 153		/*
 154		 * This can't happen, the type is validated several times
 155		 * before we get here.
 156		 */
 157		BUG();
 158	}
 159}
 160
 161static void btrfs_free_compressed_folios(struct compressed_bio *cb)
 162{
 163	for (unsigned int i = 0; i < cb->nr_folios; i++)
 164		btrfs_free_compr_folio(cb->compressed_folios[i]);
 165	kfree(cb->compressed_folios);
 166}
 167
 168static int btrfs_decompress_bio(struct compressed_bio *cb);
 169
 170/*
 171 * Global cache of last unused pages for compression/decompression.
 172 */
 173static struct btrfs_compr_pool {
 174	struct shrinker *shrinker;
 175	spinlock_t lock;
 176	struct list_head list;
 177	int count;
 178	int thresh;
 179} compr_pool;
 180
 181static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
 182{
 183	int ret;
 184
 185	/*
 186	 * We must not read the values more than once if 'ret' gets expanded in
 187	 * the return statement so we don't accidentally return a negative
 188	 * number, even if the first condition finds it positive.
 189	 */
 190	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);
 191
 192	return ret > 0 ? ret : 0;
 193}
 194
 195static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
 196{
 197	struct list_head remove;
 198	struct list_head *tmp, *next;
 199	int freed;
 200
 201	if (compr_pool.count == 0)
 202		return SHRINK_STOP;
 203
 204	INIT_LIST_HEAD(&remove);
 205
 206	/* For now, just simply drain the whole list. */
 207	spin_lock(&compr_pool.lock);
 208	list_splice_init(&compr_pool.list, &remove);
 209	freed = compr_pool.count;
 210	compr_pool.count = 0;
 211	spin_unlock(&compr_pool.lock);
 212
 213	list_for_each_safe(tmp, next, &remove) {
 214		struct page *page = list_entry(tmp, struct page, lru);
 215
 216		ASSERT(page_ref_count(page) == 1);
 217		put_page(page);
 218	}
 219
 220	return freed;
 221}
 222
 223/*
 224 * Common wrappers for page allocation from compression wrappers
 225 */
 226struct folio *btrfs_alloc_compr_folio(void)
 227{
 228	struct folio *folio = NULL;
 229
 230	spin_lock(&compr_pool.lock);
 231	if (compr_pool.count > 0) {
 232		folio = list_first_entry(&compr_pool.list, struct folio, lru);
 233		list_del_init(&folio->lru);
 234		compr_pool.count--;
 235	}
 236	spin_unlock(&compr_pool.lock);
 237
 238	if (folio)
 239		return folio;
 240
 241	return folio_alloc(GFP_NOFS, 0);
 242}
 243
 244void btrfs_free_compr_folio(struct folio *folio)
 245{
 246	bool do_free = false;
 247
 248	spin_lock(&compr_pool.lock);
 249	if (compr_pool.count > compr_pool.thresh) {
 250		do_free = true;
 251	} else {
 252		list_add(&folio->lru, &compr_pool.list);
 253		compr_pool.count++;
 254	}
 255	spin_unlock(&compr_pool.lock);
 256
 257	if (!do_free)
 258		return;
 259
 260	ASSERT(folio_ref_count(folio) == 1);
 261	folio_put(folio);
 262}
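/*
 * btrfs_alloc_compr_folio() and btrfs_free_compr_folio() above recycle
 * order-0 folios through compr_pool: freed folios are cached on the list
 * until roughly compr_pool.thresh entries are held, anything beyond that
 * goes back to the page allocator, and the registered shrinker can drain
 * the whole pool under memory pressure.
 */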
 263
 264static void end_bbio_compressed_read(struct btrfs_bio *bbio)
 265{
 266	struct compressed_bio *cb = to_compressed_bio(bbio);
 267	blk_status_t status = bbio->bio.bi_status;
 268
 269	if (!status)
 270		status = errno_to_blk_status(btrfs_decompress_bio(cb));
 271
 272	btrfs_free_compressed_folios(cb);
 273	btrfs_bio_end_io(cb->orig_bbio, status);
 274	bio_put(&bbio->bio);
 275}
 276
 277/*
 278 * Clear the writeback bits on all of the file
 279 * pages for a compressed write
 280 */
 281static noinline void end_compressed_writeback(const struct compressed_bio *cb)
 282{
 283	struct inode *inode = &cb->bbio.inode->vfs_inode;
 284	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 285	unsigned long index = cb->start >> PAGE_SHIFT;
 286	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 287	struct folio_batch fbatch;
 288	const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
 289	int i;
 290	int ret;
 291
 292	if (error)
 293		mapping_set_error(inode->i_mapping, error);
 294
 295	folio_batch_init(&fbatch);
 296	while (index <= end_index) {
 297		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
 298				&fbatch);
 299
 300		if (ret == 0)
 301			return;
 302
 303		for (i = 0; i < ret; i++) {
 304			struct folio *folio = fbatch.folios[i];
 305
 306			btrfs_folio_clamp_clear_writeback(fs_info, folio,
 307							  cb->start, cb->len);
 308		}
 309		folio_batch_release(&fbatch);
 310	}
 311	/* the inode may be gone now */
 312}
 313
 314static void btrfs_finish_compressed_write_work(struct work_struct *work)
 315{
 316	struct compressed_bio *cb =
 317		container_of(work, struct compressed_bio, write_end_work);
 318
 319	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
 320				    cb->bbio.bio.bi_status == BLK_STS_OK);
 321
 322	if (cb->writeback)
 323		end_compressed_writeback(cb);
 324	/* Note, our inode could be gone now */
 325
 326	btrfs_free_compressed_folios(cb);
 327	bio_put(&cb->bbio.bio);
 328}
 329
 330/*
 331 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 332 * writeback on the file pages and free the compressed pages.
 333 *
 334 * This also calls the writeback end hooks for the file pages so that metadata
 335 * and checksums can be updated in the file.
 336 */
 337static void end_bbio_compressed_write(struct btrfs_bio *bbio)
 338{
 339	struct compressed_bio *cb = to_compressed_bio(bbio);
 340	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
 341
 342	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
 343}
 344
 345static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
 346{
 347	struct bio *bio = &cb->bbio.bio;
 348	u32 offset = 0;
 349
 350	while (offset < cb->compressed_len) {
 351		int ret;
 352		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
 353
 354		/* Maximum compressed extent is smaller than bio size limit. */
 355		ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
 356				    len, 0);
 357		ASSERT(ret);
 358		offset += len;
 359	}
 360}
 361
 362/*
 363 * worker function to build and submit bios for previously compressed pages.
 364 * The corresponding pages in the inode should be marked for writeback
 365 * and the compressed pages should have a reference on them for dropping
 366 * when the IO is complete.
 367 *
 368 * This also checksums the file bytes and gets things ready for
 369 * the end io hooks.
 370 */
 371void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
 372				   struct folio **compressed_folios,
 373				   unsigned int nr_folios,
 374				   blk_opf_t write_flags,
 375				   bool writeback)
 376{
 377	struct btrfs_inode *inode = ordered->inode;
 378	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 379	struct compressed_bio *cb;
 380
 381	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
 382	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
 383
 384	cb = alloc_compressed_bio(inode, ordered->file_offset,
 385				  REQ_OP_WRITE | write_flags,
 386				  end_bbio_compressed_write);
 387	cb->start = ordered->file_offset;
 388	cb->len = ordered->num_bytes;
 389	cb->compressed_folios = compressed_folios;
 390	cb->compressed_len = ordered->disk_num_bytes;
 391	cb->writeback = writeback;
 392	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
 393	cb->nr_folios = nr_folios;
 394	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
 395	cb->bbio.ordered = ordered;
 396	btrfs_add_compressed_bio_folios(cb);
 397
 398	btrfs_submit_bbio(&cb->bbio, 0);
 399}
 400
 401/*
 402 * Add extra pages in the same compressed file extent so that we don't need to
 403 * re-read the same extent again and again.
 404 *
 405 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 406 * full page then submit bio for each compressed/regular extents.
 407 *
  408 * This means that if we have several sectors in the same page pointing to the same
 409 * on-disk compressed data, we will re-read the same extent many times and
 410 * this function can only help for the next page.
 411 */
 412static noinline int add_ra_bio_pages(struct inode *inode,
 413				     u64 compressed_end,
 414				     struct compressed_bio *cb,
 415				     int *memstall, unsigned long *pflags)
 416{
 417	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 418	unsigned long end_index;
 419	struct bio *orig_bio = &cb->orig_bbio->bio;
 420	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
 421	u64 isize = i_size_read(inode);
 422	int ret;
 423	struct folio *folio;
 424	struct extent_map *em;
 425	struct address_space *mapping = inode->i_mapping;
 426	struct extent_map_tree *em_tree;
 427	struct extent_io_tree *tree;
 428	int sectors_missed = 0;
 429
 430	em_tree = &BTRFS_I(inode)->extent_tree;
 431	tree = &BTRFS_I(inode)->io_tree;
 432
 433	if (isize == 0)
 434		return 0;
 435
 436	/*
 437	 * For current subpage support, we only support 64K page size,
 438	 * which means maximum compressed extent size (128K) is just 2x page
 439	 * size.
 440	 * This makes readahead less effective, so here disable readahead for
 441	 * subpage for now, until full compressed write is supported.
 442	 */
 443	if (fs_info->sectorsize < PAGE_SIZE)
 444		return 0;
 445
 446	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 447
 448	while (cur < compressed_end) {
 449		u64 page_end;
 450		u64 pg_index = cur >> PAGE_SHIFT;
 451		u32 add_size;
 452
 453		if (pg_index > end_index)
 454			break;
 455
 456		folio = filemap_get_folio(mapping, pg_index);
 457		if (!IS_ERR(folio)) {
 458			u64 folio_sz = folio_size(folio);
 459			u64 offset = offset_in_folio(folio, cur);
 460
 461			folio_put(folio);
 462			sectors_missed += (folio_sz - offset) >>
 463					  fs_info->sectorsize_bits;
 464
 465			/* Beyond threshold, no need to continue */
 466			if (sectors_missed > 4)
 467				break;
 468
 469			/*
 470			 * Jump to next page start as we already have page for
 471			 * current offset.
 472			 */
 473			cur += (folio_sz - offset);
 474			continue;
 475		}
 476
 477		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
 478								   ~__GFP_FS), 0);
 479		if (!folio)
 480			break;
 481
 482		if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
 483			/* There is already a page, skip to page end */
 484			cur += folio_size(folio);
 485			folio_put(folio);
 486			continue;
 487		}
 488
 489		if (!*memstall && folio_test_workingset(folio)) {
 490			psi_memstall_enter(pflags);
 491			*memstall = 1;
 492		}
 493
 494		ret = set_folio_extent_mapped(folio);
 495		if (ret < 0) {
 496			folio_unlock(folio);
 497			folio_put(folio);
 498			break;
 499		}
 500
 501		page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
 502		lock_extent(tree, cur, page_end, NULL);
 503		read_lock(&em_tree->lock);
 504		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
 505		read_unlock(&em_tree->lock);
 506
 507		/*
 508		 * At this point, we have a locked page in the page cache for
 509		 * these bytes in the file.  But, we have to make sure they map
 510		 * to this compressed extent on disk.
 511		 */
 512		if (!em || cur < em->start ||
 513		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
 514		    (extent_map_block_start(em) >> SECTOR_SHIFT) !=
 515		    orig_bio->bi_iter.bi_sector) {
 516			free_extent_map(em);
 517			unlock_extent(tree, cur, page_end, NULL);
 518			folio_unlock(folio);
 519			folio_put(folio);
 520			break;
 521		}
 522		add_size = min(em->start + em->len, page_end + 1) - cur;
 523		free_extent_map(em);
 524		unlock_extent(tree, cur, page_end, NULL);
 525
 526		if (folio->index == end_index) {
 527			size_t zero_offset = offset_in_folio(folio, isize);
 528
 529			if (zero_offset) {
 530				int zeros;
 531				zeros = folio_size(folio) - zero_offset;
 532				folio_zero_range(folio, zero_offset, zeros);
 533			}
 534		}
 535
 536		if (!bio_add_folio(orig_bio, folio, add_size,
 537				   offset_in_folio(folio, cur))) {
 538			folio_unlock(folio);
 539			folio_put(folio);
 540			break;
 541		}
 542		/*
 543		 * If it's subpage, we also need to increase its
 544		 * subpage::readers number, as at endio we will decrease
  545 * subpage::readers to unlock the page.
 546		 */
 547		if (fs_info->sectorsize < PAGE_SIZE)
 548			btrfs_folio_set_lock(fs_info, folio, cur, add_size);
 549		folio_put(folio);
 550		cur += add_size;
 551	}
 552	return 0;
 553}
 554
 555/*
 556 * for a compressed read, the bio we get passed has all the inode pages
 557 * in it.  We don't actually do IO on those pages but allocate new ones
 558 * to hold the compressed pages on disk.
 559 *
 560 * bio->bi_iter.bi_sector points to the compressed extent on disk
 561 * bio->bi_io_vec points to all of the inode pages
 562 *
 563 * After the compressed pages are read, we copy the bytes into the
 564 * bio we were passed and then call the bio end_io calls
 565 */
 566void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 567{
 568	struct btrfs_inode *inode = bbio->inode;
 569	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 570	struct extent_map_tree *em_tree = &inode->extent_tree;
 571	struct compressed_bio *cb;
 572	unsigned int compressed_len;
 573	u64 file_offset = bbio->file_offset;
 574	u64 em_len;
 575	u64 em_start;
 576	struct extent_map *em;
 577	unsigned long pflags;
 578	int memstall = 0;
 579	blk_status_t ret;
 580	int ret2;
 581
 582	/* we need the actual starting offset of this extent in the file */
 583	read_lock(&em_tree->lock);
 584	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
 585	read_unlock(&em_tree->lock);
 586	if (!em) {
 587		ret = BLK_STS_IOERR;
 588		goto out;
 589	}
 590
 591	ASSERT(extent_map_is_compressed(em));
 592	compressed_len = em->disk_num_bytes;
 593
 594	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
 595				  end_bbio_compressed_read);
 596
 597	cb->start = em->start - em->offset;
 598	em_len = em->len;
 599	em_start = em->start;
 600
 601	cb->len = bbio->bio.bi_iter.bi_size;
 602	cb->compressed_len = compressed_len;
 603	cb->compress_type = extent_map_compression(em);
 604	cb->orig_bbio = bbio;
 605
 606	free_extent_map(em);
 607
 608	cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 609	cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
 610	if (!cb->compressed_folios) {
 611		ret = BLK_STS_RESOURCE;
 612		goto out_free_bio;
 613	}
 614
 615	ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios);
 616	if (ret2) {
 617		ret = BLK_STS_RESOURCE;
 618		goto out_free_compressed_pages;
 619	}
 620
 621	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
 622			 &pflags);
 623
  624	/* Include any pages we added in add_ra_bio_pages(). */
 625	cb->len = bbio->bio.bi_iter.bi_size;
 626	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
 627	btrfs_add_compressed_bio_folios(cb);
 628
 629	if (memstall)
 630		psi_memstall_leave(&pflags);
 631
 632	btrfs_submit_bbio(&cb->bbio, 0);
 633	return;
 634
 635out_free_compressed_pages:
 636	kfree(cb->compressed_folios);
 637out_free_bio:
 638	bio_put(&cb->bbio.bio);
 639out:
 640	btrfs_bio_end_io(bbio, ret);
 641}
 642
 643/*
 644 * Heuristic uses systematic sampling to collect data from the input data
 645 * range, the logic can be tuned by the following constants:
 646 *
  647 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
  648 * @SAMPLING_INTERVAL  - distance, in bytes, between the starts of two consecutive samples
 649 */
 650#define SAMPLING_READ_SIZE	(16)
 651#define SAMPLING_INTERVAL	(256)
 652
 653/*
 654 * For statistical analysis of the input data we consider bytes that form a
 655 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 656 * many times the object appeared in the sample.
 657 */
 658#define BUCKET_SIZE		(256)
 659
 660/*
 661 * The size of the sample is based on a statistical sampling rule of thumb.
 662 * The common way is to perform sampling tests as long as the number of
 663 * elements in each cell is at least 5.
 664 *
 665 * Instead of 5, we choose 32 to obtain more accurate results.
 666 * If the data contain the maximum number of symbols, which is 256, we obtain a
 667 * sample size bound by 8192.
 668 *
 669 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 670 * from up to 512 locations.
 671 */
 672#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 673				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
 674
 675struct bucket_item {
 676	u32 count;
 677};
 678
 679struct heuristic_ws {
 680	/* Partial copy of input data */
 681	u8 *sample;
 682	u32 sample_size;
 683	/* Buckets store counters for each byte value */
 684	struct bucket_item *bucket;
 685	/* Sorting buffer */
 686	struct bucket_item *bucket_b;
 687	struct list_head list;
 688};
 689
 690static struct workspace_manager heuristic_wsm;
 691
 692static void free_heuristic_ws(struct list_head *ws)
 693{
 694	struct heuristic_ws *workspace;
 695
 696	workspace = list_entry(ws, struct heuristic_ws, list);
 697
 698	kvfree(workspace->sample);
 699	kfree(workspace->bucket);
 700	kfree(workspace->bucket_b);
 701	kfree(workspace);
 702}
 703
 704static struct list_head *alloc_heuristic_ws(void)
 705{
 706	struct heuristic_ws *ws;
 707
 708	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 709	if (!ws)
 710		return ERR_PTR(-ENOMEM);
 711
 712	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 713	if (!ws->sample)
 714		goto fail;
 715
 716	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 717	if (!ws->bucket)
 718		goto fail;
 719
 720	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 721	if (!ws->bucket_b)
 722		goto fail;
 723
 724	INIT_LIST_HEAD(&ws->list);
 725	return &ws->list;
 726fail:
 727	free_heuristic_ws(&ws->list);
 728	return ERR_PTR(-ENOMEM);
 729}
 730
 731const struct btrfs_compress_op btrfs_heuristic_compress = {
 732	.workspace_manager = &heuristic_wsm,
 733};
 734
 735static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 736	/* The heuristic is represented as compression type 0 */
 737	&btrfs_heuristic_compress,
 738	&btrfs_zlib_compress,
 739	&btrfs_lzo_compress,
 740	&btrfs_zstd_compress,
 741};
 742
 743static struct list_head *alloc_workspace(int type, unsigned int level)
 744{
 745	switch (type) {
 746	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws();
 747	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
 748	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace();
 749	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
 750	default:
 751		/*
 752		 * This can't happen, the type is validated several times
 753		 * before we get here.
 754		 */
 755		BUG();
 756	}
 757}
 758
 759static void free_workspace(int type, struct list_head *ws)
 760{
 761	switch (type) {
 762	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
 763	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
 764	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
 765	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
 766	default:
 767		/*
 768		 * This can't happen, the type is validated several times
 769		 * before we get here.
 770		 */
 771		BUG();
 772	}
 773}
 774
 775static void btrfs_init_workspace_manager(int type)
 776{
 777	struct workspace_manager *wsm;
 778	struct list_head *workspace;
 779
 780	wsm = btrfs_compress_op[type]->workspace_manager;
 781	INIT_LIST_HEAD(&wsm->idle_ws);
 782	spin_lock_init(&wsm->ws_lock);
 783	atomic_set(&wsm->total_ws, 0);
 784	init_waitqueue_head(&wsm->ws_wait);
 785
 786	/*
 787	 * Preallocate one workspace for each compression type so we can
 788	 * guarantee forward progress in the worst case
 789	 */
 790	workspace = alloc_workspace(type, 0);
 791	if (IS_ERR(workspace)) {
 792		pr_warn(
 793	"BTRFS: cannot preallocate compression workspace, will try later\n");
 794	} else {
 795		atomic_set(&wsm->total_ws, 1);
 796		wsm->free_ws = 1;
 797		list_add(workspace, &wsm->idle_ws);
 798	}
 799}
 800
 801static void btrfs_cleanup_workspace_manager(int type)
 802{
 803	struct workspace_manager *wsman;
 804	struct list_head *ws;
 805
 806	wsman = btrfs_compress_op[type]->workspace_manager;
 807	while (!list_empty(&wsman->idle_ws)) {
 808		ws = wsman->idle_ws.next;
 809		list_del(ws);
 810		free_workspace(type, ws);
 811		atomic_dec(&wsman->total_ws);
 812	}
 813}
 814
 815/*
 816 * This finds an available workspace or allocates a new one.
 817 * If it's not possible to allocate a new one, waits until there's one.
  818 * Preallocation provides a forward progress guarantee and we do not return
 819 * errors.
 820 */
 821struct list_head *btrfs_get_workspace(int type, unsigned int level)
 822{
 823	struct workspace_manager *wsm;
 824	struct list_head *workspace;
 825	int cpus = num_online_cpus();
 826	unsigned nofs_flag;
 827	struct list_head *idle_ws;
 828	spinlock_t *ws_lock;
 829	atomic_t *total_ws;
 830	wait_queue_head_t *ws_wait;
 831	int *free_ws;
 832
 833	wsm = btrfs_compress_op[type]->workspace_manager;
 834	idle_ws	 = &wsm->idle_ws;
 835	ws_lock	 = &wsm->ws_lock;
 836	total_ws = &wsm->total_ws;
 837	ws_wait	 = &wsm->ws_wait;
 838	free_ws	 = &wsm->free_ws;
 839
 840again:
 841	spin_lock(ws_lock);
 842	if (!list_empty(idle_ws)) {
 843		workspace = idle_ws->next;
 844		list_del(workspace);
 845		(*free_ws)--;
 846		spin_unlock(ws_lock);
 847		return workspace;
 848
 849	}
 850	if (atomic_read(total_ws) > cpus) {
 851		DEFINE_WAIT(wait);
 852
 853		spin_unlock(ws_lock);
 854		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 855		if (atomic_read(total_ws) > cpus && !*free_ws)
 856			schedule();
 857		finish_wait(ws_wait, &wait);
 858		goto again;
 859	}
 860	atomic_inc(total_ws);
 861	spin_unlock(ws_lock);
 862
 863	/*
 864	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
 865	 * to turn it off here because we might get called from the restricted
 866	 * context of btrfs_compress_bio/btrfs_compress_pages
 867	 */
 868	nofs_flag = memalloc_nofs_save();
 869	workspace = alloc_workspace(type, level);
 870	memalloc_nofs_restore(nofs_flag);
 871
 872	if (IS_ERR(workspace)) {
 873		atomic_dec(total_ws);
 874		wake_up(ws_wait);
 875
 876		/*
 877		 * Do not return the error but go back to waiting. There's a
 878		 * workspace preallocated for each type and the compression
 879		 * time is bounded so we get to a workspace eventually. This
 880		 * makes our caller's life easier.
 881		 *
 882		 * To prevent silent and low-probability deadlocks (when the
 883		 * initial preallocation fails), check if there are any
 884		 * workspaces at all.
 885		 */
 886		if (atomic_read(total_ws) == 0) {
 887			static DEFINE_RATELIMIT_STATE(_rs,
 888					/* once per minute */ 60 * HZ,
 889					/* no burst */ 1);
 890
 891			if (__ratelimit(&_rs)) {
 892				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 893			}
 894		}
 895		goto again;
 896	}
 897	return workspace;
 898}
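/*
 * Summary of the scheme above: reuse an idle workspace when one exists,
 * otherwise allocate a new one unless there are already more workspaces than
 * online CPUs, in which case (and on allocation failure) the caller sleeps
 * until btrfs_put_workspace() wakes it up.
 */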
 899
 900static struct list_head *get_workspace(int type, int level)
 901{
 902	switch (type) {
 903	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
 904	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
 905	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
 906	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
 907	default:
 908		/*
 909		 * This can't happen, the type is validated several times
 910		 * before we get here.
 911		 */
 912		BUG();
 913	}
 914}
 915
 916/*
 917 * put a workspace struct back on the list or free it if we have enough
 918 * idle ones sitting around
 919 */
 920void btrfs_put_workspace(int type, struct list_head *ws)
 921{
 922	struct workspace_manager *wsm;
 923	struct list_head *idle_ws;
 924	spinlock_t *ws_lock;
 925	atomic_t *total_ws;
 926	wait_queue_head_t *ws_wait;
 927	int *free_ws;
 928
 929	wsm = btrfs_compress_op[type]->workspace_manager;
 930	idle_ws	 = &wsm->idle_ws;
 931	ws_lock	 = &wsm->ws_lock;
 932	total_ws = &wsm->total_ws;
 933	ws_wait	 = &wsm->ws_wait;
 934	free_ws	 = &wsm->free_ws;
 935
 936	spin_lock(ws_lock);
 937	if (*free_ws <= num_online_cpus()) {
 938		list_add(ws, idle_ws);
 939		(*free_ws)++;
 940		spin_unlock(ws_lock);
 941		goto wake;
 942	}
 943	spin_unlock(ws_lock);
 944
 945	free_workspace(type, ws);
 946	atomic_dec(total_ws);
 947wake:
 948	cond_wake_up(ws_wait);
 949}
 950
 951static void put_workspace(int type, struct list_head *ws)
 952{
 953	switch (type) {
 954	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
 955	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
 956	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
 957	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
 958	default:
 959		/*
 960		 * This can't happen, the type is validated several times
 961		 * before we get here.
 962		 */
 963		BUG();
 964	}
 965}
 966
 967/*
 968 * Adjust @level according to the limits of the compression algorithm or
 969 * fallback to default
 970 */
 971static unsigned int btrfs_compress_set_level(int type, unsigned level)
 972{
 973	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
 974
 975	if (level == 0)
 976		level = ops->default_level;
 977	else
 978		level = min(level, ops->max_level);
 979
 980	return level;
 981}
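/*
 * For example, level 0 (no explicit level requested) resolves to the
 * algorithm's default_level, while an out-of-range request such as zlib
 * level 12 is clamped down to its max_level (9 for zlib).
 */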
 982
  983/* Wrapper around filemap_get_folio(), with an extra error message. */
 984int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
 985				     struct folio **in_folio_ret)
 986{
 987	struct folio *in_folio;
 988
 989	/*
 990	 * The compressed write path should have the folio locked already, thus
 991	 * we only need to grab one reference.
 992	 */
 993	in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
 994	if (IS_ERR(in_folio)) {
 995		struct btrfs_inode *inode = BTRFS_I(mapping->host);
 996
 997		btrfs_crit(inode->root->fs_info,
 998		"failed to get page cache, root %lld ino %llu file offset %llu",
 999			   btrfs_root_id(inode->root), btrfs_ino(inode), start);
1000		return -ENOENT;
1001	}
1002	*in_folio_ret = in_folio;
1003	return 0;
1004}
1005
1006/*
 1007 * Given an address space and start and length, compress the bytes into @folios
 1008 * that are allocated on demand.
 1009 *
 1010 * @type_level is the encoded algorithm and level, where level 0 means whatever
 1011 * default the algorithm chooses and is opaque here;
 1012 * - the compression algorithm is in bits 0-3
 1013 * - the level is in bits 4-7
 1014 *
 1015 * @out_folios is an in/out parameter, holds the maximum number of folios to
 1016 * allocate and returns the number of folios actually allocated
 1017 *
 1018 * @total_in is used to return the number of bytes actually read.  It
 1019 * may be smaller than the input length if we had to exit early because we
 1020 * ran out of room in the folios array or because we crossed the
 1021 * max_out threshold.
 1022 *
 1023 * @total_out is an in/out parameter, must be set to the input length and will
 1024 * also be used to return the total number of compressed bytes
1025 */
1026int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
1027			 u64 start, struct folio **folios, unsigned long *out_folios,
1028			 unsigned long *total_in, unsigned long *total_out)
1029{
1030	int type = btrfs_compress_type(type_level);
1031	int level = btrfs_compress_level(type_level);
1032	const unsigned long orig_len = *total_out;
1033	struct list_head *workspace;
1034	int ret;
1035
1036	level = btrfs_compress_set_level(type, level);
1037	workspace = get_workspace(type, level);
1038	ret = compression_compress_pages(type, workspace, mapping, start, folios,
1039					 out_folios, total_in, total_out);
1040	/* The total read-in bytes should be no larger than the input. */
1041	ASSERT(*total_in <= orig_len);
1042	put_workspace(type, workspace);
1043	return ret;
1044}
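/*
 * Example @type_level encoding: (BTRFS_COMPRESS_ZLIB | (9 << 4)) selects
 * zlib at level 9, while a plain BTRFS_COMPRESS_ZSTD with the level bits
 * left at zero lets btrfs_compress_set_level() pick the zstd default.
 */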
1045
1046static int btrfs_decompress_bio(struct compressed_bio *cb)
1047{
1048	struct list_head *workspace;
1049	int ret;
1050	int type = cb->compress_type;
1051
1052	workspace = get_workspace(type, 0);
1053	ret = compression_decompress_bio(workspace, cb);
1054	put_workspace(type, workspace);
1055
1056	if (!ret)
1057		zero_fill_bio(&cb->orig_bbio->bio);
1058	return ret;
1059}
1060
1061/*
1062 * a less complex decompression routine.  Our compressed data fits in a
1063 * single page, and we want to read a single page out of it.
1064 * start_byte tells us the offset into the compressed data we're interested in
1065 */
1066int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
1067		     unsigned long dest_pgoff, size_t srclen, size_t destlen)
1068{
1069	struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
1070	struct list_head *workspace;
1071	const u32 sectorsize = fs_info->sectorsize;
1072	int ret;
1073
1074	/*
1075	 * The full destination page range should not exceed the page size.
1076	 * And the @destlen should not exceed sectorsize, as this is only called for
1077	 * inline file extents, which should not exceed sectorsize.
1078	 */
1079	ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
1080
1081	workspace = get_workspace(type, 0);
1082	ret = compression_decompress(type, workspace, data_in, dest_folio,
1083				     dest_pgoff, srclen, destlen);
1084	put_workspace(type, workspace);
1085
1086	return ret;
1087}
1088
1089int __init btrfs_init_compress(void)
1090{
1091	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
1092			offsetof(struct compressed_bio, bbio.bio),
1093			BIOSET_NEED_BVECS))
1094		return -ENOMEM;
1095
1096	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
1097	if (!compr_pool.shrinker)
1098		return -ENOMEM;
1099
1100	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1101	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1102	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1103	zstd_init_workspace_manager();
1104
1105	spin_lock_init(&compr_pool.lock);
1106	INIT_LIST_HEAD(&compr_pool.list);
1107	compr_pool.count = 0;
1108	/* 128K / 4K = 32, for 8 threads is 256 pages. */
1109	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
1110	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
1111	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
1112	compr_pool.shrinker->batch = 32;
1113	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
1114	shrinker_register(compr_pool.shrinker);
1115
1116	return 0;
1117}
1118
1119void __cold btrfs_exit_compress(void)
1120{
1121	/* For now scan drains all pages and does not touch the parameters. */
1122	btrfs_compr_pool_scan(NULL, NULL);
1123	shrinker_free(compr_pool.shrinker);
1124
1125	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1126	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1127	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1128	zstd_cleanup_workspace_manager();
1129	bioset_exit(&btrfs_compressed_bioset);
1130}
1131
1132/*
1133 * Copy decompressed data from working buffer to pages.
1134 *
1135 * @buf:		The decompressed data buffer
1136 * @buf_len:		The decompressed data length
1137 * @decompressed:	Number of bytes that are already decompressed inside the
1138 * 			compressed extent
1139 * @cb:			The compressed extent descriptor
1140 * @orig_bio:		The original bio that the caller wants to read for
1141 *
1142 * An easier to understand graph is like below:
1143 *
1144 * 		|<- orig_bio ->|     |<- orig_bio->|
1145 * 	|<-------      full decompressed extent      ----->|
1146 * 	|<-----------    @cb range   ---->|
1147 * 	|			|<-- @buf_len -->|
1148 * 	|<--- @decompressed --->|
1149 *
1150 * Note that, @cb can be a subpage of the full decompressed extent, but
 1151 * @cb->start is always the same as the file offset of the full
1152 * decompressed extent.
1153 *
1154 * When reading compressed extent, we have to read the full compressed extent,
1155 * while @orig_bio may only want part of the range.
 1156 * Thus this function will ensure only the data covered by @orig_bio gets copied
 1157 * to it.
1158 *
1159 * Return 0 if we have copied all needed contents for @orig_bio.
 1160 * Return >0 if we need to continue decompressing.
1161 */
1162int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1163			      struct compressed_bio *cb, u32 decompressed)
1164{
1165	struct bio *orig_bio = &cb->orig_bbio->bio;
1166	/* Offset inside the full decompressed extent */
1167	u32 cur_offset;
1168
1169	cur_offset = decompressed;
1170	/* The main loop to do the copy */
1171	while (cur_offset < decompressed + buf_len) {
1172		struct bio_vec bvec;
1173		size_t copy_len;
1174		u32 copy_start;
1175		/* Offset inside the full decompressed extent */
1176		u32 bvec_offset;
1177
1178		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1179		/*
1180		 * cb->start may underflow, but subtracting that value can still
1181		 * give us correct offset inside the full decompressed extent.
1182		 */
1183		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1184
1185		/* Haven't reached the bvec range, exit */
1186		if (decompressed + buf_len <= bvec_offset)
1187			return 1;
1188
1189		copy_start = max(cur_offset, bvec_offset);
1190		copy_len = min(bvec_offset + bvec.bv_len,
1191			       decompressed + buf_len) - copy_start;
1192		ASSERT(copy_len);
1193
1194		/*
1195		 * Extra range check to ensure we didn't go beyond
1196		 * @buf + @buf_len.
1197		 */
1198		ASSERT(copy_start - decompressed < buf_len);
1199		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1200			       buf + copy_start - decompressed, copy_len);
1201		cur_offset += copy_len;
1202
1203		bio_advance(orig_bio, copy_len);
1204		/* Finished the bio */
1205		if (!orig_bio->bi_iter.bi_size)
1206			return 0;
1207	}
1208	return 1;
1209}
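/*
 * Example: with cb->start == 0, calling this with @decompressed == 0 and
 * @buf_len == 4096 while the remaining @orig_bio starts at file offset 8192
 * copies nothing and returns 1, i.e. the caller has to keep decompressing
 * until the working buffer reaches the range the bio actually wants.
 */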
1210
1211/*
1212 * Shannon Entropy calculation
1213 *
1214 * Pure byte distribution analysis fails to determine compressibility of data.
1215 * Try calculating entropy to estimate the average minimum number of bits
1216 * needed to encode the sampled data.
1217 *
1218 * For convenience, return the percentage of needed bits, instead of amount of
1219 * bits directly.
1220 *
1221 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1222 *			    and can be compressible with high probability
1223 *
1224 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1225 *
1226 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1227 */
1228#define ENTROPY_LVL_ACEPTABLE		(65)
1229#define ENTROPY_LVL_HIGH		(80)
1230
1231/*
 1232 * For increased precision in the shannon_entropy calculation,
 1233 * let's do pow(n, M) to keep more digits after the decimal point:
1234 *
1235 * - maximum int bit length is 64
1236 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1237 * - 13 * 4 = 52 < 64		-> M = 4
1238 *
1239 * So use pow(n, 4).
1240 */
1241static inline u32 ilog2_w(u64 n)
1242{
1243	return ilog2(n * n * n * n);
1244}
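/*
 * ilog2_w() is a fixed-point log2 scaled by 4, e.g. ilog2_w(32) is
 * ilog2(32 * 32 * 32 * 32) = 20 and ilog2_w(8192) = 52, so n * n * n * n
 * still fits in a u64 for any count up to MAX_SAMPLE_SIZE.
 */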
1245
1246static u32 shannon_entropy(struct heuristic_ws *ws)
1247{
1248	const u32 entropy_max = 8 * ilog2_w(2);
1249	u32 entropy_sum = 0;
1250	u32 p, p_base, sz_base;
1251	u32 i;
1252
1253	sz_base = ilog2_w(ws->sample_size);
1254	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1255		p = ws->bucket[i].count;
1256		p_base = ilog2_w(p);
1257		entropy_sum += p * (sz_base - p_base);
1258	}
1259
1260	entropy_sum /= ws->sample_size;
1261	return entropy_sum * 100 / entropy_max;
1262}
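/*
 * Sanity check of the scaling: for a uniformly random 8192 byte sample each
 * bucket holds ~32 entries, so sz_base - p_base == 52 - 20 == 32 per byte,
 * entropy_sum / sample_size == 32 == entropy_max and the function reports
 * 100%.  A sample consisting of a single byte value reports 0%.
 */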
1263
1264#define RADIX_BASE		4U
1265#define COUNTERS_SIZE		(1U << RADIX_BASE)
1266
1267static u8 get4bits(u64 num, int shift) {
1268	u8 low4bits;
1269
1270	num >>= shift;
1271	/* Reverse order */
1272	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1273	return low4bits;
1274}
1275
1276/*
1277 * Use 4 bits as radix base
1278 * Use 16 u32 counters for calculating new position in buf array
1279 *
1280 * @array     - array that will be sorted
1281 * @array_buf - buffer array to store sorting results
1282 *              must be equal in size to @array
1283 * @num       - array size
1284 */
1285static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1286		       int num)
1287{
1288	u64 max_num;
1289	u64 buf_num;
1290	u32 counters[COUNTERS_SIZE];
1291	u32 new_addr;
1292	u32 addr;
1293	int bitlen;
1294	int shift;
1295	int i;
1296
1297	/*
 1298 * Try to avoid useless loop iterations for small numbers stored in big
1299	 * counters.  Example: 48 33 4 ... in 64bit array
1300	 */
1301	max_num = array[0].count;
1302	for (i = 1; i < num; i++) {
1303		buf_num = array[i].count;
1304		if (buf_num > max_num)
1305			max_num = buf_num;
1306	}
1307
1308	buf_num = ilog2(max_num);
1309	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1310
1311	shift = 0;
1312	while (shift < bitlen) {
1313		memset(counters, 0, sizeof(counters));
1314
1315		for (i = 0; i < num; i++) {
1316			buf_num = array[i].count;
1317			addr = get4bits(buf_num, shift);
1318			counters[addr]++;
1319		}
1320
1321		for (i = 1; i < COUNTERS_SIZE; i++)
1322			counters[i] += counters[i - 1];
1323
1324		for (i = num - 1; i >= 0; i--) {
1325			buf_num = array[i].count;
1326			addr = get4bits(buf_num, shift);
1327			counters[addr]--;
1328			new_addr = counters[addr];
1329			array_buf[new_addr] = array[i];
1330		}
1331
1332		shift += RADIX_BASE;
1333
1334		/*
1335		 * Normal radix expects to move data from a temporary array, to
1336		 * the main one.  But that requires some CPU time. Avoid that
1337		 * by doing another sort iteration to original array instead of
1338		 * memcpy()
1339		 */
1340		memset(counters, 0, sizeof(counters));
1341
1342		for (i = 0; i < num; i ++) {
1343			buf_num = array_buf[i].count;
1344			addr = get4bits(buf_num, shift);
1345			counters[addr]++;
1346		}
1347
1348		for (i = 1; i < COUNTERS_SIZE; i++)
1349			counters[i] += counters[i - 1];
1350
1351		for (i = num - 1; i >= 0; i--) {
1352			buf_num = array_buf[i].count;
1353			addr = get4bits(buf_num, shift);
1354			counters[addr]--;
1355			new_addr = counters[addr];
1356			array[new_addr] = array_buf[i];
1357		}
1358
1359		shift += RADIX_BASE;
1360	}
1361}
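/*
 * Because get4bits() inverts each 4-bit digit, the result is sorted in
 * descending order of count.  Every loop iteration above processes two
 * digits (one pass into array_buf, one back into array), so the 32-bit
 * counters need at most four iterations.
 */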
1362
1363/*
1364 * Size of the core byte set - how many bytes cover 90% of the sample
1365 *
1366 * There are several types of structured binary data that use nearly all byte
1367 * values. The distribution can be uniform and counts in all buckets will be
1368 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1369 *
1370 * Other possibility is normal (Gaussian) distribution, where the data could
1371 * be potentially compressible, but we have to take a few more steps to decide
1372 * how much.
1373 *
1374 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 1375 *                       compression algorithms can easily handle that
1376 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
1377 *                       probability is not compressible
1378 */
1379#define BYTE_CORE_SET_LOW		(64)
1380#define BYTE_CORE_SET_HIGH		(200)
1381
1382static int byte_core_set_size(struct heuristic_ws *ws)
1383{
1384	u32 i;
1385	u32 coreset_sum = 0;
1386	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1387	struct bucket_item *bucket = ws->bucket;
1388
1389	/* Sort in reverse order */
1390	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1391
1392	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1393		coreset_sum += bucket[i].count;
1394
1395	if (coreset_sum > core_set_threshold)
1396		return i;
1397
1398	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1399		coreset_sum += bucket[i].count;
1400		if (coreset_sum > core_set_threshold)
1401			break;
1402	}
1403
1404	return i;
1405}
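/*
 * After the descending sort, this walks the most frequent byte values until
 * they cover 90% of the sample.  The caller only cares whether the result is
 * at or below BYTE_CORE_SET_LOW (likely compressible, e.g. plain text) or at
 * or above BYTE_CORE_SET_HIGH (uniform-looking data, not worth compressing).
 */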
1406
1407/*
1408 * Count byte values in buckets.
1409 * This heuristic can detect textual data (configs, xml, json, html, etc).
 1410 * Because in most text-like data the byte set is restricted to a limited number
 1411 * of possible characters, and that restriction in most cases makes the data easy to
1412 * compress.
1413 *
1414 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1415 *	less - compressible
1416 *	more - need additional analysis
1417 */
1418#define BYTE_SET_THRESHOLD		(64)
1419
1420static u32 byte_set_size(const struct heuristic_ws *ws)
1421{
1422	u32 i;
1423	u32 byte_set_size = 0;
1424
1425	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1426		if (ws->bucket[i].count > 0)
1427			byte_set_size++;
1428	}
1429
1430	/*
1431	 * Continue collecting count of byte values in buckets.  If the byte
 1432 * set size is bigger than the threshold, it's pointless to continue,
1433	 * the detection technique would fail for this type of data.
1434	 */
1435	for (; i < BUCKET_SIZE; i++) {
1436		if (ws->bucket[i].count > 0) {
1437			byte_set_size++;
1438			if (byte_set_size > BYTE_SET_THRESHOLD)
1439				return byte_set_size;
1440		}
1441	}
1442
1443	return byte_set_size;
1444}
1445
1446static bool sample_repeated_patterns(struct heuristic_ws *ws)
1447{
1448	const u32 half_of_sample = ws->sample_size / 2;
1449	const u8 *data = ws->sample;
1450
1451	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1452}
1453
1454static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1455				     struct heuristic_ws *ws)
1456{
1457	struct page *page;
1458	u64 index, index_end;
1459	u32 i, curr_sample_pos;
1460	u8 *in_data;
1461
1462	/*
1463	 * Compression handles the input data by chunks of 128KiB
1464	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1465	 *
1466	 * We do the same for the heuristic and loop over the whole range.
1467	 *
1468	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1469	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1470	 */
1471	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1472		end = start + BTRFS_MAX_UNCOMPRESSED;
1473
1474	index = start >> PAGE_SHIFT;
1475	index_end = end >> PAGE_SHIFT;
1476
1477	/* Don't miss unaligned end */
1478	if (!PAGE_ALIGNED(end))
1479		index_end++;
1480
1481	curr_sample_pos = 0;
1482	while (index < index_end) {
1483		page = find_get_page(inode->i_mapping, index);
1484		in_data = kmap_local_page(page);
1485		/* Handle case where the start is not aligned to PAGE_SIZE */
1486		i = start % PAGE_SIZE;
1487		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1488			/* Don't sample any garbage from the last page */
1489			if (start > end - SAMPLING_READ_SIZE)
1490				break;
1491			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1492					SAMPLING_READ_SIZE);
1493			i += SAMPLING_INTERVAL;
1494			start += SAMPLING_INTERVAL;
1495			curr_sample_pos += SAMPLING_READ_SIZE;
1496		}
1497		kunmap_local(in_data);
1498		put_page(page);
1499
1500		index++;
1501	}
1502
1503	ws->sample_size = curr_sample_pos;
1504}
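/*
 * The sampling pattern above: starting at @start, copy SAMPLING_READ_SIZE
 * (16) bytes every SAMPLING_INTERVAL (256) bytes, skipping samples that
 * would cross a page boundary or run past @end, so a full 128KiB range
 * yields up to 512 * 16 = 8KiB of data in ws->sample.
 */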
1505
1506/*
1507 * Compression heuristic.
1508 *
1509 * The following types of analysis can be performed:
1510 * - detect mostly zero data
1511 * - detect data with low "byte set" size (text, etc)
1512 * - detect data with low/high "core byte" set
1513 *
1514 * Return non-zero if the compression should be done, 0 otherwise.
1515 */
1516int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
1517{
1518	struct list_head *ws_list = get_workspace(0, 0);
1519	struct heuristic_ws *ws;
1520	u32 i;
1521	u8 byte;
1522	int ret = 0;
1523
1524	ws = list_entry(ws_list, struct heuristic_ws, list);
1525
1526	heuristic_collect_sample(&inode->vfs_inode, start, end, ws);
1527
1528	if (sample_repeated_patterns(ws)) {
1529		ret = 1;
1530		goto out;
1531	}
1532
1533	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1534
1535	for (i = 0; i < ws->sample_size; i++) {
1536		byte = ws->sample[i];
1537		ws->bucket[byte].count++;
1538	}
1539
1540	i = byte_set_size(ws);
1541	if (i < BYTE_SET_THRESHOLD) {
1542		ret = 2;
1543		goto out;
1544	}
1545
1546	i = byte_core_set_size(ws);
1547	if (i <= BYTE_CORE_SET_LOW) {
1548		ret = 3;
1549		goto out;
1550	}
1551
1552	if (i >= BYTE_CORE_SET_HIGH) {
1553		ret = 0;
1554		goto out;
1555	}
1556
1557	i = shannon_entropy(ws);
1558	if (i <= ENTROPY_LVL_ACEPTABLE) {
1559		ret = 4;
1560		goto out;
1561	}
1562
1563	/*
1564	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1565	 * needed to give green light to compression.
1566	 *
1567	 * For now just assume that compression at that level is not worth the
1568	 * resources because:
1569	 *
1570	 * 1. it is possible to defrag the data later
1571	 *
1572	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1573	 * values, every bucket has counter at level ~54. The heuristic would
1574	 * be confused. This can happen when data have some internal repeated
1575	 * patterns like "abbacbbc...". This can be detected by analyzing
1576	 * pairs of bytes, which is too costly.
1577	 */
1578	if (i < ENTROPY_LVL_HIGH) {
1579		ret = 5;
1580		goto out;
1581	} else {
1582		ret = 0;
1583		goto out;
1584	}
1585
1586out:
1587	put_workspace(0, ws_list);
1588	return ret;
1589}
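/*
 * The distinct non-zero return values above record which detector made the
 * decision: 1 - repeated sample halves, 2 - small byte set, 3 - small core
 * byte set, 4 - low entropy, 5 - medium entropy still given the benefit of
 * the doubt.  Callers only need to test for zero vs. non-zero.
 */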
1590
1591/*
 1592 * Convert the compression suffix (eg. after "zlib", starting with ":") to a
 1593 * level; an unrecognized string sets the default level
1594 */
1595unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1596{
1597	unsigned int level = 0;
1598	int ret;
1599
1600	if (!type)
1601		return 0;
1602
1603	if (str[0] == ':') {
1604		ret = kstrtouint(str + 1, 10, &level);
1605		if (ret)
1606			level = 0;
1607	}
1608
1609	level = btrfs_compress_set_level(type, level);
1610
1611	return level;
1612}
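/*
 * Example: for "zstd:3" this is called with the ":3" suffix and returns
 * level 3, while a bare algorithm name or an unparsable suffix falls back
 * to the default level via btrfs_compress_set_level().
 */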
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
   8#include <linux/file.h>
   9#include <linux/fs.h>
  10#include <linux/pagemap.h>
 
  11#include <linux/highmem.h>
 
  12#include <linux/time.h>
  13#include <linux/init.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/writeback.h>
 
  17#include <linux/slab.h>
  18#include <linux/sched/mm.h>
  19#include <linux/log2.h>
 
  20#include <crypto/hash.h>
  21#include "misc.h"
  22#include "ctree.h"
  23#include "disk-io.h"
  24#include "transaction.h"
  25#include "btrfs_inode.h"
  26#include "volumes.h"
  27#include "ordered-data.h"
  28#include "compression.h"
  29#include "extent_io.h"
  30#include "extent_map.h"
  31#include "zoned.h"
 
 
 
 
  32
  33static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  34
  35const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  36{
  37	switch (type) {
  38	case BTRFS_COMPRESS_ZLIB:
  39	case BTRFS_COMPRESS_LZO:
  40	case BTRFS_COMPRESS_ZSTD:
  41	case BTRFS_COMPRESS_NONE:
  42		return btrfs_compress_types[type];
  43	default:
  44		break;
  45	}
  46
  47	return NULL;
  48}
  49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  50bool btrfs_compress_is_valid_type(const char *str, size_t len)
  51{
  52	int i;
  53
  54	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
  55		size_t comp_len = strlen(btrfs_compress_types[i]);
  56
  57		if (len < comp_len)
  58			continue;
  59
  60		if (!strncmp(btrfs_compress_types[i], str, comp_len))
  61			return true;
  62	}
  63	return false;
  64}
  65
  66static int compression_compress_pages(int type, struct list_head *ws,
  67               struct address_space *mapping, u64 start, struct page **pages,
  68               unsigned long *out_pages, unsigned long *total_in,
  69               unsigned long *total_out)
  70{
  71	switch (type) {
  72	case BTRFS_COMPRESS_ZLIB:
  73		return zlib_compress_pages(ws, mapping, start, pages,
  74				out_pages, total_in, total_out);
  75	case BTRFS_COMPRESS_LZO:
  76		return lzo_compress_pages(ws, mapping, start, pages,
  77				out_pages, total_in, total_out);
  78	case BTRFS_COMPRESS_ZSTD:
  79		return zstd_compress_pages(ws, mapping, start, pages,
  80				out_pages, total_in, total_out);
  81	case BTRFS_COMPRESS_NONE:
  82	default:
  83		/*
  84		 * This can happen when compression races with remount setting
  85		 * it to 'no compress', while caller doesn't call
  86		 * inode_need_compress() to check if we really need to
  87		 * compress.
  88		 *
  89		 * Not a big deal, just need to inform caller that we
  90		 * haven't allocated any pages yet.
  91		 */
  92		*out_pages = 0;
  93		return -E2BIG;
  94	}
  95}
  96
  97static int compression_decompress_bio(int type, struct list_head *ws,
  98		struct compressed_bio *cb)
  99{
 100	switch (type) {
 101	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
 102	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
 103	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
 104	case BTRFS_COMPRESS_NONE:
 105	default:
 106		/*
 107		 * This can't happen, the type is validated several times
 108		 * before we get here.
 109		 */
 110		BUG();
 111	}
 112}
 113
 114static int compression_decompress(int type, struct list_head *ws,
 115               unsigned char *data_in, struct page *dest_page,
 116               unsigned long start_byte, size_t srclen, size_t destlen)
 117{
 118	switch (type) {
 119	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
 120						start_byte, srclen, destlen);
 121	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
 122						start_byte, srclen, destlen);
 123	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
 124						start_byte, srclen, destlen);
 125	case BTRFS_COMPRESS_NONE:
 126	default:
 127		/*
 128		 * This can't happen, the type is validated several times
 129		 * before we get here.
 130		 */
 131		BUG();
 132	}
 133}
 134
 
 
 
 
 
 
 
 135static int btrfs_decompress_bio(struct compressed_bio *cb);
 136
 137static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
 138				      unsigned long disk_size)
 
 
 
 
 
 
 
 
 
 
 139{
 140	return sizeof(struct compressed_bio) +
 141		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
 
 
 
 
 
 
 
 
 142}
 143
 144static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 145				 u64 disk_start)
 146{
 147	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 148	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 149	const u32 csum_size = fs_info->csum_size;
 150	const u32 sectorsize = fs_info->sectorsize;
 151	struct page *page;
 152	unsigned int i;
 153	char *kaddr;
 154	u8 csum[BTRFS_CSUM_SIZE];
 155	struct compressed_bio *cb = bio->bi_private;
 156	u8 *cb_sum = cb->sums;
 157
 158	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
 159		return 0;
 
 
 
 
 160
 161	shash->tfm = fs_info->csum_shash;
 
 162
 163	for (i = 0; i < cb->nr_pages; i++) {
 164		u32 pg_offset;
 165		u32 bytes_left = PAGE_SIZE;
 166		page = cb->compressed_pages[i];
 167
 168		/* Determine the remaining bytes inside the page first */
 169		if (i == cb->nr_pages - 1)
 170			bytes_left = cb->compressed_len - i * PAGE_SIZE;
 171
 172		/* Hash through the page sector by sector */
 173		for (pg_offset = 0; pg_offset < bytes_left;
 174		     pg_offset += sectorsize) {
 175			kaddr = kmap_atomic(page);
 176			crypto_shash_digest(shash, kaddr + pg_offset,
 177					    sectorsize, csum);
 178			kunmap_atomic(kaddr);
 179
 180			if (memcmp(&csum, cb_sum, csum_size) != 0) {
 181				btrfs_print_data_csum_error(inode, disk_start,
 182						csum, cb_sum, cb->mirror_num);
 183				if (btrfs_io_bio(bio)->device)
 184					btrfs_dev_stat_inc_and_print(
 185						btrfs_io_bio(bio)->device,
 186						BTRFS_DEV_STAT_CORRUPTION_ERRS);
 187				return -EIO;
 188			}
 189			cb_sum += csum_size;
 190			disk_start += sectorsize;
 191		}
 192	}
 193	return 0;
 
 194}
 195
 196/* when we finish reading compressed pages from the disk, we
 197 * decompress them and then run the bio end_io routines on the
 198 * decompressed pages (in the inode address space).
 199 *
 200 * This allows the checksumming and other IO error handling routines
 201 * to work normally
 202 *
 203 * The compressed pages are freed here, and it must be run
 204 * in process context
 205 */
 206static void end_compressed_bio_read(struct bio *bio)
 207{
 208	struct compressed_bio *cb = bio->bi_private;
 209	struct inode *inode;
 210	struct page *page;
 211	unsigned int index;
 212	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
 213	int ret = 0;
 214
 215	if (bio->bi_status)
 216		cb->errors = 1;
 217
 218	/* if there are more bios still pending for this compressed
 219	 * extent, just exit
 220	 */
 221	if (!refcount_dec_and_test(&cb->pending_bios))
 222		goto out;
 223
 224	/*
 225	 * Record the correct mirror_num in cb->orig_bio so that
 226	 * read-repair can work properly.
 227	 */
 228	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
 229	cb->mirror_num = mirror;
 230
 231	/*
 232	 * Some IO in this cb have failed, just skip checksum as there
 233	 * is no way it could be correct.
 234	 */
 235	if (cb->errors == 1)
 236		goto csum_failed;
 237
 238	inode = cb->inode;
 239	ret = check_compressed_csum(BTRFS_I(inode), bio,
 240				    bio->bi_iter.bi_sector << 9);
 241	if (ret)
 242		goto csum_failed;
 243
  244	/* ok, we're the last bio for this extent, let's start
 245	 * the decompression.
 246	 */
 247	ret = btrfs_decompress_bio(cb);
 248
 249csum_failed:
 250	if (ret)
 251		cb->errors = 1;
 252
 253	/* release the compressed pages */
 254	index = 0;
 255	for (index = 0; index < cb->nr_pages; index++) {
 256		page = cb->compressed_pages[index];
 257		page->mapping = NULL;
 258		put_page(page);
 259	}
 260
 261	/* do io completion on the original bio */
 262	if (cb->errors) {
 263		bio_io_error(cb->orig_bio);
 264	} else {
 265		struct bio_vec *bvec;
 266		struct bvec_iter_all iter_all;
 267
 268		/*
 269		 * we have verified the checksum already, set page
 270		 * checked so the end_io handlers know about it
 271		 */
 272		ASSERT(!bio_flagged(bio, BIO_CLONED));
 273		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
 274			SetPageChecked(bvec->bv_page);
 275
 276		bio_endio(cb->orig_bio);
 277	}
 278
 279	/* finally free the cb struct */
 280	kfree(cb->compressed_pages);
 281	kfree(cb);
 282out:
 283	bio_put(bio);
 284}
 285
 286/*
 287 * Clear the writeback bits on all of the file
 288 * pages for a compressed write
 289 */
 290static noinline void end_compressed_writeback(struct inode *inode,
 291					      const struct compressed_bio *cb)
 292{
 293	unsigned long index = cb->start >> PAGE_SHIFT;
 294	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 295	struct page *pages[16];
 296	unsigned long nr_pages = end_index - index + 1;
 297	int i;
 298	int ret;
 299
 300	if (cb->errors)
 301		mapping_set_error(inode->i_mapping, -EIO);
 302
 303	while (nr_pages > 0) {
 304		ret = find_get_pages_contig(inode->i_mapping, index,
 305				     min_t(unsigned long,
 306				     nr_pages, ARRAY_SIZE(pages)), pages);
 307		if (ret == 0) {
 308			nr_pages -= 1;
 309			index += 1;
 310			continue;
 311		}
 312		for (i = 0; i < ret; i++) {
 313			if (cb->errors)
 314				SetPageError(pages[i]);
 315			end_page_writeback(pages[i]);
 316			put_page(pages[i]);
 317		}
 318		nr_pages -= ret;
 319		index += ret;
 320	}
 321	/* the inode may be gone now */
 322}
 323
 324/*
 325 * do the cleanup once all the compressed pages hit the disk.
 326 * This will clear writeback on the file pages and free the compressed
 327 * pages.
 328 *
 329 * This also calls the writeback end hooks for the file pages so that
 330 * metadata and checksums can be updated in the file.
 331 */
 332static void end_compressed_bio_write(struct bio *bio)
 333{
 334	struct compressed_bio *cb = bio->bi_private;
 335	struct inode *inode;
 336	struct page *page;
 337	unsigned int index;
 338
 339	if (bio->bi_status)
 340		cb->errors = 1;
 341
 342	/* if there are more bios still pending for this compressed
 343	 * extent, just exit
 344	 */
 345	if (!refcount_dec_and_test(&cb->pending_bios))
 346		goto out;
 347
 348	/* ok, we're the last bio for this extent, step one is to
 349	 * call back into the FS and do all the end_io operations
 350	 */
 351	inode = cb->inode;
 352	btrfs_record_physical_zoned(inode, cb->start, bio);
 353	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
 354			cb->start, cb->start + cb->len - 1,
 355			!cb->errors);
 356
 357	end_compressed_writeback(inode, cb);
 358	/* note, our inode could be gone now */
 359
 360	/*
 361	 * release the compressed pages, these came from alloc_page and
 362	 * are not attached to the inode at all
 363	 */
 364	index = 0;
 365	for (index = 0; index < cb->nr_pages; index++) {
 366		page = cb->compressed_pages[index];
 367		page->mapping = NULL;
 368		put_page(page);
 369	}
 370
 371	/* finally free the cb struct */
 372	kfree(cb->compressed_pages);
 373	kfree(cb);
 374out:
 375	bio_put(bio);
 376}
 377
 378/*
 379 * worker function to build and submit bios for previously compressed pages.
 380 * The corresponding pages in the inode should be marked for writeback
 381 * and the compressed pages should have a reference on them for dropping
 382 * when the IO is complete.
 383 *
 384 * This also checksums the file bytes and gets things ready for
 385 * the end io hooks.
 386 */
 387blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 388				 unsigned int len, u64 disk_start,
 389				 unsigned int compressed_len,
 390				 struct page **compressed_pages,
 391				 unsigned int nr_pages,
 392				 unsigned int write_flags,
 393				 struct cgroup_subsys_state *blkcg_css)
 394{
 
 395	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 396	struct bio *bio = NULL;
 397	struct compressed_bio *cb;
 398	unsigned long bytes_left;
 399	int pg_index = 0;
 400	struct page *page;
 401	u64 first_byte = disk_start;
 402	blk_status_t ret;
 403	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
 404	const bool use_append = btrfs_use_zone_append(inode, disk_start);
 405	const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
 406
 407	WARN_ON(!PAGE_ALIGNED(start));
 408	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 409	if (!cb)
 410		return BLK_STS_RESOURCE;
 411	refcount_set(&cb->pending_bios, 0);
 412	cb->errors = 0;
 413	cb->inode = &inode->vfs_inode;
 414	cb->start = start;
 415	cb->len = len;
 416	cb->mirror_num = 0;
 417	cb->compressed_pages = compressed_pages;
 418	cb->compressed_len = compressed_len;
 419	cb->orig_bio = NULL;
 420	cb->nr_pages = nr_pages;
 421
 422	bio = btrfs_bio_alloc(first_byte);
 423	bio->bi_opf = bio_op | write_flags;
 424	bio->bi_private = cb;
 425	bio->bi_end_io = end_compressed_bio_write;
 426
 427	if (use_append) {
 428		struct btrfs_device *device;
 429
 430		device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
 431		if (IS_ERR(device)) {
 432			kfree(cb);
 433			bio_put(bio);
 434			return BLK_STS_NOTSUPP;
 435		}
 436
 437		bio_set_dev(bio, device->bdev);
 438	}
 439
 440	if (blkcg_css) {
 441		bio->bi_opf |= REQ_CGROUP_PUNT;
 442		kthread_associate_blkcg(blkcg_css);
 443	}
 444	refcount_set(&cb->pending_bios, 1);
 445
 446	/* create and submit bios for the compressed pages */
 447	bytes_left = compressed_len;
 448	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 449		int submit = 0;
 450		int len = 0;
 451
 452		page = compressed_pages[pg_index];
 453		page->mapping = inode->vfs_inode.i_mapping;
 454		if (bio->bi_iter.bi_size)
 455			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
 456							  0);
 457
 458		/*
 459		 * Page can only be added to bio if the current bio fits in
 460		 * stripe.
 461		 */
 462		if (!submit) {
 463			if (pg_index == 0 && use_append)
 464				len = bio_add_zone_append_page(bio, page,
 465							       PAGE_SIZE, 0);
 466			else
 467				len = bio_add_page(bio, page, PAGE_SIZE, 0);
 468		}
 469
 470		page->mapping = NULL;
 471		if (submit || len < PAGE_SIZE) {
 472			/*
 473			 * inc the count before we submit the bio so
 474			 * we know the end IO handler won't happen before
 475			 * we inc the count.  Otherwise, the cb might get
 476			 * freed before we're done setting it up
 477			 */
 478			refcount_inc(&cb->pending_bios);
 479			ret = btrfs_bio_wq_end_io(fs_info, bio,
 480						  BTRFS_WQ_ENDIO_DATA);
 481			BUG_ON(ret); /* -ENOMEM */
 482
 483			if (!skip_sum) {
 484				ret = btrfs_csum_one_bio(inode, bio, start, 1);
 485				BUG_ON(ret); /* -ENOMEM */
 486			}
 487
 488			ret = btrfs_map_bio(fs_info, bio, 0);
 489			if (ret) {
 490				bio->bi_status = ret;
 491				bio_endio(bio);
 492			}
 493
 494			bio = btrfs_bio_alloc(first_byte);
 495			bio->bi_opf = bio_op | write_flags;
 496			bio->bi_private = cb;
 497			bio->bi_end_io = end_compressed_bio_write;
 498			if (blkcg_css)
 499				bio->bi_opf |= REQ_CGROUP_PUNT;
 500			/*
 501			 * Use bio_add_page() to ensure the bio has at least one
 502			 * page.
 503			 */
 504			bio_add_page(bio, page, PAGE_SIZE, 0);
 505		}
 506		if (bytes_left < PAGE_SIZE) {
 507			btrfs_info(fs_info,
 508					"bytes left %lu compress len %u nr %u",
 509			       bytes_left, cb->compressed_len, cb->nr_pages);
 510		}
 511		bytes_left -= PAGE_SIZE;
 512		first_byte += PAGE_SIZE;
 513		cond_resched();
 514	}
 515
 516	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 517	BUG_ON(ret); /* -ENOMEM */
 518
 519	if (!skip_sum) {
 520		ret = btrfs_csum_one_bio(inode, bio, start, 1);
 521		BUG_ON(ret); /* -ENOMEM */
 522	}
 523
 524	ret = btrfs_map_bio(fs_info, bio, 0);
 525	if (ret) {
 526		bio->bi_status = ret;
 527		bio_endio(bio);
 528	}
 529
 530	if (blkcg_css)
 531		kthread_associate_blkcg(NULL);
 532
 533	return 0;
 534}
 535
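     /* Return the file offset just past the last byte covered by @bio. */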
 536static u64 bio_end_offset(struct bio *bio)
 537{
 538	struct bio_vec *last = bio_last_bvec_all(bio);
 539
 540	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
 541}
 542
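     /*
      * Readahead for compressed extents: opportunistically add more pages from
      * the inode's mapping to cb->orig_bio, as long as they still map to the
      * same compressed extent on disk, so a single decompression pass can fill
      * them too.
      */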
 543static noinline int add_ra_bio_pages(struct inode *inode,
 544				     u64 compressed_end,
 545				     struct compressed_bio *cb)
 
 546{
 
 547	unsigned long end_index;
 548	unsigned long pg_index;
 549	u64 last_offset;
 550	u64 isize = i_size_read(inode);
 551	int ret;
 552	struct page *page;
 553	unsigned long nr_pages = 0;
 554	struct extent_map *em;
 555	struct address_space *mapping = inode->i_mapping;
 556	struct extent_map_tree *em_tree;
 557	struct extent_io_tree *tree;
 558	u64 end;
 559	int misses = 0;
 560
 561	last_offset = bio_end_offset(cb->orig_bio);
 562	em_tree = &BTRFS_I(inode)->extent_tree;
 563	tree = &BTRFS_I(inode)->io_tree;
 564
 565	if (isize == 0)
 566		return 0;
 567
 568	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 569
 570	while (last_offset < compressed_end) {
 571		pg_index = last_offset >> PAGE_SHIFT;
 572
 573		if (pg_index > end_index)
 574			break;
 575
 576		page = xa_load(&mapping->i_pages, pg_index);
 577		if (page && !xa_is_value(page)) {
 578			misses++;
 579			if (misses > 4)
 580				break;
 581			goto next;
 582		}
 583
 584		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 585								 ~__GFP_FS));
 586		if (!page)
 587			break;
 588
 589		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 590			put_page(page);
 591			goto next;
 592		}
 593
 594		/*
 595		 * at this point, we have a locked page in the page cache
 596		 * for these bytes in the file.  But, we have to make
 597		 * sure they map to this compressed extent on disk.
 598		 */
 599		ret = set_page_extent_mapped(page);
 600		if (ret < 0) {
 601			unlock_page(page);
 602			put_page(page);
 603			break;
 604		}
 605
 606		end = last_offset + PAGE_SIZE - 1;
 607		lock_extent(tree, last_offset, end);
 608		read_lock(&em_tree->lock);
 609		em = lookup_extent_mapping(em_tree, last_offset,
 610					   PAGE_SIZE);
 611		read_unlock(&em_tree->lock);
 612
 613		if (!em || last_offset < em->start ||
 614		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 615		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 616			free_extent_map(em);
 617			unlock_extent(tree, last_offset, end);
 618			unlock_page(page);
 619			put_page(page);
 620			break;
 621		}
 
 622		free_extent_map(em);
 
 623
 624		if (page->index == end_index) {
 625			size_t zero_offset = offset_in_page(isize);
 626
 627			if (zero_offset) {
 628				int zeros;
 629				zeros = PAGE_SIZE - zero_offset;
 630				memzero_page(page, zero_offset, zeros);
 631				flush_dcache_page(page);
 632			}
 633		}
 634
 635		ret = bio_add_page(cb->orig_bio, page,
 636				   PAGE_SIZE, 0);
 637
 638		if (ret == PAGE_SIZE) {
 639			nr_pages++;
 640			put_page(page);
 641		} else {
 642			unlock_extent(tree, last_offset, end);
 643			unlock_page(page);
 644			put_page(page);
 645			break;
 646		}
 647next:
 648		last_offset += PAGE_SIZE;
 649	}
 650	return 0;
 651}
 652
 653/*
 654 * for a compressed read, the bio we get passed has all the inode pages
 655 * in it.  We don't actually do IO on those pages but allocate new ones
 656 * to hold the compressed pages on disk.
 657 *
 658 * bio->bi_iter.bi_sector points to the compressed extent on disk
 659 * bio->bi_io_vec points to all of the inode pages
 660 *
 661 * After the compressed pages are read, we copy the bytes into the
 662 * bio we were passed and then call the bio end_io calls
 663 */
 664blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 665				 int mirror_num, unsigned long bio_flags)
 666{
 667	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 668	struct extent_map_tree *em_tree;
 
 669	struct compressed_bio *cb;
 670	unsigned int compressed_len;
 671	unsigned int nr_pages;
 672	unsigned int pg_index;
 673	struct page *page;
 674	struct bio *comp_bio;
 675	u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
 676	u64 em_len;
 677	u64 em_start;
 678	struct extent_map *em;
 679	blk_status_t ret = BLK_STS_RESOURCE;
 680	int faili = 0;
 681	u8 *sums;
 682
 683	em_tree = &BTRFS_I(inode)->extent_tree;
 684
 685	/* we need the actual starting offset of this extent in the file */
 686	read_lock(&em_tree->lock);
 687	em = lookup_extent_mapping(em_tree,
 688				   page_offset(bio_first_page_all(bio)),
 689				   fs_info->sectorsize);
 690	read_unlock(&em_tree->lock);
 691	if (!em)
 692		return BLK_STS_IOERR;
 693
 694	compressed_len = em->block_len;
 695	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 696	if (!cb)
 697		goto out;
 698
 699	refcount_set(&cb->pending_bios, 0);
 700	cb->errors = 0;
 701	cb->inode = inode;
 702	cb->mirror_num = mirror_num;
 703	sums = cb->sums;
 704
 705	cb->start = em->orig_start;
 706	em_len = em->len;
 707	em_start = em->start;
 708
 709	free_extent_map(em);
 710	em = NULL;
 711
 712	cb->len = bio->bi_iter.bi_size;
 713	cb->compressed_len = compressed_len;
 714	cb->compress_type = extent_compress_type(bio_flags);
 715	cb->orig_bio = bio;
 716
 717	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 718	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 719				       GFP_NOFS);
 720	if (!cb->compressed_pages)
 721		goto fail1;
 722
 723	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 724		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 725							      __GFP_HIGHMEM);
 726		if (!cb->compressed_pages[pg_index]) {
 727			faili = pg_index - 1;
 728			ret = BLK_STS_RESOURCE;
 729			goto fail2;
 730		}
 731	}
 732	faili = nr_pages - 1;
 733	cb->nr_pages = nr_pages;
 734
 735	add_ra_bio_pages(inode, em_start + em_len, cb);
 736
  737	/* include any pages we added in add_ra_bio_pages */
 738	cb->len = bio->bi_iter.bi_size;
 739
 740	comp_bio = btrfs_bio_alloc(cur_disk_byte);
 741	comp_bio->bi_opf = REQ_OP_READ;
 742	comp_bio->bi_private = cb;
 743	comp_bio->bi_end_io = end_compressed_bio_read;
 744	refcount_set(&cb->pending_bios, 1);
 745
 746	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 747		u32 pg_len = PAGE_SIZE;
 748		int submit = 0;
 749
 750		/*
 751		 * To handle subpage case, we need to make sure the bio only
 752		 * covers the range we need.
 753		 *
 754		 * If we're at the last page, truncate the length to only cover
 755		 * the remaining part.
 756		 */
 757		if (pg_index == nr_pages - 1)
 758			pg_len = min_t(u32, PAGE_SIZE,
 759					compressed_len - pg_index * PAGE_SIZE);
 760
 761		page = cb->compressed_pages[pg_index];
 762		page->mapping = inode->i_mapping;
 763		page->index = em_start >> PAGE_SHIFT;
 764
 765		if (comp_bio->bi_iter.bi_size)
 766			submit = btrfs_bio_fits_in_stripe(page, pg_len,
 767							  comp_bio, 0);
 768
 769		page->mapping = NULL;
 770		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
 771			unsigned int nr_sectors;
 772
 773			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
 774						  BTRFS_WQ_ENDIO_DATA);
 775			BUG_ON(ret); /* -ENOMEM */
 776
 777			/*
 778			 * inc the count before we submit the bio so
 779			 * we know the end IO handler won't happen before
 780			 * we inc the count.  Otherwise, the cb might get
 781			 * freed before we're done setting it up
 782			 */
 783			refcount_inc(&cb->pending_bios);
 784
 785			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 786			BUG_ON(ret); /* -ENOMEM */
 787
 788			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 789						  fs_info->sectorsize);
 790			sums += fs_info->csum_size * nr_sectors;
 791
 792			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
 793			if (ret) {
 794				comp_bio->bi_status = ret;
 795				bio_endio(comp_bio);
 796			}
 797
 798			comp_bio = btrfs_bio_alloc(cur_disk_byte);
 799			comp_bio->bi_opf = REQ_OP_READ;
 800			comp_bio->bi_private = cb;
 801			comp_bio->bi_end_io = end_compressed_bio_read;
 802
 803			bio_add_page(comp_bio, page, pg_len, 0);
 804		}
 805		cur_disk_byte += pg_len;
 806	}
 807
 808	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 809	BUG_ON(ret); /* -ENOMEM */
 810
 811	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 812	BUG_ON(ret); /* -ENOMEM */
 813
 814	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
 815	if (ret) {
 816		comp_bio->bi_status = ret;
 817		bio_endio(comp_bio);
 818	}
 819
 820	return 0;
 
 821
 822fail2:
 823	while (faili >= 0) {
 824		__free_page(cb->compressed_pages[faili]);
 825		faili--;
 826	}
 827
 828	kfree(cb->compressed_pages);
 829fail1:
 830	kfree(cb);
 831out:
 832	free_extent_map(em);
 833	return ret;
 834}
 835
 836/*
 837 * Heuristic uses systematic sampling to collect data from the input data
 838 * range, the logic can be tuned by the following constants:
 839 *
  840 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
  841 * @SAMPLING_INTERVAL  - distance between the starts of consecutive samples
 842 */
 843#define SAMPLING_READ_SIZE	(16)
 844#define SAMPLING_INTERVAL	(256)
 845
 846/*
 847 * For statistical analysis of the input data we consider bytes that form a
 848 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 849 * many times the object appeared in the sample.
 850 */
 851#define BUCKET_SIZE		(256)
 852
 853/*
 854 * The size of the sample is based on a statistical sampling rule of thumb.
 855 * The common way is to perform sampling tests as long as the number of
 856 * elements in each cell is at least 5.
 857 *
 858 * Instead of 5, we choose 32 to obtain more accurate results.
 859 * If the data contain the maximum number of symbols, which is 256, we obtain a
 860 * sample size bound by 8192.
 861 *
 862 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 863 * from up to 512 locations.
 864 */
 865#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 866				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
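     /*
      * With BTRFS_MAX_UNCOMPRESSED = 128KiB this works out to
      * 131072 * 16 / 256 = 8192 bytes, matching the 8KiB bound mentioned above.
      */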
 867
 868struct bucket_item {
 869	u32 count;
 870};
 871
 872struct heuristic_ws {
 873	/* Partial copy of input data */
 874	u8 *sample;
 875	u32 sample_size;
 876	/* Buckets store counters for each byte value */
 877	struct bucket_item *bucket;
 878	/* Sorting buffer */
 879	struct bucket_item *bucket_b;
 880	struct list_head list;
 881};
 882
 883static struct workspace_manager heuristic_wsm;
 884
 885static void free_heuristic_ws(struct list_head *ws)
 886{
 887	struct heuristic_ws *workspace;
 888
 889	workspace = list_entry(ws, struct heuristic_ws, list);
 890
 891	kvfree(workspace->sample);
 892	kfree(workspace->bucket);
 893	kfree(workspace->bucket_b);
 894	kfree(workspace);
 895}
 896
 897static struct list_head *alloc_heuristic_ws(unsigned int level)
 898{
 899	struct heuristic_ws *ws;
 900
 901	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 902	if (!ws)
 903		return ERR_PTR(-ENOMEM);
 904
 905	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 906	if (!ws->sample)
 907		goto fail;
 908
 909	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 910	if (!ws->bucket)
 911		goto fail;
 912
 913	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 914	if (!ws->bucket_b)
 915		goto fail;
 916
 917	INIT_LIST_HEAD(&ws->list);
 918	return &ws->list;
 919fail:
 920	free_heuristic_ws(&ws->list);
 921	return ERR_PTR(-ENOMEM);
 922}
 923
 924const struct btrfs_compress_op btrfs_heuristic_compress = {
 925	.workspace_manager = &heuristic_wsm,
 926};
 927
 928static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 929	/* The heuristic is represented as compression type 0 */
 930	&btrfs_heuristic_compress,
 931	&btrfs_zlib_compress,
 932	&btrfs_lzo_compress,
 933	&btrfs_zstd_compress,
 934};
 935
 936static struct list_head *alloc_workspace(int type, unsigned int level)
 937{
 938	switch (type) {
 939	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
 940	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
 941	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
 942	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
 943	default:
 944		/*
 945		 * This can't happen, the type is validated several times
 946		 * before we get here.
 947		 */
 948		BUG();
 949	}
 950}
 951
 952static void free_workspace(int type, struct list_head *ws)
 953{
 954	switch (type) {
 955	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
 956	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
 957	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
 958	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
 959	default:
 960		/*
 961		 * This can't happen, the type is validated several times
 962		 * before we get here.
 963		 */
 964		BUG();
 965	}
 966}
 967
 968static void btrfs_init_workspace_manager(int type)
 969{
 970	struct workspace_manager *wsm;
 971	struct list_head *workspace;
 972
 973	wsm = btrfs_compress_op[type]->workspace_manager;
 974	INIT_LIST_HEAD(&wsm->idle_ws);
 975	spin_lock_init(&wsm->ws_lock);
 976	atomic_set(&wsm->total_ws, 0);
 977	init_waitqueue_head(&wsm->ws_wait);
 978
 979	/*
 980	 * Preallocate one workspace for each compression type so we can
 981	 * guarantee forward progress in the worst case
 982	 */
 983	workspace = alloc_workspace(type, 0);
 984	if (IS_ERR(workspace)) {
 985		pr_warn(
 986	"BTRFS: cannot preallocate compression workspace, will try later\n");
 987	} else {
 988		atomic_set(&wsm->total_ws, 1);
 989		wsm->free_ws = 1;
 990		list_add(workspace, &wsm->idle_ws);
 991	}
 992}
 993
 994static void btrfs_cleanup_workspace_manager(int type)
 995{
 996	struct workspace_manager *wsman;
 997	struct list_head *ws;
 998
 999	wsman = btrfs_compress_op[type]->workspace_manager;
1000	while (!list_empty(&wsman->idle_ws)) {
1001		ws = wsman->idle_ws.next;
1002		list_del(ws);
1003		free_workspace(type, ws);
1004		atomic_dec(&wsman->total_ws);
1005	}
1006}
1007
1008/*
1009 * This finds an available workspace or allocates a new one.
 1010 * If it's not possible to allocate a new one, wait until one becomes free.
 1011 * Preallocation provides a forward progress guarantee and we do not return
 1012 * errors.
1013 */
1014struct list_head *btrfs_get_workspace(int type, unsigned int level)
1015{
1016	struct workspace_manager *wsm;
1017	struct list_head *workspace;
1018	int cpus = num_online_cpus();
1019	unsigned nofs_flag;
1020	struct list_head *idle_ws;
1021	spinlock_t *ws_lock;
1022	atomic_t *total_ws;
1023	wait_queue_head_t *ws_wait;
1024	int *free_ws;
1025
1026	wsm = btrfs_compress_op[type]->workspace_manager;
1027	idle_ws	 = &wsm->idle_ws;
1028	ws_lock	 = &wsm->ws_lock;
1029	total_ws = &wsm->total_ws;
1030	ws_wait	 = &wsm->ws_wait;
1031	free_ws	 = &wsm->free_ws;
1032
1033again:
1034	spin_lock(ws_lock);
1035	if (!list_empty(idle_ws)) {
1036		workspace = idle_ws->next;
1037		list_del(workspace);
1038		(*free_ws)--;
1039		spin_unlock(ws_lock);
1040		return workspace;
1041
1042	}
1043	if (atomic_read(total_ws) > cpus) {
1044		DEFINE_WAIT(wait);
1045
1046		spin_unlock(ws_lock);
1047		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1048		if (atomic_read(total_ws) > cpus && !*free_ws)
1049			schedule();
1050		finish_wait(ws_wait, &wait);
1051		goto again;
1052	}
1053	atomic_inc(total_ws);
1054	spin_unlock(ws_lock);
1055
1056	/*
1057	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1058	 * to turn it off here because we might get called from the restricted
1059	 * context of btrfs_compress_bio/btrfs_compress_pages
1060	 */
1061	nofs_flag = memalloc_nofs_save();
1062	workspace = alloc_workspace(type, level);
1063	memalloc_nofs_restore(nofs_flag);
1064
1065	if (IS_ERR(workspace)) {
1066		atomic_dec(total_ws);
1067		wake_up(ws_wait);
1068
1069		/*
1070		 * Do not return the error but go back to waiting. There's a
1071		 * workspace preallocated for each type and the compression
1072		 * time is bounded so we get to a workspace eventually. This
1073		 * makes our caller's life easier.
1074		 *
1075		 * To prevent silent and low-probability deadlocks (when the
1076		 * initial preallocation fails), check if there are any
1077		 * workspaces at all.
1078		 */
1079		if (atomic_read(total_ws) == 0) {
1080			static DEFINE_RATELIMIT_STATE(_rs,
1081					/* once per minute */ 60 * HZ,
1082					/* no burst */ 1);
1083
1084			if (__ratelimit(&_rs)) {
1085				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1086			}
1087		}
1088		goto again;
1089	}
1090	return workspace;
1091}
1092
1093static struct list_head *get_workspace(int type, int level)
1094{
1095	switch (type) {
1096	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1097	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1098	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
1099	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1100	default:
1101		/*
1102		 * This can't happen, the type is validated several times
1103		 * before we get here.
1104		 */
1105		BUG();
1106	}
1107}
1108
1109/*
1110 * put a workspace struct back on the list or free it if we have enough
1111 * idle ones sitting around
1112 */
1113void btrfs_put_workspace(int type, struct list_head *ws)
1114{
1115	struct workspace_manager *wsm;
1116	struct list_head *idle_ws;
1117	spinlock_t *ws_lock;
1118	atomic_t *total_ws;
1119	wait_queue_head_t *ws_wait;
1120	int *free_ws;
1121
1122	wsm = btrfs_compress_op[type]->workspace_manager;
1123	idle_ws	 = &wsm->idle_ws;
1124	ws_lock	 = &wsm->ws_lock;
1125	total_ws = &wsm->total_ws;
1126	ws_wait	 = &wsm->ws_wait;
1127	free_ws	 = &wsm->free_ws;
1128
1129	spin_lock(ws_lock);
1130	if (*free_ws <= num_online_cpus()) {
1131		list_add(ws, idle_ws);
1132		(*free_ws)++;
1133		spin_unlock(ws_lock);
1134		goto wake;
1135	}
1136	spin_unlock(ws_lock);
1137
1138	free_workspace(type, ws);
1139	atomic_dec(total_ws);
1140wake:
1141	cond_wake_up(ws_wait);
1142}
1143
1144static void put_workspace(int type, struct list_head *ws)
1145{
1146	switch (type) {
1147	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1148	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1149	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
1150	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1151	default:
1152		/*
1153		 * This can't happen, the type is validated several times
1154		 * before we get here.
1155		 */
1156		BUG();
1157	}
1158}
1159
1160/*
1161 * Adjust @level according to the limits of the compression algorithm or
1162 * fallback to default
 1163 * fall back to the default
1164static unsigned int btrfs_compress_set_level(int type, unsigned level)
1165{
1166	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1167
1168	if (level == 0)
1169		level = ops->default_level;
1170	else
1171		level = min(level, ops->max_level);
1172
1173	return level;
1174}
1175
1176/*
1177 * Given an address space and start and length, compress the bytes into @pages
1178 * that are allocated on demand.
1179 *
1180 * @type_level is encoded algorithm and level, where level 0 means whatever
1181 * default the algorithm chooses and is opaque here;
 1182 * - the compression algorithm is stored in bits 0-3
 1183 * - the level is stored in bits 4-7
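      *   (eg. zlib, type 1, at level 3 is encoded as 1 | (3 << 4) == 0x31)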
1184 *
1185 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1186 * and returns number of actually allocated pages
1187 *
1188 * @total_in is used to return the number of bytes actually read.  It
1189 * may be smaller than the input length if we had to exit early because we
1190 * ran out of room in the pages array or because we cross the
1191 * max_out threshold.
1192 *
1193 * @total_out is an in/out parameter, must be set to the input length and will
1194 * be also used to return the total number of compressed bytes
1195 */
1196int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1197			 u64 start, struct page **pages,
1198			 unsigned long *out_pages,
1199			 unsigned long *total_in,
1200			 unsigned long *total_out)
1201{
1202	int type = btrfs_compress_type(type_level);
1203	int level = btrfs_compress_level(type_level);
 
1204	struct list_head *workspace;
1205	int ret;
1206
1207	level = btrfs_compress_set_level(type, level);
1208	workspace = get_workspace(type, level);
1209	ret = compression_compress_pages(type, workspace, mapping, start, pages,
1210					 out_pages, total_in, total_out);
1211	put_workspace(type, workspace);
1212	return ret;
1213}
1214
1215static int btrfs_decompress_bio(struct compressed_bio *cb)
1216{
1217	struct list_head *workspace;
1218	int ret;
1219	int type = cb->compress_type;
1220
1221	workspace = get_workspace(type, 0);
1222	ret = compression_decompress_bio(type, workspace, cb);
1223	put_workspace(type, workspace);
1224
1225	return ret;
1226}
1227
1228/*
1229 * a less complex decompression routine.  Our compressed data fits in a
1230 * single page, and we want to read a single page out of it.
1231 * start_byte tells us the offset into the compressed data we're interested in
1232 */
1233int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1234		     unsigned long start_byte, size_t srclen, size_t destlen)
1235{
 
1236	struct list_head *workspace;
 
1237	int ret;
1238
1239	workspace = get_workspace(type, 0);
1240	ret = compression_decompress(type, workspace, data_in, dest_page,
1241				     start_byte, srclen, destlen);
1242	put_workspace(type, workspace);
1243
1244	return ret;
1245}
1246
1247void __init btrfs_init_compress(void)
1248{
1249	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1250	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1251	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1252	zstd_init_workspace_manager();
1253}
1254
1255void __cold btrfs_exit_compress(void)
1256{
1257	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1258	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1259	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1260	zstd_cleanup_workspace_manager();
 
1261}
1262
1263/*
1264 * Copy uncompressed data from working buffer to pages.
1265 *
 1266 * buf_start is the byte offset of the start of our workspace buffer within the decompressed data.
1267 *
 1268 * total_out is the byte offset of the end of the buffer (the total number of decompressed bytes so far)
 1269 */
1270int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1271			      unsigned long total_out, u64 disk_start,
1272			      struct bio *bio)
1273{
1274	unsigned long buf_offset;
1275	unsigned long current_buf_start;
1276	unsigned long start_byte;
1277	unsigned long prev_start_byte;
1278	unsigned long working_bytes = total_out - buf_start;
1279	unsigned long bytes;
1280	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1281
1282	/*
1283	 * start byte is the first byte of the page we're currently
1284	 * copying into relative to the start of the compressed data.
1285	 */
1286	start_byte = page_offset(bvec.bv_page) - disk_start;
 
1287
1288	/* we haven't yet hit data corresponding to this page */
1289	if (total_out <= start_byte)
1290		return 1;
1291
1292	/*
1293	 * the start of the data we care about is offset into
1294	 * the middle of our working buffer
1295	 */
1296	if (total_out > start_byte && buf_start < start_byte) {
1297		buf_offset = start_byte - buf_start;
1298		working_bytes -= buf_offset;
1299	} else {
1300		buf_offset = 0;
1301	}
1302	current_buf_start = buf_start;
1303
1304	/* copy bytes from the working buffer into the pages */
1305	while (working_bytes > 0) {
1306		bytes = min_t(unsigned long, bvec.bv_len,
1307				PAGE_SIZE - (buf_offset % PAGE_SIZE));
1308		bytes = min(bytes, working_bytes);
1309
1310		memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
1311			       bytes);
1312		flush_dcache_page(bvec.bv_page);
1313
1314		buf_offset += bytes;
1315		working_bytes -= bytes;
1316		current_buf_start += bytes;
1317
1318		/* check if we need to pick another page */
1319		bio_advance(bio, bytes);
1320		if (!bio->bi_iter.bi_size)
1321			return 0;
1322		bvec = bio_iter_iovec(bio, bio->bi_iter);
1323		prev_start_byte = start_byte;
1324		start_byte = page_offset(bvec.bv_page) - disk_start;
1325
1326		/*
1327		 * We need to make sure we're only adjusting
 1328		 * our offset into the compression working buffer when
1329		 * we're switching pages.  Otherwise we can incorrectly
1330		 * keep copying when we were actually done.
1331		 */
1332		if (start_byte != prev_start_byte) {
1333			/*
1334			 * make sure our new page is covered by this
1335			 * working buffer
1336			 */
1337			if (total_out <= start_byte)
1338				return 1;
1339
1340			/*
1341			 * the next page in the biovec might not be adjacent
1342			 * to the last page, but it might still be found
1343			 * inside this working buffer. bump our offset pointer
1344			 */
1345			if (total_out > start_byte &&
1346			    current_buf_start < start_byte) {
1347				buf_offset = start_byte - buf_start;
1348				working_bytes = total_out - start_byte;
1349				current_buf_start = buf_start + buf_offset;
1350			}
1351		}
1352	}
1353
1354	return 1;
1355}
1356
1357/*
1358 * Shannon Entropy calculation
1359 *
1360 * Pure byte distribution analysis fails to determine compressibility of data.
1361 * Try calculating entropy to estimate the average minimum number of bits
1362 * needed to encode the sampled data.
1363 *
 1364 * For convenience, return the percentage of needed bits instead of the number
 1365 * of bits directly.
1366 *
1367 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1368 *			    and can be compressible with high probability
1369 *
1370 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1371 *
1372 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1373 */
1374#define ENTROPY_LVL_ACEPTABLE		(65)
1375#define ENTROPY_LVL_HIGH		(80)
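     /*
      * Rough intuition: a sample dominated by a single byte value needs close
      * to 0 bits per byte and scores near 0, while a uniform spread over all
      * 256 values needs the full 8 bits and scores near 100.
      */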
1376
1377/*
 1378 * For increased precision in the shannon_entropy calculation,
 1379 * let's do pow(n, M) to save more digits after the decimal point:
1380 *
1381 * - maximum int bit length is 64
1382 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1383 * - 13 * 4 = 52 < 64		-> M = 4
1384 *
1385 * So use pow(n, 4).
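      * eg. ilog2_w(8) = ilog2(8 * 8 * 8 * 8) = ilog2(4096) = 12 == 4 * log2(8)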
1386 */
1387static inline u32 ilog2_w(u64 n)
1388{
1389	return ilog2(n * n * n * n);
1390}
1391
1392static u32 shannon_entropy(struct heuristic_ws *ws)
1393{
1394	const u32 entropy_max = 8 * ilog2_w(2);
1395	u32 entropy_sum = 0;
1396	u32 p, p_base, sz_base;
1397	u32 i;
1398
1399	sz_base = ilog2_w(ws->sample_size);
1400	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1401		p = ws->bucket[i].count;
1402		p_base = ilog2_w(p);
1403		entropy_sum += p * (sz_base - p_base);
1404	}
1405
1406	entropy_sum /= ws->sample_size;
1407	return entropy_sum * 100 / entropy_max;
1408}
1409
1410#define RADIX_BASE		4U
1411#define COUNTERS_SIZE		(1U << RADIX_BASE)
1412
1413static u8 get4bits(u64 num, int shift) {
1414	u8 low4bits;
1415
1416	num >>= shift;
1417	/* Reverse order */
1418	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1419	return low4bits;
1420}
1421
1422/*
1423 * Use 4 bits as radix base
1424 * Use 16 u32 counters for calculating new position in buf array
1425 *
1426 * @array     - array that will be sorted
1427 * @array_buf - buffer array to store sorting results
1428 *              must be equal in size to @array
1429 * @num       - array size
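      *
      * Note: get4bits() reverses the digit order, so the result ends up sorted
      * in descending order of count (see the "Reverse order" comment above).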
1430 */
1431static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1432		       int num)
1433{
1434	u64 max_num;
1435	u64 buf_num;
1436	u32 counters[COUNTERS_SIZE];
1437	u32 new_addr;
1438	u32 addr;
1439	int bitlen;
1440	int shift;
1441	int i;
1442
1443	/*
 1444	 * Try to avoid useless loop iterations for small numbers stored in big
1445	 * counters.  Example: 48 33 4 ... in 64bit array
1446	 */
1447	max_num = array[0].count;
1448	for (i = 1; i < num; i++) {
1449		buf_num = array[i].count;
1450		if (buf_num > max_num)
1451			max_num = buf_num;
1452	}
1453
1454	buf_num = ilog2(max_num);
1455	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1456
1457	shift = 0;
1458	while (shift < bitlen) {
1459		memset(counters, 0, sizeof(counters));
1460
1461		for (i = 0; i < num; i++) {
1462			buf_num = array[i].count;
1463			addr = get4bits(buf_num, shift);
1464			counters[addr]++;
1465		}
1466
1467		for (i = 1; i < COUNTERS_SIZE; i++)
1468			counters[i] += counters[i - 1];
1469
1470		for (i = num - 1; i >= 0; i--) {
1471			buf_num = array[i].count;
1472			addr = get4bits(buf_num, shift);
1473			counters[addr]--;
1474			new_addr = counters[addr];
1475			array_buf[new_addr] = array[i];
1476		}
1477
1478		shift += RADIX_BASE;
1479
1480		/*
 1481		 * A normal radix sort moves data from the temporary array back to
 1482		 * the main one, but that costs some CPU time.  Avoid that by doing
 1483		 * another sort iteration into the original array instead of a
 1484		 * memcpy()
1485		 */
1486		memset(counters, 0, sizeof(counters));
1487
1488		for (i = 0; i < num; i ++) {
1489			buf_num = array_buf[i].count;
1490			addr = get4bits(buf_num, shift);
1491			counters[addr]++;
1492		}
1493
1494		for (i = 1; i < COUNTERS_SIZE; i++)
1495			counters[i] += counters[i - 1];
1496
1497		for (i = num - 1; i >= 0; i--) {
1498			buf_num = array_buf[i].count;
1499			addr = get4bits(buf_num, shift);
1500			counters[addr]--;
1501			new_addr = counters[addr];
1502			array[new_addr] = array_buf[i];
1503		}
1504
1505		shift += RADIX_BASE;
1506	}
1507}
1508
1509/*
1510 * Size of the core byte set - how many bytes cover 90% of the sample
1511 *
1512 * There are several types of structured binary data that use nearly all byte
1513 * values. The distribution can be uniform and counts in all buckets will be
1514 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1515 *
1516 * Other possibility is normal (Gaussian) distribution, where the data could
1517 * be potentially compressible, but we have to take a few more steps to decide
1518 * how much.
1519 *
 1520 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 1521 *                       compression algo can easily fix that
 1522 * @BYTE_CORE_SET_HIGH - data have a uniform distribution and with high
 1523 *                       probability are not compressible
1524 */
1525#define BYTE_CORE_SET_LOW		(64)
1526#define BYTE_CORE_SET_HIGH		(200)
1527
1528static int byte_core_set_size(struct heuristic_ws *ws)
1529{
1530	u32 i;
1531	u32 coreset_sum = 0;
1532	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1533	struct bucket_item *bucket = ws->bucket;
1534
1535	/* Sort in reverse order */
1536	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1537
1538	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1539		coreset_sum += bucket[i].count;
1540
1541	if (coreset_sum > core_set_threshold)
1542		return i;
1543
1544	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1545		coreset_sum += bucket[i].count;
1546		if (coreset_sum > core_set_threshold)
1547			break;
1548	}
1549
1550	return i;
1551}
1552
1553/*
1554 * Count byte values in buckets.
1555 * This heuristic can detect textual data (configs, xml, json, html, etc).
 1556 * In most text-like data the byte set is restricted to a limited number of
 1557 * possible characters, and that restriction in most cases makes the data
 1558 * easy to compress.
1559 *
1560 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1561 *	less - compressible
1562 *	more - need additional analysis
1563 */
1564#define BYTE_SET_THRESHOLD		(64)
1565
1566static u32 byte_set_size(const struct heuristic_ws *ws)
1567{
1568	u32 i;
1569	u32 byte_set_size = 0;
1570
1571	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1572		if (ws->bucket[i].count > 0)
1573			byte_set_size++;
1574	}
1575
1576	/*
1577	 * Continue collecting count of byte values in buckets.  If the byte
 1578	 * set size is bigger than the threshold, it's pointless to continue,
1579	 * the detection technique would fail for this type of data.
1580	 */
1581	for (; i < BUCKET_SIZE; i++) {
1582		if (ws->bucket[i].count > 0) {
1583			byte_set_size++;
1584			if (byte_set_size > BYTE_SET_THRESHOLD)
1585				return byte_set_size;
1586		}
1587	}
1588
1589	return byte_set_size;
1590}
1591
1592static bool sample_repeated_patterns(struct heuristic_ws *ws)
1593{
1594	const u32 half_of_sample = ws->sample_size / 2;
1595	const u8 *data = ws->sample;
1596
1597	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1598}
1599
1600static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1601				     struct heuristic_ws *ws)
1602{
1603	struct page *page;
1604	u64 index, index_end;
1605	u32 i, curr_sample_pos;
1606	u8 *in_data;
1607
1608	/*
1609	 * Compression handles the input data by chunks of 128KiB
1610	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1611	 *
1612	 * We do the same for the heuristic and loop over the whole range.
1613	 *
1614	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1615	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1616	 */
1617	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1618		end = start + BTRFS_MAX_UNCOMPRESSED;
1619
1620	index = start >> PAGE_SHIFT;
1621	index_end = end >> PAGE_SHIFT;
1622
1623	/* Don't miss unaligned end */
1624	if (!IS_ALIGNED(end, PAGE_SIZE))
1625		index_end++;
1626
1627	curr_sample_pos = 0;
1628	while (index < index_end) {
1629		page = find_get_page(inode->i_mapping, index);
1630		in_data = kmap_local_page(page);
1631		/* Handle case where the start is not aligned to PAGE_SIZE */
1632		i = start % PAGE_SIZE;
1633		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1634			/* Don't sample any garbage from the last page */
1635			if (start > end - SAMPLING_READ_SIZE)
1636				break;
1637			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1638					SAMPLING_READ_SIZE);
1639			i += SAMPLING_INTERVAL;
1640			start += SAMPLING_INTERVAL;
1641			curr_sample_pos += SAMPLING_READ_SIZE;
1642		}
1643		kunmap_local(in_data);
1644		put_page(page);
1645
1646		index++;
1647	}
1648
1649	ws->sample_size = curr_sample_pos;
1650}
1651
1652/*
1653 * Compression heuristic.
1654 *
 1655 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1656 * quickly (compared to direct compression) detect data characteristics
1657 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1658 * data.
1659 *
1660 * The following types of analysis can be performed:
1661 * - detect mostly zero data
1662 * - detect data with low "byte set" size (text, etc)
1663 * - detect data with low/high "core byte" set
1664 *
1665 * Return non-zero if the compression should be done, 0 otherwise.
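      * (The non-zero value encodes which check decided: 1 repeated pattern,
      * 2 small byte set, 3 small core byte set, 4 or 5 acceptable entropy.)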
1666 */
1667int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1668{
1669	struct list_head *ws_list = get_workspace(0, 0);
1670	struct heuristic_ws *ws;
1671	u32 i;
1672	u8 byte;
1673	int ret = 0;
1674
1675	ws = list_entry(ws_list, struct heuristic_ws, list);
1676
1677	heuristic_collect_sample(inode, start, end, ws);
1678
1679	if (sample_repeated_patterns(ws)) {
1680		ret = 1;
1681		goto out;
1682	}
1683
1684	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1685
1686	for (i = 0; i < ws->sample_size; i++) {
1687		byte = ws->sample[i];
1688		ws->bucket[byte].count++;
1689	}
1690
1691	i = byte_set_size(ws);
1692	if (i < BYTE_SET_THRESHOLD) {
1693		ret = 2;
1694		goto out;
1695	}
1696
1697	i = byte_core_set_size(ws);
1698	if (i <= BYTE_CORE_SET_LOW) {
1699		ret = 3;
1700		goto out;
1701	}
1702
1703	if (i >= BYTE_CORE_SET_HIGH) {
1704		ret = 0;
1705		goto out;
1706	}
1707
1708	i = shannon_entropy(ws);
1709	if (i <= ENTROPY_LVL_ACEPTABLE) {
1710		ret = 4;
1711		goto out;
1712	}
1713
1714	/*
1715	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1716	 * needed to give green light to compression.
1717	 *
1718	 * For now just assume that compression at that level is not worth the
1719	 * resources because:
1720	 *
1721	 * 1. it is possible to defrag the data later
1722	 *
1723	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1724	 * values, every bucket has counter at level ~54. The heuristic would
1725	 * be confused. This can happen when data have some internal repeated
1726	 * patterns like "abbacbbc...". This can be detected by analyzing
1727	 * pairs of bytes, which is too costly.
1728	 */
1729	if (i < ENTROPY_LVL_HIGH) {
1730		ret = 5;
1731		goto out;
1732	} else {
1733		ret = 0;
1734		goto out;
1735	}
1736
1737out:
1738	put_workspace(0, ws_list);
1739	return ret;
1740}
1741
1742/*
1743 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 1744 * level; an unrecognized string sets the default level
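      * eg. the suffix ":3" yields level 3, while a missing or malformed suffix
      * falls back to the algorithm's default level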
1745 */
1746unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1747{
1748	unsigned int level = 0;
1749	int ret;
1750
1751	if (!type)
1752		return 0;
1753
1754	if (str[0] == ':') {
1755		ret = kstrtouint(str + 1, 10, &level);
1756		if (ret)
1757			level = 0;
1758	}
1759
1760	level = btrfs_compress_set_level(type, level);
1761
1762	return level;
1763}