fs/btrfs/compression.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
   8#include <linux/file.h>
   9#include <linux/fs.h>
  10#include <linux/pagemap.h>
  11#include <linux/pagevec.h>
  12#include <linux/highmem.h>
  13#include <linux/kthread.h>
  14#include <linux/time.h>
  15#include <linux/init.h>
  16#include <linux/string.h>
  17#include <linux/backing-dev.h>
  18#include <linux/writeback.h>
  19#include <linux/psi.h>
  20#include <linux/slab.h>
  21#include <linux/sched/mm.h>
  22#include <linux/log2.h>
  23#include <linux/shrinker.h>
  24#include <crypto/hash.h>
  25#include "misc.h"
  26#include "ctree.h"
  27#include "fs.h"
  28#include "btrfs_inode.h"
  29#include "bio.h"
  30#include "ordered-data.h"
  31#include "compression.h"
  32#include "extent_io.h"
  33#include "extent_map.h"
  34#include "subpage.h"
  35#include "messages.h"
  36#include "super.h"
  37
  38static struct bio_set btrfs_compressed_bioset;
  39
  40static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  41
  42const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  43{
  44	switch (type) {
  45	case BTRFS_COMPRESS_ZLIB:
  46	case BTRFS_COMPRESS_LZO:
  47	case BTRFS_COMPRESS_ZSTD:
  48	case BTRFS_COMPRESS_NONE:
  49		return btrfs_compress_types[type];
  50	default:
  51		break;
  52	}
  53
  54	return NULL;
  55}
  56
  57static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
  58{
  59	return container_of(bbio, struct compressed_bio, bbio);
  60}
  61
  62static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
  63						   u64 start, blk_opf_t op,
  64						   btrfs_bio_end_io_t end_io)
  65{
  66	struct btrfs_bio *bbio;
  67
  68	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
  69					  GFP_NOFS, &btrfs_compressed_bioset));
  70	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
  71	bbio->inode = inode;
  72	bbio->file_offset = start;
  73	return to_compressed_bio(bbio);
  74}
  75
  76bool btrfs_compress_is_valid_type(const char *str, size_t len)
  77{
  78	int i;
  79
  80	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
  81		size_t comp_len = strlen(btrfs_compress_types[i]);
  82
  83		if (len < comp_len)
  84			continue;
  85
  86		if (!strncmp(btrfs_compress_types[i], str, comp_len))
  87			return true;
  88	}
  89	return false;
  90}
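/*
 * Editor's illustrative sketch, not part of the original file: the loop
 * above compares only strlen(btrfs_compress_types[i]) bytes, so a mount
 * option value carrying a level suffix such as "zlib:9" also validates;
 * the suffix itself is parsed later by btrfs_compress_str2level().
 */
static void __maybe_unused demo_compress_type_checks(void)
{
	ASSERT(btrfs_compress_is_valid_type("zstd", 4));
	ASSERT(btrfs_compress_is_valid_type("zlib:9", 6));	/* prefix match */
	ASSERT(!btrfs_compress_is_valid_type("lzma", 4));	/* not supported */
}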
  91
  92static int compression_compress_pages(int type, struct list_head *ws,
  93				      struct address_space *mapping, u64 start,
  94				      struct folio **folios, unsigned long *out_folios,
  95				      unsigned long *total_in, unsigned long *total_out)
  96{
  97	switch (type) {
  98	case BTRFS_COMPRESS_ZLIB:
  99		return zlib_compress_folios(ws, mapping, start, folios,
 100					    out_folios, total_in, total_out);
 101	case BTRFS_COMPRESS_LZO:
 102		return lzo_compress_folios(ws, mapping, start, folios,
 103					   out_folios, total_in, total_out);
 104	case BTRFS_COMPRESS_ZSTD:
 105		return zstd_compress_folios(ws, mapping, start, folios,
 106					    out_folios, total_in, total_out);
 107	case BTRFS_COMPRESS_NONE:
 108	default:
 109		/*
 110		 * This can happen when compression races with a remount setting
 111		 * it to 'no compress', while the caller doesn't call
 112		 * inode_need_compress() to check if we really need to
 113		 * compress.
 114		 *
 115		 * Not a big deal, we just need to inform the caller that we
 116		 * haven't allocated any pages yet.
 117		 */
 118		*out_folios = 0;
 119		return -E2BIG;
 120	}
 121}
 122
 123static int compression_decompress_bio(struct list_head *ws,
 124				      struct compressed_bio *cb)
 125{
 126	switch (cb->compress_type) {
 127	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
 128	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
 129	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
 130	case BTRFS_COMPRESS_NONE:
 131	default:
 132		/*
 133		 * This can't happen, the type is validated several times
 134		 * before we get here.
 135		 */
 136		BUG();
 137	}
 138}
 139
 140static int compression_decompress(int type, struct list_head *ws,
 141		const u8 *data_in, struct folio *dest_folio,
 142		unsigned long dest_pgoff, size_t srclen, size_t destlen)
 143{
 144	switch (type) {
 145	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
 146						dest_pgoff, srclen, destlen);
 147	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_folio,
 148						dest_pgoff, srclen, destlen);
 149	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
 150						dest_pgoff, srclen, destlen);
 151	case BTRFS_COMPRESS_NONE:
 152	default:
 153		/*
 154		 * This can't happen, the type is validated several times
 155		 * before we get here.
 156		 */
 157		BUG();
 158	}
 159}
 160
 161static void btrfs_free_compressed_folios(struct compressed_bio *cb)
 162{
 163	for (unsigned int i = 0; i < cb->nr_folios; i++)
 164		btrfs_free_compr_folio(cb->compressed_folios[i]);
 165	kfree(cb->compressed_folios);
 166}
 167
 168static int btrfs_decompress_bio(struct compressed_bio *cb);
 169
 170/*
 171 * Global cache of last unused pages for compression/decompression.
 172 */
 173static struct btrfs_compr_pool {
 174	struct shrinker *shrinker;
 175	spinlock_t lock;
 176	struct list_head list;
 177	int count;
 178	int thresh;
 179} compr_pool;
 180
 181static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
 182{
 183	int ret;
 184
 185	/*
 186	 * We must not read the values more than once if 'ret' gets expanded in
 187	 * the return statement so we don't accidentally return a negative
 188	 * number, even if the first condition finds it positive.
 189	 */
 190	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);
 191
 192	return ret > 0 ? ret : 0;
 193}
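/*
 * Editor's worked example (illustrative numbers only): with the default
 * threshold of 256 pages set in btrfs_init_compress() on 4KiB pages, and
 * 300 cached pages, the shrinker is told 300 - 256 = 44 objects are
 * reclaimable; at or below the threshold it reports 0 and the pool is
 * left alone.
 */
static unsigned long __maybe_unused demo_pool_count(void)
{
	int count = 300, thresh = 256;	/* illustrative values */
	int ret = count - thresh;

	return ret > 0 ? ret : 0;	/* 44 */
}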
 194
 195static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
 196{
 197	struct list_head remove;
 198	struct list_head *tmp, *next;
 199	int freed;
 200
 201	if (compr_pool.count == 0)
 202		return SHRINK_STOP;
 203
 204	INIT_LIST_HEAD(&remove);
 205
 206	/* For now, simply drain the whole list. */
 207	spin_lock(&compr_pool.lock);
 208	list_splice_init(&compr_pool.list, &remove);
 209	freed = compr_pool.count;
 210	compr_pool.count = 0;
 211	spin_unlock(&compr_pool.lock);
 212
 213	list_for_each_safe(tmp, next, &remove) {
 214		struct page *page = list_entry(tmp, struct page, lru);
 215
 216		ASSERT(page_ref_count(page) == 1);
 217		put_page(page);
 218	}
 219
 220	return freed;
 221}
 222
 223/*
 224 * Common helpers for scratch folio allocation used by the compression code.
 225 */
 226struct folio *btrfs_alloc_compr_folio(void)
 227{
 228	struct folio *folio = NULL;
 229
 230	spin_lock(&compr_pool.lock);
 231	if (compr_pool.count > 0) {
 232		folio = list_first_entry(&compr_pool.list, struct folio, lru);
 233		list_del_init(&folio->lru);
 234		compr_pool.count--;
 235	}
 236	spin_unlock(&compr_pool.lock);
 237
 238	if (folio)
 239		return folio;
 240
 241	return folio_alloc(GFP_NOFS, 0);
 242}
 243
 244void btrfs_free_compr_folio(struct folio *folio)
 245{
 246	bool do_free = false;
 247
 248	spin_lock(&compr_pool.lock);
 249	if (compr_pool.count > compr_pool.thresh) {
 250		do_free = true;
 251	} else {
 252		list_add(&folio->lru, &compr_pool.list);
 253		compr_pool.count++;
 254	}
 255	spin_unlock(&compr_pool.lock);
 256
 257	if (!do_free)
 258		return;
 259
 260	ASSERT(folio_ref_count(folio) == 1);
 261	folio_put(folio);
 262}
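/*
 * Editor's usage sketch (assumed pairing, mirroring how the compression
 * paths in this file recycle scratch memory): allocate from the pool,
 * use the folio for compressed bytes, then hand it back so it can be
 * reused instead of round-tripping through the page allocator.
 */
static void __maybe_unused demo_compr_pool_roundtrip(void)
{
	struct folio *folio = btrfs_alloc_compr_folio();

	if (!folio)
		return;
	/* ... fill the folio with compressed data, attach it to a bio ... */
	btrfs_free_compr_folio(folio);	/* cached while below compr_pool.thresh */
}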
 263
 264static void end_bbio_compressed_read(struct btrfs_bio *bbio)
 265{
 266	struct compressed_bio *cb = to_compressed_bio(bbio);
 267	blk_status_t status = bbio->bio.bi_status;
 268
 269	if (!status)
 270		status = errno_to_blk_status(btrfs_decompress_bio(cb));
 271
 272	btrfs_free_compressed_folios(cb);
 273	btrfs_bio_end_io(cb->orig_bbio, status);
 274	bio_put(&bbio->bio);
 275}
 276
 277/*
 278 * Clear the writeback bits on all of the file
 279 * pages for a compressed write
 280 */
 281static noinline void end_compressed_writeback(const struct compressed_bio *cb)
 282{
 283	struct inode *inode = &cb->bbio.inode->vfs_inode;
 284	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 285	unsigned long index = cb->start >> PAGE_SHIFT;
 286	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 287	struct folio_batch fbatch;
 288	const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
 289	int i;
 290	int ret;
 291
 292	if (error)
 293		mapping_set_error(inode->i_mapping, error);
 294
 295	folio_batch_init(&fbatch);
 296	while (index <= end_index) {
 297		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
 298				&fbatch);
 299
 300		if (ret == 0)
 301			return;
 302
 303		for (i = 0; i < ret; i++) {
 304			struct folio *folio = fbatch.folios[i];
 305
 306			btrfs_folio_clamp_clear_writeback(fs_info, folio,
 307							  cb->start, cb->len);
 308		}
 309		folio_batch_release(&fbatch);
 310	}
 311	/* the inode may be gone now */
 312}
 313
 314static void btrfs_finish_compressed_write_work(struct work_struct *work)
 315{
 316	struct compressed_bio *cb =
 317		container_of(work, struct compressed_bio, write_end_work);
 318
 319	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
 320				    cb->bbio.bio.bi_status == BLK_STS_OK);
 321
 322	if (cb->writeback)
 323		end_compressed_writeback(cb);
 324	/* Note, our inode could be gone now */
 325
 326	btrfs_free_compressed_folios(cb);
 327	bio_put(&cb->bbio.bio);
 328}
 329
 330/*
 331 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 332 * writeback on the file pages and free the compressed pages.
 333 *
 334 * This also calls the writeback end hooks for the file pages so that metadata
 335 * and checksums can be updated in the file.
 336 */
 337static void end_bbio_compressed_write(struct btrfs_bio *bbio)
 338{
 339	struct compressed_bio *cb = to_compressed_bio(bbio);
 340	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
 341
 342	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
 343}
 344
 345static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
 346{
 347	struct bio *bio = &cb->bbio.bio;
 348	u32 offset = 0;
 349
 350	while (offset < cb->compressed_len) {
 351		int ret;
 352		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
 353
 354		/* Maximum compressed extent is smaller than bio size limit. */
 355		ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
 356				    len, 0);
 357		ASSERT(ret);
 358		offset += len;
 359	}
 360}
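/*
 * Editor's worked example for the loop above (illustrative numbers): with
 * cb->compressed_len = 10KiB and 4KiB pages, three folios get attached
 * with len = 4096, 4096 and 2048; offset >> PAGE_SHIFT selects folio
 * 0, 1 and 2 respectively, and the ASSERT holds because a compressed
 * extent (at most 128KiB) always fits in a single bio.
 */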
 361
 362/*
 363 * worker function to build and submit bios for previously compressed pages.
 364 * The corresponding pages in the inode should be marked for writeback
 365 * and the compressed pages should have a reference on them for dropping
 366 * when the IO is complete.
 367 *
 368 * This also checksums the file bytes and gets things ready for
 369 * the end io hooks.
 370 */
 371void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
 372				   struct folio **compressed_folios,
 373				   unsigned int nr_folios,
 374				   blk_opf_t write_flags,
 375				   bool writeback)
 376{
 377	struct btrfs_inode *inode = ordered->inode;
 378	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 379	struct compressed_bio *cb;
 380
 381	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
 382	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
 383
 384	cb = alloc_compressed_bio(inode, ordered->file_offset,
 385				  REQ_OP_WRITE | write_flags,
 386				  end_bbio_compressed_write);
 387	cb->start = ordered->file_offset;
 388	cb->len = ordered->num_bytes;
 389	cb->compressed_folios = compressed_folios;
 390	cb->compressed_len = ordered->disk_num_bytes;
 391	cb->writeback = writeback;
 392	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
 393	cb->nr_folios = nr_folios;
 394	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
 395	cb->bbio.ordered = ordered;
 396	btrfs_add_compressed_bio_folios(cb);
 397
 398	btrfs_submit_bbio(&cb->bbio, 0);
 399}
 400
 401/*
 402 * Add extra pages in the same compressed file extent so that we don't need to
 403 * re-read the same extent again and again.
 404 *
 405 * NOTE: this won't work well for subpage, as for a subpage read we lock the
 406 * full page and then submit a bio for each compressed/regular extent.
 407 *
 408 * This means that if several sectors in the same page point to the same
 409 * on-disk compressed data, we will re-read the same extent many times and
 410 * this function can only help for the next page.
 411 */
 412static noinline int add_ra_bio_pages(struct inode *inode,
 413				     u64 compressed_end,
 414				     struct compressed_bio *cb,
 415				     int *memstall, unsigned long *pflags)
 416{
 417	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
 418	unsigned long end_index;
 419	struct bio *orig_bio = &cb->orig_bbio->bio;
 420	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
 421	u64 isize = i_size_read(inode);
 422	int ret;
 423	struct folio *folio;
 424	struct extent_map *em;
 425	struct address_space *mapping = inode->i_mapping;
 426	struct extent_map_tree *em_tree;
 427	struct extent_io_tree *tree;
 428	int sectors_missed = 0;
 429
 430	em_tree = &BTRFS_I(inode)->extent_tree;
 431	tree = &BTRFS_I(inode)->io_tree;
 432
 433	if (isize == 0)
 434		return 0;
 435
 436	/*
 437	 * For current subpage support, we only support 64K page size,
 438	 * which means maximum compressed extent size (128K) is just 2x page
 439	 * size.
 440	 * This makes readahead less effective, so here disable readahead for
 441	 * subpage for now, until full compressed write is supported.
 442	 */
 443	if (fs_info->sectorsize < PAGE_SIZE)
 444		return 0;
 445
 446	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 447
 448	while (cur < compressed_end) {
 449		u64 page_end;
 450		u64 pg_index = cur >> PAGE_SHIFT;
 451		u32 add_size;
 452
 453		if (pg_index > end_index)
 454			break;
 455
 456		folio = filemap_get_folio(mapping, pg_index);
 457		if (!IS_ERR(folio)) {
 458			u64 folio_sz = folio_size(folio);
 459			u64 offset = offset_in_folio(folio, cur);
 460
 461			folio_put(folio);
 462			sectors_missed += (folio_sz - offset) >>
 463					  fs_info->sectorsize_bits;
 464
 465			/* Beyond threshold, no need to continue */
 466			if (sectors_missed > 4)
 467				break;
 468
 469			/*
 470			 * Jump to next page start as we already have page for
 471			 * current offset.
 472			 */
 473			cur += (folio_sz - offset);
 474			continue;
 475		}
 476
 477		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
 478								   ~__GFP_FS), 0);
 479		if (!folio)
 480			break;
 481
 482		if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
 483			/* There is already a page, skip to page end */
 484			cur += folio_size(folio);
 485			folio_put(folio);
 486			continue;
 487		}
 488
 489		if (!*memstall && folio_test_workingset(folio)) {
 490			psi_memstall_enter(pflags);
 491			*memstall = 1;
 492		}
 493
 494		ret = set_folio_extent_mapped(folio);
 495		if (ret < 0) {
 496			folio_unlock(folio);
 497			folio_put(folio);
 498			break;
 499		}
 500
 501		page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
 502		lock_extent(tree, cur, page_end, NULL);
 503		read_lock(&em_tree->lock);
 504		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
 505		read_unlock(&em_tree->lock);
 506
 507		/*
 508		 * At this point, we have a locked page in the page cache for
 509		 * these bytes in the file.  But, we have to make sure they map
 510		 * to this compressed extent on disk.
 511		 */
 512		if (!em || cur < em->start ||
 513		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
 514		    (extent_map_block_start(em) >> SECTOR_SHIFT) !=
 515		    orig_bio->bi_iter.bi_sector) {
 516			free_extent_map(em);
 517			unlock_extent(tree, cur, page_end, NULL);
 518			folio_unlock(folio);
 519			folio_put(folio);
 520			break;
 521		}
 522		add_size = min(em->start + em->len, page_end + 1) - cur;
 523		free_extent_map(em);
 524		unlock_extent(tree, cur, page_end, NULL);
 525
 526		if (folio->index == end_index) {
 527			size_t zero_offset = offset_in_folio(folio, isize);
 528
 529			if (zero_offset) {
 530				int zeros;
 531				zeros = folio_size(folio) - zero_offset;
 532				folio_zero_range(folio, zero_offset, zeros);
 533			}
 534		}
 535
 536		if (!bio_add_folio(orig_bio, folio, add_size,
 537				   offset_in_folio(folio, cur))) {
 538			folio_unlock(folio);
 539			folio_put(folio);
 540			break;
 541		}
 542		/*
 543		 * If it's subpage, we also need to increase its
 544		 * subpage::readers number, as at endio we will decrease
 545		 * subpage::readers and unlock the page.
 546		 */
 547		if (fs_info->sectorsize < PAGE_SIZE)
 548			btrfs_folio_set_lock(fs_info, folio, cur, add_size);
 549		folio_put(folio);
 550		cur += add_size;
 551	}
 552	return 0;
 553}
 554
 555/*
 556 * for a compressed read, the bio we get passed has all the inode pages
 557 * in it.  We don't actually do IO on those pages but allocate new ones
 558 * to hold the compressed pages on disk.
 559 *
 560 * bio->bi_iter.bi_sector points to the compressed extent on disk
 561 * bio->bi_io_vec points to all of the inode pages
 562 *
 563 * After the compressed pages are read, we copy the bytes into the
 564 * bio we were passed and then run the bio end_io handlers
 565 */
 566void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 567{
 568	struct btrfs_inode *inode = bbio->inode;
 569	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 570	struct extent_map_tree *em_tree = &inode->extent_tree;
 571	struct compressed_bio *cb;
 572	unsigned int compressed_len;
 573	u64 file_offset = bbio->file_offset;
 574	u64 em_len;
 575	u64 em_start;
 576	struct extent_map *em;
 577	unsigned long pflags;
 578	int memstall = 0;
 579	blk_status_t ret;
 580	int ret2;
 581
 582	/* we need the actual starting offset of this extent in the file */
 583	read_lock(&em_tree->lock);
 584	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
 585	read_unlock(&em_tree->lock);
 586	if (!em) {
 587		ret = BLK_STS_IOERR;
 588		goto out;
 589	}
 590
 591	ASSERT(extent_map_is_compressed(em));
 592	compressed_len = em->disk_num_bytes;
 593
 594	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
 595				  end_bbio_compressed_read);
 596
 597	cb->start = em->start - em->offset;
 598	em_len = em->len;
 599	em_start = em->start;
 600
 601	cb->len = bbio->bio.bi_iter.bi_size;
 602	cb->compressed_len = compressed_len;
 603	cb->compress_type = extent_map_compression(em);
 604	cb->orig_bbio = bbio;
 605
 606	free_extent_map(em);
 607
 608	cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 609	cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS);
 610	if (!cb->compressed_folios) {
 611		ret = BLK_STS_RESOURCE;
 612		goto out_free_bio;
 613	}
 614
 615	ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios);
 616	if (ret2) {
 617		ret = BLK_STS_RESOURCE;
 618		goto out_free_compressed_pages;
 619	}
 620
 621	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
 622			 &pflags);
 623
 624	/* Include any pages we added in add_ra_bio_pages(). */
 625	cb->len = bbio->bio.bi_iter.bi_size;
 626	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
 627	btrfs_add_compressed_bio_folios(cb);
 628
 629	if (memstall)
 630		psi_memstall_leave(&pflags);
 631
 632	btrfs_submit_bbio(&cb->bbio, 0);
 633	return;
 634
 635out_free_compressed_pages:
 636	kfree(cb->compressed_folios);
 637out_free_bio:
 638	bio_put(&cb->bbio.bio);
 639out:
 640	btrfs_bio_end_io(bbio, ret);
 641}
 642
 643/*
 644 * Heuristic uses systematic sampling to collect data from the input data
 645 * range, the logic can be tuned by the following constants:
 646 *
 647 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 648 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 649 */
 650#define SAMPLING_READ_SIZE	(16)
 651#define SAMPLING_INTERVAL	(256)
 652
 653/*
 654 * For statistical analysis of the input data we consider bytes that form a
 655 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 656 * many times the object appeared in the sample.
 657 */
 658#define BUCKET_SIZE		(256)
 659
 660/*
 661 * The size of the sample is based on a statistical sampling rule of thumb.
 662 * The common way is to perform sampling tests as long as the number of
 663 * elements in each cell is at least 5.
 664 *
 665 * Instead of 5, we choose 32 to obtain more accurate results.
 666 * If the data contain the maximum number of symbols, which is 256, we obtain a
 667 * sample size bound by 8192.
 668 *
 669 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 670 * from up to 512 locations.
 671 */
 672#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 673				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
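/*
 * Editor's worked arithmetic: BTRFS_MAX_UNCOMPRESSED is 128KiB, so
 * MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes, i.e. 16 consecutive
 * bytes taken from each of up to 512 positions spaced 256 bytes apart.
 */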
 674
 675struct bucket_item {
 676	u32 count;
 677};
 678
 679struct heuristic_ws {
 680	/* Partial copy of input data */
 681	u8 *sample;
 682	u32 sample_size;
 683	/* Buckets store counters for each byte value */
 684	struct bucket_item *bucket;
 685	/* Sorting buffer */
 686	struct bucket_item *bucket_b;
 687	struct list_head list;
 688};
 689
 690static struct workspace_manager heuristic_wsm;
 691
 692static void free_heuristic_ws(struct list_head *ws)
 693{
 694	struct heuristic_ws *workspace;
 695
 696	workspace = list_entry(ws, struct heuristic_ws, list);
 697
 698	kvfree(workspace->sample);
 699	kfree(workspace->bucket);
 700	kfree(workspace->bucket_b);
 701	kfree(workspace);
 702}
 703
 704static struct list_head *alloc_heuristic_ws(void)
 705{
 706	struct heuristic_ws *ws;
 707
 708	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 709	if (!ws)
 710		return ERR_PTR(-ENOMEM);
 711
 712	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 713	if (!ws->sample)
 714		goto fail;
 715
 716	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 717	if (!ws->bucket)
 718		goto fail;
 719
 720	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 721	if (!ws->bucket_b)
 722		goto fail;
 723
 724	INIT_LIST_HEAD(&ws->list);
 725	return &ws->list;
 726fail:
 727	free_heuristic_ws(&ws->list);
 728	return ERR_PTR(-ENOMEM);
 729}
 730
 731const struct btrfs_compress_op btrfs_heuristic_compress = {
 732	.workspace_manager = &heuristic_wsm,
 733};
 734
 735static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 736	/* The heuristic is represented as compression type 0 */
 737	&btrfs_heuristic_compress,
 738	&btrfs_zlib_compress,
 739	&btrfs_lzo_compress,
 740	&btrfs_zstd_compress,
 741};
 742
 743static struct list_head *alloc_workspace(int type, unsigned int level)
 744{
 745	switch (type) {
 746	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws();
 747	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
 748	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace();
 749	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
 750	default:
 751		/*
 752		 * This can't happen, the type is validated several times
 753		 * before we get here.
 754		 */
 755		BUG();
 756	}
 757}
 758
 759static void free_workspace(int type, struct list_head *ws)
 760{
 761	switch (type) {
 762	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
 763	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
 764	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
 765	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
 766	default:
 767		/*
 768		 * This can't happen, the type is validated several times
 769		 * before we get here.
 770		 */
 771		BUG();
 772	}
 773}
 774
 775static void btrfs_init_workspace_manager(int type)
 776{
 777	struct workspace_manager *wsm;
 778	struct list_head *workspace;
 779
 780	wsm = btrfs_compress_op[type]->workspace_manager;
 781	INIT_LIST_HEAD(&wsm->idle_ws);
 782	spin_lock_init(&wsm->ws_lock);
 783	atomic_set(&wsm->total_ws, 0);
 784	init_waitqueue_head(&wsm->ws_wait);
 785
 786	/*
 787	 * Preallocate one workspace for each compression type so we can
 788	 * guarantee forward progress in the worst case
 789	 */
 790	workspace = alloc_workspace(type, 0);
 791	if (IS_ERR(workspace)) {
 792		pr_warn(
 793	"BTRFS: cannot preallocate compression workspace, will try later\n");
 794	} else {
 795		atomic_set(&wsm->total_ws, 1);
 796		wsm->free_ws = 1;
 797		list_add(workspace, &wsm->idle_ws);
 798	}
 799}
 800
 801static void btrfs_cleanup_workspace_manager(int type)
 802{
 803	struct workspace_manager *wsman;
 804	struct list_head *ws;
 805
 806	wsman = btrfs_compress_op[type]->workspace_manager;
 807	while (!list_empty(&wsman->idle_ws)) {
 808		ws = wsman->idle_ws.next;
 809		list_del(ws);
 810		free_workspace(type, ws);
 811		atomic_dec(&wsman->total_ws);
 812	}
 813}
 814
 815/*
 816 * This finds an available workspace or allocates a new one.
 817 * If it's not possible to allocate a new one, it waits until there is one.
 818 * Preallocation provides a forward progress guarantee, so we do not return
 819 * errors.
 820 */
 821struct list_head *btrfs_get_workspace(int type, unsigned int level)
 822{
 823	struct workspace_manager *wsm;
 824	struct list_head *workspace;
 825	int cpus = num_online_cpus();
 826	unsigned nofs_flag;
 827	struct list_head *idle_ws;
 828	spinlock_t *ws_lock;
 829	atomic_t *total_ws;
 830	wait_queue_head_t *ws_wait;
 831	int *free_ws;
 832
 833	wsm = btrfs_compress_op[type]->workspace_manager;
 834	idle_ws	 = &wsm->idle_ws;
 835	ws_lock	 = &wsm->ws_lock;
 836	total_ws = &wsm->total_ws;
 837	ws_wait	 = &wsm->ws_wait;
 838	free_ws	 = &wsm->free_ws;
 839
 840again:
 841	spin_lock(ws_lock);
 842	if (!list_empty(idle_ws)) {
 843		workspace = idle_ws->next;
 844		list_del(workspace);
 845		(*free_ws)--;
 846		spin_unlock(ws_lock);
 847		return workspace;
 848
 849	}
 850	if (atomic_read(total_ws) > cpus) {
 851		DEFINE_WAIT(wait);
 852
 853		spin_unlock(ws_lock);
 854		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 855		if (atomic_read(total_ws) > cpus && !*free_ws)
 856			schedule();
 857		finish_wait(ws_wait, &wait);
 858		goto again;
 859	}
 860	atomic_inc(total_ws);
 861	spin_unlock(ws_lock);
 862
 863	/*
 864	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
 865	 * to turn it off here because we might get called from the restricted
 866	 * context of btrfs_compress_bio/btrfs_compress_pages
 867	 */
 868	nofs_flag = memalloc_nofs_save();
 869	workspace = alloc_workspace(type, level);
 870	memalloc_nofs_restore(nofs_flag);
 871
 872	if (IS_ERR(workspace)) {
 873		atomic_dec(total_ws);
 874		wake_up(ws_wait);
 875
 876		/*
 877		 * Do not return the error but go back to waiting. There's a
 878		 * workspace preallocated for each type and the compression
 879		 * time is bounded so we get to a workspace eventually. This
 880		 * makes our caller's life easier.
 881		 *
 882		 * To prevent silent and low-probability deadlocks (when the
 883		 * initial preallocation fails), check if there are any
 884		 * workspaces at all.
 885		 */
 886		if (atomic_read(total_ws) == 0) {
 887			static DEFINE_RATELIMIT_STATE(_rs,
 888					/* once per minute */ 60 * HZ,
 889					/* no burst */ 1);
 890
 891			if (__ratelimit(&_rs)) {
 892				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 893			}
 894		}
 895		goto again;
 896	}
 897	return workspace;
 898}
 899
 900static struct list_head *get_workspace(int type, int level)
 901{
 902	switch (type) {
 903	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
 904	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
 905	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
 906	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
 907	default:
 908		/*
 909		 * This can't happen, the type is validated several times
 910		 * before we get here.
 911		 */
 912		BUG();
 913	}
 914}
 915
 916/*
 917 * put a workspace struct back on the list or free it if we have enough
 918 * idle ones sitting around
 919 */
 920void btrfs_put_workspace(int type, struct list_head *ws)
 921{
 922	struct workspace_manager *wsm;
 923	struct list_head *idle_ws;
 924	spinlock_t *ws_lock;
 925	atomic_t *total_ws;
 926	wait_queue_head_t *ws_wait;
 927	int *free_ws;
 928
 929	wsm = btrfs_compress_op[type]->workspace_manager;
 930	idle_ws	 = &wsm->idle_ws;
 931	ws_lock	 = &wsm->ws_lock;
 932	total_ws = &wsm->total_ws;
 933	ws_wait	 = &wsm->ws_wait;
 934	free_ws	 = &wsm->free_ws;
 935
 936	spin_lock(ws_lock);
 937	if (*free_ws <= num_online_cpus()) {
 938		list_add(ws, idle_ws);
 939		(*free_ws)++;
 940		spin_unlock(ws_lock);
 941		goto wake;
 942	}
 943	spin_unlock(ws_lock);
 944
 945	free_workspace(type, ws);
 946	atomic_dec(total_ws);
 947wake:
 948	cond_wake_up(ws_wait);
 949}
 950
 951static void put_workspace(int type, struct list_head *ws)
 952{
 953	switch (type) {
 954	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
 955	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
 956	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
 957	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
 958	default:
 959		/*
 960		 * This can't happen, the type is validated several times
 961		 * before we get here.
 962		 */
 963		BUG();
 964	}
 965}
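/*
 * Editor's usage sketch: workspaces are always acquired and released in
 * pairs, and get_workspace() cannot fail thanks to the preallocated
 * fallback, so callers need no error handling here (this mirrors
 * btrfs_compress_folios() and btrfs_decompress_bio() in this file).
 */
static void __maybe_unused demo_workspace_roundtrip(void)
{
	struct list_head *ws = get_workspace(BTRFS_COMPRESS_ZLIB, 0);

	/* ... use the per-type private workspace for (de)compression ... */
	put_workspace(BTRFS_COMPRESS_ZLIB, ws);
}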
 966
 967/*
 968 * Adjust @level according to the limits of the compression algorithm or
 969 * fallback to default
 970 */
 971static unsigned int btrfs_compress_set_level(int type, unsigned level)
 972{
 973	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
 974
 975	if (level == 0)
 976		level = ops->default_level;
 977	else
 978		level = min(level, ops->max_level);
 979
 980	return level;
 981}
 982
 983/* Wrapper around filemap_get_folio(), with an extra error message. */
 984int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
 985				     struct folio **in_folio_ret)
 986{
 987	struct folio *in_folio;
 988
 989	/*
 990	 * The compressed write path should have the folio locked already, thus
 991	 * we only need to grab one reference.
 992	 */
 993	in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
 994	if (IS_ERR(in_folio)) {
 995		struct btrfs_inode *inode = BTRFS_I(mapping->host);
 996
 997		btrfs_crit(inode->root->fs_info,
 998		"failed to get page cache, root %lld ino %llu file offset %llu",
 999			   btrfs_root_id(inode->root), btrfs_ino(inode), start);
1000		return -ENOENT;
1001	}
1002	*in_folio_ret = in_folio;
1003	return 0;
1004}
1005
1006/*
1007 * Given an address space and start and length, compress the bytes into @folios
1008 * that are allocated on demand.
1009 *
1010 * @type_level is the encoded algorithm and level, where level 0 means whatever
1011 * default the algorithm chooses and is opaque here;
1012 * - the compression algorithm is stored in bits 0-3
1013 * - the level is stored in bits 4-7
1014 *
1015 * @out_folios is an in/out parameter that holds the maximum number of folios
1016 * to allocate and returns the number of folios actually allocated
1017 *
1018 * @total_in is used to return the number of bytes actually read.  It
1019 * may be smaller than the input length if we had to exit early because we
1020 * ran out of room in the folios array or because we crossed the
1021 * max_out threshold.
1022 *
1023 * @total_out is an in/out parameter, must be set to the input length and will
1024 * also be used to return the total number of compressed bytes
1025 */
1026int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
1027			 u64 start, struct folio **folios, unsigned long *out_folios,
1028			 unsigned long *total_in, unsigned long *total_out)
1029{
1030	int type = btrfs_compress_type(type_level);
1031	int level = btrfs_compress_level(type_level);
1032	const unsigned long orig_len = *total_out;
1033	struct list_head *workspace;
1034	int ret;
1035
1036	level = btrfs_compress_set_level(type, level);
1037	workspace = get_workspace(type, level);
1038	ret = compression_compress_pages(type, workspace, mapping, start, folios,
1039					 out_folios, total_in, total_out);
1040	/* The total read-in bytes should be no larger than the input. */
1041	ASSERT(*total_in <= orig_len);
1042	put_workspace(type, workspace);
1043	return ret;
1044}
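/*
 * Editor's illustrative example of the @type_level encoding documented
 * above, using the btrfs_compress_type()/btrfs_compress_level() helpers
 * already used by this function: zlib at level 9 encodes as
 * (9 << 4) | BTRFS_COMPRESS_ZLIB == 0x91.
 */
static void __maybe_unused demo_type_level_encoding(void)
{
	unsigned int type_level = (9 << 4) | BTRFS_COMPRESS_ZLIB;

	ASSERT(btrfs_compress_type(type_level) == BTRFS_COMPRESS_ZLIB);
	ASSERT(btrfs_compress_level(type_level) == 9);
}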
1045
1046static int btrfs_decompress_bio(struct compressed_bio *cb)
1047{
1048	struct list_head *workspace;
1049	int ret;
1050	int type = cb->compress_type;
1051
1052	workspace = get_workspace(type, 0);
1053	ret = compression_decompress_bio(workspace, cb);
1054	put_workspace(type, workspace);
1055
1056	if (!ret)
1057		zero_fill_bio(&cb->orig_bbio->bio);
1058	return ret;
1059}
1060
1061/*
1062 * A less complex decompression routine.  Our compressed data fits in a
1063 * single page, and we want to read a single page out of it.
1064 * @dest_pgoff tells us the offset inside @dest_folio where the data should land
1065 */
1066int btrfs_decompress(int type, const u8 *data_in, struct folio *dest_folio,
1067		     unsigned long dest_pgoff, size_t srclen, size_t destlen)
1068{
1069	struct btrfs_fs_info *fs_info = folio_to_fs_info(dest_folio);
1070	struct list_head *workspace;
1071	const u32 sectorsize = fs_info->sectorsize;
1072	int ret;
1073
1074	/*
1075	 * The full destination page range should not exceed the page size.
1076	 * And the @destlen should not exceed sectorsize, as this is only called for
1077	 * inline file extents, which should not exceed sectorsize.
1078	 */
1079	ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
1080
1081	workspace = get_workspace(type, 0);
1082	ret = compression_decompress(type, workspace, data_in, dest_folio,
1083				     dest_pgoff, srclen, destlen);
1084	put_workspace(type, workspace);
1085
1086	return ret;
1087}
1088
1089int __init btrfs_init_compress(void)
1090{
1091	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
1092			offsetof(struct compressed_bio, bbio.bio),
1093			BIOSET_NEED_BVECS))
1094		return -ENOMEM;
1095
1096	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
1097	if (!compr_pool.shrinker)
1098		return -ENOMEM;
1099
1100	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1101	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1102	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1103	zstd_init_workspace_manager();
1104
1105	spin_lock_init(&compr_pool.lock);
1106	INIT_LIST_HEAD(&compr_pool.list);
1107	compr_pool.count = 0;
1108	/* 128K / 4K = 32, with 8 threads that is 256 pages. */
1109	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
1110	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
1111	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
1112	compr_pool.shrinker->batch = 32;
1113	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
1114	shrinker_register(compr_pool.shrinker);
1115
1116	return 0;
1117}
1118
1119void __cold btrfs_exit_compress(void)
1120{
1121	/* For now scan drains all pages and does not touch the parameters. */
1122	btrfs_compr_pool_scan(NULL, NULL);
1123	shrinker_free(compr_pool.shrinker);
1124
1125	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1126	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1127	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1128	zstd_cleanup_workspace_manager();
1129	bioset_exit(&btrfs_compressed_bioset);
1130}
1131
1132/*
1133 * Copy decompressed data from working buffer to pages.
1134 *
1135 * @buf:		The decompressed data buffer
1136 * @buf_len:		The decompressed data length
1137 * @decompressed:	Number of bytes that are already decompressed inside the
1138 * 			compressed extent
1139 * @cb:			The compressed extent descriptor
1140 * @orig_bio:		The original bio that the caller wants to read for
1141 *
1142 * An easier to understand graph is like below:
1143 *
1144 * 		|<- orig_bio ->|     |<- orig_bio->|
1145 * 	|<-------      full decompressed extent      ----->|
1146 * 	|<-----------    @cb range   ---->|
1147 * 	|			|<-- @buf_len -->|
1148 * 	|<--- @decompressed --->|
1149 *
1150 * Note that @cb can cover only a part of the full decompressed extent, but
1151 * @cb->start always has the same value as the file offset of the full
1152 * decompressed extent.
1153 *
1154 * When reading a compressed extent, we have to read the full compressed extent,
1155 * while @orig_bio may only want part of the range.
1156 * Thus this function will ensure only data covered by @orig_bio will be copied
1157 * to it.
1158 *
1159 * Return 0 if we have copied all needed contents for @orig_bio.
1160 * Return >0 if we need to continue decompressing.
1161 */
1162int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1163			      struct compressed_bio *cb, u32 decompressed)
1164{
1165	struct bio *orig_bio = &cb->orig_bbio->bio;
1166	/* Offset inside the full decompressed extent */
1167	u32 cur_offset;
1168
1169	cur_offset = decompressed;
1170	/* The main loop to do the copy */
1171	while (cur_offset < decompressed + buf_len) {
1172		struct bio_vec bvec;
1173		size_t copy_len;
1174		u32 copy_start;
1175		/* Offset inside the full decompressed extent */
1176		u32 bvec_offset;
1177
1178		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1179		/*
1180		 * The subtraction of cb->start may underflow, but it still gives
1181		 * us the correct offset inside the full decompressed extent.
1182		 */
1183		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1184
1185		/* Haven't reached the bvec range, exit */
1186		if (decompressed + buf_len <= bvec_offset)
1187			return 1;
1188
1189		copy_start = max(cur_offset, bvec_offset);
1190		copy_len = min(bvec_offset + bvec.bv_len,
1191			       decompressed + buf_len) - copy_start;
1192		ASSERT(copy_len);
1193
1194		/*
1195		 * Extra range check to ensure we didn't go beyond
1196		 * @buf + @buf_len.
1197		 */
1198		ASSERT(copy_start - decompressed < buf_len);
1199		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1200			       buf + copy_start - decompressed, copy_len);
1201		cur_offset += copy_len;
1202
1203		bio_advance(orig_bio, copy_len);
1204		/* Finished the bio */
1205		if (!orig_bio->bi_iter.bi_size)
1206			return 0;
1207	}
1208	return 1;
1209}
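/*
 * Editor's worked example for the diagram above (illustrative numbers):
 * full decompressed extent of 128KiB, @cb->start = 0, @orig_bio covering
 * bytes 8KiB..12KiB, decompressor delivering 4KiB chunks. The calls with
 * @decompressed = 0 and 4096 return 1 right away because
 * decompressed + buf_len <= bvec_offset (8192); the call with
 * @decompressed = 8192 copies all 4096 bytes, empties @orig_bio and
 * returns 0.
 */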
1210
1211/*
1212 * Shannon Entropy calculation
1213 *
1214 * Pure byte distribution analysis fails to determine compressibility of data.
1215 * Try calculating entropy to estimate the average minimum number of bits
1216 * needed to encode the sampled data.
1217 *
1218 * For convenience, return the percentage of needed bits, instead of amount of
1219 * bits directly.
1220 *
1221 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1222 *			    and can be compressible with high probability
1223 *
1224 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1225 *
1226 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1227 */
1228#define ENTROPY_LVL_ACEPTABLE		(65)
1229#define ENTROPY_LVL_HIGH		(80)
1230
1231/*
1232 * For increased precision in the shannon_entropy calculation,
1233 * let's do pow(n, M) to save more digits after the decimal point:
1234 *
1235 * - maximum int bit length is 64
1236 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1237 * - 13 * 4 = 52 < 64		-> M = 4
1238 *
1239 * So use pow(n, 4).
1240 */
1241static inline u32 ilog2_w(u64 n)
1242{
1243	return ilog2(n * n * n * n);
1244}
1245
1246static u32 shannon_entropy(struct heuristic_ws *ws)
1247{
1248	const u32 entropy_max = 8 * ilog2_w(2);
1249	u32 entropy_sum = 0;
1250	u32 p, p_base, sz_base;
1251	u32 i;
1252
1253	sz_base = ilog2_w(ws->sample_size);
1254	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1255		p = ws->bucket[i].count;
1256		p_base = ilog2_w(p);
1257		entropy_sum += p * (sz_base - p_base);
1258	}
1259
1260	entropy_sum /= ws->sample_size;
1261	return entropy_sum * 100 / entropy_max;
1262}
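/*
 * Editor's note on the math above: with N = sample_size and p_i the bucket
 * counts, the loop accumulates sum(p_i * (ilog2_w(N) - ilog2_w(p_i))), an
 * integer approximation of N * 4 * H, where H = -sum((p_i/N) * log2(p_i/N))
 * is the Shannon entropy in bits per byte (the factor 4 comes from
 * ilog2_w(n) = ilog2(n^4) counting quarter-bits). Hence entropy_max =
 * 8 * ilog2_w(2) = 32 quarter-bits, and the returned value is the
 * percentage of the maximum 8 bits/byte that the sample needs.
 */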
1263
1264#define RADIX_BASE		4U
1265#define COUNTERS_SIZE		(1U << RADIX_BASE)
1266
1267static u8 get4bits(u64 num, int shift) {
1268	u8 low4bits;
1269
1270	num >>= shift;
1271	/* Reverse order */
1272	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1273	return low4bits;
1274}
1275
1276/*
1277 * Use 4 bits as radix base
1278 * Use 16 u32 counters for calculating new position in buf array
1279 *
1280 * @array     - array that will be sorted
1281 * @array_buf - buffer array to store sorting results
1282 *              must be equal in size to @array
1283 * @num       - array size
1284 */
1285static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1286		       int num)
1287{
1288	u64 max_num;
1289	u64 buf_num;
1290	u32 counters[COUNTERS_SIZE];
1291	u32 new_addr;
1292	u32 addr;
1293	int bitlen;
1294	int shift;
1295	int i;
1296
1297	/*
1298	 * Try to avoid useless loop iterations for small numbers stored in big
1299	 * counters.  Example: 48 33 4 ... in a 64bit array
1300	 */
1301	max_num = array[0].count;
1302	for (i = 1; i < num; i++) {
1303		buf_num = array[i].count;
1304		if (buf_num > max_num)
1305			max_num = buf_num;
1306	}
1307
1308	buf_num = ilog2(max_num);
1309	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1310
1311	shift = 0;
1312	while (shift < bitlen) {
1313		memset(counters, 0, sizeof(counters));
1314
1315		for (i = 0; i < num; i++) {
1316			buf_num = array[i].count;
1317			addr = get4bits(buf_num, shift);
1318			counters[addr]++;
1319		}
1320
1321		for (i = 1; i < COUNTERS_SIZE; i++)
1322			counters[i] += counters[i - 1];
1323
1324		for (i = num - 1; i >= 0; i--) {
1325			buf_num = array[i].count;
1326			addr = get4bits(buf_num, shift);
1327			counters[addr]--;
1328			new_addr = counters[addr];
1329			array_buf[new_addr] = array[i];
1330		}
1331
1332		shift += RADIX_BASE;
1333
1334		/*
1335		 * A normal radix sort would move the data from the temporary
1336		 * array back to the main one, but that requires some CPU time.
1337		 * Avoid it by doing another sort iteration into the original
1338		 * array instead of a memcpy().
1339		 */
1340		memset(counters, 0, sizeof(counters));
1341
1342		for (i = 0; i < num; i ++) {
1343			buf_num = array_buf[i].count;
1344			addr = get4bits(buf_num, shift);
1345			counters[addr]++;
1346		}
1347
1348		for (i = 1; i < COUNTERS_SIZE; i++)
1349			counters[i] += counters[i - 1];
1350
1351		for (i = num - 1; i >= 0; i--) {
1352			buf_num = array_buf[i].count;
1353			addr = get4bits(buf_num, shift);
1354			counters[addr]--;
1355			new_addr = counters[addr];
1356			array[new_addr] = array_buf[i];
1357		}
1358
1359		shift += RADIX_BASE;
1360	}
1361}
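/*
 * Editor's illustrative sketch: because get4bits() reverses each nibble,
 * the radix sort produces *descending* order, which byte_core_set_size()
 * below relies on when it sums the most frequent buckets first.
 */
static void __maybe_unused demo_radix_sort(void)
{
	struct bucket_item a[4] = { {3}, {48}, {0}, {7} };
	struct bucket_item tmp[4];

	radix_sort(a, tmp, 4);	/* a[].count is now 48, 7, 3, 0 */
}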
1362
1363/*
1364 * Size of the core byte set - how many bytes cover 90% of the sample
1365 *
1366 * There are several types of structured binary data that use nearly all byte
1367 * values. The distribution can be uniform and counts in all buckets will be
1368 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1369 *
1370 * Other possibility is normal (Gaussian) distribution, where the data could
1371 * be potentially compressible, but we have to take a few more steps to decide
1372 * how much.
1373 *
1374 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
1375 *                       compression algo can easy fix that
1376 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
1377 *                       probability is not compressible
1378 */
1379#define BYTE_CORE_SET_LOW		(64)
1380#define BYTE_CORE_SET_HIGH		(200)
1381
1382static int byte_core_set_size(struct heuristic_ws *ws)
1383{
1384	u32 i;
1385	u32 coreset_sum = 0;
1386	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1387	struct bucket_item *bucket = ws->bucket;
1388
1389	/* Sort in reverse order */
1390	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1391
1392	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1393		coreset_sum += bucket[i].count;
1394
1395	if (coreset_sum > core_set_threshold)
1396		return i;
1397
1398	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1399		coreset_sum += bucket[i].count;
1400		if (coreset_sum > core_set_threshold)
1401			break;
1402	}
1403
1404	return i;
1405}
1406
1407/*
1408 * Count byte values in buckets.
1409 * This heuristic can detect textual data (configs, xml, json, html, etc).
1410 * Because in most text-like data the byte set is restricted to a limited
1411 * number of possible characters, and that restriction in most cases makes
1412 * the data easy to compress.
1413 *
1414 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1415 *	less - compressible
1416 *	more - need additional analysis
1417 */
1418#define BYTE_SET_THRESHOLD		(64)
1419
1420static u32 byte_set_size(const struct heuristic_ws *ws)
1421{
1422	u32 i;
1423	u32 byte_set_size = 0;
1424
1425	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1426		if (ws->bucket[i].count > 0)
1427			byte_set_size++;
1428	}
1429
1430	/*
1431	 * Continue collecting count of byte values in buckets.  If the byte
1432	 * set size is bigger than the threshold, it's pointless to continue,
1433	 * the detection technique would fail for this type of data.
1434	 */
1435	for (; i < BUCKET_SIZE; i++) {
1436		if (ws->bucket[i].count > 0) {
1437			byte_set_size++;
1438			if (byte_set_size > BYTE_SET_THRESHOLD)
1439				return byte_set_size;
1440		}
1441	}
1442
1443	return byte_set_size;
1444}
1445
1446static bool sample_repeated_patterns(struct heuristic_ws *ws)
1447{
1448	const u32 half_of_sample = ws->sample_size / 2;
1449	const u8 *data = ws->sample;
1450
1451	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1452}
1453
1454static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1455				     struct heuristic_ws *ws)
1456{
1457	struct page *page;
1458	u64 index, index_end;
1459	u32 i, curr_sample_pos;
1460	u8 *in_data;
1461
1462	/*
1463	 * Compression handles the input data by chunks of 128KiB
1464	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1465	 *
1466	 * We do the same for the heuristic and loop over the whole range.
1467	 *
1468	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1469	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1470	 */
1471	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1472		end = start + BTRFS_MAX_UNCOMPRESSED;
1473
1474	index = start >> PAGE_SHIFT;
1475	index_end = end >> PAGE_SHIFT;
1476
1477	/* Don't miss unaligned end */
1478	if (!PAGE_ALIGNED(end))
1479		index_end++;
1480
1481	curr_sample_pos = 0;
1482	while (index < index_end) {
1483		page = find_get_page(inode->i_mapping, index);
1484		in_data = kmap_local_page(page);
1485		/* Handle case where the start is not aligned to PAGE_SIZE */
1486		i = start % PAGE_SIZE;
1487		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1488			/* Don't sample any garbage from the last page */
1489			if (start > end - SAMPLING_READ_SIZE)
1490				break;
1491			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1492					SAMPLING_READ_SIZE);
1493			i += SAMPLING_INTERVAL;
1494			start += SAMPLING_INTERVAL;
1495			curr_sample_pos += SAMPLING_READ_SIZE;
1496		}
1497		kunmap_local(in_data);
1498		put_page(page);
1499
1500		index++;
1501	}
1502
1503	ws->sample_size = curr_sample_pos;
1504}
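/*
 * Editor's worked example: for an aligned 128KiB range, the inner loop
 * copies SAMPLING_READ_SIZE (16) bytes every SAMPLING_INTERVAL (256)
 * bytes, i.e. 16 samples per 4KiB page across 32 pages = 512 samples,
 * filling the full 8KiB MAX_SAMPLE_SIZE; shorter or unaligned ranges
 * simply yield a smaller ws->sample_size.
 */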
1505
1506/*
1507 * Compression heuristic.
1508 *
1509 * The following types of analysis can be performed:
1510 * - detect mostly zero data
1511 * - detect data with low "byte set" size (text, etc)
1512 * - detect data with low/high "core byte" set
1513 *
1514 * Return non-zero if the compression should be done, 0 otherwise.
1515 */
1516int btrfs_compress_heuristic(struct btrfs_inode *inode, u64 start, u64 end)
1517{
1518	struct list_head *ws_list = get_workspace(0, 0);
1519	struct heuristic_ws *ws;
1520	u32 i;
1521	u8 byte;
1522	int ret = 0;
1523
1524	ws = list_entry(ws_list, struct heuristic_ws, list);
1525
1526	heuristic_collect_sample(&inode->vfs_inode, start, end, ws);
1527
1528	if (sample_repeated_patterns(ws)) {
1529		ret = 1;
1530		goto out;
1531	}
1532
1533	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1534
1535	for (i = 0; i < ws->sample_size; i++) {
1536		byte = ws->sample[i];
1537		ws->bucket[byte].count++;
1538	}
1539
1540	i = byte_set_size(ws);
1541	if (i < BYTE_SET_THRESHOLD) {
1542		ret = 2;
1543		goto out;
1544	}
1545
1546	i = byte_core_set_size(ws);
1547	if (i <= BYTE_CORE_SET_LOW) {
1548		ret = 3;
1549		goto out;
1550	}
1551
1552	if (i >= BYTE_CORE_SET_HIGH) {
1553		ret = 0;
1554		goto out;
1555	}
1556
1557	i = shannon_entropy(ws);
1558	if (i <= ENTROPY_LVL_ACEPTABLE) {
1559		ret = 4;
1560		goto out;
1561	}
1562
1563	/*
1564	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1565	 * needed to give green light to compression.
1566	 *
1567	 * For now just assume that compression at that level is not worth the
1568	 * resources because:
1569	 *
1570	 * 1. it is possible to defrag the data later
1571	 *
1572	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1573	 * values, every bucket has counter at level ~54. The heuristic would
1574	 * be confused. This can happen when data have some internal repeated
1575	 * patterns like "abbacbbc...". This can be detected by analyzing
1576	 * pairs of bytes, which is too costly.
1577	 */
1578	if (i < ENTROPY_LVL_HIGH) {
1579		ret = 5;
1580		goto out;
1581	} else {
1582		ret = 0;
1583		goto out;
1584	}
1585
1586out:
1587	put_workspace(0, ws_list);
1588	return ret;
1589}
1590
1591/*
1592 * Convert the compression suffix (eg. after "zlib", starting with ":") to a
1593 * level; an unrecognized string will set the default level
1594 */
1595unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1596{
1597	unsigned int level = 0;
1598	int ret;
1599
1600	if (!type)
1601		return 0;
1602
1603	if (str[0] == ':') {
1604		ret = kstrtouint(str + 1, 10, &level);
1605		if (ret)
1606			level = 0;
1607	}
1608
1609	level = btrfs_compress_set_level(type, level);
1610
1611	return level;
1612}
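/*
 * Editor's illustrative examples (default levels are those set by each
 * algorithm's btrfs_compress_op; the string is the suffix after the
 * algorithm name):
 *
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":9")	-> 9
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, "")	-> zlib default
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, ":x")	-> zstd default
 *						(kstrtouint() fails, level 0)
 */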
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
   8#include <linux/file.h>
   9#include <linux/fs.h>
  10#include <linux/pagemap.h>
 
  11#include <linux/highmem.h>
 
  12#include <linux/time.h>
  13#include <linux/init.h>
  14#include <linux/string.h>
  15#include <linux/backing-dev.h>
  16#include <linux/writeback.h>
 
  17#include <linux/slab.h>
  18#include <linux/sched/mm.h>
  19#include <linux/log2.h>
 
  20#include <crypto/hash.h>
  21#include "misc.h"
  22#include "ctree.h"
  23#include "disk-io.h"
  24#include "transaction.h"
  25#include "btrfs_inode.h"
  26#include "volumes.h"
  27#include "ordered-data.h"
  28#include "compression.h"
  29#include "extent_io.h"
  30#include "extent_map.h"
 
 
 
 
 
  31
  32static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  33
  34const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  35{
  36	switch (type) {
  37	case BTRFS_COMPRESS_ZLIB:
  38	case BTRFS_COMPRESS_LZO:
  39	case BTRFS_COMPRESS_ZSTD:
  40	case BTRFS_COMPRESS_NONE:
  41		return btrfs_compress_types[type];
 
 
  42	}
  43
  44	return NULL;
  45}
  46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  47bool btrfs_compress_is_valid_type(const char *str, size_t len)
  48{
  49	int i;
  50
  51	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
  52		size_t comp_len = strlen(btrfs_compress_types[i]);
  53
  54		if (len < comp_len)
  55			continue;
  56
  57		if (!strncmp(btrfs_compress_types[i], str, comp_len))
  58			return true;
  59	}
  60	return false;
  61}
  62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  63static int btrfs_decompress_bio(struct compressed_bio *cb);
  64
  65static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
  66				      unsigned long disk_size)
 
 
 
 
 
 
 
 
 
 
  67{
  68	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  69
  70	return sizeof(struct compressed_bio) +
  71		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 
 
 
 
 
 
  72}
  73
  74static int check_compressed_csum(struct btrfs_inode *inode,
  75				 struct compressed_bio *cb,
  76				 u64 disk_start)
  77{
  78	struct btrfs_fs_info *fs_info = inode->root->fs_info;
  79	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
  80	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  81	int ret;
  82	struct page *page;
  83	unsigned long i;
  84	char *kaddr;
  85	u8 csum[BTRFS_CSUM_SIZE];
  86	u8 *cb_sum = cb->sums;
  87
  88	if (inode->flags & BTRFS_INODE_NODATASUM)
  89		return 0;
  90
  91	shash->tfm = fs_info->csum_shash;
  92
  93	for (i = 0; i < cb->nr_pages; i++) {
  94		page = cb->compressed_pages[i];
 
 
 
 
  95
  96		crypto_shash_init(shash);
  97		kaddr = kmap_atomic(page);
  98		crypto_shash_update(shash, kaddr, PAGE_SIZE);
  99		kunmap_atomic(kaddr);
 100		crypto_shash_final(shash, (u8 *)&csum);
 101
 102		if (memcmp(&csum, cb_sum, csum_size)) {
 103			btrfs_print_data_csum_error(inode, disk_start,
 104					csum, cb_sum, cb->mirror_num);
 105			ret = -EIO;
 106			goto fail;
 107		}
 108		cb_sum += csum_size;
 109
 
 
 110	}
 111	ret = 0;
 112fail:
 113	return ret;
 114}
 115
 116/* when we finish reading compressed pages from the disk, we
 117 * decompress them and then run the bio end_io routines on the
 118 * decompressed pages (in the inode address space).
 119 *
 120 * This allows the checksumming and other IO error handling routines
 121 * to work normally
 122 *
 123 * The compressed pages are freed here, and it must be run
 124 * in process context
 125 */
 126static void end_compressed_bio_read(struct bio *bio)
 127{
 128	struct compressed_bio *cb = bio->bi_private;
 129	struct inode *inode;
 130	struct page *page;
 131	unsigned long index;
 132	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
 133	int ret = 0;
 134
 135	if (bio->bi_status)
 136		cb->errors = 1;
 
 
 
 
 
 137
 138	/* if there are more bios still pending for this compressed
 139	 * extent, just exit
 140	 */
 141	if (!refcount_dec_and_test(&cb->pending_bios))
 142		goto out;
 143
 144	/*
 145	 * Record the correct mirror_num in cb->orig_bio so that
 146	 * read-repair can work properly.
 147	 */
 148	ASSERT(btrfs_io_bio(cb->orig_bio));
 149	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
 150	cb->mirror_num = mirror;
 151
 152	/*
 153	 * Some IO in this cb have failed, just skip checksum as there
 154	 * is no way it could be correct.
 155	 */
 156	if (cb->errors == 1)
 157		goto csum_failed;
 158
 159	inode = cb->inode;
 160	ret = check_compressed_csum(BTRFS_I(inode), cb,
 161				    (u64)bio->bi_iter.bi_sector << 9);
 162	if (ret)
 163		goto csum_failed;
 
 
 
 164
 165	/* ok, we're the last bio for this extent, lets start
 166	 * the decompression.
 167	 */
 168	ret = btrfs_decompress_bio(cb);
 169
 170csum_failed:
 171	if (ret)
 172		cb->errors = 1;
 173
 174	/* release the compressed pages */
 175	index = 0;
 176	for (index = 0; index < cb->nr_pages; index++) {
 177		page = cb->compressed_pages[index];
 178		page->mapping = NULL;
 179		put_page(page);
 180	}
 181
 182	/* do io completion on the original bio */
 183	if (cb->errors) {
 184		bio_io_error(cb->orig_bio);
 185	} else {
 186		struct bio_vec *bvec;
 187		struct bvec_iter_all iter_all;
 188
 189		/*
 190		 * we have verified the checksum already, set page
 191		 * checked so the end_io handlers know about it
 192		 */
 193		ASSERT(!bio_flagged(bio, BIO_CLONED));
 194		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
 195			SetPageChecked(bvec->bv_page);
 196
 197		bio_endio(cb->orig_bio);
 198	}
 199
 200	/* finally free the cb struct */
 201	kfree(cb->compressed_pages);
 202	kfree(cb);
 203out:
 204	bio_put(bio);
 205}
 206
 207/*
 208 * Clear the writeback bits on all of the file
 209 * pages for a compressed write
 210 */
 211static noinline void end_compressed_writeback(struct inode *inode,
 212					      const struct compressed_bio *cb)
 213{
 214	unsigned long index = cb->start >> PAGE_SHIFT;
 215	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 216	struct page *pages[16];
 217	unsigned long nr_pages = end_index - index + 1;
 218	int i;
 219	int ret;
 220
 221	if (cb->errors)
 222		mapping_set_error(inode->i_mapping, -EIO);
 223
 224	while (nr_pages > 0) {
 225		ret = find_get_pages_contig(inode->i_mapping, index,
 226				     min_t(unsigned long,
 227				     nr_pages, ARRAY_SIZE(pages)), pages);
 228		if (ret == 0) {
 229			nr_pages -= 1;
 230			index += 1;
 231			continue;
 232		}
 233		for (i = 0; i < ret; i++) {
 234			if (cb->errors)
 235				SetPageError(pages[i]);
 236			end_page_writeback(pages[i]);
 237			put_page(pages[i]);
 238		}
 239		nr_pages -= ret;
 240		index += ret;
 241	}
 242	/* the inode may be gone now */
 243}
 244
 245/*
 246 * do the cleanup once all the compressed pages hit the disk.
 247 * This will clear writeback on the file pages and free the compressed
 248 * pages.
 249 *
 250 * This also calls the writeback end hooks for the file pages so that
 251 * metadata and checksums can be updated in the file.
 252 */
 253static void end_compressed_bio_write(struct bio *bio)
 254{
 255	struct compressed_bio *cb = bio->bi_private;
 256	struct inode *inode;
 257	struct page *page;
 258	unsigned long index;
 259
 260	if (bio->bi_status)
 261		cb->errors = 1;
 262
 263	/* if there are more bios still pending for this compressed
 264	 * extent, just exit
 265	 */
 266	if (!refcount_dec_and_test(&cb->pending_bios))
 267		goto out;
 268
 269	/* ok, we're the last bio for this extent, step one is to
 270	 * call back into the FS and do all the end_io operations
 271	 */
 272	inode = cb->inode;
 273	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 274	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
 275			cb->start, cb->start + cb->len - 1,
 276			bio->bi_status == BLK_STS_OK);
 277	cb->compressed_pages[0]->mapping = NULL;
 278
 279	end_compressed_writeback(inode, cb);
 280	/* note, our inode could be gone now */
 281
 282	/*
 283	 * release the compressed pages, these came from alloc_page and
 284	 * are not attached to the inode at all
 285	 */
 286	index = 0;
 287	for (index = 0; index < cb->nr_pages; index++) {
 288		page = cb->compressed_pages[index];
 289		page->mapping = NULL;
 290		put_page(page);
 291	}
 292
 293	/* finally free the cb struct */
 294	kfree(cb->compressed_pages);
 295	kfree(cb);
 296out:
 297	bio_put(bio);
 298}
 299
 300/*
 301 * worker function to build and submit bios for previously compressed pages.
 302 * The corresponding pages in the inode should be marked for writeback
 303 * and the compressed pages should have a reference on them for dropping
 304 * when the IO is complete.
 305 *
 306 * This also checksums the file bytes and gets things ready for
 307 * the end io hooks.
 308 */
 309blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 310				 unsigned long len, u64 disk_start,
 311				 unsigned long compressed_len,
 312				 struct page **compressed_pages,
 313				 unsigned long nr_pages,
 314				 unsigned int write_flags)
 315{
 316	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 317	struct bio *bio = NULL;
 318	struct compressed_bio *cb;
 319	unsigned long bytes_left;
 320	int pg_index = 0;
 321	struct page *page;
 322	u64 first_byte = disk_start;
 323	struct block_device *bdev;
 324	blk_status_t ret;
 325	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 326
 327	WARN_ON(!PAGE_ALIGNED(start));
 328	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 329	if (!cb)
 330		return BLK_STS_RESOURCE;
 331	refcount_set(&cb->pending_bios, 0);
 332	cb->errors = 0;
 333	cb->inode = inode;
 334	cb->start = start;
 335	cb->len = len;
 336	cb->mirror_num = 0;
 337	cb->compressed_pages = compressed_pages;
 338	cb->compressed_len = compressed_len;
 339	cb->orig_bio = NULL;
 340	cb->nr_pages = nr_pages;
 341
 342	bdev = fs_info->fs_devices->latest_bdev;
 343
 344	bio = btrfs_bio_alloc(first_byte);
 345	bio_set_dev(bio, bdev);
 346	bio->bi_opf = REQ_OP_WRITE | write_flags;
 347	bio->bi_private = cb;
 348	bio->bi_end_io = end_compressed_bio_write;
 349	refcount_set(&cb->pending_bios, 1);
 350
 351	/* create and submit bios for the compressed pages */
 352	bytes_left = compressed_len;
 353	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 354		int submit = 0;
 355
 356		page = compressed_pages[pg_index];
 357		page->mapping = inode->i_mapping;
 358		if (bio->bi_iter.bi_size)
 359			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
 360							  0);
 361
 362		page->mapping = NULL;
 363		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
 364		    PAGE_SIZE) {
 365			/*
 366			 * inc the count before we submit the bio so
 367			 * we know the end IO handler won't happen before
 368			 * we inc the count.  Otherwise, the cb might get
 369			 * freed before we're done setting it up
 370			 */
 371			refcount_inc(&cb->pending_bios);
 372			ret = btrfs_bio_wq_end_io(fs_info, bio,
 373						  BTRFS_WQ_ENDIO_DATA);
 374			BUG_ON(ret); /* -ENOMEM */
 375
 376			if (!skip_sum) {
 377				ret = btrfs_csum_one_bio(inode, bio, start, 1);
 378				BUG_ON(ret); /* -ENOMEM */
 379			}
 380
 381			ret = btrfs_map_bio(fs_info, bio, 0, 1);
 382			if (ret) {
 383				bio->bi_status = ret;
 384				bio_endio(bio);
 385			}
 386
 387			bio = btrfs_bio_alloc(first_byte);
 388			bio_set_dev(bio, bdev);
 389			bio->bi_opf = REQ_OP_WRITE | write_flags;
 390			bio->bi_private = cb;
 391			bio->bi_end_io = end_compressed_bio_write;
 392			bio_add_page(bio, page, PAGE_SIZE, 0);
 393		}
 394		if (bytes_left < PAGE_SIZE) {
 395			btrfs_info(fs_info,
 396					"bytes left %lu compress len %lu nr %lu",
 397			       bytes_left, cb->compressed_len, cb->nr_pages);
 398		}
 399		bytes_left -= PAGE_SIZE;
 400		first_byte += PAGE_SIZE;
 401		cond_resched();
 402	}
 403
 404	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 405	BUG_ON(ret); /* -ENOMEM */
 406
 407	if (!skip_sum) {
 408		ret = btrfs_csum_one_bio(inode, bio, start, 1);
 409		BUG_ON(ret); /* -ENOMEM */
 410	}
 411
 412	ret = btrfs_map_bio(fs_info, bio, 0, 1);
 413	if (ret) {
 414		bio->bi_status = ret;
 415		bio_endio(bio);
 416	}
 417
 418	return 0;
 419}
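
The loop above follows a simple batching pattern: pages are packed into the
current bio until either btrfs_bio_fits_in_stripe() reports that the next page
would cross a RAID stripe boundary or bio_add_page() cannot take a full page;
at that point the bio is checksummed and submitted, and a fresh one is started
for the remaining pages. A minimal sketch of that pattern, with hypothetical
new_bio(), page_fits() and submit_bio_now() helpers standing in for
btrfs_bio_alloc(), btrfs_bio_fits_in_stripe() and the checksum-plus-
btrfs_map_bio() sequence:

	/*
	 * Sketch only: flush the current batch whenever the next item would
	 * violate a placement constraint, then keep filling a fresh batch.
	 */
	static void submit_pages_in_batches(struct page **pages, unsigned long nr)
	{
		struct bio *bio = new_bio();
		unsigned long i;

		for (i = 0; i < nr; i++) {
			/* non-empty bio that the next page must not join? */
			if (bio->bi_iter.bi_size && !page_fits(bio, pages[i])) {
				submit_bio_now(bio);
				bio = new_bio();
			}
			bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		}
		submit_bio_now(bio);	/* the final, possibly partial, batch */
	}
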
 420
 421static u64 bio_end_offset(struct bio *bio)
 422{
 423	struct bio_vec *last = bio_last_bvec_all(bio);
 424
 425	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
 426}
 427
 428static noinline int add_ra_bio_pages(struct inode *inode,
 429				     u64 compressed_end,
 430				     struct compressed_bio *cb)
 431{
 432	unsigned long end_index;
 433	unsigned long pg_index;
 434	u64 last_offset;
 435	u64 isize = i_size_read(inode);
 436	int ret;
 437	struct page *page;
 438	unsigned long nr_pages = 0;
 439	struct extent_map *em;
 440	struct address_space *mapping = inode->i_mapping;
 441	struct extent_map_tree *em_tree;
 442	struct extent_io_tree *tree;
 443	u64 end;
 444	int misses = 0;
 445
 446	last_offset = bio_end_offset(cb->orig_bio);
 447	em_tree = &BTRFS_I(inode)->extent_tree;
 448	tree = &BTRFS_I(inode)->io_tree;
 449
 450	if (isize == 0)
 451		return 0;
 452
 453	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 454
 455	while (last_offset < compressed_end) {
 456		pg_index = last_offset >> PAGE_SHIFT;
 457
 458		if (pg_index > end_index)
 459			break;
 460
 461		page = xa_load(&mapping->i_pages, pg_index);
 462		if (page && !xa_is_value(page)) {
 463			misses++;
 464			if (misses > 4)
 465				break;
 466			goto next;
 467		}
 468
 469		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 470								 ~__GFP_FS));
 471		if (!page)
 472			break;
 473
 474		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 475			put_page(page);
 476			goto next;
 477		}
 478
 479		end = last_offset + PAGE_SIZE - 1;
 480		/*
 481		 * at this point, we have a locked page in the page cache
 482		 * for these bytes in the file.  But, we have to make
 483		 * sure they map to this compressed extent on disk.
 484		 */
 485		set_page_extent_mapped(page);
 486		lock_extent(tree, last_offset, end);
 487		read_lock(&em_tree->lock);
 488		em = lookup_extent_mapping(em_tree, last_offset,
 489					   PAGE_SIZE);
 490		read_unlock(&em_tree->lock);
 491
 492		if (!em || last_offset < em->start ||
 493		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 494		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 495			free_extent_map(em);
 496			unlock_extent(tree, last_offset, end);
 497			unlock_page(page);
 498			put_page(page);
 499			break;
 500		}
 501		free_extent_map(em);
 502
 503		if (page->index == end_index) {
 504			char *userpage;
 505			size_t zero_offset = offset_in_page(isize);
 506
 507			if (zero_offset) {
 508				int zeros;
 509				zeros = PAGE_SIZE - zero_offset;
 510				userpage = kmap_atomic(page);
 511				memset(userpage + zero_offset, 0, zeros);
 512				flush_dcache_page(page);
 513				kunmap_atomic(userpage);
 514			}
 515		}
 516
 517		ret = bio_add_page(cb->orig_bio, page,
 518				   PAGE_SIZE, 0);
 519
 520		if (ret == PAGE_SIZE) {
 521			nr_pages++;
 522			put_page(page);
 523		} else {
 524			unlock_extent(tree, last_offset, end);
 525			unlock_page(page);
 526			put_page(page);
 527			break;
 528		}
 529next:
 530		last_offset += PAGE_SIZE;
 531	}
 532	return 0;
 533}
 534
 535/*
 536 * for a compressed read, the bio we get passed has all the inode pages
 537 * in it.  We don't actually do IO on those pages but allocate new ones
 538 * to hold the compressed pages on disk.
 539 *
 540 * bio->bi_iter.bi_sector points to the compressed extent on disk
 541 * bio->bi_io_vec points to all of the inode pages
 542 *
 543 * After the compressed pages are read, we copy the bytes into the
 544 * bio we were passed and then call the bio end_io calls
 545 */
 546blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 547				 int mirror_num, unsigned long bio_flags)
 548{
 549	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 550	struct extent_map_tree *em_tree;
 551	struct compressed_bio *cb;
 552	unsigned long compressed_len;
 553	unsigned long nr_pages;
 554	unsigned long pg_index;
 555	struct page *page;
 556	struct block_device *bdev;
 557	struct bio *comp_bio;
 558	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
 559	u64 em_len;
 560	u64 em_start;
 561	struct extent_map *em;
 562	blk_status_t ret = BLK_STS_RESOURCE;
 563	int faili = 0;
 564	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 565	u8 *sums;
 566
 567	em_tree = &BTRFS_I(inode)->extent_tree;
 568
 569	/* we need the actual starting offset of this extent in the file */
 570	read_lock(&em_tree->lock);
 571	em = lookup_extent_mapping(em_tree,
 572				   page_offset(bio_first_page_all(bio)),
 573				   PAGE_SIZE);
 574	read_unlock(&em_tree->lock);
 575	if (!em)
 576		return BLK_STS_IOERR;
 577
 578	compressed_len = em->block_len;
 579	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 580	if (!cb)
 581		goto out;
 582
 583	refcount_set(&cb->pending_bios, 0);
 584	cb->errors = 0;
 585	cb->inode = inode;
 586	cb->mirror_num = mirror_num;
 587	sums = cb->sums;
 588
 589	cb->start = em->orig_start;
 590	em_len = em->len;
 591	em_start = em->start;
 592
 593	free_extent_map(em);
 594	em = NULL;
 595
 596	cb->len = bio->bi_iter.bi_size;
 597	cb->compressed_len = compressed_len;
 598	cb->compress_type = extent_compress_type(bio_flags);
 599	cb->orig_bio = bio;
 600
 601	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 602	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 603				       GFP_NOFS);
 604	if (!cb->compressed_pages)
 605		goto fail1;
 606
 607	bdev = fs_info->fs_devices->latest_bdev;
 608
 609	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 610		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 611							      __GFP_HIGHMEM);
 612		if (!cb->compressed_pages[pg_index]) {
 613			faili = pg_index - 1;
 614			ret = BLK_STS_RESOURCE;
 615			goto fail2;
 616		}
 617	}
 618	faili = nr_pages - 1;
 619	cb->nr_pages = nr_pages;
 620
 621	add_ra_bio_pages(inode, em_start + em_len, cb);
 622
 623	/* include any pages we added in add_ra_bio_pages */
 624	cb->len = bio->bi_iter.bi_size;
 625
 626	comp_bio = btrfs_bio_alloc(cur_disk_byte);
 627	bio_set_dev(comp_bio, bdev);
 628	comp_bio->bi_opf = REQ_OP_READ;
 629	comp_bio->bi_private = cb;
 630	comp_bio->bi_end_io = end_compressed_bio_read;
 631	refcount_set(&cb->pending_bios, 1);
 632
 633	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 634		int submit = 0;
 635
 636		page = cb->compressed_pages[pg_index];
 637		page->mapping = inode->i_mapping;
 638		page->index = em_start >> PAGE_SHIFT;
 639
 640		if (comp_bio->bi_iter.bi_size)
 641			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
 642							  comp_bio, 0);
 643
 644		page->mapping = NULL;
 645		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
 646		    PAGE_SIZE) {
 647			unsigned int nr_sectors;
 648
 649			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
 650						  BTRFS_WQ_ENDIO_DATA);
 651			BUG_ON(ret); /* -ENOMEM */
 652
 653			/*
 654			 * inc the count before we submit the bio so
 655			 * we know the end IO handler won't happen before
 656			 * we inc the count.  Otherwise, the cb might get
 657			 * freed before we're done setting it up
 658			 */
 659			refcount_inc(&cb->pending_bios);
 660
 661			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 662				ret = btrfs_lookup_bio_sums(inode, comp_bio,
 663							    sums);
 664				BUG_ON(ret); /* -ENOMEM */
 665			}
 666
 667			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 668						  fs_info->sectorsize);
 669			sums += csum_size * nr_sectors;
 670
 671			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 672			if (ret) {
 673				comp_bio->bi_status = ret;
 674				bio_endio(comp_bio);
 675			}
 676
 677			comp_bio = btrfs_bio_alloc(cur_disk_byte);
 678			bio_set_dev(comp_bio, bdev);
 679			comp_bio->bi_opf = REQ_OP_READ;
 680			comp_bio->bi_private = cb;
 681			comp_bio->bi_end_io = end_compressed_bio_read;
 682
 683			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
 684		}
 685		cur_disk_byte += PAGE_SIZE;
 686	}
 687
 688	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 689	BUG_ON(ret); /* -ENOMEM */
 690
 691	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 692		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 693		BUG_ON(ret); /* -ENOMEM */
 694	}
 695
 696	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 697	if (ret) {
 698		comp_bio->bi_status = ret;
 699		bio_endio(comp_bio);
 700	}
 701
 702	return 0;
 703
 704fail2:
 705	while (faili >= 0) {
 706		__free_page(cb->compressed_pages[faili]);
 707		faili--;
 708	}
 709
 710	kfree(cb->compressed_pages);
 711fail1:
 712	kfree(cb);
 713out:
 714	free_extent_map(em);
 715	return ret;
 716}
 717
 718/*
 719 * The heuristic uses systematic sampling to collect data from the input
 720 * range; the logic can be tuned by the following constants:
 721 *
 722 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 723 * @SAMPLING_INTERVAL  - distance between the start offsets of the samples
 724 */
 725#define SAMPLING_READ_SIZE	(16)
 726#define SAMPLING_INTERVAL	(256)
 727
 728/*
 729 * For statistical analysis of the input data we consider bytes that form a
 730 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 731 * many times the object appeared in the sample.
 732 */
 733#define BUCKET_SIZE		(256)
 734
 735/*
 736 * The size of the sample is based on a statistical sampling rule of thumb.
 737 * The common way is to perform sampling tests as long as the number of
 738 * elements in each cell is at least 5.
 739 *
 740 * Instead of 5, we choose 32 to obtain more accurate results.
 741 * If the data contain the maximum number of symbols, which is 256, we obtain a
 742 * sample size bounded by 8192.
 743 *
 744 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 745 * from up to 512 locations.
 746 */
 747#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 748				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
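
As a quick check of the arithmetic above (a sketch; the 128KiB value of
BTRFS_MAX_UNCOMPRESSED is an assumption taken from the btrfs headers):

	/* 131072 * 16 / 256 = 8192 bytes per sample, i.e. 8192 / 16 = 512
	 * sampling locations of 16 bytes each. */
	_Static_assert(128 * 1024 * SAMPLING_READ_SIZE / SAMPLING_INTERVAL == 8192,
		       "sample bound: 512 locations x 16 bytes");
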
 749
 750struct bucket_item {
 751	u32 count;
 752};
 753
 754struct heuristic_ws {
 755	/* Partial copy of input data */
 756	u8 *sample;
 757	u32 sample_size;
 758	/* Buckets store counters for each byte value */
 759	struct bucket_item *bucket;
 760	/* Sorting buffer */
 761	struct bucket_item *bucket_b;
 762	struct list_head list;
 763};
 764
 765static struct workspace_manager heuristic_wsm;
 766
 767static void heuristic_init_workspace_manager(void)
 768{
 769	btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
 770}
 771
 772static void heuristic_cleanup_workspace_manager(void)
 773{
 774	btrfs_cleanup_workspace_manager(&heuristic_wsm);
 775}
 776
 777static struct list_head *heuristic_get_workspace(unsigned int level)
 778{
 779	return btrfs_get_workspace(&heuristic_wsm, level);
 780}
 781
 782static void heuristic_put_workspace(struct list_head *ws)
 783{
 784	btrfs_put_workspace(&heuristic_wsm, ws);
 785}
 786
 787static void free_heuristic_ws(struct list_head *ws)
 788{
 789	struct heuristic_ws *workspace;
 790
 791	workspace = list_entry(ws, struct heuristic_ws, list);
 792
 793	kvfree(workspace->sample);
 794	kfree(workspace->bucket);
 795	kfree(workspace->bucket_b);
 796	kfree(workspace);
 797}
 798
 799static struct list_head *alloc_heuristic_ws(unsigned int level)
 800{
 801	struct heuristic_ws *ws;
 802
 803	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 804	if (!ws)
 805		return ERR_PTR(-ENOMEM);
 806
 807	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 808	if (!ws->sample)
 809		goto fail;
 810
 811	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 812	if (!ws->bucket)
 813		goto fail;
 814
 815	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 816	if (!ws->bucket_b)
 817		goto fail;
 818
 819	INIT_LIST_HEAD(&ws->list);
 820	return &ws->list;
 821fail:
 822	free_heuristic_ws(&ws->list);
 823	return ERR_PTR(-ENOMEM);
 824}
 825
 826const struct btrfs_compress_op btrfs_heuristic_compress = {
 827	.init_workspace_manager = heuristic_init_workspace_manager,
 828	.cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
 829	.get_workspace = heuristic_get_workspace,
 830	.put_workspace = heuristic_put_workspace,
 831	.alloc_workspace = alloc_heuristic_ws,
 832	.free_workspace = free_heuristic_ws,
 833};
 834
 835static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 836	/* The heuristic is represented as compression type 0 */
 837	&btrfs_heuristic_compress,
 838	&btrfs_zlib_compress,
 839	&btrfs_lzo_compress,
 840	&btrfs_zstd_compress,
 841};
 842
 843void btrfs_init_workspace_manager(struct workspace_manager *wsm,
 844				  const struct btrfs_compress_op *ops)
 845{
 846	struct list_head *workspace;
 847
 848	wsm->ops = ops;
 849
 850	INIT_LIST_HEAD(&wsm->idle_ws);
 851	spin_lock_init(&wsm->ws_lock);
 852	atomic_set(&wsm->total_ws, 0);
 853	init_waitqueue_head(&wsm->ws_wait);
 854
 855	/*
 856	 * Preallocate one workspace for each compression type so we can
 857	 * guarantee forward progress in the worst case
 858	 */
 859	workspace = wsm->ops->alloc_workspace(0);
 860	if (IS_ERR(workspace)) {
 861		pr_warn(
 862	"BTRFS: cannot preallocate compression workspace, will try later\n");
 863	} else {
 864		atomic_set(&wsm->total_ws, 1);
 865		wsm->free_ws = 1;
 866		list_add(workspace, &wsm->idle_ws);
 867	}
 868}
 869
 870void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
 871{
 872	struct list_head *ws;
 873
 874	while (!list_empty(&wsman->idle_ws)) {
 875		ws = wsman->idle_ws.next;
 876		list_del(ws);
 877		wsman->ops->free_workspace(ws);
 878		atomic_dec(&wsman->total_ws);
 879	}
 880}
 881
 882/*
 883 * This finds an available workspace or allocates a new one.
 884 * If it's not possible to allocate a new one, it waits until one is free.
 885 * Preallocation makes a forward progress guarantee and we do not return
 886 * errors.
 887 */
 888struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
 889				      unsigned int level)
 890{
 891	struct list_head *workspace;
 892	int cpus = num_online_cpus();
 893	unsigned nofs_flag;
 894	struct list_head *idle_ws;
 895	spinlock_t *ws_lock;
 896	atomic_t *total_ws;
 897	wait_queue_head_t *ws_wait;
 898	int *free_ws;
 899
 900	idle_ws	 = &wsm->idle_ws;
 901	ws_lock	 = &wsm->ws_lock;
 902	total_ws = &wsm->total_ws;
 903	ws_wait	 = &wsm->ws_wait;
 904	free_ws	 = &wsm->free_ws;
 905
 906again:
 907	spin_lock(ws_lock);
 908	if (!list_empty(idle_ws)) {
 909		workspace = idle_ws->next;
 910		list_del(workspace);
 911		(*free_ws)--;
 912		spin_unlock(ws_lock);
 913		return workspace;
 914
 915	}
 916	if (atomic_read(total_ws) > cpus) {
 917		DEFINE_WAIT(wait);
 918
 919		spin_unlock(ws_lock);
 920		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 921		if (atomic_read(total_ws) > cpus && !*free_ws)
 922			schedule();
 923		finish_wait(ws_wait, &wait);
 924		goto again;
 925	}
 926	atomic_inc(total_ws);
 927	spin_unlock(ws_lock);
 928
 929	/*
 930	 * Allocation helpers call vmalloc, which can't use GFP_NOFS, so we have
 931	 * to turn it off here because we might get called from the restricted
 932	 * context of btrfs_compress_bio/btrfs_compress_pages
 933	 */
 934	nofs_flag = memalloc_nofs_save();
 935	workspace = wsm->ops->alloc_workspace(level);
 936	memalloc_nofs_restore(nofs_flag);
 937
 938	if (IS_ERR(workspace)) {
 939		atomic_dec(total_ws);
 940		wake_up(ws_wait);
 941
 942		/*
 943		 * Do not return the error but go back to waiting. There's a
 944		 * workspace preallocated for each type and the compression
 945		 * time is bounded so we get to a workspace eventually. This
 946		 * makes our caller's life easier.
 947		 *
 948		 * To prevent silent and low-probability deadlocks (when the
 949		 * initial preallocation fails), check if there are any
 950		 * workspaces at all.
 951		 */
 952		if (atomic_read(total_ws) == 0) {
 953			static DEFINE_RATELIMIT_STATE(_rs,
 954					/* once per minute */ 60 * HZ,
 955					/* no burst */ 1);
 956
 957			if (__ratelimit(&_rs)) {
 958				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 959			}
 960		}
 961		goto again;
 962	}
 963	return workspace;
 964}
 965
 966static struct list_head *get_workspace(int type, int level)
 967{
 968	return btrfs_compress_op[type]->get_workspace(level);
 969}
 970
 971/*
 972 * put a workspace struct back on the list or free it if we have enough
 973 * idle ones sitting around
 974 */
 975void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
 976{
 977	struct list_head *idle_ws;
 978	spinlock_t *ws_lock;
 979	atomic_t *total_ws;
 980	wait_queue_head_t *ws_wait;
 981	int *free_ws;
 982
 983	idle_ws	 = &wsm->idle_ws;
 984	ws_lock	 = &wsm->ws_lock;
 985	total_ws = &wsm->total_ws;
 986	ws_wait	 = &wsm->ws_wait;
 987	free_ws	 = &wsm->free_ws;
 988
 989	spin_lock(ws_lock);
 990	if (*free_ws <= num_online_cpus()) {
 991		list_add(ws, idle_ws);
 992		(*free_ws)++;
 993		spin_unlock(ws_lock);
 994		goto wake;
 995	}
 996	spin_unlock(ws_lock);
 997
 998	wsm->ops->free_workspace(ws);
 999	atomic_dec(total_ws);
1000wake:
1001	cond_wake_up(ws_wait);
1002}
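
Together, btrfs_get_workspace() and btrfs_put_workspace() bracket every
compression and decompression call: get may sleep but never fails, and put
either parks the workspace on the idle list or frees it once enough are
cached. A usage sketch (using the heuristic manager declared above, level 0):

	struct list_head *ws;

	ws = btrfs_get_workspace(&heuristic_wsm, 0);	/* may sleep, never fails */
	/* ... use the per-type workspace embedding 'ws' ... */
	btrfs_put_workspace(&heuristic_wsm, ws);	/* park on idle list or free */
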
1003
1004static void put_workspace(int type, struct list_head *ws)
1005{
1006	return btrfs_compress_op[type]->put_workspace(ws);
1007}
1008
1009/*
1010 * Given an address space and start and length, compress the bytes into @pages
1011 * that are allocated on demand.
1012 *
1013 * @type_level is the encoded algorithm and level, where level 0 means whatever
1014 * default the algorithm chooses and is opaque here;
1015 * - the compression algorithm is stored in bits 0-3
1016 * - the level is stored in bits 4-7
1017 *
1018 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1019 * and returns number of actually allocated pages
1020 *
1021 * @total_in is used to return the number of bytes actually read.  It
1022 * may be smaller than the input length if we had to exit early because we
1023 * ran out of room in the pages array or because we crossed the
1024 * max_out threshold.
1025 *
1026 * @total_out is an in/out parameter, must be set to the input length and will
1027 * be also used to return the total number of compressed bytes
1028 *
1029 * @max_out tells us the max number of bytes that we're allowed to
1030 * stuff into pages
1031 */
1032int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1033			 u64 start, struct page **pages,
1034			 unsigned long *out_pages,
1035			 unsigned long *total_in,
1036			 unsigned long *total_out)
1037{
1038	int type = btrfs_compress_type(type_level);
1039	int level = btrfs_compress_level(type_level);
1040	struct list_head *workspace;
1041	int ret;
1042
1043	level = btrfs_compress_set_level(type, level);
1044	workspace = get_workspace(type, level);
1045	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
1046						      start, pages,
1047						      out_pages,
1048						      total_in, total_out);
1049	put_workspace(type, workspace);
1050	return ret;
1051}
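
The @type_level packing described above is a plain 4+4 bit split. A
hypothetical encoder mirroring the btrfs_compress_type() and
btrfs_compress_level() accessors used here:

	/* make_type_level() is a sketch, not a btrfs helper. */
	static inline unsigned int make_type_level(unsigned int type,
						   unsigned int level)
	{
		return (type & 0xF) | ((level & 0xF) << 4);
	}

For example, zlib (type 1) at level 9 encodes to 0x91.
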
1052
1053/*
1054 * pages_in is an array of pages with compressed data.
1055 *
1056 * disk_start is the starting logical offset of this array in the file
1057 *
1058 * orig_bio contains the pages from the file that we want to decompress into
1059 *
1060 * srclen is the number of bytes in pages_in
1061 *
1062 * The basic idea is that we have a bio that was created by readpages.
1063 * The pages in the bio are for the uncompressed data, and they may not
1064 * be contiguous.  They all correspond to the range of bytes covered by
1065 * the compressed extent.
1066 */
1067static int btrfs_decompress_bio(struct compressed_bio *cb)
1068{
1069	struct list_head *workspace;
1070	int ret;
1071	int type = cb->compress_type;
1072
1073	workspace = get_workspace(type, 0);
1074	ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
1075	put_workspace(type, workspace);
1076
1077	return ret;
1078}
1079
1080/*
1081 * a less complex decompression routine.  Our compressed data fits in a
1082 * single page, and we want to read a single page out of it.
1083 * start_byte tells us the offset into the compressed data we're interested in
1084 */
1085int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1086		     unsigned long start_byte, size_t srclen, size_t destlen)
1087{
1088	struct list_head *workspace;
1089	int ret;
1090
1091	workspace = get_workspace(type, 0);
1092	ret = btrfs_compress_op[type]->decompress(workspace, data_in,
1093						  dest_page, start_byte,
1094						  srclen, destlen);
1095	put_workspace(type, workspace);
1096
1097	return ret;
1098}
1099
1100void __init btrfs_init_compress(void)
1101{
1102	int i;
1103
1104	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
1105		btrfs_compress_op[i]->init_workspace_manager();
1106}
1107
1108void __cold btrfs_exit_compress(void)
1109{
1110	int i;
1111
1112	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
1113		btrfs_compress_op[i]->cleanup_workspace_manager();
1114}
1115
1116/*
1117 * Copy uncompressed data from working buffer to pages.
1118 *
1119 * buf_start is the byte offset of our workspace buffer within the uncompressed data.
1120 *
1121 * total_out is the offset of the end of the data in the workspace buffer.
1122 */
1123int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1124			      unsigned long total_out, u64 disk_start,
1125			      struct bio *bio)
1126{
1127	unsigned long buf_offset;
1128	unsigned long current_buf_start;
1129	unsigned long start_byte;
1130	unsigned long prev_start_byte;
1131	unsigned long working_bytes = total_out - buf_start;
1132	unsigned long bytes;
1133	char *kaddr;
1134	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1135
1136	/*
1137	 * start byte is the first byte of the page we're currently
1138	 * copying into, relative to the start of the uncompressed data.
1139	 */
1140	start_byte = page_offset(bvec.bv_page) - disk_start;
1141
1142	/* we haven't yet hit data corresponding to this page */
1143	if (total_out <= start_byte)
1144		return 1;
1145
1146	/*
1147	 * the start of the data we care about is offset into
1148	 * the middle of our working buffer
1149	 */
1150	if (total_out > start_byte && buf_start < start_byte) {
1151		buf_offset = start_byte - buf_start;
1152		working_bytes -= buf_offset;
1153	} else {
1154		buf_offset = 0;
1155	}
1156	current_buf_start = buf_start;
1157
1158	/* copy bytes from the working buffer into the pages */
1159	while (working_bytes > 0) {
1160		bytes = min_t(unsigned long, bvec.bv_len,
1161				PAGE_SIZE - buf_offset);
1162		bytes = min(bytes, working_bytes);
1163
1164		kaddr = kmap_atomic(bvec.bv_page);
1165		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1166		kunmap_atomic(kaddr);
1167		flush_dcache_page(bvec.bv_page);
1168
1169		buf_offset += bytes;
1170		working_bytes -= bytes;
1171		current_buf_start += bytes;
1172
1173		/* check if we need to pick another page */
1174		bio_advance(bio, bytes);
1175		if (!bio->bi_iter.bi_size)
1176			return 0;
1177		bvec = bio_iter_iovec(bio, bio->bi_iter);
1178		prev_start_byte = start_byte;
1179		start_byte = page_offset(bvec.bv_page) - disk_start;
1180
1181		/*
1182		 * We need to make sure we're only adjusting
1183		 * our offset into the compression working buffer when
1184		 * we're switching pages.  Otherwise we can incorrectly
1185		 * keep copying when we were actually done.
1186		 */
1187		if (start_byte != prev_start_byte) {
1188			/*
1189			 * make sure our new page is covered by this
1190			 * working buffer
1191			 */
1192			if (total_out <= start_byte)
1193				return 1;
1194
1195			/*
1196			 * the next page in the biovec might not be adjacent
1197			 * to the last page, but it might still be found
1198			 * inside this working buffer. bump our offset pointer
1199			 */
1200			if (total_out > start_byte &&
1201			    current_buf_start < start_byte) {
1202				buf_offset = start_byte - buf_start;
1203				working_bytes = total_out - start_byte;
1204				current_buf_start = buf_start + buf_offset;
1205			}
1206		}
1207	}
1208
1209	return 1;
1210}
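
A worked example of the offset bookkeeping above, with illustrative values:

	/*
	 * disk_start = F       extent start offset in the file
	 * buf_start  = 4096    buffer holds decompressed bytes [4096, 8192)
	 * total_out  = 8192
	 * bio page at file offset F + 4096:
	 *   start_byte    = (F + 4096) - F = 4096
	 *   buf_offset    = 4096 - 4096   = 0
	 *   working_bytes = 8192 - 4096   = 4096  -> one full page is copied
	 *                                            before the next refill
	 */
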
1211
1212/*
1213 * Shannon Entropy calculation
1214 *
1215 * Pure byte distribution analysis fails to determine compressibility of data.
1216 * Try calculating entropy to estimate the average minimum number of bits
1217 * needed to encode the sampled data.
1218 *
1219 * For convenience, return the percentage of needed bits instead of the amount of
1220 * bits directly.
1221 *
1222 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1223 *			    and can be compressible with high probability
1224 *
1225 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1226 *
1227 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1228 */
1229#define ENTROPY_LVL_ACEPTABLE		(65)
1230#define ENTROPY_LVL_HIGH		(80)
1231
1232/*
1233 * For increased precision in the shannon_entropy calculation,
1234 * let's do pow(n, M) to save more digits after the decimal point:
1235 *
1236 * - maximum int bit length is 64
1237 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1238 * - 13 * 4 = 52 < 64		-> M = 4
1239 *
1240 * So use pow(n, 4).
1241 */
1242static inline u32 ilog2_w(u64 n)
1243{
1244	return ilog2(n * n * n * n);
1245}
1246
1247static u32 shannon_entropy(struct heuristic_ws *ws)
1248{
1249	const u32 entropy_max = 8 * ilog2_w(2);
1250	u32 entropy_sum = 0;
1251	u32 p, p_base, sz_base;
1252	u32 i;
1253
1254	sz_base = ilog2_w(ws->sample_size);
1255	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1256		p = ws->bucket[i].count;
1257		p_base = ilog2_w(p);
1258		entropy_sum += p * (sz_base - p_base);
1259	}
1260
1261	entropy_sum /= ws->sample_size;
1262	return entropy_sum * 100 / entropy_max;
1263}
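
For comparison, a floating-point reference of what the fixed-point code above
approximates (userspace sketch; the kernel avoids floating point, hence the
ilog2_w() trick). With p = count / sample_size it returns
H = -sum(p * log2(p)) as a percentage of the 8-bit maximum:

	#include <math.h>
	#include <stdint.h>

	static double shannon_entropy_ref(const uint32_t *bucket,
					  unsigned int nbuckets,
					  uint32_t sample_size)
	{
		double h = 0.0;
		unsigned int i;

		for (i = 0; i < nbuckets; i++) {
			double p;

			if (!bucket[i])
				continue;
			p = (double)bucket[i] / sample_size;
			h -= p * log2(p);	/* bits per byte */
		}
		return h * 100.0 / 8.0;		/* percent of 8 bits */
	}
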
1264
1265#define RADIX_BASE		4U
1266#define COUNTERS_SIZE		(1U << RADIX_BASE)
1267
1268static u8 get4bits(u64 num, int shift) {
1269	u8 low4bits;
1270
1271	num >>= shift;
1272	/* Reverse order */
1273	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1274	return low4bits;
1275}
1276
1277/*
1278 * Use 4 bits as radix base
1279 * Use 16 u32 counters for calculating new position in buf array
1280 *
1281 * @array     - array that will be sorted
1282 * @array_buf - buffer array to store sorting results
1283 *              must be equal in size to @array
1284 * @num       - array size
1285 */
1286static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1287		       int num)
1288{
1289	u64 max_num;
1290	u64 buf_num;
1291	u32 counters[COUNTERS_SIZE];
1292	u32 new_addr;
1293	u32 addr;
1294	int bitlen;
1295	int shift;
1296	int i;
1297
1298	/*
1299	 * Try to avoid useless loop iterations for small numbers stored in big
1300	 * counters.  Example: 48 33 4 ... in a 64bit array
1301	 */
1302	max_num = array[0].count;
1303	for (i = 1; i < num; i++) {
1304		buf_num = array[i].count;
1305		if (buf_num > max_num)
1306			max_num = buf_num;
1307	}
1308
1309	buf_num = ilog2(max_num);
1310	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1311
1312	shift = 0;
1313	while (shift < bitlen) {
1314		memset(counters, 0, sizeof(counters));
1315
1316		for (i = 0; i < num; i++) {
1317			buf_num = array[i].count;
1318			addr = get4bits(buf_num, shift);
1319			counters[addr]++;
1320		}
1321
1322		for (i = 1; i < COUNTERS_SIZE; i++)
1323			counters[i] += counters[i - 1];
1324
1325		for (i = num - 1; i >= 0; i--) {
1326			buf_num = array[i].count;
1327			addr = get4bits(buf_num, shift);
1328			counters[addr]--;
1329			new_addr = counters[addr];
1330			array_buf[new_addr] = array[i];
1331		}
1332
1333		shift += RADIX_BASE;
1334
1335		/*
1336		 * A normal radix sort expects to move data from the temporary array
1337		 * back to the main one.  But that requires some CPU time. Avoid it
1338		 * by doing another sort iteration into the original array instead
1339		 * of a memcpy()
1340		 */
1341		memset(counters, 0, sizeof(counters));
1342
1343		for (i = 0; i < num; i++) {
1344			buf_num = array_buf[i].count;
1345			addr = get4bits(buf_num, shift);
1346			counters[addr]++;
1347		}
1348
1349		for (i = 1; i < COUNTERS_SIZE; i++)
1350			counters[i] += counters[i - 1];
1351
1352		for (i = num - 1; i >= 0; i--) {
1353			buf_num = array_buf[i].count;
1354			addr = get4bits(buf_num, shift);
1355			counters[addr]--;
1356			new_addr = counters[addr];
1357			array[new_addr] = array_buf[i];
1358		}
1359
1360		shift += RADIX_BASE;
1361	}
1362}
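
Because get4bits() reverses the digit order, the otherwise ascending radix
sort produces a descending array, which is exactly the order
byte_core_set_size() expects. Usage sketch:

	struct bucket_item counts[BUCKET_SIZE];		/* filled by the caller */
	struct bucket_item scratch[BUCKET_SIZE];	/* same size as counts */

	radix_sort(counts, scratch, BUCKET_SIZE);
	/* counts[0].count is now the largest, counts[255].count the smallest */
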
1363
1364/*
1365 * Size of the core byte set - how many bytes cover 90% of the sample
1366 *
1367 * There are several types of structured binary data that use nearly all byte
1368 * values. The distribution can be uniform and counts in all buckets will be
1369 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1370 *
1371 * Another possibility is a normal (Gaussian) distribution, where the data could
1372 * be potentially compressible, but we have to take a few more steps to decide
1373 * how much.
1374 *
1375 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
1376 *                       compression algorithms can easily exploit that
1377 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
1378 *                       probability is not compressible
1379 */
1380#define BYTE_CORE_SET_LOW		(64)
1381#define BYTE_CORE_SET_HIGH		(200)
1382
1383static int byte_core_set_size(struct heuristic_ws *ws)
1384{
1385	u32 i;
1386	u32 coreset_sum = 0;
1387	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1388	struct bucket_item *bucket = ws->bucket;
1389
1390	/* Sort in reverse order */
1391	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1392
1393	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1394		coreset_sum += bucket[i].count;
1395
1396	if (coreset_sum > core_set_threshold)
1397		return i;
1398
1399	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1400		coreset_sum += bucket[i].count;
1401		if (coreset_sum > core_set_threshold)
1402			break;
1403	}
1404
1405	return i;
1406}
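
Two illustrative extremes for these thresholds: with a perfectly uniform
sample (encrypted-looking data) every bucket holds sample_size / 256, so even
the top 200 buckets cover only 200/256 = ~78% of the sample; the 90% threshold
is never reached and the function returns BYTE_CORE_SET_HIGH. Conversely, if
60 byte values carried 95% of the counts, the threshold would be crossed within
the first 64 buckets and the caller would treat the data as compressible.
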
1407
1408/*
1409 * Count byte values in buckets.
1410 * This heuristic can detect textual data (configs, xml, json, html, etc).
1411 * In most text-like data the byte set is restricted to a limited number of
1412 * possible characters, and that restriction in most cases makes the data
1413 * easy to compress.
1414 *
1415 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1416 *	less - compressible
1417 *	more - need additional analysis
1418 */
1419#define BYTE_SET_THRESHOLD		(64)
1420
1421static u32 byte_set_size(const struct heuristic_ws *ws)
1422{
1423	u32 i;
1424	u32 byte_set_size = 0;
1425
1426	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1427		if (ws->bucket[i].count > 0)
1428			byte_set_size++;
1429	}
1430
1431	/*
1432	 * Continue collecting count of byte values in buckets.  If the byte
1433	 * set size is bigger than the threshold, it's pointless to continue,
1434	 * the detection technique would fail for this type of data.
1435	 */
1436	for (; i < BUCKET_SIZE; i++) {
1437		if (ws->bucket[i].count > 0) {
1438			byte_set_size++;
1439			if (byte_set_size > BYTE_SET_THRESHOLD)
1440				return byte_set_size;
1441		}
1442	}
1443
1444	return byte_set_size;
1445}
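
What byte_set_size() measures is simply the number of distinct byte values in
the sample. A standalone sketch of the same quantity, with one concrete data
point: a plain hex dump uses roughly 18 distinct bytes ('0'-'9', 'a'-'f',
space, newline), far below BYTE_SET_THRESHOLD:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	static unsigned int distinct_bytes(const uint8_t *buf, size_t len)
	{
		bool seen[256] = { false };
		unsigned int n = 0;
		size_t i;

		for (i = 0; i < len; i++) {
			if (!seen[buf[i]]) {
				seen[buf[i]] = true;
				n++;
			}
		}
		return n;
	}
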
1446
1447static bool sample_repeated_patterns(struct heuristic_ws *ws)
1448{
1449	const u32 half_of_sample = ws->sample_size / 2;
1450	const u8 *data = ws->sample;
1451
1452	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1453}
1454
1455static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1456				     struct heuristic_ws *ws)
1457{
1458	struct page *page;
1459	u64 index, index_end;
1460	u32 i, curr_sample_pos;
1461	u8 *in_data;
1462
1463	/*
1464	 * Compression handles the input data in chunks of 128KiB
1465	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1466	 *
1467	 * We do the same for the heuristic and loop over the whole range.
1468	 *
1469	 * MAX_SAMPLE_SIZE - calculated under the assumption that the heuristic
1470	 * will process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1471	 */
1472	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1473		end = start + BTRFS_MAX_UNCOMPRESSED;
1474
1475	index = start >> PAGE_SHIFT;
1476	index_end = end >> PAGE_SHIFT;
1477
1478	/* Don't miss unaligned end */
1479	if (!IS_ALIGNED(end, PAGE_SIZE))
1480		index_end++;
1481
1482	curr_sample_pos = 0;
1483	while (index < index_end) {
1484		page = find_get_page(inode->i_mapping, index);
1485		in_data = kmap(page);
1486		/* Handle case where the start is not aligned to PAGE_SIZE */
1487		i = start % PAGE_SIZE;
1488		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1489			/* Don't sample any garbage from the last page */
1490			if (start > end - SAMPLING_READ_SIZE)
1491				break;
1492			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1493					SAMPLING_READ_SIZE);
1494			i += SAMPLING_INTERVAL;
1495			start += SAMPLING_INTERVAL;
1496			curr_sample_pos += SAMPLING_READ_SIZE;
1497		}
1498		kunmap(page);
1499		put_page(page);
1500
1501		index++;
1502	}
1503
1504	ws->sample_size = curr_sample_pos;
1505}
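
Stripped of the page-cache walking, the sampling above copies
SAMPLING_READ_SIZE bytes at every SAMPLING_INTERVAL boundary, i.e. 16 of
every 256 bytes (1/16 of the range). A flat-buffer sketch of the same pattern:

	#include <stdint.h>
	#include <string.h>

	/* Returns the number of sample bytes collected (ws->sample_size). */
	static uint32_t collect_sample(const uint8_t *data, size_t len,
				       uint8_t *sample)
	{
		uint32_t pos = 0;
		size_t off;

		for (off = 0; off + SAMPLING_READ_SIZE <= len;
		     off += SAMPLING_INTERVAL) {
			memcpy(sample + pos, data + off, SAMPLING_READ_SIZE);
			pos += SAMPLING_READ_SIZE;
		}
		return pos;
	}
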
1506
1507/*
1508 * Compression heuristic.
1509 *
1510 * For now it's a naive and optimistic 'return true', we'll extend the logic to
1511 * quickly (compared to direct compression) detect data characteristics
1512 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1513 * data.
1514 *
1515 * The following types of analysis can be performed:
1516 * - detect mostly zero data
1517 * - detect data with low "byte set" size (text, etc)
1518 * - detect data with low/high "core byte" set
1519 *
1520 * Return non-zero if the compression should be done, 0 otherwise.
1521 */
1522int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1523{
1524	struct list_head *ws_list = get_workspace(0, 0);
1525	struct heuristic_ws *ws;
1526	u32 i;
1527	u8 byte;
1528	int ret = 0;
1529
1530	ws = list_entry(ws_list, struct heuristic_ws, list);
1531
1532	heuristic_collect_sample(inode, start, end, ws);
1533
1534	if (sample_repeated_patterns(ws)) {
1535		ret = 1;
1536		goto out;
1537	}
1538
1539	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);
1540
1541	for (i = 0; i < ws->sample_size; i++) {
1542		byte = ws->sample[i];
1543		ws->bucket[byte].count++;
1544	}
1545
1546	i = byte_set_size(ws);
1547	if (i < BYTE_SET_THRESHOLD) {
1548		ret = 2;
1549		goto out;
1550	}
1551
1552	i = byte_core_set_size(ws);
1553	if (i <= BYTE_CORE_SET_LOW) {
1554		ret = 3;
1555		goto out;
1556	}
1557
1558	if (i >= BYTE_CORE_SET_HIGH) {
1559		ret = 0;
1560		goto out;
1561	}
1562
1563	i = shannon_entropy(ws);
1564	if (i <= ENTROPY_LVL_ACEPTABLE) {
1565		ret = 4;
1566		goto out;
1567	}
1568
1569	/*
1570	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1571	 * needed to give green light to compression.
1572	 *
1573	 * For now just assume that compression at that level is not worth the
1574	 * resources because:
1575	 *
1576	 * 1. it is possible to defrag the data later
1577	 *
1578	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1579	 * values, every bucket has counter at level ~54. The heuristic would
1580	 * be confused. This can happen when data have some internal repeated
1581	 * patterns like "abbacbbc...". This can be detected by analyzing
1582	 * pairs of bytes, which is too costly.
1583	 */
1584	if (i < ENTROPY_LVL_HIGH) {
1585		ret = 5;
1586		goto out;
1587	} else {
1588		ret = 0;
1589		goto out;
1590	}
1591
1592out:
1593	put_workspace(0, ws_list);
1594	return ret;
1595}
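
For reference, the non-zero return values above encode which detector fired:
1 - the two sample halves repeat, 2 - small byte set, 3 - small core byte set,
4 - entropy at or below ENTROPY_LVL_ACEPTABLE, 5 - entropy below
ENTROPY_LVL_HIGH (given the benefit of the doubt). 0 means the data looks
incompressible; per the comment above, callers only test for non-zero.
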
1596
1597/*
1598 * Convert the compression suffix (eg. after "zlib" starting with ":") to
1599 * level; an unrecognized string will set the default level
1600 */
1601unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1602{
1603	unsigned int level = 0;
1604	int ret;
1605
1606	if (!type)
1607		return 0;
1608
1609	if (str[0] == ':') {
1610		ret = kstrtouint(str + 1, 10, &level);
1611		if (ret)
1612			level = 0;
1613	}
1614
1615	level = btrfs_compress_set_level(type, level);
1616
1617	return level;
1618}
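
Tracing a few hypothetical inputs through the parser above (assuming level 9
is within zlib's supported range):

	/* Illustrative calls; the results follow from the code above.
	 *   btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":9")   -> 9
	 *   btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, "")     -> default level
	 *   btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":bad") -> default level
	 *   btrfs_compress_str2level(0, ":9")                     -> 0 (no type)
	 */
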
1619
1620/*
1621 * Adjust @level according to the limits of the compression algorithm or
1622 * fall back to the default
1623 */
1624unsigned int btrfs_compress_set_level(int type, unsigned level)
1625{
1626	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1627
1628	if (level == 0)
1629		level = ops->default_level;
1630	else
1631		level = min(level, ops->max_level);
1632
1633	return level;
1634}