v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
 
   8#include <linux/file.h>
   9#include <linux/fs.h>
  10#include <linux/pagemap.h>
  11#include <linux/pagevec.h>
  12#include <linux/highmem.h>
  13#include <linux/kthread.h>
  14#include <linux/time.h>
  15#include <linux/init.h>
  16#include <linux/string.h>
  17#include <linux/backing-dev.h>
 
 
  18#include <linux/writeback.h>
  19#include <linux/psi.h>
  20#include <linux/slab.h>
  21#include <linux/sched/mm.h>
  22#include <linux/log2.h>
  23#include <linux/shrinker.h>
  24#include <crypto/hash.h>
  25#include "misc.h"
  26#include "ctree.h"
  27#include "fs.h"
  28#include "disk-io.h"
  29#include "transaction.h"
  30#include "btrfs_inode.h"
  31#include "bio.h"
  32#include "ordered-data.h"
  33#include "compression.h"
  34#include "extent_io.h"
  35#include "extent_map.h"
  36#include "subpage.h"
  37#include "zoned.h"
  38#include "file-item.h"
  39#include "super.h"
  40
  41static struct bio_set btrfs_compressed_bioset;
  42
  43static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  44
  45const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  46{
  47	switch (type) {
  48	case BTRFS_COMPRESS_ZLIB:
  49	case BTRFS_COMPRESS_LZO:
  50	case BTRFS_COMPRESS_ZSTD:
  51	case BTRFS_COMPRESS_NONE:
  52		return btrfs_compress_types[type];
  53	default:
  54		break;
  55	}
  56
  57	return NULL;
  58}
  59
  60static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
  61{
  62	return container_of(bbio, struct compressed_bio, bbio);
  63}
  64
  65static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
  66						   u64 start, blk_opf_t op,
  67						   btrfs_bio_end_io_t end_io)
  68{
  69	struct btrfs_bio *bbio;
  70
  71	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
  72					  GFP_NOFS, &btrfs_compressed_bioset));
  73	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
  74	bbio->inode = inode;
  75	bbio->file_offset = start;
  76	return to_compressed_bio(bbio);
  77}
  78
  79bool btrfs_compress_is_valid_type(const char *str, size_t len)
 
 
  80{
  81	int i;
  82
  83	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
  84		size_t comp_len = strlen(btrfs_compress_types[i]);
  85
  86		if (len < comp_len)
  87			continue;
  88
  89		if (!strncmp(btrfs_compress_types[i], str, comp_len))
  90			return true;
  91	}
  92	return false;
  93}
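/*
 * Illustrative note (editor's addition, not from the original source):
 * because only the algorithm-name prefix is compared above, a string that
 * carries a level suffix also validates.  For example:
 *
 *	btrfs_compress_is_valid_type("zstd:3", 6);	// true, prefix "zstd"
 *	btrfs_compress_is_valid_type("none", 4);	// false, not in the table
 */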
  94
  95static int compression_compress_pages(int type, struct list_head *ws,
  96               struct address_space *mapping, u64 start, struct page **pages,
  97               unsigned long *out_pages, unsigned long *total_in,
  98               unsigned long *total_out)
  99{
 100	switch (type) {
 101	case BTRFS_COMPRESS_ZLIB:
 102		return zlib_compress_pages(ws, mapping, start, pages,
 103				out_pages, total_in, total_out);
 104	case BTRFS_COMPRESS_LZO:
 105		return lzo_compress_pages(ws, mapping, start, pages,
 106				out_pages, total_in, total_out);
 107	case BTRFS_COMPRESS_ZSTD:
 108		return zstd_compress_pages(ws, mapping, start, pages,
 109				out_pages, total_in, total_out);
 110	case BTRFS_COMPRESS_NONE:
 111	default:
 112		/*
  113		 * This can happen when compression races with remount setting
  114		 * it to 'no compress', while the caller doesn't call
  115		 * inode_need_compress() to check if we really need to
  116		 * compress.
  117		 *
  118		 * Not a big deal, we just need to inform the caller that we
  119		 * haven't allocated any pages yet.
 120		 */
 121		*out_pages = 0;
 122		return -E2BIG;
 123	}
 124}
 125
 126static int compression_decompress_bio(struct list_head *ws,
 127				      struct compressed_bio *cb)
 128{
 129	switch (cb->compress_type) {
 130	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
 131	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
 132	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
 133	case BTRFS_COMPRESS_NONE:
 134	default:
 135		/*
 136		 * This can't happen, the type is validated several times
 137		 * before we get here.
 138		 */
 139		BUG();
 140	}
 141}
 142
 143static int compression_decompress(int type, struct list_head *ws,
 144		const u8 *data_in, struct page *dest_page,
 145		unsigned long dest_pgoff, size_t srclen, size_t destlen)
 146{
 147	switch (type) {
 148	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
 149						dest_pgoff, srclen, destlen);
 150	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
 151						dest_pgoff, srclen, destlen);
 152	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
 153						dest_pgoff, srclen, destlen);
 154	case BTRFS_COMPRESS_NONE:
 155	default:
 156		/*
 157		 * This can't happen, the type is validated several times
 158		 * before we get here.
 159		 */
 160		BUG();
 161	}
 162}
 163
 164static void btrfs_free_compressed_pages(struct compressed_bio *cb)
 165{
 166	for (unsigned int i = 0; i < cb->nr_pages; i++)
 167		btrfs_free_compr_page(cb->compressed_pages[i]);
 168	kfree(cb->compressed_pages);
 169}
 
 
 170
 171static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 172
 173/*
 174 * Global cache of last unused pages for compression/decompression.
 175 */
 176static struct btrfs_compr_pool {
 177	struct shrinker *shrinker;
 178	spinlock_t lock;
 179	struct list_head list;
 180	int count;
 181	int thresh;
 182} compr_pool;
 183
 184static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
 185{
 186	int ret;
 187
 188	/*
 189	 * We must not read the values more than once if 'ret' gets expanded in
 190	 * the return statement so we don't accidentally return a negative
 191	 * number, even if the first condition finds it positive.
 192	 */
 193	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);
 194
 195	return ret > 0 ? ret : 0;
 196}
 197
 198static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
 199{
 200	struct list_head remove;
 201	struct list_head *tmp, *next;
 202	int freed;
 203
 204	if (compr_pool.count == 0)
 205		return SHRINK_STOP;
 206
 207	INIT_LIST_HEAD(&remove);
 208
 209	/* For now, just simply drain the whole list. */
 210	spin_lock(&compr_pool.lock);
 211	list_splice_init(&compr_pool.list, &remove);
 212	freed = compr_pool.count;
 213	compr_pool.count = 0;
 214	spin_unlock(&compr_pool.lock);
 215
 216	list_for_each_safe(tmp, next, &remove) {
 217		struct page *page = list_entry(tmp, struct page, lru);
 
 
 218
 219		ASSERT(page_ref_count(page) == 1);
 220		put_page(page);
 221	}
 222
 223	return freed;
 224}
 225
 226/*
  227 * Common wrappers for page allocation used by the compression code.
 228 */
 229struct page *btrfs_alloc_compr_page(void)
 230{
 231	struct page *page = NULL;
 232
 233	spin_lock(&compr_pool.lock);
 234	if (compr_pool.count > 0) {
 235		page = list_first_entry(&compr_pool.list, struct page, lru);
 236		list_del_init(&page->lru);
 237		compr_pool.count--;
 238	}
 239	spin_unlock(&compr_pool.lock);
 240
 241	if (page)
 242		return page;
 243
 244	return alloc_page(GFP_NOFS);
 245}
 246
 247void btrfs_free_compr_page(struct page *page)
 248{
 249	bool do_free = false;
 250
 251	spin_lock(&compr_pool.lock);
 252	if (compr_pool.count > compr_pool.thresh) {
 253		do_free = true;
 254	} else {
 255		list_add(&page->lru, &compr_pool.list);
 256		compr_pool.count++;
 257	}
 258	spin_unlock(&compr_pool.lock);
 259
 260	if (!do_free)
 261		return;
 262
 263	ASSERT(page_ref_count(page) == 1);
 264	put_page(page);
 265}
 266
  267static void end_bbio_compressed_read(struct btrfs_bio *bbio)
 268{
 269	struct compressed_bio *cb = to_compressed_bio(bbio);
 270	blk_status_t status = bbio->bio.bi_status;
 271
 272	if (!status)
 273		status = errno_to_blk_status(btrfs_decompress_bio(cb));
 274
 275	btrfs_free_compressed_pages(cb);
 276	btrfs_bio_end_io(cb->orig_bbio, status);
 277	bio_put(&bbio->bio);
 
 
 278}
 279
 280/*
 281 * Clear the writeback bits on all of the file
 282 * pages for a compressed write
 283 */
 284static noinline void end_compressed_writeback(const struct compressed_bio *cb)
 
 285{
 286	struct inode *inode = &cb->bbio.inode->vfs_inode;
 287	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 288	unsigned long index = cb->start >> PAGE_SHIFT;
 289	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 290	struct folio_batch fbatch;
 291	const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
 292	int i;
 293	int ret;
 294
 295	if (error)
 296		mapping_set_error(inode->i_mapping, error);
 297
 298	folio_batch_init(&fbatch);
 299	while (index <= end_index) {
 300		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
 301				&fbatch);
 302
 303		if (ret == 0)
 304			return;
 305
 306		for (i = 0; i < ret; i++) {
 307			struct folio *folio = fbatch.folios[i];
 308
 309			btrfs_folio_clamp_clear_writeback(fs_info, folio,
 310							  cb->start, cb->len);
 311		}
 312		folio_batch_release(&fbatch);
 
 313	}
 314	/* the inode may be gone now */
 315}
 316
 317static void btrfs_finish_compressed_write_work(struct work_struct *work)
 318{
 319	struct compressed_bio *cb =
 320		container_of(work, struct compressed_bio, write_end_work);
 321
 322	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
 323				    cb->bbio.bio.bi_status == BLK_STS_OK);
 324
 325	if (cb->writeback)
 326		end_compressed_writeback(cb);
 327	/* Note, our inode could be gone now */
 328
 329	btrfs_free_compressed_pages(cb);
 330	bio_put(&cb->bbio.bio);
 331}
 332
 333/*
 334 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 335 * writeback on the file pages and free the compressed pages.
 
 336 *
 337 * This also calls the writeback end hooks for the file pages so that metadata
 338 * and checksums can be updated in the file.
 339 */
  340static void end_bbio_compressed_write(struct btrfs_bio *bbio)
 341{
 342	struct compressed_bio *cb = to_compressed_bio(bbio);
 343	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;
 344
 345	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
 346}
 347
 348static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
 349{
 350	struct bio *bio = &cb->bbio.bio;
 351	u32 offset = 0;
 
 352
 353	while (offset < cb->compressed_len) {
 354		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
 355
 356		/* Maximum compressed extent is smaller than bio size limit. */
 357		__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
 358			       len, 0);
 359		offset += len;
 360	}
 361}
 362
 363/*
 364 * worker function to build and submit bios for previously compressed pages.
 365 * The corresponding pages in the inode should be marked for writeback
 366 * and the compressed pages should have a reference on them for dropping
 367 * when the IO is complete.
 368 *
 369 * This also checksums the file bytes and gets things ready for
 370 * the end io hooks.
 371 */
 372void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
 373				   struct page **compressed_pages,
 374				   unsigned int nr_pages,
 375				   blk_opf_t write_flags,
 376				   bool writeback)
 
 377{
 378	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
 379	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 380	struct compressed_bio *cb;
 381
 382	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
 383	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
 384
 385	cb = alloc_compressed_bio(inode, ordered->file_offset,
 386				  REQ_OP_WRITE | write_flags,
  387				  end_bbio_compressed_write);
 388	cb->start = ordered->file_offset;
 389	cb->len = ordered->num_bytes;
 
 
 390	cb->compressed_pages = compressed_pages;
 391	cb->compressed_len = ordered->disk_num_bytes;
 392	cb->writeback = writeback;
 393	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
 394	cb->nr_pages = nr_pages;
 395	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
 396	cb->bbio.ordered = ordered;
 397	btrfs_add_compressed_bio_pages(cb);
 398
 399	btrfs_submit_bio(&cb->bbio, 0);
 400}
 401
 402/*
 403 * Add extra pages in the same compressed file extent so that we don't need to
 404 * re-read the same extent again and again.
 405 *
  406 * NOTE: this won't work well for subpage, as for a subpage read we lock the
  407 * full page and then submit a bio for each compressed/regular extent.
  408 *
  409 * This means that if several sectors in the same page point to the same
  410 * on-disk compressed data, we will re-read the same extent many times and
  411 * this function can only help for the next page.
 412 */
 413static noinline int add_ra_bio_pages(struct inode *inode,
 414				     u64 compressed_end,
 415				     struct compressed_bio *cb,
 416				     int *memstall, unsigned long *pflags)
 417{
 418	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 419	unsigned long end_index;
 420	struct bio *orig_bio = &cb->orig_bbio->bio;
 421	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
 422	u64 isize = i_size_read(inode);
 423	int ret;
 424	struct page *page;
 
 425	struct extent_map *em;
 426	struct address_space *mapping = inode->i_mapping;
 427	struct extent_map_tree *em_tree;
 428	struct extent_io_tree *tree;
 429	int sectors_missed = 0;
 
 430
 
 431	em_tree = &BTRFS_I(inode)->extent_tree;
 432	tree = &BTRFS_I(inode)->io_tree;
 433
 434	if (isize == 0)
 435		return 0;
 436
 437	/*
 438	 * For current subpage support, we only support 64K page size,
 439	 * which means maximum compressed extent size (128K) is just 2x page
 440	 * size.
 441	 * This makes readahead less effective, so here disable readahead for
 442	 * subpage for now, until full compressed write is supported.
 443	 */
 444	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
 445		return 0;
 446
 447	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 448
 449	while (cur < compressed_end) {
 450		u64 page_end;
 451		u64 pg_index = cur >> PAGE_SHIFT;
 452		u32 add_size;
 453
 454		if (pg_index > end_index)
 455			break;
 456
 457		page = xa_load(&mapping->i_pages, pg_index);
 458		if (page && !xa_is_value(page)) {
 459			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
 460					  fs_info->sectorsize_bits;
 461
 462			/* Beyond threshold, no need to continue */
 463			if (sectors_missed > 4)
 464				break;
 465
 466			/*
 467			 * Jump to next page start as we already have page for
 468			 * current offset.
 469			 */
 470			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
 471			continue;
 472		}
 473
 474		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 475								 ~__GFP_FS));
 476		if (!page)
 477			break;
 478
 479		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 480			put_page(page);
 481			/* There is already a page, skip to page end */
 482			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
 483			continue;
 484		}
 485
 486		if (!*memstall && PageWorkingset(page)) {
 487			psi_memstall_enter(pflags);
 488			*memstall = 1;
 489		}
 490
 491		ret = set_page_extent_mapped(page);
 492		if (ret < 0) {
 493			unlock_page(page);
 494			put_page(page);
 495			break;
 496		}
 497
 498		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
 499		lock_extent(tree, cur, page_end, NULL);
 500		read_lock(&em_tree->lock);
 501		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
 
 502		read_unlock(&em_tree->lock);
 503
 504		/*
 505		 * At this point, we have a locked page in the page cache for
 506		 * these bytes in the file.  But, we have to make sure they map
 507		 * to this compressed extent on disk.
 508		 */
 509		if (!em || cur < em->start ||
 510		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
 511		    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
 512			free_extent_map(em);
 513			unlock_extent(tree, cur, page_end, NULL);
 514			unlock_page(page);
 515			put_page(page);
 516			break;
 517		}
 518		free_extent_map(em);
 519
 520		if (page->index == end_index) {
 521			size_t zero_offset = offset_in_page(isize);
 
 522
 523			if (zero_offset) {
 524				int zeros;
 525				zeros = PAGE_SIZE - zero_offset;
 526				memzero_page(page, zero_offset, zeros);
 527			}
 528		}
 529
 530		add_size = min(em->start + em->len, page_end + 1) - cur;
 531		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
 532		if (ret != add_size) {
 533			unlock_extent(tree, cur, page_end, NULL);
 534			unlock_page(page);
 535			put_page(page);
 536			break;
 537		}
 538		/*
 539		 * If it's subpage, we also need to increase its
 540		 * subpage::readers number, as at endio we will decrease
  541		 * subpage::readers and unlock the page.
 542		 */
 543		if (fs_info->sectorsize < PAGE_SIZE)
 544			btrfs_subpage_start_reader(fs_info, page_folio(page),
 545						   cur, add_size);
 546		put_page(page);
 547		cur += add_size;
 548	}
 549	return 0;
 550}
 551
 552/*
 553 * for a compressed read, the bio we get passed has all the inode pages
 554 * in it.  We don't actually do IO on those pages but allocate new ones
 555 * to hold the compressed pages on disk.
 556 *
 557 * bio->bi_iter.bi_sector points to the compressed extent on disk
 558 * bio->bi_io_vec points to all of the inode pages
 559 *
 560 * After the compressed pages are read, we copy the bytes into the
 561 * bio we were passed and then call the bio end_io calls
 562 */
 563void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 
 564{
 565	struct btrfs_inode *inode = bbio->inode;
 566	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 567	struct extent_map_tree *em_tree = &inode->extent_tree;
 568	struct compressed_bio *cb;
 569	unsigned int compressed_len;
 570	u64 file_offset = bbio->file_offset;
 571	u64 em_len;
 572	u64 em_start;
 573	struct extent_map *em;
 574	unsigned long pflags;
 575	int memstall = 0;
 576	blk_status_t ret;
 577	int ret2;
 
 
 578
 579	/* we need the actual starting offset of this extent in the file */
 580	read_lock(&em_tree->lock);
 581	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
 
 
 582	read_unlock(&em_tree->lock);
 583	if (!em) {
 584		ret = BLK_STS_IOERR;
 585		goto out;
 586	}
 587
 588	ASSERT(extent_map_is_compressed(em));
 589	compressed_len = em->block_len;
 590
 591	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
  592				  end_bbio_compressed_read);
 593
 594	cb->start = em->orig_start;
 595	em_len = em->len;
 596	em_start = em->start;
 597
 598	cb->len = bbio->bio.bi_iter.bi_size;
 599	cb->compressed_len = compressed_len;
 600	cb->compress_type = extent_map_compression(em);
 601	cb->orig_bbio = bbio;
 602
 603	free_extent_map(em);
 
 604
 605	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 606	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
 607	if (!cb->compressed_pages) {
 608		ret = BLK_STS_RESOURCE;
 609		goto out_free_bio;
 610	}
 611
 612	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages, 0);
 613	if (ret2) {
 614		ret = BLK_STS_RESOURCE;
 615		goto out_free_compressed_pages;
 616	}
 
 
 617
 618	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
 619			 &pflags);
 620
  621	/* include any pages we added in add_ra_bio_pages */
 622	cb->len = bbio->bio.bi_iter.bi_size;
 623	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
 624	btrfs_add_compressed_bio_pages(cb);
 625
 626	if (memstall)
 627		psi_memstall_leave(&pflags);
 
 
 628
 629	btrfs_submit_bio(&cb->bbio, 0);
 630	return;
 631
 632out_free_compressed_pages:
 633	kfree(cb->compressed_pages);
 634out_free_bio:
 635	bio_put(&cb->bbio.bio);
 636out:
 637	btrfs_bio_end_io(bbio, ret);
 
 638}
 639
 640/*
  641 * The heuristic uses systematic sampling to collect data from the input data
  642 * range; the logic can be tuned by the following constants:
  643 *
  644 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
  645 * @SAMPLING_INTERVAL  - distance between the starts of consecutive samples
 646 */
 647#define SAMPLING_READ_SIZE	(16)
 648#define SAMPLING_INTERVAL	(256)
 649
 650/*
 651 * For statistical analysis of the input data we consider bytes that form a
 652 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 653 * many times the object appeared in the sample.
 654 */
 655#define BUCKET_SIZE		(256)
 656
 657/*
 658 * The size of the sample is based on a statistical sampling rule of thumb.
 659 * The common way is to perform sampling tests as long as the number of
 660 * elements in each cell is at least 5.
 661 *
 662 * Instead of 5, we choose 32 to obtain more accurate results.
 663 * If the data contain the maximum number of symbols, which is 256, we obtain a
 664 * sample size bound by 8192.
 665 *
 666 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 667 * from up to 512 locations.
 668 */
 669#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 670				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
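/*
 * Worked example (editor's sketch, assuming BTRFS_MAX_UNCOMPRESSED is the
 * usual 128KiB mentioned later in this file): 131072 * 16 / 256 = 8192, i.e.
 * the sample buffer is at most 8KiB, matching the bound described above
 * (16 consecutive bytes from up to 512 sampling points).
 */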
 671
 672struct bucket_item {
 673	u32 count;
 674};
 675
 676struct heuristic_ws {
 677	/* Partial copy of input data */
 678	u8 *sample;
 679	u32 sample_size;
 680	/* Buckets store counters for each byte value */
 681	struct bucket_item *bucket;
 682	/* Sorting buffer */
 683	struct bucket_item *bucket_b;
 684	struct list_head list;
 685};
 686
 687static struct workspace_manager heuristic_wsm;
 688
 689static void free_heuristic_ws(struct list_head *ws)
 690{
 691	struct heuristic_ws *workspace;
 692
 693	workspace = list_entry(ws, struct heuristic_ws, list);
 694
 695	kvfree(workspace->sample);
 696	kfree(workspace->bucket);
 697	kfree(workspace->bucket_b);
 698	kfree(workspace);
 699}
 700
 701static struct list_head *alloc_heuristic_ws(unsigned int level)
 702{
 703	struct heuristic_ws *ws;
 704
 705	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 706	if (!ws)
 707		return ERR_PTR(-ENOMEM);
 708
 709	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 710	if (!ws->sample)
 711		goto fail;
 712
 713	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 714	if (!ws->bucket)
 715		goto fail;
 716
 717	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 718	if (!ws->bucket_b)
 719		goto fail;
 720
 721	INIT_LIST_HEAD(&ws->list);
 722	return &ws->list;
 723fail:
 724	free_heuristic_ws(&ws->list);
 725	return ERR_PTR(-ENOMEM);
 726}
 727
 728const struct btrfs_compress_op btrfs_heuristic_compress = {
 729	.workspace_manager = &heuristic_wsm,
 730};
 731
 732static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 733	/* The heuristic is represented as compression type 0 */
 734	&btrfs_heuristic_compress,
 735	&btrfs_zlib_compress,
 736	&btrfs_lzo_compress,
 737	&btrfs_zstd_compress,
 738};
 739
 740static struct list_head *alloc_workspace(int type, unsigned int level)
 741{
 742	switch (type) {
 743	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
 744	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
 745	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
 746	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
 747	default:
 748		/*
 749		 * This can't happen, the type is validated several times
 750		 * before we get here.
 751		 */
 752		BUG();
 753	}
 754}
 755
 756static void free_workspace(int type, struct list_head *ws)
 757{
 758	switch (type) {
 759	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
 760	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
 761	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
 762	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
 763	default:
 764		/*
 765		 * This can't happen, the type is validated several times
 766		 * before we get here.
 767		 */
 768		BUG();
 769	}
 770}
 771
 772static void btrfs_init_workspace_manager(int type)
 773{
 774	struct workspace_manager *wsm;
 775	struct list_head *workspace;
 
 776
 777	wsm = btrfs_compress_op[type]->workspace_manager;
 778	INIT_LIST_HEAD(&wsm->idle_ws);
 779	spin_lock_init(&wsm->ws_lock);
 780	atomic_set(&wsm->total_ws, 0);
 781	init_waitqueue_head(&wsm->ws_wait);
 782
 783	/*
 784	 * Preallocate one workspace for each compression type so we can
 785	 * guarantee forward progress in the worst case
 786	 */
 787	workspace = alloc_workspace(type, 0);
 788	if (IS_ERR(workspace)) {
 789		pr_warn(
 790	"BTRFS: cannot preallocate compression workspace, will try later\n");
 791	} else {
 792		atomic_set(&wsm->total_ws, 1);
 793		wsm->free_ws = 1;
 794		list_add(workspace, &wsm->idle_ws);
 795	}
 796}
 797
 798static void btrfs_cleanup_workspace_manager(int type)
 799{
 800	struct workspace_manager *wsman;
 801	struct list_head *ws;
 
 802
 803	wsman = btrfs_compress_op[type]->workspace_manager;
 804	while (!list_empty(&wsman->idle_ws)) {
 805		ws = wsman->idle_ws.next;
 806		list_del(ws);
 807		free_workspace(type, ws);
 808		atomic_dec(&wsman->total_ws);
 809	}
 810}
 811
 812/*
 813 * This finds an available workspace or allocates a new one.
  814 * If it's not possible to allocate a new one, wait until there is one.
  815 * Preallocation provides a forward progress guarantee and we do not return
  816 * errors.
 817 */
 818struct list_head *btrfs_get_workspace(int type, unsigned int level)
 819{
 820	struct workspace_manager *wsm;
 821	struct list_head *workspace;
 822	int cpus = num_online_cpus();
 
 823	unsigned nofs_flag;
 824	struct list_head *idle_ws;
 825	spinlock_t *ws_lock;
 826	atomic_t *total_ws;
 827	wait_queue_head_t *ws_wait;
 828	int *free_ws;
 829
 830	wsm = btrfs_compress_op[type]->workspace_manager;
 831	idle_ws	 = &wsm->idle_ws;
 832	ws_lock	 = &wsm->ws_lock;
 833	total_ws = &wsm->total_ws;
 834	ws_wait	 = &wsm->ws_wait;
 835	free_ws	 = &wsm->free_ws;
 836
 837again:
 838	spin_lock(ws_lock);
 839	if (!list_empty(idle_ws)) {
 840		workspace = idle_ws->next;
 841		list_del(workspace);
 842		(*free_ws)--;
 843		spin_unlock(ws_lock);
 844		return workspace;
 845
 846	}
 847	if (atomic_read(total_ws) > cpus) {
 848		DEFINE_WAIT(wait);
 849
 850		spin_unlock(ws_lock);
 851		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 852		if (atomic_read(total_ws) > cpus && !*free_ws)
 853			schedule();
 854		finish_wait(ws_wait, &wait);
 855		goto again;
 856	}
 857	atomic_inc(total_ws);
 858	spin_unlock(ws_lock);
 859
 860	/*
 861	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
 862	 * to turn it off here because we might get called from the restricted
 863	 * context of btrfs_compress_bio/btrfs_compress_pages
 864	 */
 865	nofs_flag = memalloc_nofs_save();
 866	workspace = alloc_workspace(type, level);
 867	memalloc_nofs_restore(nofs_flag);
 868
 869	if (IS_ERR(workspace)) {
 870		atomic_dec(total_ws);
 871		wake_up(ws_wait);
 872
 873		/*
 874		 * Do not return the error but go back to waiting. There's a
 875		 * workspace preallocated for each type and the compression
 876		 * time is bounded so we get to a workspace eventually. This
 877		 * makes our caller's life easier.
 878		 *
 879		 * To prevent silent and low-probability deadlocks (when the
 880		 * initial preallocation fails), check if there are any
 881		 * workspaces at all.
 882		 */
 883		if (atomic_read(total_ws) == 0) {
 884			static DEFINE_RATELIMIT_STATE(_rs,
 885					/* once per minute */ 60 * HZ,
 886					/* no burst */ 1);
 887
 888			if (__ratelimit(&_rs)) {
 889				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 890			}
 891		}
 892		goto again;
 893	}
 894	return workspace;
 895}
 896
 897static struct list_head *get_workspace(int type, int level)
 898{
 899	switch (type) {
 900	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
 901	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
 902	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
 903	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
 904	default:
 905		/*
 906		 * This can't happen, the type is validated several times
 907		 * before we get here.
 908		 */
 909		BUG();
 910	}
 911}
 912
 913/*
 914 * put a workspace struct back on the list or free it if we have enough
 915 * idle ones sitting around
 916 */
 917void btrfs_put_workspace(int type, struct list_head *ws)
 
 918{
 919	struct workspace_manager *wsm;
 920	struct list_head *idle_ws;
 921	spinlock_t *ws_lock;
 922	atomic_t *total_ws;
 923	wait_queue_head_t *ws_wait;
 924	int *free_ws;
 925
 926	wsm = btrfs_compress_op[type]->workspace_manager;
 927	idle_ws	 = &wsm->idle_ws;
 928	ws_lock	 = &wsm->ws_lock;
 929	total_ws = &wsm->total_ws;
 930	ws_wait	 = &wsm->ws_wait;
 931	free_ws	 = &wsm->free_ws;
 932
 933	spin_lock(ws_lock);
 934	if (*free_ws <= num_online_cpus()) {
 935		list_add(ws, idle_ws);
 936		(*free_ws)++;
 937		spin_unlock(ws_lock);
 938		goto wake;
 939	}
 940	spin_unlock(ws_lock);
 941
 942	free_workspace(type, ws);
 943	atomic_dec(total_ws);
 944wake:
 945	cond_wake_up(ws_wait);
 946}
 947
 948static void put_workspace(int type, struct list_head *ws)
 949{
 950	switch (type) {
 951	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
 952	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
 953	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
 954	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
 955	default:
 956		/*
 957		 * This can't happen, the type is validated several times
 958		 * before we get here.
 959		 */
 960		BUG();
 961	}
 962}
 963
 964/*
 965 * Adjust @level according to the limits of the compression algorithm or
  966 * fall back to the default.
 967 */
 968static unsigned int btrfs_compress_set_level(int type, unsigned level)
 969{
 970	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
 
 971
 972	if (level == 0)
 973		level = ops->default_level;
 974	else
 975		level = min(level, ops->max_level);
 
 
 976
 977	return level;
 978}
 979
 980/*
 981 * Given an address space and start and length, compress the bytes into @pages
 982 * that are allocated on demand.
 983 *
  984 * @type_level is the encoded algorithm and level, where level 0 means whatever
  985 * default the algorithm chooses and is opaque here;
  986 * - the compression algorithm is stored in bits 0-3
  987 * - the level is stored in bits 4-7
  988 *
  989 * @out_pages is an in/out parameter, holds the maximum number of pages to
  990 * allocate and returns the number of actually allocated pages
  991 *
  992 * @total_in is used to return the number of bytes actually read.  It
  993 * may be smaller than the input length if we had to exit early because we
  994 * ran out of room in the pages array or because we crossed the
  995 * max_out threshold.
  996 *
  997 * @total_out is an in/out parameter, must be set to the input length and will
  998 * also be used to return the total number of compressed bytes
 999 */
1000int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1001			 u64 start, struct page **pages,
1002			 unsigned long *out_pages,
1003			 unsigned long *total_in,
1004			 unsigned long *total_out)
1005{
1006	int type = btrfs_compress_type(type_level);
1007	int level = btrfs_compress_level(type_level);
1008	struct list_head *workspace;
1009	int ret;
 
1010
1011	level = btrfs_compress_set_level(type, level);
1012	workspace = get_workspace(type, level);
1013	ret = compression_compress_pages(type, workspace, mapping, start, pages,
1014					 out_pages, total_in, total_out);
1015	put_workspace(type, workspace);
1016	return ret;
1017}
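/*
 * Editor's sketch of the @type_level encoding described above.  The real
 * helpers live in compression.h; btrfs_compress_type()/btrfs_compress_level()
 * are assumed to unpack it along these lines:
 *
 *	type  = type_level & 0xF;	   // bits 0-3: BTRFS_COMPRESS_*
 *	level = (type_level >> 4) & 0xF;   // bits 4-7: level, 0 = default
 *
 * So zstd (type 3) at level 3 would be encoded as 0x33, and plain
 * "compress=zlib" with no explicit level as 0x01.
 */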
1018
1019static int btrfs_decompress_bio(struct compressed_bio *cb)
1020{
1021	struct list_head *workspace;
1022	int ret;
1023	int type = cb->compress_type;
1024
1025	workspace = get_workspace(type, 0);
1026	ret = compression_decompress_bio(workspace, cb);
1027	put_workspace(type, workspace);
1028
1029	if (!ret)
1030		zero_fill_bio(&cb->orig_bbio->bio);
1031	return ret;
1032}
1033
1034/*
 1035 * A less complex decompression routine.  Our compressed data fits in a
 1036 * single page, and we want to read a single page out of it.
 1037 * @dest_pgoff tells us the offset into the destination page we're interested in.
1038 */
1039int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
1040		     unsigned long dest_pgoff, size_t srclen, size_t destlen)
1041{
1042	struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
1043	struct list_head *workspace;
1044	const u32 sectorsize = fs_info->sectorsize;
1045	int ret;
1046
1047	/*
1048	 * The full destination page range should not exceed the page size.
1049	 * And the @destlen should not exceed sectorsize, as this is only called for
1050	 * inline file extents, which should not exceed sectorsize.
1051	 */
1052	ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
1053
1054	workspace = get_workspace(type, 0);
1055	ret = compression_decompress(type, workspace, data_in, dest_page,
1056				     dest_pgoff, srclen, destlen);
1057	put_workspace(type, workspace);
1058
 
1059	return ret;
1060}
1061
1062int __init btrfs_init_compress(void)
1063{
1064	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
1065			offsetof(struct compressed_bio, bbio.bio),
1066			BIOSET_NEED_BVECS))
1067		return -ENOMEM;
1068
1069	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
1070	if (!compr_pool.shrinker)
1071		return -ENOMEM;
1072
1073	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1074	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1075	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1076	zstd_init_workspace_manager();
1077
1078	spin_lock_init(&compr_pool.lock);
1079	INIT_LIST_HEAD(&compr_pool.list);
1080	compr_pool.count = 0;
 1081	/* 128K / 4K = 32, for 8 threads that's 256 pages. */
1082	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
1083	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
1084	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
1085	compr_pool.shrinker->batch = 32;
1086	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
1087	shrinker_register(compr_pool.shrinker);
1088
1089	return 0;
1090}
1091
1092void __cold btrfs_exit_compress(void)
1093{
1094	/* For now scan drains all pages and does not touch the parameters. */
1095	btrfs_compr_pool_scan(NULL, NULL);
1096	shrinker_free(compr_pool.shrinker);
1097
1098	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1099	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1100	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1101	zstd_cleanup_workspace_manager();
1102	bioset_exit(&btrfs_compressed_bioset);
1103}
1104
1105/*
1106 * Copy decompressed data from working buffer to pages.
1107 *
1108 * @buf:		The decompressed data buffer
1109 * @buf_len:		The decompressed data length
1110 * @decompressed:	Number of bytes that are already decompressed inside the
1111 * 			compressed extent
1112 * @cb:			The compressed extent descriptor
1113 * @orig_bio:		The original bio that the caller wants to read for
1114 *
1115 * An easier to understand graph is like below:
1116 *
1117 * 		|<- orig_bio ->|     |<- orig_bio->|
1118 * 	|<-------      full decompressed extent      ----->|
1119 * 	|<-----------    @cb range   ---->|
1120 * 	|			|<-- @buf_len -->|
1121 * 	|<--- @decompressed --->|
1122 *
 1123 * Note that @cb can be a subpage of the full decompressed extent, but
 1124 * @cb->start always has the same value as the file offset of the full
 1125 * decompressed extent.
1126 *
 1127 * When reading a compressed extent, we have to read the full compressed
 1128 * extent, while @orig_bio may only want part of the range.
 1129 * Thus this function will ensure that only data covered by @orig_bio is
 1130 * copied to it.
1131 *
1132 * Return 0 if we have copied all needed contents for @orig_bio.
1133 * Return >0 if we need continue decompress.
1134 */
1135int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1136			      struct compressed_bio *cb, u32 decompressed)
1137{
1138	struct bio *orig_bio = &cb->orig_bbio->bio;
1139	/* Offset inside the full decompressed extent */
1140	u32 cur_offset;
1141
1142	cur_offset = decompressed;
1143	/* The main loop to do the copy */
1144	while (cur_offset < decompressed + buf_len) {
1145		struct bio_vec bvec;
1146		size_t copy_len;
1147		u32 copy_start;
1148		/* Offset inside the full decompressed extent */
1149		u32 bvec_offset;
1150
1151		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1152		/*
1153		 * cb->start may underflow, but subtracting that value can still
1154		 * give us correct offset inside the full decompressed extent.
1155		 */
1156		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1157
1158		/* Haven't reached the bvec range, exit */
1159		if (decompressed + buf_len <= bvec_offset)
1160			return 1;
1161
1162		copy_start = max(cur_offset, bvec_offset);
1163		copy_len = min(bvec_offset + bvec.bv_len,
1164			       decompressed + buf_len) - copy_start;
1165		ASSERT(copy_len);
1166
1167		/*
1168		 * Extra range check to ensure we didn't go beyond
1169		 * @buf + @buf_len.
 
 
1170		 */
1171		ASSERT(copy_start - decompressed < buf_len);
1172		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1173			       buf + copy_start - decompressed, copy_len);
1174		cur_offset += copy_len;
1175
1176		bio_advance(orig_bio, copy_len);
1177		/* Finished the bio */
1178		if (!orig_bio->bi_iter.bi_size)
1179			return 0;
1180	}
 
1181	return 1;
1182}
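/*
 * Editor's illustration with made-up numbers: if cb->start is 16K, the
 * current bvec maps file offset 20K (so bvec_offset is 4K) and the working
 * buffer holds decompressed bytes [0, 4K), then decompressed + buf_len equals
 * bvec_offset and the loop above returns 1, asking the caller to decompress
 * more data before anything can be copied into the bvec.
 */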
1183
1184/*
1185 * Shannon Entropy calculation
1186 *
1187 * Pure byte distribution analysis fails to determine compressibility of data.
1188 * Try calculating entropy to estimate the average minimum number of bits
1189 * needed to encode the sampled data.
1190 *
1191 * For convenience, return the percentage of needed bits, instead of amount of
1192 * bits directly.
1193 *
 1194 * @ENTROPY_LVL_ACCEPTABLE - below that threshold, the sample has low byte
 1195 *			    entropy and can be compressible with high probability
1196 *
1197 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1198 *
1199 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1200 */
 1201#define ENTROPY_LVL_ACCEPTABLE		(65)
1202#define ENTROPY_LVL_HIGH		(80)
1203
1204/*
 1205 * For increased precision in the shannon_entropy calculation,
 1206 * let's do pow(n, M) to preserve more digits after the decimal point:
1207 *
1208 * - maximum int bit length is 64
1209 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1210 * - 13 * 4 = 52 < 64		-> M = 4
1211 *
1212 * So use pow(n, 4).
1213 */
1214static inline u32 ilog2_w(u64 n)
1215{
1216	return ilog2(n * n * n * n);
1217}
1218
1219static u32 shannon_entropy(struct heuristic_ws *ws)
1220{
1221	const u32 entropy_max = 8 * ilog2_w(2);
1222	u32 entropy_sum = 0;
1223	u32 p, p_base, sz_base;
1224	u32 i;
1225
1226	sz_base = ilog2_w(ws->sample_size);
1227	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1228		p = ws->bucket[i].count;
1229		p_base = ilog2_w(p);
1230		entropy_sum += p * (sz_base - p_base);
1231	}
1232
1233	entropy_sum /= ws->sample_size;
1234	return entropy_sum * 100 / entropy_max;
1235}
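/*
 * Editor's note on the math above: the classic Shannon entropy of the sample
 * is H = -sum(p_i * log2(p_i)) with p_i = count_i / sample_size.  Using
 * log2(p_i) = log2(count_i) - log2(sample_size), the loop computes
 * sum(count_i * (log2(sample_size) - log2(count_i))) / sample_size, with both
 * logarithms scaled by ilog2_w() (roughly 4 * log2()) for extra fixed-point
 * precision, and finally expresses the result as a percentage of the 8-bit
 * maximum.
 */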
1236
1237#define RADIX_BASE		4U
1238#define COUNTERS_SIZE		(1U << RADIX_BASE)
1239
1240static u8 get4bits(u64 num, int shift) {
1241	u8 low4bits;
1242
1243	num >>= shift;
1244	/* Reverse order */
1245	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1246	return low4bits;
1247}
1248
1249/*
1250 * Use 4 bits as radix base
1251 * Use 16 u32 counters for calculating new position in buf array
1252 *
1253 * @array     - array that will be sorted
1254 * @array_buf - buffer array to store sorting results
1255 *              must be equal in size to @array
1256 * @num       - array size
1257 */
1258static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1259		       int num)
1260{
1261	u64 max_num;
1262	u64 buf_num;
1263	u32 counters[COUNTERS_SIZE];
1264	u32 new_addr;
1265	u32 addr;
1266	int bitlen;
1267	int shift;
1268	int i;
1269
1270	/*
 1271	 * Try to avoid useless loop iterations for small numbers stored in big
1272	 * counters.  Example: 48 33 4 ... in 64bit array
1273	 */
1274	max_num = array[0].count;
1275	for (i = 1; i < num; i++) {
1276		buf_num = array[i].count;
1277		if (buf_num > max_num)
1278			max_num = buf_num;
1279	}
1280
1281	buf_num = ilog2(max_num);
1282	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1283
1284	shift = 0;
1285	while (shift < bitlen) {
1286		memset(counters, 0, sizeof(counters));
1287
1288		for (i = 0; i < num; i++) {
1289			buf_num = array[i].count;
1290			addr = get4bits(buf_num, shift);
1291			counters[addr]++;
1292		}
1293
1294		for (i = 1; i < COUNTERS_SIZE; i++)
1295			counters[i] += counters[i - 1];
1296
1297		for (i = num - 1; i >= 0; i--) {
1298			buf_num = array[i].count;
1299			addr = get4bits(buf_num, shift);
1300			counters[addr]--;
1301			new_addr = counters[addr];
1302			array_buf[new_addr] = array[i];
1303		}
1304
1305		shift += RADIX_BASE;
1306
1307		/*
1308		 * Normal radix expects to move data from a temporary array, to
1309		 * the main one.  But that requires some CPU time. Avoid that
1310		 * by doing another sort iteration to original array instead of
1311		 * memcpy()
1312		 */
1313		memset(counters, 0, sizeof(counters));
1314
1315		for (i = 0; i < num; i ++) {
1316			buf_num = array_buf[i].count;
1317			addr = get4bits(buf_num, shift);
1318			counters[addr]++;
1319		}
1320
1321		for (i = 1; i < COUNTERS_SIZE; i++)
1322			counters[i] += counters[i - 1];
1323
1324		for (i = num - 1; i >= 0; i--) {
1325			buf_num = array_buf[i].count;
1326			addr = get4bits(buf_num, shift);
1327			counters[addr]--;
1328			new_addr = counters[addr];
1329			array[new_addr] = array_buf[i];
1330		}
1331
1332		shift += RADIX_BASE;
1333	}
1334}
1335
1336/*
1337 * Size of the core byte set - how many bytes cover 90% of the sample
1338 *
1339 * There are several types of structured binary data that use nearly all byte
1340 * values. The distribution can be uniform and counts in all buckets will be
1341 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1342 *
1343 * Other possibility is normal (Gaussian) distribution, where the data could
1344 * be potentially compressible, but we have to take a few more steps to decide
1345 * how much.
1346 *
 1347 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 1348 *                       the compression algorithm can easily fix that
 1349 * @BYTE_CORE_SET_HIGH - data has a uniform distribution and with high
 1350 *                       probability is not compressible
1351 */
1352#define BYTE_CORE_SET_LOW		(64)
1353#define BYTE_CORE_SET_HIGH		(200)
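/*
 * Editor's worked example (hypothetical numbers): with a full 8192-byte
 * sample the 90% threshold used below is 8192 * 90 / 100 = 7372.  If the 64
 * most frequent byte values already account for more than 7372 occurrences,
 * the core set is small and the data is considered easily compressible; if
 * the threshold still has not been crossed after 200 values, the distribution
 * is close to uniform and the data is probably incompressible.
 */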
1354
1355static int byte_core_set_size(struct heuristic_ws *ws)
1356{
1357	u32 i;
1358	u32 coreset_sum = 0;
1359	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1360	struct bucket_item *bucket = ws->bucket;
1361
1362	/* Sort in reverse order */
1363	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1364
1365	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1366		coreset_sum += bucket[i].count;
1367
1368	if (coreset_sum > core_set_threshold)
1369		return i;
1370
1371	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1372		coreset_sum += bucket[i].count;
1373		if (coreset_sum > core_set_threshold)
1374			break;
1375	}
1376
1377	return i;
1378}
1379
1380/*
1381 * Count byte values in buckets.
1382 * This heuristic can detect textual data (configs, xml, json, html, etc).
 1383 * Because in most text-like data the byte set is restricted to a limited
 1384 * number of possible characters, and that restriction in most cases makes
 1385 * the data easy to compress.
1386 *
1387 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1388 *	less - compressible
1389 *	more - need additional analysis
1390 */
1391#define BYTE_SET_THRESHOLD		(64)
1392
1393static u32 byte_set_size(const struct heuristic_ws *ws)
1394{
1395	u32 i;
1396	u32 byte_set_size = 0;
1397
1398	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1399		if (ws->bucket[i].count > 0)
1400			byte_set_size++;
1401	}
1402
1403	/*
1404	 * Continue collecting count of byte values in buckets.  If the byte
 1405	 * set size is bigger than the threshold, it's pointless to continue,
1406	 * the detection technique would fail for this type of data.
1407	 */
1408	for (; i < BUCKET_SIZE; i++) {
1409		if (ws->bucket[i].count > 0) {
1410			byte_set_size++;
1411			if (byte_set_size > BYTE_SET_THRESHOLD)
1412				return byte_set_size;
1413		}
1414	}
1415
1416	return byte_set_size;
1417}
1418
1419static bool sample_repeated_patterns(struct heuristic_ws *ws)
1420{
1421	const u32 half_of_sample = ws->sample_size / 2;
1422	const u8 *data = ws->sample;
1423
1424	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1425}
1426
1427static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1428				     struct heuristic_ws *ws)
1429{
1430	struct page *page;
1431	u64 index, index_end;
1432	u32 i, curr_sample_pos;
1433	u8 *in_data;
1434
1435	/*
1436	 * Compression handles the input data by chunks of 128KiB
1437	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1438	 *
1439	 * We do the same for the heuristic and loop over the whole range.
1440	 *
1441	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1442	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1443	 */
1444	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1445		end = start + BTRFS_MAX_UNCOMPRESSED;
1446
1447	index = start >> PAGE_SHIFT;
1448	index_end = end >> PAGE_SHIFT;
1449
1450	/* Don't miss unaligned end */
1451	if (!PAGE_ALIGNED(end))
1452		index_end++;
1453
1454	curr_sample_pos = 0;
1455	while (index < index_end) {
1456		page = find_get_page(inode->i_mapping, index);
1457		in_data = kmap_local_page(page);
1458		/* Handle case where the start is not aligned to PAGE_SIZE */
1459		i = start % PAGE_SIZE;
1460		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1461			/* Don't sample any garbage from the last page */
1462			if (start > end - SAMPLING_READ_SIZE)
1463				break;
1464			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1465					SAMPLING_READ_SIZE);
1466			i += SAMPLING_INTERVAL;
1467			start += SAMPLING_INTERVAL;
1468			curr_sample_pos += SAMPLING_READ_SIZE;
1469		}
1470		kunmap_local(in_data);
1471		put_page(page);
1472
1473		index++;
1474	}
1475
1476	ws->sample_size = curr_sample_pos;
1477}
1478
1479/*
1480 * Compression heuristic.
1481 *
 1482 * For now it's a naive and optimistic 'return true', we'll extend the logic to
1483 * quickly (compared to direct compression) detect data characteristics
1484 * (compressible/incompressible) to avoid wasting CPU time on incompressible
1485 * data.
1486 *
1487 * The following types of analysis can be performed:
1488 * - detect mostly zero data
1489 * - detect data with low "byte set" size (text, etc)
1490 * - detect data with low/high "core byte" set
1491 *
1492 * Return non-zero if the compression should be done, 0 otherwise.
1493 */
1494int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1495{
1496	struct list_head *ws_list = get_workspace(0, 0);
1497	struct heuristic_ws *ws;
1498	u32 i;
1499	u8 byte;
1500	int ret = 0;
1501
1502	ws = list_entry(ws_list, struct heuristic_ws, list);
1503
1504	heuristic_collect_sample(inode, start, end, ws);
1505
1506	if (sample_repeated_patterns(ws)) {
1507		ret = 1;
1508		goto out;
1509	}
1510
1511	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1512
1513	for (i = 0; i < ws->sample_size; i++) {
1514		byte = ws->sample[i];
1515		ws->bucket[byte].count++;
1516	}
1517
1518	i = byte_set_size(ws);
1519	if (i < BYTE_SET_THRESHOLD) {
1520		ret = 2;
1521		goto out;
1522	}
1523
1524	i = byte_core_set_size(ws);
1525	if (i <= BYTE_CORE_SET_LOW) {
1526		ret = 3;
1527		goto out;
1528	}
1529
1530	if (i >= BYTE_CORE_SET_HIGH) {
1531		ret = 0;
1532		goto out;
1533	}
1534
1535	i = shannon_entropy(ws);
 1536	if (i <= ENTROPY_LVL_ACCEPTABLE) {
1537		ret = 4;
1538		goto out;
1539	}
1540
1541	/*
1542	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1543	 * needed to give green light to compression.
1544	 *
1545	 * For now just assume that compression at that level is not worth the
1546	 * resources because:
1547	 *
1548	 * 1. it is possible to defrag the data later
1549	 *
1550	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1551	 * values, every bucket has counter at level ~54. The heuristic would
1552	 * be confused. This can happen when data have some internal repeated
1553	 * patterns like "abbacbbc...". This can be detected by analyzing
1554	 * pairs of bytes, which is too costly.
1555	 */
1556	if (i < ENTROPY_LVL_HIGH) {
1557		ret = 5;
1558		goto out;
1559	} else {
1560		ret = 0;
1561		goto out;
1562	}
1563
1564out:
1565	put_workspace(0, ws_list);
1566	return ret;
1567}
1568
1569/*
 1570 * Convert the compression suffix (e.g. after "zlib", starting with ":") to a
 1571 * level; an unrecognized string sets the default level.
1572 */
1573unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1574{
1575	unsigned int level = 0;
1576	int ret;
1577
1578	if (!type)
1579		return 0;
1580
1581	if (str[0] == ':') {
1582		ret = kstrtouint(str + 1, 10, &level);
1583		if (ret)
1584			level = 0;
1585	}
1586
1587	level = btrfs_compress_set_level(type, level);
1588
1589	return level;
1590}
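/*
 * Editor's usage sketch (the mount-option strings are assumptions, not from
 * this file): for "compress=zstd:3" the caller passes the ":3" suffix and
 * gets level 3 back; for "compress=zlib" there is no ':' suffix, level stays
 * 0 and btrfs_compress_set_level() substitutes the algorithm's default level.
 */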
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2008 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/bio.h>
   8#include <linux/buffer_head.h>
   9#include <linux/file.h>
  10#include <linux/fs.h>
  11#include <linux/pagemap.h>
 
  12#include <linux/highmem.h>
 
  13#include <linux/time.h>
  14#include <linux/init.h>
  15#include <linux/string.h>
  16#include <linux/backing-dev.h>
  17#include <linux/mpage.h>
  18#include <linux/swap.h>
  19#include <linux/writeback.h>
  20#include <linux/bit_spinlock.h>
  21#include <linux/slab.h>
  22#include <linux/sched/mm.h>
  23#include <linux/log2.h>
 
 
 
  24#include "ctree.h"
 
  25#include "disk-io.h"
  26#include "transaction.h"
  27#include "btrfs_inode.h"
  28#include "volumes.h"
  29#include "ordered-data.h"
  30#include "compression.h"
  31#include "extent_io.h"
  32#include "extent_map.h"
 
 
 
 
 
 
  33
  34static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
  35
  36const char* btrfs_compress_type2str(enum btrfs_compression_type type)
  37{
  38	switch (type) {
  39	case BTRFS_COMPRESS_ZLIB:
  40	case BTRFS_COMPRESS_LZO:
  41	case BTRFS_COMPRESS_ZSTD:
  42	case BTRFS_COMPRESS_NONE:
  43		return btrfs_compress_types[type];
 
 
  44	}
  45
  46	return NULL;
  47}
  48
  49static int btrfs_decompress_bio(struct compressed_bio *cb);
 
 
 
  50
  51static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
  52				      unsigned long disk_size)
 
  53{
  54	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
  55
  56	return sizeof(struct compressed_bio) +
  57		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 
 
 
 
  58}
  59
  60static int check_compressed_csum(struct btrfs_inode *inode,
  61				 struct compressed_bio *cb,
  62				 u64 disk_start)
  63{
  64	int ret;
  65	struct page *page;
  66	unsigned long i;
  67	char *kaddr;
  68	u32 csum;
  69	u32 *cb_sum = &cb->sums;
 
 
 
 
 
 
 
  70
  71	if (inode->flags & BTRFS_INODE_NODATASUM)
  72		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  73
  74	for (i = 0; i < cb->nr_pages; i++) {
  75		page = cb->compressed_pages[i];
  76		csum = ~(u32)0;
  77
  78		kaddr = kmap_atomic(page);
  79		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
  80		btrfs_csum_final(csum, (u8 *)&csum);
  81		kunmap_atomic(kaddr);
  82
  83		if (csum != *cb_sum) {
  84			btrfs_print_data_csum_error(inode, disk_start, csum,
  85					*cb_sum, cb->mirror_num);
  86			ret = -EIO;
  87			goto fail;
  88		}
  89		cb_sum++;
  90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  91	}
  92	ret = 0;
  93fail:
  94	return ret;
  95}
  96
  97/* when we finish reading compressed pages from the disk, we
  98 * decompress them and then run the bio end_io routines on the
  99 * decompressed pages (in the inode address space).
 100 *
 101 * This allows the checksumming and other IO error handling routines
 102 * to work normally
 103 *
 104 * The compressed pages are freed here, and it must be run
 105 * in process context
 106 */
 107static void end_compressed_bio_read(struct bio *bio)
 108{
 109	struct compressed_bio *cb = bio->bi_private;
 110	struct inode *inode;
 111	struct page *page;
 112	unsigned long index;
 113	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
 114	int ret = 0;
 115
 116	if (bio->bi_status)
 117		cb->errors = 1;
 118
 119	/* if there are more bios still pending for this compressed
 120	 * extent, just exit
 121	 */
 122	if (!refcount_dec_and_test(&cb->pending_bios))
 123		goto out;
 
 
 
 
 
 
 
 
 
 124
 125	/*
 126	 * Record the correct mirror_num in cb->orig_bio so that
 127	 * read-repair can work properly.
 
 128	 */
 129	ASSERT(btrfs_io_bio(cb->orig_bio));
 130	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
 131	cb->mirror_num = mirror;
 
 132
 133	/*
 134	 * Some IO in this cb have failed, just skip checksum as there
 135	 * is no way it could be correct.
 136	 */
 137	if (cb->errors == 1)
 138		goto csum_failed;
 
 
 
 
 139
 140	inode = cb->inode;
 141	ret = check_compressed_csum(BTRFS_I(inode), cb,
 142				    (u64)bio->bi_iter.bi_sector << 9);
 143	if (ret)
 144		goto csum_failed;
 
 145
 146	/* ok, we're the last bio for this extent, lets start
 147	 * the decompression.
 148	 */
 149	ret = btrfs_decompress_bio(cb);
 150
 151csum_failed:
 152	if (ret)
 153		cb->errors = 1;
 154
 155	/* release the compressed pages */
 156	index = 0;
 157	for (index = 0; index < cb->nr_pages; index++) {
 158		page = cb->compressed_pages[index];
 159		page->mapping = NULL;
 160		put_page(page);
 161	}
 162
 163	/* do io completion on the original bio */
 164	if (cb->errors) {
 165		bio_io_error(cb->orig_bio);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 166	} else {
 167		int i;
 168		struct bio_vec *bvec;
 
 
 
 
 
 169
 170		/*
 171		 * we have verified the checksum already, set page
 172		 * checked so the end_io handlers know about it
 173		 */
 174		ASSERT(!bio_flagged(bio, BIO_CLONED));
 175		bio_for_each_segment_all(bvec, cb->orig_bio, i)
 176			SetPageChecked(bvec->bv_page);
 
 177
 178		bio_endio(cb->orig_bio);
 179	}
 180
 181	/* finally free the cb struct */
 182	kfree(cb->compressed_pages);
 183	kfree(cb);
 184out:
 185	bio_put(bio);
 186}
 187
 188/*
 189 * Clear the writeback bits on all of the file
 190 * pages for a compressed write
 191 */
 192static noinline void end_compressed_writeback(struct inode *inode,
 193					      const struct compressed_bio *cb)
 194{
 
 
 195	unsigned long index = cb->start >> PAGE_SHIFT;
 196	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
 197	struct page *pages[16];
 198	unsigned long nr_pages = end_index - index + 1;
 199	int i;
 200	int ret;
 201
 202	if (cb->errors)
 203		mapping_set_error(inode->i_mapping, -EIO);
 
 
 
 
 
 
 
 
 204
 205	while (nr_pages > 0) {
 206		ret = find_get_pages_contig(inode->i_mapping, index,
 207				     min_t(unsigned long,
 208				     nr_pages, ARRAY_SIZE(pages)), pages);
 209		if (ret == 0) {
 210			nr_pages -= 1;
 211			index += 1;
 212			continue;
 213		}
 214		for (i = 0; i < ret; i++) {
 215			if (cb->errors)
 216				SetPageError(pages[i]);
 217			end_page_writeback(pages[i]);
 218			put_page(pages[i]);
 219		}
 220		nr_pages -= ret;
 221		index += ret;
 222	}
 223	/* the inode may be gone now */
 224}
 225
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 226/*
 227 * do the cleanup once all the compressed pages hit the disk.
 228 * This will clear writeback on the file pages and free the compressed
 229 * pages.
 230 *
 231 * This also calls the writeback end hooks for the file pages so that
 232 * metadata and checksums can be updated in the file.
 233 */
 234static void end_compressed_bio_write(struct bio *bio)
 235{
 236	struct extent_io_tree *tree;
 237	struct compressed_bio *cb = bio->bi_private;
 238	struct inode *inode;
 239	struct page *page;
 240	unsigned long index;
 241
 242	if (bio->bi_status)
 243		cb->errors = 1;
 244
 245	/* if there are more bios still pending for this compressed
 246	 * extent, just exit
 247	 */
 248	if (!refcount_dec_and_test(&cb->pending_bios))
 249		goto out;
 250
 251	/* ok, we're the last bio for this extent, step one is to
 252	 * call back into the FS and do all the end_io operations
 253	 */
 254	inode = cb->inode;
 255	tree = &BTRFS_I(inode)->io_tree;
 256	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 257	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
 258					 cb->start,
 259					 cb->start + cb->len - 1,
 260					 NULL,
 261					 bio->bi_status ?
 262					 BLK_STS_OK : BLK_STS_NOTSUPP);
 263	cb->compressed_pages[0]->mapping = NULL;
 264
 265	end_compressed_writeback(inode, cb);
 266	/* note, our inode could be gone now */
 267
 268	/*
 269	 * release the compressed pages, these came from alloc_page and
 270	 * are not attached to the inode at all
 271	 */
 272	index = 0;
 273	for (index = 0; index < cb->nr_pages; index++) {
 274		page = cb->compressed_pages[index];
 275		page->mapping = NULL;
 276		put_page(page);
 277	}
 278
 279	/* finally free the cb struct */
 280	kfree(cb->compressed_pages);
 281	kfree(cb);
 282out:
 283	bio_put(bio);
 284}
 285
 286/*
 287 * worker function to build and submit bios for previously compressed pages.
 288 * The corresponding pages in the inode should be marked for writeback
 289 * and the compressed pages should have a reference on them for dropping
 290 * when the IO is complete.
 291 *
 292 * This also checksums the file bytes and gets things ready for
 293 * the end io hooks.
 294 */
 295blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 296				 unsigned long len, u64 disk_start,
 297				 unsigned long compressed_len,
 298				 struct page **compressed_pages,
 299				 unsigned long nr_pages,
 300				 unsigned int write_flags)
 301{
 302	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 303	struct bio *bio = NULL;
 304	struct compressed_bio *cb;
 305	unsigned long bytes_left;
 306	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 307	int pg_index = 0;
 308	struct page *page;
 309	u64 first_byte = disk_start;
 310	struct block_device *bdev;
 311	blk_status_t ret;
 312	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 313
 314	WARN_ON(start & ((u64)PAGE_SIZE - 1));
 315	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 316	if (!cb)
 317		return BLK_STS_RESOURCE;
 318	refcount_set(&cb->pending_bios, 0);
 319	cb->errors = 0;
 320	cb->inode = inode;
 321	cb->start = start;
 322	cb->len = len;
 323	cb->mirror_num = 0;
 324	cb->compressed_pages = compressed_pages;
 325	cb->compressed_len = compressed_len;
 326	cb->orig_bio = NULL;
 327	cb->nr_pages = nr_pages;
 328
 329	bdev = fs_info->fs_devices->latest_bdev;
 330
 331	bio = btrfs_bio_alloc(bdev, first_byte);
 332	bio->bi_opf = REQ_OP_WRITE | write_flags;
 333	bio->bi_private = cb;
 334	bio->bi_end_io = end_compressed_bio_write;
 335	refcount_set(&cb->pending_bios, 1);
 336
 337	/* create and submit bios for the compressed pages */
 338	bytes_left = compressed_len;
 339	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
 340		int submit = 0;
 341
 342		page = compressed_pages[pg_index];
 343		page->mapping = inode->i_mapping;
 344		if (bio->bi_iter.bi_size)
 345			submit = io_tree->ops->merge_bio_hook(page, 0,
 346							   PAGE_SIZE,
 347							   bio, 0);
 348
 349		page->mapping = NULL;
 350		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
 351		    PAGE_SIZE) {
 352			/*
 353			 * inc the count before we submit the bio so
 354			 * we know the end IO handler won't happen before
 355			 * we inc the count.  Otherwise, the cb might get
 356			 * freed before we're done setting it up
 357			 */
 358			refcount_inc(&cb->pending_bios);
 359			ret = btrfs_bio_wq_end_io(fs_info, bio,
 360						  BTRFS_WQ_ENDIO_DATA);
 361			BUG_ON(ret); /* -ENOMEM */
 362
 363			if (!skip_sum) {
 364				ret = btrfs_csum_one_bio(inode, bio, start, 1);
 365				BUG_ON(ret); /* -ENOMEM */
 366			}
 367
 368			ret = btrfs_map_bio(fs_info, bio, 0, 1);
 369			if (ret) {
 370				bio->bi_status = ret;
 371				bio_endio(bio);
 372			}
 373
 374			bio = btrfs_bio_alloc(bdev, first_byte);
 375			bio->bi_opf = REQ_OP_WRITE | write_flags;
 376			bio->bi_private = cb;
 377			bio->bi_end_io = end_compressed_bio_write;
 378			bio_add_page(bio, page, PAGE_SIZE, 0);
 379		}
 380		if (bytes_left < PAGE_SIZE) {
 381			btrfs_info(fs_info,
 382					"bytes left %lu compress len %lu nr %lu",
 383			       bytes_left, cb->compressed_len, cb->nr_pages);
 384		}
 385		bytes_left -= PAGE_SIZE;
 386		first_byte += PAGE_SIZE;
 387		cond_resched();
 388	}
 389
 390	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 391	BUG_ON(ret); /* -ENOMEM */
 392
 393	if (!skip_sum) {
 394		ret = btrfs_csum_one_bio(inode, bio, start, 1);
 395		BUG_ON(ret); /* -ENOMEM */
 396	}
 397
 398	ret = btrfs_map_bio(fs_info, bio, 0, 1);
 399	if (ret) {
 400		bio->bi_status = ret;
 401		bio_endio(bio);
 402	}
 403
 404	return 0;
 405}
 406
 407static u64 bio_end_offset(struct bio *bio)
 408{
 409	struct bio_vec *last = bio_last_bvec_all(bio);
 410
 411	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
 412}
 413
 414static noinline int add_ra_bio_pages(struct inode *inode,
 415				     u64 compressed_end,
 416				     struct compressed_bio *cb)
 417{
 418	unsigned long end_index;
 419	unsigned long pg_index;
 420	u64 last_offset;
 421	u64 isize = i_size_read(inode);
 422	int ret;
 423	struct page *page;
 424	unsigned long nr_pages = 0;
 425	struct extent_map *em;
 426	struct address_space *mapping = inode->i_mapping;
 427	struct extent_map_tree *em_tree;
 428	struct extent_io_tree *tree;
 429	u64 end;
 430	int misses = 0;
 431
 432	last_offset = bio_end_offset(cb->orig_bio);
 433	em_tree = &BTRFS_I(inode)->extent_tree;
 434	tree = &BTRFS_I(inode)->io_tree;
 435
 436	if (isize == 0)
 437		return 0;
 438
 439	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 440
 441	while (last_offset < compressed_end) {
 442		pg_index = last_offset >> PAGE_SHIFT;
 443
 444		if (pg_index > end_index)
 445			break;
 446
 447		rcu_read_lock();
 448		page = radix_tree_lookup(&mapping->i_pages, pg_index);
 449		rcu_read_unlock();
 450		if (page && !radix_tree_exceptional_entry(page)) {
 451			misses++;
 452			if (misses > 4)
 453				break;
 454			goto next;
 455		}
 456
 457		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 458								 ~__GFP_FS));
 459		if (!page)
 460			break;
 461
 462		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 463			put_page(page);
 464			goto next;
 465		}
 466
 467		end = last_offset + PAGE_SIZE - 1;
 468		/*
 469		 * at this point, we have a locked page in the page cache
 470		 * for these bytes in the file.  But, we have to make
 471		 * sure they map to this compressed extent on disk.
 472		 */
 473		set_page_extent_mapped(page);
 474		lock_extent(tree, last_offset, end);
 475		read_lock(&em_tree->lock);
 476		em = lookup_extent_mapping(em_tree, last_offset,
 477					   PAGE_SIZE);
 478		read_unlock(&em_tree->lock);
 479
 480		if (!em || last_offset < em->start ||
 481		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
 482		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 483			free_extent_map(em);
 484			unlock_extent(tree, last_offset, end);
 485			unlock_page(page);
 486			put_page(page);
 487			break;
 488		}
 489		free_extent_map(em);
 490
 491		if (page->index == end_index) {
 492			char *userpage;
 493			size_t zero_offset = isize & (PAGE_SIZE - 1);
 494
 495			if (zero_offset) {
 496				int zeros;
 497				zeros = PAGE_SIZE - zero_offset;
 498				userpage = kmap_atomic(page);
 499				memset(userpage + zero_offset, 0, zeros);
 500				flush_dcache_page(page);
 501				kunmap_atomic(userpage);
 502			}
 503		}
 504
 505		ret = bio_add_page(cb->orig_bio, page,
 506				   PAGE_SIZE, 0);
 507
 508		if (ret == PAGE_SIZE) {
 509			nr_pages++;
 510			put_page(page);
 511		} else {
 512			unlock_extent(tree, last_offset, end);
 513			unlock_page(page);
 514			put_page(page);
 515			break;
 516		}
 517next:
 518		last_offset += PAGE_SIZE;
 519	}
 520	return 0;
 521}
 522
 523/*
 524 * for a compressed read, the bio we get passed has all the inode pages
 525 * in it.  We don't actually do IO on those pages but allocate new ones
 526 * to hold the compressed pages on disk.
 527 *
 528 * bio->bi_iter.bi_sector points to the compressed extent on disk
 529 * bio->bi_io_vec points to all of the inode pages
 530 *
 531 * After the compressed pages are read, we copy the bytes into the
 532 * bio we were passed and then call the bio end_io calls
 533 */
 534blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 535				 int mirror_num, unsigned long bio_flags)
 536{
 537	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 538	struct extent_io_tree *tree;
 539	struct extent_map_tree *em_tree;
 540	struct compressed_bio *cb;
 541	unsigned long compressed_len;
 542	unsigned long nr_pages;
 543	unsigned long pg_index;
 544	struct page *page;
 545	struct block_device *bdev;
 546	struct bio *comp_bio;
 547	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
 548	u64 em_len;
 549	u64 em_start;
 550	struct extent_map *em;
 551	blk_status_t ret = BLK_STS_RESOURCE;
 552	int faili = 0;
 553	u32 *sums;
 554
 555	tree = &BTRFS_I(inode)->io_tree;
 556	em_tree = &BTRFS_I(inode)->extent_tree;
 557
 558	/* we need the actual starting offset of this extent in the file */
 559	read_lock(&em_tree->lock);
 560	em = lookup_extent_mapping(em_tree,
 561				   page_offset(bio_first_page_all(bio)),
 562				   PAGE_SIZE);
 563	read_unlock(&em_tree->lock);
 564	if (!em)
 565		return BLK_STS_IOERR;
 566
 567	compressed_len = em->block_len;
 568	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 569	if (!cb)
 570		goto out;
 571
 572	refcount_set(&cb->pending_bios, 0);
 573	cb->errors = 0;
 574	cb->inode = inode;
 575	cb->mirror_num = mirror_num;
 576	sums = &cb->sums;
 577
 578	cb->start = em->orig_start;
 579	em_len = em->len;
 580	em_start = em->start;
 581
 582	free_extent_map(em);
 583	em = NULL;
 584
 585	cb->len = bio->bi_iter.bi_size;
 586	cb->compressed_len = compressed_len;
 587	cb->compress_type = extent_compress_type(bio_flags);
 588	cb->orig_bio = bio;
 589
 590	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
 591	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 592				       GFP_NOFS);
 593	if (!cb->compressed_pages)
 594		goto fail1;
 595
 596	bdev = fs_info->fs_devices->latest_bdev;
 597
 598	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 599		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
 600							      __GFP_HIGHMEM);
 601		if (!cb->compressed_pages[pg_index]) {
 602			faili = pg_index - 1;
 603			ret = BLK_STS_RESOURCE;
 604			goto fail2;
 605		}
 606	}
 607	faili = nr_pages - 1;
 608	cb->nr_pages = nr_pages;
 609
 610	add_ra_bio_pages(inode, em_start + em_len, cb);
 611
612	/* include any pages we added in add_ra_bio_pages */
 613	cb->len = bio->bi_iter.bi_size;
 614
 615	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
616	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
 617	comp_bio->bi_private = cb;
 618	comp_bio->bi_end_io = end_compressed_bio_read;
 619	refcount_set(&cb->pending_bios, 1);
 620
 621	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 622		int submit = 0;
 623
 624		page = cb->compressed_pages[pg_index];
 625		page->mapping = inode->i_mapping;
 626		page->index = em_start >> PAGE_SHIFT;
 627
 628		if (comp_bio->bi_iter.bi_size)
 629			submit = tree->ops->merge_bio_hook(page, 0,
 630							PAGE_SIZE,
 631							comp_bio, 0);
 632
 633		page->mapping = NULL;
 634		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
 635		    PAGE_SIZE) {
 636			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
 637						  BTRFS_WQ_ENDIO_DATA);
 638			BUG_ON(ret); /* -ENOMEM */
 639
 640			/*
 641			 * inc the count before we submit the bio so
 642			 * we know the end IO handler won't happen before
 643			 * we inc the count.  Otherwise, the cb might get
 644			 * freed before we're done setting it up
 645			 */
 646			refcount_inc(&cb->pending_bios);
 647
 648			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 649				ret = btrfs_lookup_bio_sums(inode, comp_bio,
 650							    sums);
 651				BUG_ON(ret); /* -ENOMEM */
 652			}
 653			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
 654					     fs_info->sectorsize);
 655
 656			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 657			if (ret) {
 658				comp_bio->bi_status = ret;
 659				bio_endio(comp_bio);
 660			}
 661
 662			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
 663			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
 664			comp_bio->bi_private = cb;
 665			comp_bio->bi_end_io = end_compressed_bio_read;
 666
 667			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
 668		}
 669		cur_disk_byte += PAGE_SIZE;
 670	}
 671
 672	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 673	BUG_ON(ret); /* -ENOMEM */
 674
 675	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 676		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 677		BUG_ON(ret); /* -ENOMEM */
 678	}
 679
 680	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 681	if (ret) {
 682		comp_bio->bi_status = ret;
 683		bio_endio(comp_bio);
 684	}
 685
 686	return 0;
 687
 688fail2:
 689	while (faili >= 0) {
 690		__free_page(cb->compressed_pages[faili]);
 691		faili--;
 692	}
 693
 694	kfree(cb->compressed_pages);
 695fail1:
 696	kfree(cb);
 697out:
 698	free_extent_map(em);
 699	return ret;
 700}
 701
 702/*
 703 * Heuristic uses systematic sampling to collect data from the input data
704 * range; the logic can be tuned by the following constants:
 705 *
706 * @SAMPLING_READ_SIZE - how many bytes will be copied from the data range for each sample
707 * @SAMPLING_INTERVAL  - distance between the starts of consecutive samples
 708 */
 709#define SAMPLING_READ_SIZE	(16)
 710#define SAMPLING_INTERVAL	(256)
 711
 712/*
 713 * For statistical analysis of the input data we consider bytes that form a
714 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 715 * many times the object appeared in the sample.
 716 */
 717#define BUCKET_SIZE		(256)
 718
 719/*
 720 * The size of the sample is based on a statistical sampling rule of thumb.
 721 * The common way is to perform sampling tests as long as the number of
 722 * elements in each cell is at least 5.
 723 *
 724 * Instead of 5, we choose 32 to obtain more accurate results.
 725 * If the data contain the maximum number of symbols, which is 256, we obtain a
 726 * sample size bound by 8192.
 727 *
 728 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 729 * from up to 512 locations.
 730 */
 731#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
 732				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
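As a quick sanity check of the sizes quoted above: 16 bytes sampled every 256 bytes over a 128KiB chunk (BTRFS_MAX_UNCOMPRESSED, per the comment in heuristic_collect_sample() further down) gives 512 sample locations and an 8KiB sample. A standalone userspace sketch of that arithmetic, with the constants copied locally and the 128KiB value assumed:

#include <assert.h>
#include <stdio.h>

#define SAMPLING_READ_SIZE	16
#define SAMPLING_INTERVAL	256
#define BTRFS_MAX_UNCOMPRESSED	(128 * 1024)	/* assumed, per the heuristic comment */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

int main(void)
{
	int locations = BTRFS_MAX_UNCOMPRESSED / SAMPLING_INTERVAL;

	/* 128KiB / 256 = 512 sample locations, 16 bytes each = 8KiB total */
	assert(locations == 512);
	assert(MAX_SAMPLE_SIZE == 8192);
	printf("sample: %d locations x %d bytes = %d bytes\n",
	       locations, SAMPLING_READ_SIZE, MAX_SAMPLE_SIZE);
	return 0;
}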
 733
 734struct bucket_item {
 735	u32 count;
 736};
 737
 738struct heuristic_ws {
 739	/* Partial copy of input data */
 740	u8 *sample;
 741	u32 sample_size;
 742	/* Buckets store counters for each byte value */
 743	struct bucket_item *bucket;
 744	/* Sorting buffer */
 745	struct bucket_item *bucket_b;
 746	struct list_head list;
 747};
 748
 749static void free_heuristic_ws(struct list_head *ws)
 750{
 751	struct heuristic_ws *workspace;
 752
 753	workspace = list_entry(ws, struct heuristic_ws, list);
 754
 755	kvfree(workspace->sample);
 756	kfree(workspace->bucket);
 757	kfree(workspace->bucket_b);
 758	kfree(workspace);
 759}
 760
 761static struct list_head *alloc_heuristic_ws(void)
 762{
 763	struct heuristic_ws *ws;
 764
 765	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
 766	if (!ws)
 767		return ERR_PTR(-ENOMEM);
 768
 769	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
 770	if (!ws->sample)
 771		goto fail;
 772
 773	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
 774	if (!ws->bucket)
 775		goto fail;
 776
 777	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
 778	if (!ws->bucket_b)
 779		goto fail;
 780
 781	INIT_LIST_HEAD(&ws->list);
 782	return &ws->list;
 783fail:
 784	free_heuristic_ws(&ws->list);
 785	return ERR_PTR(-ENOMEM);
 786}
 787
 788struct workspaces_list {
 789	struct list_head idle_ws;
 790	spinlock_t ws_lock;
 791	/* Number of free workspaces */
 792	int free_ws;
 793	/* Total number of allocated workspaces */
 794	atomic_t total_ws;
 795	/* Waiters for a free workspace */
 796	wait_queue_head_t ws_wait;
 797};
 798
 799static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
 800
 801static struct workspaces_list btrfs_heuristic_ws;
 802
 803static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 804	&btrfs_zlib_compress,
 805	&btrfs_lzo_compress,
 806	&btrfs_zstd_compress,
 807};
 808
 809void __init btrfs_init_compress(void)
 810{
 811	struct list_head *workspace;
 812	int i;
 813
 814	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
 815	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
 816	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
 817	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
 818
 819	workspace = alloc_heuristic_ws();
 820	if (IS_ERR(workspace)) {
 821		pr_warn(
 822	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
 823	} else {
 824		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
 825		btrfs_heuristic_ws.free_ws = 1;
 826		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
 827	}
 828
 829	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
 830		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
 831		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
 832		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
 833		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
 834
 835		/*
 836		 * Preallocate one workspace for each compression type so
 837		 * we can guarantee forward progress in the worst case
 838		 */
 839		workspace = btrfs_compress_op[i]->alloc_workspace();
 840		if (IS_ERR(workspace)) {
 841			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
 842		} else {
 843			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
 844			btrfs_comp_ws[i].free_ws = 1;
 845			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
 846		}
 847	}
 848}
 849
 850/*
 851 * This finds an available workspace or allocates a new one.
 852 * If it's not possible to allocate a new one, waits until there's one.
853 * Preallocation provides a forward progress guarantee and we do not return
 854 * errors.
 855 */
 856static struct list_head *__find_workspace(int type, bool heuristic)
 857{
 858	struct list_head *workspace;
 859	int cpus = num_online_cpus();
 860	int idx = type - 1;
 861	unsigned nofs_flag;
 862	struct list_head *idle_ws;
 863	spinlock_t *ws_lock;
 864	atomic_t *total_ws;
 865	wait_queue_head_t *ws_wait;
 866	int *free_ws;
 867
 868	if (heuristic) {
 869		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
 870		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
 871		total_ws = &btrfs_heuristic_ws.total_ws;
 872		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
 873		free_ws	 = &btrfs_heuristic_ws.free_ws;
 874	} else {
 875		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
 876		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
 877		total_ws = &btrfs_comp_ws[idx].total_ws;
 878		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
 879		free_ws	 = &btrfs_comp_ws[idx].free_ws;
 880	}
 881
 882again:
 883	spin_lock(ws_lock);
 884	if (!list_empty(idle_ws)) {
 885		workspace = idle_ws->next;
 886		list_del(workspace);
 887		(*free_ws)--;
 888		spin_unlock(ws_lock);
 889		return workspace;
 890
 891	}
 892	if (atomic_read(total_ws) > cpus) {
 893		DEFINE_WAIT(wait);
 894
 895		spin_unlock(ws_lock);
 896		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 897		if (atomic_read(total_ws) > cpus && !*free_ws)
 898			schedule();
 899		finish_wait(ws_wait, &wait);
 900		goto again;
 901	}
 902	atomic_inc(total_ws);
 903	spin_unlock(ws_lock);
 904
 905	/*
 906	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
 907	 * to turn it off here because we might get called from the restricted
 908	 * context of btrfs_compress_bio/btrfs_compress_pages
 909	 */
 910	nofs_flag = memalloc_nofs_save();
 911	if (heuristic)
 912		workspace = alloc_heuristic_ws();
 913	else
 914		workspace = btrfs_compress_op[idx]->alloc_workspace();
 915	memalloc_nofs_restore(nofs_flag);
 916
 917	if (IS_ERR(workspace)) {
 918		atomic_dec(total_ws);
 919		wake_up(ws_wait);
 920
 921		/*
 922		 * Do not return the error but go back to waiting. There's a
 923		 * workspace preallocated for each type and the compression
 924		 * time is bounded so we get to a workspace eventually. This
 925		 * makes our caller's life easier.
 926		 *
 927		 * To prevent silent and low-probability deadlocks (when the
 928		 * initial preallocation fails), check if there are any
 929		 * workspaces at all.
 930		 */
 931		if (atomic_read(total_ws) == 0) {
 932			static DEFINE_RATELIMIT_STATE(_rs,
 933					/* once per minute */ 60 * HZ,
 934					/* no burst */ 1);
 935
 936			if (__ratelimit(&_rs)) {
 937				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 938			}
 939		}
 940		goto again;
 941	}
 942	return workspace;
 943}
 944
 945static struct list_head *find_workspace(int type)
 946{
 947	return __find_workspace(type, false);
 948}
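The pool behaviour implemented above can be sketched in plain userspace C to make the design easier to see: hand out an idle workspace if one exists, allocate a new one while the total stays below a limit (the number of online CPUs in the kernel code), and otherwise sleep until somebody returns one, never failing. The sketch uses pthreads and hypothetical names (ws_pool, pool_get, pool_put); it simplifies the kernel version, which also frees surplus workspaces and rate-limits a warning when none exist at all:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical userspace analog of the workspace pool; not kernel code. */
struct ws_pool {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	void *idle[64];		/* idle workspaces, assumes limit <= 64 */
	int free_ws;		/* number of idle workspaces */
	int total_ws;		/* total currently allocated */
	int limit;		/* cap, e.g. number of online CPUs */
};

static void *pool_get(struct ws_pool *p, void *(*alloc_ws)(void))
{
	void *ws;

	pthread_mutex_lock(&p->lock);
	for (;;) {
		if (p->free_ws > 0) {
			ws = p->idle[--p->free_ws];
			pthread_mutex_unlock(&p->lock);
			return ws;
		}
		if (p->total_ws < p->limit) {
			p->total_ws++;
			pthread_mutex_unlock(&p->lock);
			ws = alloc_ws();
			if (ws)
				return ws;
			/*
			 * Allocation failed: give the slot back and retry.
			 * Like the kernel code, this relies on at least one
			 * preallocated workspace to guarantee progress.
			 */
			pthread_mutex_lock(&p->lock);
			p->total_ws--;
			continue;
		}
		/* pool is at its limit: sleep until pool_put() signals */
		pthread_cond_wait(&p->cond, &p->lock);
	}
}

static void pool_put(struct ws_pool *p, void *ws)
{
	pthread_mutex_lock(&p->lock);
	p->idle[p->free_ws++] = ws;
	pthread_mutex_unlock(&p->lock);
	pthread_cond_signal(&p->cond);
}

static void *demo_alloc(void)
{
	return malloc(64);
}

static struct ws_pool pool = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.limit = 4,
};

int main(void)
{
	void *ws = pool_get(&pool, demo_alloc);

	pool_put(&pool, ws);
	return 0;
}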
 949
 950/*
 951 * put a workspace struct back on the list or free it if we have enough
 952 * idle ones sitting around
 953 */
 954static void __free_workspace(int type, struct list_head *workspace,
 955			     bool heuristic)
 956{
 957	int idx = type - 1;
 958	struct list_head *idle_ws;
 959	spinlock_t *ws_lock;
 960	atomic_t *total_ws;
 961	wait_queue_head_t *ws_wait;
 962	int *free_ws;
 963
 964	if (heuristic) {
 965		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
 966		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
 967		total_ws = &btrfs_heuristic_ws.total_ws;
 968		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
 969		free_ws	 = &btrfs_heuristic_ws.free_ws;
 970	} else {
 971		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
 972		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
 973		total_ws = &btrfs_comp_ws[idx].total_ws;
 974		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
 975		free_ws	 = &btrfs_comp_ws[idx].free_ws;
 976	}
 977
 978	spin_lock(ws_lock);
 979	if (*free_ws <= num_online_cpus()) {
 980		list_add(workspace, idle_ws);
 981		(*free_ws)++;
 982		spin_unlock(ws_lock);
 983		goto wake;
 984	}
 985	spin_unlock(ws_lock);
 986
 987	if (heuristic)
 988		free_heuristic_ws(workspace);
 989	else
 990		btrfs_compress_op[idx]->free_workspace(workspace);
 991	atomic_dec(total_ws);
 992wake:
 993	/*
 994	 * Make sure counter is updated before we wake up waiters.
 995	 */
 996	smp_mb();
 997	if (waitqueue_active(ws_wait))
 998		wake_up(ws_wait);
 999}
1000
1001static void free_workspace(int type, struct list_head *ws)
1002{
1003	return __free_workspace(type, ws, false);
1004}
1005
1006/*
1007 * cleanup function for module exit
1008 */
1009static void free_workspaces(void)
1010{
1011	struct list_head *workspace;
1012	int i;
1013
1014	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
1015		workspace = btrfs_heuristic_ws.idle_ws.next;
1016		list_del(workspace);
1017		free_heuristic_ws(workspace);
1018		atomic_dec(&btrfs_heuristic_ws.total_ws);
1019	}
1020
1021	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
1022		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
1023			workspace = btrfs_comp_ws[i].idle_ws.next;
1024			list_del(workspace);
1025			btrfs_compress_op[i]->free_workspace(workspace);
1026			atomic_dec(&btrfs_comp_ws[i].total_ws);
1027		}
1028	}
1029}
1030
1031/*
1032 * Given an address space and start and length, compress the bytes into @pages
1033 * that are allocated on demand.
1034 *
1035 * @type_level is encoded algorithm and level, where level 0 means whatever
1036 * default the algorithm chooses and is opaque here;
1037 * - compression algo are 0-3
1038 * - the level are bits 4-7
1039 *
1040 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1041 * and returns number of actually allocated pages
1042 *
1043 * @total_in is used to return the number of bytes actually read.  It
1044 * may be smaller than the input length if we had to exit early because we
1045 * ran out of room in the pages array or because we cross the
1046 * max_out threshold.
1047 *
1048 * @total_out is an in/out parameter, must be set to the input length and will
1049 * be also used to return the total number of compressed bytes
1050 *
1051 * @max_out tells us the max number of bytes that we're allowed to
1052 * stuff into pages
1053 */
1054int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1055			 u64 start, struct page **pages,
1056			 unsigned long *out_pages,
1057			 unsigned long *total_in,
1058			 unsigned long *total_out)
1059{
1060	struct list_head *workspace;
1061	int ret;
1062	int type = type_level & 0xF;
1063
1064	workspace = find_workspace(type);
1065
1066	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
1067	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
1068						      start, pages,
1069						      out_pages,
1070						      total_in, total_out);
1071	free_workspace(type, workspace);
1072	return ret;
1073}
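The @type_level packing described in the comment above (compression type in the low four bits, level in the high four bits) can be illustrated with a tiny standalone program; pack_type_level() and the unpack helpers are hypothetical, only the `& 0xF` mask mirrors btrfs_compress_pages():

#include <assert.h>

/* hypothetical helpers illustrating the type_level packing */
static unsigned int pack_type_level(unsigned int type, unsigned int level)
{
	return (type & 0xF) | ((level & 0xF) << 4);
}

static unsigned int unpack_type(unsigned int type_level)
{
	return type_level & 0xF;	/* same mask as btrfs_compress_pages() */
}

static unsigned int unpack_level(unsigned int type_level)
{
	return (type_level >> 4) & 0xF;
}

int main(void)
{
	/* e.g. zlib (BTRFS_COMPRESS_ZLIB == 1) at level 9 */
	unsigned int tl = pack_type_level(1, 9);

	assert(unpack_type(tl) == 1);
	assert(unpack_level(tl) == 9);
	return 0;
}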
1074
1075/*
1076 * pages_in is an array of pages with compressed data.
1077 *
1078 * disk_start is the starting logical offset of this array in the file
1079 *
1080 * orig_bio contains the pages from the file that we want to decompress into
1081 *
1082 * srclen is the number of bytes in pages_in
1083 *
1084 * The basic idea is that we have a bio that was created by readpages.
1085 * The pages in the bio are for the uncompressed data, and they may not
1086 * be contiguous.  They all correspond to the range of bytes covered by
1087 * the compressed extent.
1088 */
1089static int btrfs_decompress_bio(struct compressed_bio *cb)
1090{
1091	struct list_head *workspace;
1092	int ret;
1093	int type = cb->compress_type;
1094
1095	workspace = find_workspace(type);
1096	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
1097	free_workspace(type, workspace);
1098
1099	return ret;
1100}
1101
1102/*
1103 * a less complex decompression routine.  Our compressed data fits in a
1104 * single page, and we want to read a single page out of it.
1105 * start_byte tells us the offset into the compressed data we're interested in
1106 */
1107int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1108		     unsigned long start_byte, size_t srclen, size_t destlen)
1109{
1110	struct list_head *workspace;
1111	int ret;
1112
1113	workspace = find_workspace(type);
1114
1115	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
1116						  dest_page, start_byte,
1117						  srclen, destlen);
1118
1119	free_workspace(type, workspace);
1120	return ret;
1121}
1122
1123void __cold btrfs_exit_compress(void)
1124{
1125	free_workspaces();
1126}
1127
1128/*
1129 * Copy uncompressed data from working buffer to pages.
1130 *
1131 * buf_start is the byte offset, within the uncompressed data, of the start of our working buffer.
1132 *
1133 * total_out is the offset just past the last byte of data in the working buffer
1134 */
1135int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1136			      unsigned long total_out, u64 disk_start,
1137			      struct bio *bio)
1138{
1139	unsigned long buf_offset;
1140	unsigned long current_buf_start;
1141	unsigned long start_byte;
1142	unsigned long prev_start_byte;
1143	unsigned long working_bytes = total_out - buf_start;
1144	unsigned long bytes;
1145	char *kaddr;
1146	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1147
1148	/*
1149	 * start byte is the first byte of the page we're currently
1150	 * copying into relative to the start of the compressed data.
1151	 */
1152	start_byte = page_offset(bvec.bv_page) - disk_start;
1153
1154	/* we haven't yet hit data corresponding to this page */
1155	if (total_out <= start_byte)
1156		return 1;
1157
1158	/*
1159	 * the start of the data we care about is offset into
1160	 * the middle of our working buffer
1161	 */
1162	if (total_out > start_byte && buf_start < start_byte) {
1163		buf_offset = start_byte - buf_start;
1164		working_bytes -= buf_offset;
1165	} else {
1166		buf_offset = 0;
1167	}
1168	current_buf_start = buf_start;
1169
1170	/* copy bytes from the working buffer into the pages */
1171	while (working_bytes > 0) {
1172		bytes = min_t(unsigned long, bvec.bv_len,
1173				PAGE_SIZE - buf_offset);
1174		bytes = min(bytes, working_bytes);
1175
1176		kaddr = kmap_atomic(bvec.bv_page);
1177		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1178		kunmap_atomic(kaddr);
1179		flush_dcache_page(bvec.bv_page);
1180
1181		buf_offset += bytes;
1182		working_bytes -= bytes;
1183		current_buf_start += bytes;
1184
1185		/* check if we need to pick another page */
1186		bio_advance(bio, bytes);
1187		if (!bio->bi_iter.bi_size)
1188			return 0;
1189		bvec = bio_iter_iovec(bio, bio->bi_iter);
1190		prev_start_byte = start_byte;
1191		start_byte = page_offset(bvec.bv_page) - disk_start;
1192
1193		/*
1194		 * We need to make sure we're only adjusting
1195		 * our offset into compression working buffer when
1196		 * we're switching pages.  Otherwise we can incorrectly
1197		 * keep copying when we were actually done.
1198		 */
1199		if (start_byte != prev_start_byte) {
1200			/*
1201			 * make sure our new page is covered by this
1202			 * working buffer
1203			 */
1204			if (total_out <= start_byte)
1205				return 1;
1206
1207			/*
1208			 * the next page in the biovec might not be adjacent
1209			 * to the last page, but it might still be found
1210			 * inside this working buffer. bump our offset pointer
1211			 */
1212			if (total_out > start_byte &&
1213			    current_buf_start < start_byte) {
1214				buf_offset = start_byte - buf_start;
1215				working_bytes = total_out - start_byte;
1216				current_buf_start = buf_start + buf_offset;
1217			}
1218		}
1219	}
1220
1221	return 1;
1222}
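The offset bookkeeping in btrfs_decompress_buf2page() is easier to follow with concrete numbers. A minimal userspace walk-through of just the initial skip calculation, assuming the working buffer holds the first 8KiB of decompressed data and the destination page starts at offset 4KiB:

#include <assert.h>

int main(void)
{
	/*
	 * The working buffer holds decompressed bytes [buf_start, total_out)
	 * of the extent's data; start_byte is where the current destination
	 * page begins within that same uncompressed data.
	 */
	unsigned long buf_start = 0;
	unsigned long total_out = 8192;
	unsigned long start_byte = 4096;	/* second 4KiB page of the extent */
	unsigned long buf_offset = 0;
	unsigned long working_bytes = total_out - buf_start;

	/* total_out <= start_byte would mean "nothing for this page yet" */
	assert(total_out > start_byte);

	/* the page starts in the middle of the buffer: skip the first part */
	if (buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	}

	assert(buf_offset == 4096);	/* bytes that belong to the previous page */
	assert(working_bytes == 4096);	/* bytes left for this and later pages */
	return 0;
}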
1223
1224/*
1225 * Shannon Entropy calculation
1226 *
1227 * Pure byte distribution analysis fails to determine compressibility of data.
1228 * Try calculating entropy to estimate the average minimum number of bits
1229 * needed to encode the sampled data.
1230 *
1231 * For convenience, return the percentage of needed bits, instead of amount of
1232 * bits directly.
1233 *
1234 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1235 *			    and can be compressible with high probability
1236 *
1237 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1238 *
1239 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1240 */
1241#define ENTROPY_LVL_ACEPTABLE		(65)
1242#define ENTROPY_LVL_HIGH		(80)
1243
1244/*
1245 * For increased precision in the shannon_entropy calculation,
1246 * let's do pow(n, M) to keep more digits after the decimal point:
1247 *
1248 * - maximum int bit length is 64
1249 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1250 * - 13 * 4 = 52 < 64		-> M = 4
1251 *
1252 * So use pow(n, 4).
1253 */
1254static inline u32 ilog2_w(u64 n)
1255{
1256	return ilog2(n * n * n * n);
1257}
1258
1259static u32 shannon_entropy(struct heuristic_ws *ws)
1260{
1261	const u32 entropy_max = 8 * ilog2_w(2);
1262	u32 entropy_sum = 0;
1263	u32 p, p_base, sz_base;
1264	u32 i;
1265
1266	sz_base = ilog2_w(ws->sample_size);
1267	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1268		p = ws->bucket[i].count;
1269		p_base = ilog2_w(p);
1270		entropy_sum += p * (sz_base - p_base);
1271	}
1272
1273	entropy_sum /= ws->sample_size;
1274	return entropy_sum * 100 / entropy_max;
1275}
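A worked example of the fixed-point arithmetic above: entropy_max is 8 * ilog2(2^4) = 32, and for a perfectly uniform 8KiB sample (each of the 256 byte values counted 32 times) the estimate comes out at exactly 100%. A standalone userspace sketch, with my_ilog2()/my_ilog2_w() as local stand-ins for the kernel's ilog2() and ilog2_w():

#include <assert.h>
#include <stdint.h>

/* integer log2, standalone equivalent of the kernel's ilog2() */
static uint32_t my_ilog2(uint64_t n)
{
	uint32_t r = 0;

	while (n >>= 1)
		r++;
	return r;
}

static uint32_t my_ilog2_w(uint64_t n)
{
	return my_ilog2(n * n * n * n);	/* pow(n, 4) for extra precision */
}

int main(void)
{
	const uint32_t entropy_max = 8 * my_ilog2_w(2);	/* = 32 */
	uint32_t sample_size = 8192;
	uint32_t sz_base = my_ilog2_w(sample_size);	/* ilog2(8192^4) = 52 */
	uint64_t entropy_sum = 0;
	int i;

	/* perfectly uniform sample: every byte value appears 32 times */
	for (i = 0; i < 256; i++)
		entropy_sum += 32 * (sz_base - my_ilog2_w(32));

	entropy_sum /= sample_size;
	assert(entropy_sum * 100 / entropy_max == 100);	/* maximum entropy */
	return 0;
}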
1276
1277#define RADIX_BASE		4U
1278#define COUNTERS_SIZE		(1U << RADIX_BASE)
1279
1280static u8 get4bits(u64 num, int shift) {
1281	u8 low4bits;
1282
1283	num >>= shift;
1284	/* Reverse order */
1285	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1286	return low4bits;
1287}
1288
1289/*
1290 * Use 4 bits as radix base
1291 * Use 16 u32 counters for calculating new position in buf array
1292 *
1293 * @array     - array that will be sorted
1294 * @array_buf - buffer array to store sorting results
1295 *              must be equal in size to @array
1296 * @num       - array size
1297 */
1298static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1299		       int num)
1300{
1301	u64 max_num;
1302	u64 buf_num;
1303	u32 counters[COUNTERS_SIZE];
1304	u32 new_addr;
1305	u32 addr;
1306	int bitlen;
1307	int shift;
1308	int i;
1309
1310	/*
1311	 * Try to avoid useless loop iterations for small numbers stored in big
1312	 * counters.  Example: 48 33 4 ... in a 64bit array
1313	 */
1314	max_num = array[0].count;
1315	for (i = 1; i < num; i++) {
1316		buf_num = array[i].count;
1317		if (buf_num > max_num)
1318			max_num = buf_num;
1319	}
1320
1321	buf_num = ilog2(max_num);
1322	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1323
1324	shift = 0;
1325	while (shift < bitlen) {
1326		memset(counters, 0, sizeof(counters));
1327
1328		for (i = 0; i < num; i++) {
1329			buf_num = array[i].count;
1330			addr = get4bits(buf_num, shift);
1331			counters[addr]++;
1332		}
1333
1334		for (i = 1; i < COUNTERS_SIZE; i++)
1335			counters[i] += counters[i - 1];
1336
1337		for (i = num - 1; i >= 0; i--) {
1338			buf_num = array[i].count;
1339			addr = get4bits(buf_num, shift);
1340			counters[addr]--;
1341			new_addr = counters[addr];
1342			array_buf[new_addr] = array[i];
1343		}
1344
1345		shift += RADIX_BASE;
1346
1347		/*
1348	 * A normal radix sort expects to move data from a temporary array
1349	 * back to the main one, but that costs some CPU time. Avoid that
1350	 * by doing another sort iteration into the original array instead
1351	 * of a memcpy()
1352		 */
1353		memset(counters, 0, sizeof(counters));
1354
1355		for (i = 0; i < num; i ++) {
1356			buf_num = array_buf[i].count;
1357			addr = get4bits(buf_num, shift);
1358			counters[addr]++;
1359		}
1360
1361		for (i = 1; i < COUNTERS_SIZE; i++)
1362			counters[i] += counters[i - 1];
1363
1364		for (i = num - 1; i >= 0; i--) {
1365			buf_num = array_buf[i].count;
1366			addr = get4bits(buf_num, shift);
1367			counters[addr]--;
1368			new_addr = counters[addr];
1369			array[new_addr] = array_buf[i];
1370		}
1371
1372		shift += RADIX_BASE;
1373	}
1374}
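The "Reverse order" step in get4bits() is what turns this otherwise ordinary LSD radix sort into a descending sort, which byte_core_set_size() below depends on: a larger 4-bit digit maps to a smaller code, so sorting the codes ascending sorts the counts descending. A tiny standalone check of that mapping:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RADIX_BASE	4U
#define COUNTERS_SIZE	(1U << RADIX_BASE)

/* same digit extraction as above: larger digits map to smaller codes */
static uint8_t get4bits(uint64_t num, int shift)
{
	num >>= shift;
	return (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
}

int main(void)
{
	/* counts 0x3 and 0xC: the bigger count gets the smaller code ... */
	assert(get4bits(0x3, 0) == 0xC);
	assert(get4bits(0xC, 0) == 0x3);
	/* ... so a stable ascending sort on the codes yields descending counts */
	printf("digit codes reversed: 0x3 -> 0x%X, 0xC -> 0x%X\n",
	       get4bits(0x3, 0), get4bits(0xC, 0));
	return 0;
}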
1375
1376/*
1377 * Size of the core byte set - how many bytes cover 90% of the sample
1378 *
1379 * There are several types of structured binary data that use nearly all byte
1380 * values. The distribution can be uniform and counts in all buckets will be
1381 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1382 *
1383 * Other possibility is normal (Gaussian) distribution, where the data could
1384 * be potentially compressible, but we have to take a few more steps to decide
1385 * how much.
1386 *
1387 * @BYTE_CORE_SET_LOW  - the main part of the byte values repeats frequently,
1388 *                       a compression algo can easily handle that
1389 * @BYTE_CORE_SET_HIGH - the data have a uniform distribution and with high
1390 *                       probability are not compressible
1391 */
1392#define BYTE_CORE_SET_LOW		(64)
1393#define BYTE_CORE_SET_HIGH		(200)
1394
1395static int byte_core_set_size(struct heuristic_ws *ws)
1396{
1397	u32 i;
1398	u32 coreset_sum = 0;
1399	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1400	struct bucket_item *bucket = ws->bucket;
1401
1402	/* Sort in reverse order */
1403	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1404
1405	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1406		coreset_sum += bucket[i].count;
1407
1408	if (coreset_sum > core_set_threshold)
1409		return i;
1410
1411	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1412		coreset_sum += bucket[i].count;
1413		if (coreset_sum > core_set_threshold)
1414			break;
1415	}
1416
1417	return i;
1418}
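Plugging in numbers makes the thresholds above concrete: for an 8KiB sample with a perfectly uniform byte distribution (every value counted 32 times), exceeding 90% coverage takes 231 of the most frequent values, comfortably above BYTE_CORE_SET_HIGH. A standalone sketch of that arithmetic (the loop here only reproduces the coverage math, not the function's exact return value):

#include <assert.h>

int main(void)
{
	/*
	 * Uniform 8KiB sample: each of the 256 byte values occurs 32 times,
	 * so the 90% coverage threshold is 8192 * 90 / 100 = 7372 bytes.
	 */
	unsigned int sample_size = 8192;
	unsigned int threshold = sample_size * 90 / 100;
	unsigned int covered = 0;
	unsigned int core = 0;

	while (covered <= threshold) {
		covered += 32;	/* take the next most frequent value */
		core++;
	}

	/* 231 values are needed, well above BYTE_CORE_SET_HIGH (200) */
	assert(core == 231);
	return 0;
}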
1419
1420/*
1421 * Count byte values in buckets.
1422 * This heuristic can detect textual data (configs, xml, json, html, etc).
1423 * Because in most text-like data byte set is restricted to limited number of
1424 * possible characters, and that restriction in most cases makes data easy to
1425 * compress.
1426 *
1427 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1428 *	less - compressible
1429 *	more - need additional analysis
1430 */
1431#define BYTE_SET_THRESHOLD		(64)
1432
1433static u32 byte_set_size(const struct heuristic_ws *ws)
1434{
1435	u32 i;
1436	u32 byte_set_size = 0;
1437
1438	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1439		if (ws->bucket[i].count > 0)
1440			byte_set_size++;
1441	}
1442
1443	/*
1444	 * Continue collecting count of byte values in buckets.  If the byte
1445	 * set size is bigger than the threshold, it's pointless to continue,
1446	 * the detection technique would fail for this type of data.
1447	 */
1448	for (; i < BUCKET_SIZE; i++) {
1449		if (ws->bucket[i].count > 0) {
1450			byte_set_size++;
1451			if (byte_set_size > BYTE_SET_THRESHOLD)
1452				return byte_set_size;
1453		}
1454	}
1455
1456	return byte_set_size;
1457}
1458
1459static bool sample_repeated_patterns(struct heuristic_ws *ws)
1460{
1461	const u32 half_of_sample = ws->sample_size / 2;
1462	const u8 *data = ws->sample;
1463
1464	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1465}
1466
1467static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1468				     struct heuristic_ws *ws)
1469{
1470	struct page *page;
1471	u64 index, index_end;
1472	u32 i, curr_sample_pos;
1473	u8 *in_data;
1474
1475	/*
1476	 * Compression handles the input data by chunks of 128KiB
1477	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1478	 *
1479	 * We do the same for the heuristic and loop over the whole range.
1480	 *
1481	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1482	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1483	 */
1484	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1485		end = start + BTRFS_MAX_UNCOMPRESSED;
1486
1487	index = start >> PAGE_SHIFT;
1488	index_end = end >> PAGE_SHIFT;
1489
1490	/* Don't miss unaligned end */
1491	if (!IS_ALIGNED(end, PAGE_SIZE))
1492		index_end++;
1493
1494	curr_sample_pos = 0;
1495	while (index < index_end) {
1496		page = find_get_page(inode->i_mapping, index);
1497		in_data = kmap(page);
1498		/* Handle case where the start is not aligned to PAGE_SIZE */
1499		i = start % PAGE_SIZE;
1500		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1501			/* Don't sample any garbage from the last page */
1502			if (start > end - SAMPLING_READ_SIZE)
1503				break;
1504			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1505					SAMPLING_READ_SIZE);
1506			i += SAMPLING_INTERVAL;
1507			start += SAMPLING_INTERVAL;
1508			curr_sample_pos += SAMPLING_READ_SIZE;
1509		}
1510		kunmap(page);
1511		put_page(page);
1512
1513		index++;
1514	}
1515
1516	ws->sample_size = curr_sample_pos;
1517}
1518
1519/*
1520 * Compression heuristic.
1521 *
1522 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1523 * quickly (compared to direct compression) detect data characteristics
1524 * (compressible/incompressible) to avoid wasting CPU time on incompressible
1525 * data.
1526 *
1527 * The following types of analysis can be performed:
1528 * - detect mostly zero data
1529 * - detect data with low "byte set" size (text, etc)
1530 * - detect data with low/high "core byte" set
1531 *
1532 * Return non-zero if the compression should be done, 0 otherwise.
1533 */
1534int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1535{
1536	struct list_head *ws_list = __find_workspace(0, true);
1537	struct heuristic_ws *ws;
1538	u32 i;
1539	u8 byte;
1540	int ret = 0;
1541
1542	ws = list_entry(ws_list, struct heuristic_ws, list);
1543
1544	heuristic_collect_sample(inode, start, end, ws);
1545
1546	if (sample_repeated_patterns(ws)) {
1547		ret = 1;
1548		goto out;
1549	}
1550
1551	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1552
1553	for (i = 0; i < ws->sample_size; i++) {
1554		byte = ws->sample[i];
1555		ws->bucket[byte].count++;
1556	}
1557
1558	i = byte_set_size(ws);
1559	if (i < BYTE_SET_THRESHOLD) {
1560		ret = 2;
1561		goto out;
1562	}
1563
1564	i = byte_core_set_size(ws);
1565	if (i <= BYTE_CORE_SET_LOW) {
1566		ret = 3;
1567		goto out;
1568	}
1569
1570	if (i >= BYTE_CORE_SET_HIGH) {
1571		ret = 0;
1572		goto out;
1573	}
1574
1575	i = shannon_entropy(ws);
1576	if (i <= ENTROPY_LVL_ACEPTABLE) {
1577		ret = 4;
1578		goto out;
1579	}
1580
1581	/*
1582	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1583	 * needed to give green light to compression.
1584	 *
1585	 * For now just assume that compression at that level is not worth the
1586	 * resources because:
1587	 *
1588	 * 1. it is possible to defrag the data later
1589	 *
1590	 * 2. the data would turn out to be hardly compressible, e.g. 150 distinct byte
1591	 * values where every bucket has a count around 54. The heuristic would
1592	 * be confused. This can happen when data have some internal repeated
1593	 * patterns like "abbacbbc...". This can be detected by analyzing
1594	 * pairs of bytes, which is too costly.
1595	 */
1596	if (i < ENTROPY_LVL_HIGH) {
1597		ret = 5;
1598		goto out;
1599	} else {
1600		ret = 0;
1601		goto out;
1602	}
1603
1604out:
1605	__free_workspace(0, ws_list, true);
1606	return ret;
1607}
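For reference, the distinct non-zero return values above only record which check made the decision; per the function comment, callers simply treat non-zero as "compress this range". A hypothetical enum restating that mapping (these names do not exist in the kernel):

enum heuristic_verdict {
	VERDICT_SKIP		= 0,	/* large core byte set or high entropy */
	VERDICT_REPEATED	= 1,	/* the two halves of the sample are identical */
	VERDICT_SMALL_BYTE_SET	= 2,	/* fewer than 64 distinct byte values */
	VERDICT_SMALL_CORE_SET	= 3,	/* at most 64 values cover ~90% of the sample */
	VERDICT_LOW_ENTROPY	= 4,	/* entropy at or below 65% of the maximum */
	VERDICT_MID_ENTROPY	= 5,	/* entropy below 80%, still worth trying */
};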
1608
1609unsigned int btrfs_compress_str2level(const char *str)
1610{
1611	if (strncmp(str, "zlib", 4) != 0)
1612		return 0;
1613
1614	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
1615	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
1616		return str[5] - '0';
1617
1618	return BTRFS_ZLIB_DEFAULT_LEVEL;
1619}
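A quick illustration of the parsing rules above, as a standalone userspace copy of the same logic; DEFAULT_ZLIB_LEVEL here is an assumed stand-in for BTRFS_ZLIB_DEFAULT_LEVEL, which is defined elsewhere in the btrfs headers:

#include <assert.h>
#include <string.h>

#define DEFAULT_ZLIB_LEVEL 3	/* assumed stand-in for BTRFS_ZLIB_DEFAULT_LEVEL */

/* standalone copy of the parsing rules above, for illustration only */
static unsigned int str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return DEFAULT_ZLIB_LEVEL;
}

int main(void)
{
	assert(str2level("lzo") == 0);			/* only zlib carries a level */
	assert(str2level("zlib") == DEFAULT_ZLIB_LEVEL);
	assert(str2level("zlib:7") == 7);
	assert(str2level("zlib:10") == DEFAULT_ZLIB_LEVEL);	/* not of the accepted form */
	return 0;
}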