v3.1
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
	return sizeof(struct compressed_bio) +
		((disk_size + root->sectorsize - 1) / root->sectorsize) *
		csum_size;
}
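
/*
 * Illustrative userspace sketch (not part of the kernel source): the struct
 * above ends in `u32 sums`, the start of a variable-length checksum array,
 * and compressed_bio_size() sizes one allocation to hold the fixed struct
 * plus one checksum slot per on-disk sector (round-up division).  All names
 * below (demo_bio, demo_bio_size) are made up for the example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_bio {
	unsigned long nr_pages;
	uint32_t sums;		/* start of a variable length csum array */
};

static size_t demo_bio_size(unsigned long disk_size, unsigned long sectorsize,
			    uint16_t csum_size)
{
	/* one checksum per sector, rounding the last partial sector up */
	unsigned long nr_sectors = (disk_size + sectorsize - 1) / sectorsize;

	return sizeof(struct demo_bio) + nr_sectors * csum_size;
}

int main(void)
{
	/* 12KiB of compressed data, 4KiB sectors, 4-byte crc32c sums */
	struct demo_bio *cb = malloc(demo_bio_size(12288, 4096, 4));

	if (!cb)
		return 1;
	printf("allocated %zu bytes\n", demo_bio_size(12288, 4096, 4));
	free(cb);
	return 0;
}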

static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
}

static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page, KM_USER0);
		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr, KM_USER0);

		if (csum != *cb_sum) {
			printk(KERN_INFO "btrfs csum failed ino %llu "
			       "extent %llu csum %u "
			       "wanted %u mirror %d\n",
			       (unsigned long long)btrfs_ino(inode),
			       (unsigned long long)disk_start,
			       csum, *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int bio_index = 0;
		struct bio_vec *bvec = cb->orig_bio->bi_io_vec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		while (bio_index < cb->orig_bio->bi_vcnt) {
			SetPageChecked(bvec->bv_page);
			bvec++;
			bio_index++;
		}
		bio_endio(cb->orig_bio, 0);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}
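
/*
 * Illustrative userspace sketch (not part of the kernel source): both end_io
 * handlers in this file use the "last one out" pattern -- every in-flight bio
 * holds one count on cb->pending_bios, and only the caller whose decrement
 * reaches zero runs the completion work.  A minimal analogue with C11
 * atomics (demo_cb and demo_chunk_done are made-up names):
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_cb {
	atomic_int pending;	/* one count per in-flight chunk */
};

/* called once per finished chunk, possibly from different threads */
static void demo_chunk_done(struct demo_cb *cb)
{
	/* atomic_dec_and_test analogue: true only for the final decrement */
	if (atomic_fetch_sub(&cb->pending, 1) != 1)
		return;

	printf("last chunk done, running completion\n");
}

int main(void)
{
	struct demo_cb cb = { .pending = 3 };

	demo_chunk_done(&cb);	/* two early finishers just return */
	demo_chunk_done(&cb);
	demo_chunk_done(&cb);	/* the last caller completes the work */
	return 0;
}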

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline int end_compressed_writeback(struct inode *inode, u64 start,
					     unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
	return 0;
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL, 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb->start, cb->len);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret);

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret);
			}

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret);

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk("bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret);
	}

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret);

	bio_put(bio);
	return 0;
}
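
/*
 * Illustrative userspace sketch (not part of the kernel source): the write
 * path above keeps stuffing pages into a bio until bio_add_page() refuses
 * (device limits), submits the full bio, and starts a fresh one at the next
 * disk byte.  The same batching shape with made-up names and sizes:
 */
#include <stdio.h>

#define DEMO_CHUNK	4096UL	/* stand-in for PAGE_CACHE_SIZE */
#define DEMO_BATCH_MAX	4	/* stand-in for the bio's vector limit */

static void demo_flush(unsigned long first_byte, int count)
{
	printf("submit batch: disk offset %lu, %d chunks\n", first_byte, count);
}

int main(void)
{
	unsigned long first_byte = 0, batch_start = 0;
	int in_batch = 0, i;

	for (i = 0; i < 10; i++) {	/* 10 compressed chunks to write */
		if (in_batch == DEMO_BATCH_MAX) {
			/* batch is full: submit and start a new one */
			demo_flush(batch_start, in_batch);
			batch_start = first_byte;
			in_batch = 0;
		}
		in_batch++;
		first_byte += DEMO_CHUNK;
	}
	if (in_batch)
		demo_flush(batch_start, in_batch);	/* final partial batch */
	return 0;
}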

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_CACHE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
								~__GFP_FS);
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index,
								GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end, GFP_NOFS);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page, KM_USER0);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage, KM_USER0);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	read_unlock(&em_tree->lock);

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
				 PAGE_CACHE_SIZE;
	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index])
			goto fail2;
	}
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_CACHE_SHIFT;

		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							comp_bio, sums);
				BUG_ON(ret);
			}
			sums += (comp_bio->bi_size + root->sectorsize - 1) /
				root->sectorsize;

			ret = btrfs_map_bio(root, READ, comp_bio,
					    mirror_num, 0);
			BUG_ON(ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret);

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret);
	}

	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
	BUG_ON(ret);

	bio_put(comp_bio);
	return 0;

fail2:
	for (pg_index = 0; pg_index < nr_pages; pg_index++)
		free_page((unsigned long)cb->compressed_pages[pg_index]);

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];

struct btrfs_compress_op *btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

int __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&comp_idle_workspace[i]);
		spin_lock_init(&comp_workspace_lock[i]);
		atomic_set(&comp_alloc_workspace[i], 0);
		init_waitqueue_head(&comp_workspace_wait[i]);
	}
	return 0;
}

/*
 * this finds an available workspace or allocates a new one
 * ERR_PTR is returned if things go bad.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
	int *num_workspace			= &comp_num_workspace[idx];
again:
	spin_lock(workspace_lock);
	if (!list_empty(idle_workspace)) {
		workspace = idle_workspace->next;
		list_del(workspace);
		(*num_workspace)--;
		spin_unlock(workspace_lock);
		return workspace;
	}
	if (atomic_read(alloc_workspace) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(workspace_lock);
		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
			schedule();
		finish_wait(workspace_wait, &wait);
		goto again;
	}
	atomic_inc(alloc_workspace);
	spin_unlock(workspace_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(alloc_workspace);
		wake_up(workspace_wait);
	}
	return workspace;
}
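
/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * workspace pool above caps allocations near the CPU count and makes late
 * callers sleep until a workspace is returned.  A hypothetical pthread
 * analogue of the same check/recheck/sleep loop (all demo_* names are
 * made up):
 */
#include <pthread.h>
#include <stdlib.h>

#define DEMO_MAX_WS 4		/* stand-in for num_online_cpus() */

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_wait = PTHREAD_COND_INITIALIZER;
static void *demo_idle[DEMO_MAX_WS];	/* idle workspace stack */
static int demo_num_idle;
static int demo_alloc;			/* workspaces in existence */

static void *demo_find_workspace(void)
{
	void *ws;

	pthread_mutex_lock(&demo_lock);
	for (;;) {
		if (demo_num_idle) {		/* reuse an idle one */
			ws = demo_idle[--demo_num_idle];
			pthread_mutex_unlock(&demo_lock);
			return ws;
		}
		if (demo_alloc < DEMO_MAX_WS)	/* allowed to grow the pool */
			break;
		/* pool full and busy: sleep until free_workspace wakes us */
		pthread_cond_wait(&demo_wait, &demo_lock);
	}
	demo_alloc++;
	pthread_mutex_unlock(&demo_lock);
	return malloc(64);			/* stand-in for alloc_workspace() */
}

static void demo_free_workspace(void *ws)
{
	pthread_mutex_lock(&demo_lock);
	demo_idle[demo_num_idle++] = ws;	/* keep it cached for reuse */
	pthread_mutex_unlock(&demo_lock);
	pthread_cond_signal(&demo_wait);
}

int main(void)
{
	void *ws = demo_find_workspace();

	demo_free_workspace(ws);
	return 0;
}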

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
	int *num_workspace			= &comp_num_workspace[idx];

	spin_lock(workspace_lock);
	if (*num_workspace < num_online_cpus()) {
		list_add_tail(workspace, idle_workspace);
		(*num_workspace)++;
		spin_unlock(workspace_lock);
		goto wake;
	}
	spin_unlock(workspace_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(alloc_workspace);
wake:
	if (waitqueue_active(workspace_wait))
		wake_up(workspace_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&comp_idle_workspace[i])) {
			workspace = comp_idle_workspace[i].next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&comp_alloc_workspace[i]);
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -1;

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, len, pages,
						      nr_dest_pages, out_pages,
						      total_in, total_out,
						      max_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start,
			    struct bio_vec *bvec, int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							 disk_start,
							 bvec, vcnt, srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return -ENOMEM;

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of this working buffer from the start of
 * the decompressed data.
 *
 * total_out is the number of decompressed bytes produced so far, i.e. the
 * offset of the last byte of the buffer from the start of the decompressed
 * data.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *pg_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*pg_index].bv_page;

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
			    PAGE_CACHE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out, KM_USER0);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_CACHE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;

			page_out = bvec[*pg_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
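
/*
 * Illustrative worked example (not part of the kernel source): with 4KiB
 * pages, suppose the compressed extent starts at file offset disk_start = 0
 * and the decompressor has produced total_out = 10000 bytes, of which this
 * working buffer holds bytes [8192, 10000) (buf_start = 8192).  For the
 * destination page at file offset 8192:
 *
 *	start_byte    = 8192 - 0     = 8192  (page begins at buf_start)
 *	working_bytes = 10000 - 8192 = 1808  (bytes available to copy)
 *	buf_offset    = 0                    (page does not start mid-buffer)
 *
 * so 1808 bytes are copied into the page and the routine returns 1 to ask
 * for more decompressed data before the page can be completed.
 */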
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
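
/*
 * Illustrative userspace sketch (not part of the kernel source): the matcher
 * above compares only the first strlen(name) bytes, so an option string such
 * as "zlib:9" (name plus level) also matches.  A standalone re-implementation
 * to demonstrate; demo_* names are made up:
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static const char *const demo_types[] = { "", "zlib", "lzo", "zstd" };

static bool demo_is_valid_type(const char *str, size_t len)
{
	size_t i;

	for (i = 1; i < sizeof(demo_types) / sizeof(demo_types[0]); i++) {
		size_t comp_len = strlen(demo_types[i]);

		if (len < comp_len)
			continue;
		if (!strncmp(demo_types[i], str, comp_len))
			return true;
	}
	return false;
}

int main(void)
{
	printf("%d\n", demo_is_valid_type("zlib", 4));		/* 1: exact name */
	printf("%d\n", demo_is_valid_type("zlib:9", 6));	/* 1: name + level */
	printf("%d\n", demo_is_valid_type("gzip", 4));		/* 0: unknown */
	return 0;
}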

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u8 csum[BTRFS_CSUM_SIZE];
	u8 *cb_sum = cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];

		crypto_shash_init(shash);
		kaddr = kmap_atomic(page);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr);
		crypto_shash_final(shash, (u8 *)&csum);

		if (memcmp(&csum, cb_sum, csum_size)) {
			btrfs_print_data_csum_error(inode, disk_start,
					csum, cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum += csum_size;

	}
	ret = 0;
fail:
	return ret;
}
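
/*
 * Illustrative userspace sketch (not part of the kernel source): the loop
 * above hashes each compressed page and compares the result against the
 * stored sum at a csum_size stride.  The same walk with a toy 1-byte XOR
 * "checksum" standing in for the crypto_shash API; demo_* names are made up:
 */
#include <stddef.h>
#include <string.h>

#define DEMO_PAGE_SIZE	4096
#define DEMO_CSUM_SIZE	1

static unsigned char demo_hash(const unsigned char *data, size_t len)
{
	unsigned char x = 0;

	while (len--)
		x ^= *data++;
	return x;
}

/* returns 0 if every page matches its stored checksum, -1 on mismatch */
static int demo_check_csums(unsigned char **pages, unsigned long nr_pages,
			    const unsigned char *sums)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		unsigned char csum = demo_hash(pages[i], DEMO_PAGE_SIZE);

		if (memcmp(&csum, sums, DEMO_CSUM_SIZE))
			return -1;
		sums += DEMO_CSUM_SIZE;	/* advance to the next stored sum */
	}
	return 0;
}

int main(void)
{
	unsigned char buf[DEMO_PAGE_SIZE] = { 1, 2, 3 };
	unsigned char *pages[1] = { buf };
	unsigned char sums[1] = { 1 ^ 2 ^ 3 };

	return demo_check_csums(pages, 1, sums);	/* 0 on success */
}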

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
			cb->start, cb->start + cb->len - 1,
			bio->bi_status == BLK_STS_OK);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(!PAGE_ALIGNED(start));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(first_byte);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
							  0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(first_byte);
			bio_set_dev(bio, bdev);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u8 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(cur_disk_byte);
	bio_set_dev(comp_bio, bdev);
	comp_bio->bi_opf = REQ_OP_READ;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
							  comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			unsigned int nr_sectors;

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}

			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
						  fs_info->sectorsize);
			sums += csum_size * nr_sectors;

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(cur_disk_byte);
			bio_set_dev(comp_bio, bdev);
			comp_bio->bi_opf = REQ_OP_READ;
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
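
/*
 * Illustrative check (not part of the kernel source): with
 * BTRFS_MAX_UNCOMPRESSED = 128KiB, the bound above works out to
 * 131072 * 16 / 256 = 8192 bytes, i.e. 16 consecutive bytes sampled from
 * each of up to 512 locations spaced 256 bytes apart, matching the 8KB
 * figure quoted in the comment.
 */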

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void heuristic_init_workspace_manager(void)
{
	btrfs_init_workspace_manager(&heuristic_wsm, &btrfs_heuristic_compress);
}

static void heuristic_cleanup_workspace_manager(void)
{
	btrfs_cleanup_workspace_manager(&heuristic_wsm);
}

static struct list_head *heuristic_get_workspace(unsigned int level)
{
	return btrfs_get_workspace(&heuristic_wsm, level);
}

static void heuristic_put_workspace(struct list_head *ws)
{
	btrfs_put_workspace(&heuristic_wsm, ws);
}

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.init_workspace_manager = heuristic_init_workspace_manager,
	.cleanup_workspace_manager = heuristic_cleanup_workspace_manager,
	.get_workspace = heuristic_get_workspace,
	.put_workspace = heuristic_put_workspace,
	.alloc_workspace = alloc_heuristic_ws,
	.free_workspace = free_heuristic_ws,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
 842
 843void btrfs_init_workspace_manager(struct workspace_manager *wsm,
 844				  const struct btrfs_compress_op *ops)
 845{
 846	struct list_head *workspace;
 847
 848	wsm->ops = ops;
 849
 850	INIT_LIST_HEAD(&wsm->idle_ws);
 851	spin_lock_init(&wsm->ws_lock);
 852	atomic_set(&wsm->total_ws, 0);
 853	init_waitqueue_head(&wsm->ws_wait);
 854
 855	/*
 856	 * Preallocate one workspace for each compression type so we can
 857	 * guarantee forward progress in the worst case
 858	 */
 859	workspace = wsm->ops->alloc_workspace(0);
 860	if (IS_ERR(workspace)) {
 861		pr_warn(
 862	"BTRFS: cannot preallocate compression workspace, will try later\n");
 863	} else {
 864		atomic_set(&wsm->total_ws, 1);
 865		wsm->free_ws = 1;
 866		list_add(workspace, &wsm->idle_ws);
 867	}
 868}
 869
 870void btrfs_cleanup_workspace_manager(struct workspace_manager *wsman)
 871{
 872	struct list_head *ws;
 873
 874	while (!list_empty(&wsman->idle_ws)) {
 875		ws = wsman->idle_ws.next;
 876		list_del(ws);
 877		wsman->ops->free_workspace(ws);
 878		atomic_dec(&wsman->total_ws);
 879	}
 880}
 881
 882/*
 883 * This finds an available workspace or allocates a new one.
 884 * If it's not possible to allocate a new one, it waits until one is freed.
 885 * Preallocation provides a forward progress guarantee, so we do not return
 886 * errors.
 887 */
 888struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
 889				      unsigned int level)
 890{
 891	struct list_head *workspace;
 892	int cpus = num_online_cpus();
 893	unsigned nofs_flag;
 894	struct list_head *idle_ws;
 895	spinlock_t *ws_lock;
 896	atomic_t *total_ws;
 897	wait_queue_head_t *ws_wait;
 898	int *free_ws;
 899
 900	idle_ws	 = &wsm->idle_ws;
 901	ws_lock	 = &wsm->ws_lock;
 902	total_ws = &wsm->total_ws;
 903	ws_wait	 = &wsm->ws_wait;
 904	free_ws	 = &wsm->free_ws;
 905
 906again:
 907	spin_lock(ws_lock);
 908	if (!list_empty(idle_ws)) {
 909		workspace = idle_ws->next;
 910		list_del(workspace);
 911		(*free_ws)--;
 912		spin_unlock(ws_lock);
 913		return workspace;
 914
 915	}
 916	if (atomic_read(total_ws) > cpus) {
 917		DEFINE_WAIT(wait);
 918
 919		spin_unlock(ws_lock);
 920		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
 921		if (atomic_read(total_ws) > cpus && !*free_ws)
 922			schedule();
 923		finish_wait(ws_wait, &wait);
 924		goto again;
 925	}
 926	atomic_inc(total_ws);
 927	spin_unlock(ws_lock);
 928
 929	/*
 930	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
 931	 * to turn it off here because we might get called from the restricted
 932	 * context of btrfs_compress_bio/btrfs_compress_pages
 933	 */
 934	nofs_flag = memalloc_nofs_save();
 935	workspace = wsm->ops->alloc_workspace(level);
 936	memalloc_nofs_restore(nofs_flag);
 937
 938	if (IS_ERR(workspace)) {
 939		atomic_dec(total_ws);
 940		wake_up(ws_wait);
 941
 942		/*
 943		 * Do not return the error but go back to waiting. There's a
 944		 * workspace preallocated for each type and the compression
 945		 * time is bounded so we get to a workspace eventually. This
 946		 * makes our caller's life easier.
 947		 *
 948		 * To prevent silent and low-probability deadlocks (when the
 949		 * initial preallocation fails), check if there are any
 950		 * workspaces at all.
 951		 */
 952		if (atomic_read(total_ws) == 0) {
 953			static DEFINE_RATELIMIT_STATE(_rs,
 954					/* once per minute */ 60 * HZ,
 955					/* no burst */ 1);
 956
 957			if (__ratelimit(&_rs)) {
 958				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
 959			}
 960		}
 961		goto again;
 962	}
 963	return workspace;
 964}
 965
 966static struct list_head *get_workspace(int type, int level)
 967{
 968	return btrfs_compress_op[type]->get_workspace(level);
 969}
 970
 971/*
 972 * put a workspace struct back on the list or free it if we have enough
 973 * idle ones sitting around
 974 */
 975void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws)
 976{
 977	struct list_head *idle_ws;
 978	spinlock_t *ws_lock;
 979	atomic_t *total_ws;
 980	wait_queue_head_t *ws_wait;
 981	int *free_ws;
 982
 983	idle_ws	 = &wsm->idle_ws;
 984	ws_lock	 = &wsm->ws_lock;
 985	total_ws = &wsm->total_ws;
 986	ws_wait	 = &wsm->ws_wait;
 987	free_ws	 = &wsm->free_ws;
 988
 989	spin_lock(ws_lock);
 990	if (*free_ws <= num_online_cpus()) {
 991		list_add(ws, idle_ws);
 992		(*free_ws)++;
 993		spin_unlock(ws_lock);
 994		goto wake;
 995	}
 996	spin_unlock(ws_lock);
 997
 998	wsm->ops->free_workspace(ws);
 999	atomic_dec(total_ws);
1000wake:
1001	cond_wake_up(ws_wait);
1002}
1003
1004static void put_workspace(int type, struct list_head *ws)
1005{
1006	return btrfs_compress_op[type]->put_workspace(ws);
1007}
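
/*
 * A minimal usage sketch of the get/put pairing (illustrative only; real
 * callers appear further down in this file):
 *
 *	struct list_head *ws = get_workspace(BTRFS_COMPRESS_ZLIB, 0);
 *	...use the per-type workspace that embeds this list_head...
 *	put_workspace(BTRFS_COMPRESS_ZLIB, ws);
 *
 * No error handling is needed between the two calls: the preallocation and
 * retry logic in btrfs_get_workspace() means get_workspace() cannot fail.
 */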
1008
1009/*
1010 * Given an address space and start and length, compress the bytes into @pages
1011 * that are allocated on demand.
1012 *
1013 * @type_level is encoded algorithm and level, where level 0 means whatever
1014 * default the algorithm chooses and is opaque here;
1015 * - the compression algorithm is stored in bits 0-3
1016 * - the level is stored in bits 4-7
1017 *
1018 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1019 * and returns number of actually allocated pages
1020 *
1021 * @total_in is used to return the number of bytes actually read.  It
1022 * may be smaller than the input length if we had to exit early because we
1023 * ran out of room in the pages array or because we crossed the
1024 * max_out threshold.
1025 *
1026 * @total_out is an in/out parameter, must be set to the input length and will
1027 * be also used to return the total number of compressed bytes
1028 *
1029 * @max_out tells us the max number of bytes that we're allowed to
1030 * stuff into pages
1031 */
1032int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1033			 u64 start, struct page **pages,
1034			 unsigned long *out_pages,
1035			 unsigned long *total_in,
1036			 unsigned long *total_out)
1037{
1038	int type = btrfs_compress_type(type_level);
1039	int level = btrfs_compress_level(type_level);
1040	struct list_head *workspace;
1041	int ret;
1042
1043	level = btrfs_compress_set_level(type, level);
1044	workspace = get_workspace(type, level);
1045	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
1046						      start, pages,
1047						      out_pages,
1048						      total_in, total_out);
1049	put_workspace(type, workspace);
1050	return ret;
1051}
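
/*
 * A sketch of the @type_level encoding described above (type in bits 0-3,
 * level in bits 4-7; the decoding helpers are the ones used at the top of
 * btrfs_compress_pages()):
 *
 *	unsigned int type_level = BTRFS_COMPRESS_ZLIB | (9 << 4);
 *
 *	btrfs_compress_type(type_level)  == BTRFS_COMPRESS_ZLIB
 *	btrfs_compress_level(type_level) == 9
 *
 * A level of 0 means "use the algorithm's default", which is resolved by
 * btrfs_compress_set_level() below.
 */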
1052
1053/*
1054 * pages_in is an array of pages with compressed data.
1055 *
1056 * disk_start is the starting logical offset of this array in the file
1057 *
1058 * orig_bio contains the pages from the file that we want to decompress into
1059 *
1060 * srclen is the number of bytes in pages_in
1061 *
1062 * The basic idea is that we have a bio that was created by readpages.
1063 * The pages in the bio are for the uncompressed data, and they may not
1064 * be contiguous.  They all correspond to the range of bytes covered by
1065 * the compressed extent.
1066 */
1067static int btrfs_decompress_bio(struct compressed_bio *cb)
1068{
1069	struct list_head *workspace;
1070	int ret;
1071	int type = cb->compress_type;
1072
1073	workspace = get_workspace(type, 0);
1074	ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
1075	put_workspace(type, workspace);
1076
1077	return ret;
1078}
1079
1080/*
1081 * a less complex decompression routine.  Our compressed data fits in a
1082 * single page, and we want to read a single page out of it.
1083 * start_byte tells us the offset into the compressed data we're interested in
1084 */
1085int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1086		     unsigned long start_byte, size_t srclen, size_t destlen)
1087{
1088	struct list_head *workspace;
1089	int ret;
1090
1091	workspace = get_workspace(type, 0);
1092	ret = btrfs_compress_op[type]->decompress(workspace, data_in,
1093						  dest_page, start_byte,
1094						  srclen, destlen);
1095	put_workspace(type, workspace);
1096
1097	return ret;
1098}
1099
1100void __init btrfs_init_compress(void)
1101{
1102	int i;
1103
1104	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
1105		btrfs_compress_op[i]->init_workspace_manager();
1106}
1107
1108void __cold btrfs_exit_compress(void)
1109{
1110	int i;
1111
1112	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++)
1113		btrfs_compress_op[i]->cleanup_workspace_manager();
1114}
1115
1116/*
1117 * Copy uncompressed data from working buffer to pages.
1118 *
1119 * buf_start is the byte offset of the start of our workspace buffer.
1120 *
1121 * total_out is the offset of the end of the working buffer
1122 */
1123int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1124			      unsigned long total_out, u64 disk_start,
1125			      struct bio *bio)
1126{
1127	unsigned long buf_offset;
1128	unsigned long current_buf_start;
1129	unsigned long start_byte;
1130	unsigned long prev_start_byte;
1131	unsigned long working_bytes = total_out - buf_start;
1132	unsigned long bytes;
1133	char *kaddr;
1134	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1135
1136	/*
1137	 * start byte is the first byte of the page we're currently
1138	 * copying into relative to the start of the compressed data.
1139	 */
1140	start_byte = page_offset(bvec.bv_page) - disk_start;
1141
1142	/* we haven't yet hit data corresponding to this page */
1143	if (total_out <= start_byte)
1144		return 1;
1145
1146	/*
1147	 * the start of the data we care about is offset into
1148	 * the middle of our working buffer
1149	 */
1150	if (total_out > start_byte && buf_start < start_byte) {
1151		buf_offset = start_byte - buf_start;
1152		working_bytes -= buf_offset;
1153	} else {
1154		buf_offset = 0;
1155	}
1156	current_buf_start = buf_start;
1157
1158	/* copy bytes from the working buffer into the pages */
1159	while (working_bytes > 0) {
1160		bytes = min_t(unsigned long, bvec.bv_len,
1161				PAGE_SIZE - buf_offset);
1162		bytes = min(bytes, working_bytes);
1163
1164		kaddr = kmap_atomic(bvec.bv_page);
1165		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
1166		kunmap_atomic(kaddr);
1167		flush_dcache_page(bvec.bv_page);
1168
1169		buf_offset += bytes;
1170		working_bytes -= bytes;
1171		current_buf_start += bytes;
1172
1173		/* check if we need to pick another page */
1174		bio_advance(bio, bytes);
1175		if (!bio->bi_iter.bi_size)
1176			return 0;
1177		bvec = bio_iter_iovec(bio, bio->bi_iter);
1178		prev_start_byte = start_byte;
1179		start_byte = page_offset(bvec.bv_page) - disk_start;
1180
1181		/*
1182		 * We need to make sure we're only adjusting
1183		 * our offset into the compression working buffer when
1184		 * we're switching pages.  Otherwise we can incorrectly
1185		 * keep copying when we were actually done.
1186		 */
1187		if (start_byte != prev_start_byte) {
1188			/*
1189			 * make sure our new page is covered by this
1190			 * working buffer
1191			 */
1192			if (total_out <= start_byte)
1193				return 1;
1194
1195			/*
1196			 * the next page in the biovec might not be adjacent
1197			 * to the last page, but it might still be found
1198			 * inside this working buffer. bump our offset pointer
1199			 */
1200			if (total_out > start_byte &&
1201			    current_buf_start < start_byte) {
1202				buf_offset = start_byte - buf_start;
1203				working_bytes = total_out - start_byte;
1204				current_buf_start = buf_start + buf_offset;
1205			}
1206		}
1207	}
1208
1209	return 1;
1210}
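
/*
 * A worked example of the offset math above (illustrative numbers): for a
 * compressed extent whose uncompressed data starts at disk_start = 1MiB
 * and a bvec page at file offset 1MiB + 8KiB, start_byte = 8192.  If the
 * working buffer covers decompressed bytes [buf_start = 4096,
 * total_out = 16384), then buf_offset = 8192 - 4096 = 4096 and up to
 * working_bytes = 16384 - 8192 = 8192 bytes are copied into the bio pages
 * before another round of decompression is needed.
 */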
1211
1212/*
1213 * Shannon Entropy calculation
1214 *
1215 * Pure byte distribution analysis fails to determine compressibility of data.
1216 * Try calculating entropy to estimate the average minimum number of bits
1217 * needed to encode the sampled data.
1218 *
1219 * For convenience, return the percentage of needed bits, instead of amount of
1220 * bits directly.
1221 *
1222 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1223 *			    and can be compressible with high probability
1224 *
1225 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1226 *
1227 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1228 */
1229#define ENTROPY_LVL_ACEPTABLE		(65)
1230#define ENTROPY_LVL_HIGH		(80)
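
/*
 * For reference, the quantity approximated below is the Shannon entropy
 *
 *	H = -sum(p_i * log2(p_i)),  where p_i = bucket[i].count / sample_size
 *
 * reported as a percentage of the 8 bits/byte maximum.  The integer code
 * rewrites each term as count * (ilog2_w(sample_size) - ilog2_w(count))
 * and divides by sample_size only once at the end.
 */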
1231
1232/*
1233 * For increased precision in the shannon_entropy calculation,
1234 * let's do pow(n, M) to keep more digits after the decimal point:
1235 *
1236 * - maximum int bit length is 64
1237 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
1238 * - 13 * 4 = 52 < 64		-> M = 4
1239 *
1240 * So use pow(n, 4).
1241 */
1242static inline u32 ilog2_w(u64 n)
1243{
1244	return ilog2(n * n * n * n);
1245}
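
/*
 * A quick precision check (illustrative): log2(3) ~= 1.585, and plain
 * ilog2(3) truncates that to 1.  With the fourth power,
 * ilog2_w(3) = ilog2(81) = 6, and 6 / 4 = 1.5, ie. two extra fractional
 * bits are preserved.
 */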
1246
1247static u32 shannon_entropy(struct heuristic_ws *ws)
1248{
1249	const u32 entropy_max = 8 * ilog2_w(2);
1250	u32 entropy_sum = 0;
1251	u32 p, p_base, sz_base;
1252	u32 i;
1253
1254	sz_base = ilog2_w(ws->sample_size);
1255	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1256		p = ws->bucket[i].count;
1257		p_base = ilog2_w(p);
1258		entropy_sum += p * (sz_base - p_base);
1259	}
1260
1261	entropy_sum /= ws->sample_size;
1262	return entropy_sum * 100 / entropy_max;
1263}
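
/*
 * Normalization check (exact integer values): entropy_max = 8 * ilog2_w(2)
 * = 8 * ilog2(16) = 32.  Since every term uses ilog2_w (~ 4 * log2),
 * entropy_sum / sample_size is roughly 4 * H bits per byte, and
 * (4 * H) * 100 / 32 = H * 100 / 8, ie. the percentage of the 8 bits/byte
 * maximum promised in the comment above.
 */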
1264
1265#define RADIX_BASE		4U
1266#define COUNTERS_SIZE		(1U << RADIX_BASE)
1267
1268static u8 get4bits(u64 num, int shift) {
1269	u8 low4bits;
1270
1271	num >>= shift;
1272	/* Reverse order */
1273	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1274	return low4bits;
1275}
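
/*
 * Example (illustrative values): for num = 0x2A and shift = 0 the low
 * digit is 0xA = 10, so get4bits() returns 15 - 10 = 5.  Mapping digit d
 * to 15 - d makes the ascending radix sort below produce a descending
 * order by count, which is what byte_core_set_size() relies on.
 */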
1276
1277/*
1278 * Use 4 bits as radix base
1279 * Use 16 u32 counters for calculating new position in buf array
1280 *
1281 * @array     - array that will be sorted
1282 * @array_buf - buffer array to store sorting results
1283 *              must be equal in size to @array
1284 * @num       - array size
1285 */
1286static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1287		       int num)
1288{
1289	u64 max_num;
1290	u64 buf_num;
1291	u32 counters[COUNTERS_SIZE];
1292	u32 new_addr;
1293	u32 addr;
1294	int bitlen;
1295	int shift;
1296	int i;
1297
1298	/*
1299	 * Try to avoid useless loop iterations for small numbers stored in big
1300	 * counters.  Example: 48 33 4 ... in a 64bit array
1301	 */
1302	max_num = array[0].count;
1303	for (i = 1; i < num; i++) {
1304		buf_num = array[i].count;
1305		if (buf_num > max_num)
1306			max_num = buf_num;
1307	}
1308
1309	buf_num = ilog2(max_num);
1310	bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1311
1312	shift = 0;
1313	while (shift < bitlen) {
1314		memset(counters, 0, sizeof(counters));
1315
1316		for (i = 0; i < num; i++) {
1317			buf_num = array[i].count;
1318			addr = get4bits(buf_num, shift);
1319			counters[addr]++;
1320		}
1321
1322		for (i = 1; i < COUNTERS_SIZE; i++)
1323			counters[i] += counters[i - 1];
1324
1325		for (i = num - 1; i >= 0; i--) {
1326			buf_num = array[i].count;
1327			addr = get4bits(buf_num, shift);
1328			counters[addr]--;
1329			new_addr = counters[addr];
1330			array_buf[new_addr] = array[i];
1331		}
1332
1333		shift += RADIX_BASE;
1334
1335		/*
1336		 * A normal radix sort would move the data from the temporary
1337		 * array back to the main one, but that costs some CPU time.
1338		 * Avoid it by doing another sort iteration into the original
1339		 * array instead of a memcpy()
1340		 */
1341		memset(counters, 0, sizeof(counters));
1342
1343		for (i = 0; i < num; i++) {
1344			buf_num = array_buf[i].count;
1345			addr = get4bits(buf_num, shift);
1346			counters[addr]++;
1347		}
1348
1349		for (i = 1; i < COUNTERS_SIZE; i++)
1350			counters[i] += counters[i - 1];
1351
1352		for (i = num - 1; i >= 0; i--) {
1353			buf_num = array_buf[i].count;
1354			addr = get4bits(buf_num, shift);
1355			counters[addr]--;
1356			new_addr = counters[addr];
1357			array[new_addr] = array_buf[i];
1358		}
1359
1360		shift += RADIX_BASE;
1361	}
1362}
1363
1364/*
1365 * Size of the core byte set - how many bytes cover 90% of the sample
1366 *
1367 * There are several types of structured binary data that use nearly all byte
1368 * values. The distribution can be uniform and counts in all buckets will be
1369 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1370 *
1371 * Another possibility is a normal (Gaussian) distribution, where the data could
1372 * be potentially compressible, but we have to take a few more steps to decide
1373 * how much.
1374 *
1375 * @BYTE_CORE_SET_LOW  - the bulk of the byte values repeat frequently,
1376 *                       which a compression algo can easily exploit
1377 * @BYTE_CORE_SET_HIGH - the data have a uniform distribution and are,
1378 *                       with high probability, not compressible
1379 */
1380#define BYTE_CORE_SET_LOW		(64)
1381#define BYTE_CORE_SET_HIGH		(200)
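
/*
 * Worked numbers (a sketch, assuming a full 8KiB sample): the threshold is
 * 8192 * 90 / 100 = 7372 bytes.  If the 64 most frequent byte values
 * already account for more than 7372 sampled bytes, the core set is small
 * and the data is likely compressible; if even the top 200 values do not
 * reach that sum, the distribution is close to uniform.
 */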
1382
1383static int byte_core_set_size(struct heuristic_ws *ws)
1384{
1385	u32 i;
1386	u32 coreset_sum = 0;
1387	const u32 core_set_threshold = ws->sample_size * 90 / 100;
1388	struct bucket_item *bucket = ws->bucket;
1389
1390	/* Sort in reverse order */
1391	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1392
1393	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1394		coreset_sum += bucket[i].count;
1395
1396	if (coreset_sum > core_set_threshold)
1397		return i;
1398
1399	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1400		coreset_sum += bucket[i].count;
1401		if (coreset_sum > core_set_threshold)
1402			break;
1403	}
1404
1405	return i;
1406}
1407
1408/*
1409 * Count byte values in buckets.
1410 * This heuristic can detect textual data (configs, xml, json, html, etc),
1411 * because in most text-like data the byte set is restricted to a limited
1412 * number of possible characters, and that restriction in most cases makes
1413 * the data easy to compress.
1414 *
1415 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1416 *	less - compressible
1417 *	more - need additional analysis
1418 */
1419#define BYTE_SET_THRESHOLD		(64)
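
/*
 * For example, plain ASCII text typically uses well under 64 distinct byte
 * values (letters, digits, punctuation, whitespace) and passes this check
 * immediately, while encrypted or already-compressed data tends to use all
 * 256 values.
 */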
1420
1421static u32 byte_set_size(const struct heuristic_ws *ws)
1422{
1423	u32 i;
1424	u32 byte_set_size = 0;
1425
1426	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1427		if (ws->bucket[i].count > 0)
1428			byte_set_size++;
1429	}
1430
1431	/*
1432	 * Continue collecting count of byte values in buckets.  If the byte
1433 * set size is bigger than the threshold, it's pointless to continue,
1434	 * the detection technique would fail for this type of data.
1435	 */
1436	for (; i < BUCKET_SIZE; i++) {
1437		if (ws->bucket[i].count > 0) {
1438			byte_set_size++;
1439			if (byte_set_size > BYTE_SET_THRESHOLD)
1440				return byte_set_size;
1441		}
1442	}
1443
1444	return byte_set_size;
1445}
1446
1447static bool sample_repeated_patterns(struct heuristic_ws *ws)
1448{
1449	const u32 half_of_sample = ws->sample_size / 2;
1450	const u8 *data = ws->sample;
1451
1452	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1453}
1454
1455static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1456				     struct heuristic_ws *ws)
1457{
1458	struct page *page;
1459	u64 index, index_end;
1460	u32 i, curr_sample_pos;
1461	u8 *in_data;
1462
1463	/*
1464	 * Compression handles the input data by chunks of 128KiB
1465	 * (defined by BTRFS_MAX_UNCOMPRESSED)
1466	 *
1467	 * We do the same for the heuristic and loop over the whole range.
1468	 *
1469	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1470	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1471	 */
1472	if (end - start > BTRFS_MAX_UNCOMPRESSED)
1473		end = start + BTRFS_MAX_UNCOMPRESSED;
1474
1475	index = start >> PAGE_SHIFT;
1476	index_end = end >> PAGE_SHIFT;
1477
1478	/* Don't miss unaligned end */
1479	if (!IS_ALIGNED(end, PAGE_SIZE))
1480		index_end++;
1481
1482	curr_sample_pos = 0;
1483	while (index < index_end) {
1484		page = find_get_page(inode->i_mapping, index);
1485		in_data = kmap(page);
1486		/* Handle case where the start is not aligned to PAGE_SIZE */
1487		i = start % PAGE_SIZE;
1488		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1489			/* Don't sample any garbage from the last page */
1490			if (start > end - SAMPLING_READ_SIZE)
1491				break;
1492			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1493					SAMPLING_READ_SIZE);
1494			i += SAMPLING_INTERVAL;
1495			start += SAMPLING_INTERVAL;
1496			curr_sample_pos += SAMPLING_READ_SIZE;
1497		}
1498		kunmap(page);
1499		put_page(page);
1500
1501		index++;
1502	}
1503
1504	ws->sample_size = curr_sample_pos;
1505}
1506
1507/*
1508 * Compression heuristic.
1509 *
1510 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1511 * quickly (compared to direct compression) detect data characteristics
1512 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1513 * data.
1514 *
1515 * The following types of analysis can be performed:
1516 * - detect mostly zero data
1517 * - detect data with low "byte set" size (text, etc)
1518 * - detect data with low/high "core byte" set
1519 *
1520 * Return non-zero if the compression should be done, 0 otherwise.
1521 */
1522int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1523{
1524	struct list_head *ws_list = get_workspace(0, 0);
1525	struct heuristic_ws *ws;
1526	u32 i;
1527	u8 byte;
1528	int ret = 0;
1529
1530	ws = list_entry(ws_list, struct heuristic_ws, list);
1531
1532	heuristic_collect_sample(inode, start, end, ws);
1533
1534	if (sample_repeated_patterns(ws)) {
1535		ret = 1;
1536		goto out;
1537	}
1538
1539	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);
1540
1541	for (i = 0; i < ws->sample_size; i++) {
1542		byte = ws->sample[i];
1543		ws->bucket[byte].count++;
1544	}
1545
1546	i = byte_set_size(ws);
1547	if (i < BYTE_SET_THRESHOLD) {
1548		ret = 2;
1549		goto out;
1550	}
1551
1552	i = byte_core_set_size(ws);
1553	if (i <= BYTE_CORE_SET_LOW) {
1554		ret = 3;
1555		goto out;
1556	}
1557
1558	if (i >= BYTE_CORE_SET_HIGH) {
1559		ret = 0;
1560		goto out;
1561	}
1562
1563	i = shannon_entropy(ws);
1564	if (i <= ENTROPY_LVL_ACEPTABLE) {
1565		ret = 4;
1566		goto out;
1567	}
1568
1569	/*
1570	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1571	 * needed to give green light to compression.
1572	 *
1573	 * For now just assume that compression at that level is not worth the
1574	 * resources because:
1575	 *
1576	 * 1. it is possible to defrag the data later
1577	 *
1578	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1579	 * values, every bucket has counter at level ~54. The heuristic would
1580	 * be confused. This can happen when data have some internal repeated
1581	 * patterns like "abbacbbc...". This can be detected by analyzing
1582	 * pairs of bytes, which is too costly.
1583	 */
1584	if (i < ENTROPY_LVL_HIGH) {
1585		ret = 5;
1586		goto out;
1587	} else {
1588		ret = 0;
1589		goto out;
1590	}
1591
1592out:
1593	put_workspace(0, ws_list);
1594	return ret;
1595}
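
/*
 * A minimal caller sketch (illustrative; the real policy lives in the
 * inode code):
 *
 *	if (btrfs_compress_heuristic(inode, start, end))
 *		compress the range, eg. via btrfs_compress_pages();
 *	else
 *		write the range uncompressed;
 *
 * The distinct non-zero return values (1-5) only record which test fired;
 * callers treat them all as "worth compressing".
 */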
1596
1597/*
1598 * Convert the compression suffix (eg. after "zlib" starting with ":") to
1599 * a level; an unrecognized string will set the default level
1600 */
1601unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1602{
1603	unsigned int level = 0;
1604	int ret;
1605
1606	if (!type)
1607		return 0;
1608
1609	if (str[0] == ':') {
1610		ret = kstrtouint(str + 1, 10, &level);
1611		if (ret)
1612			level = 0;
1613	}
1614
1615	level = btrfs_compress_set_level(type, level);
1616
1617	return level;
1618}
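
/*
 * Examples (illustrative): for the mount option "compress=zlib:9" the
 * suffix ":9" reaches this function and yields level 9.  An unparsable
 * suffix such as ":foo" falls back to level 0, which
 * btrfs_compress_set_level() below maps to the algorithm's default.
 */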
1619
1620/*
1621 * Adjust @level according to the limits of the compression algorithm or
1622 * fallback to default
1623 */
1624unsigned int btrfs_compress_set_level(int type, unsigned level)
1625{
1626	const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1627
1628	if (level == 0)
1629		level = ops->default_level;
1630	else
1631		level = min(level, ops->max_level);
1632
1633	return level;
1634}