   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/ratelimit.h>
   8#include <linux/sched/mm.h>
   9#include <crypto/hash.h>
  10#include "ctree.h"
  11#include "discard.h"
  12#include "volumes.h"
  13#include "disk-io.h"
  14#include "ordered-data.h"
  15#include "transaction.h"
  16#include "backref.h"
  17#include "extent_io.h"
  18#include "dev-replace.h"
  19#include "raid56.h"
  20#include "block-group.h"
  21#include "zoned.h"
  22#include "fs.h"
  23#include "accessors.h"
  24#include "file-item.h"
  25#include "scrub.h"
  26#include "raid-stripe-tree.h"
  27
  28/*
   29 * This is only the first step towards a full-featured scrub. It reads all
   30 * extents and super blocks and verifies the checksums. In case a bad checksum
  31 * is found or the extent cannot be read, good data will be written back if
  32 * any can be found.
  33 *
  34 * Future enhancements:
  35 *  - In case an unrepairable extent is encountered, track which files are
  36 *    affected and report them
  37 *  - track and record media errors, throw out bad devices
  38 *  - add a mode to also read unallocated space
  39 */
  40
  41struct scrub_ctx;
  42
  43/*
   44 * The following value only affects performance.
   45 *
   46 * This determines how many stripes are submitted in one go,
  47 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
  48 */
  49#define SCRUB_STRIPES_PER_GROUP		8
  50
  51/*
  52 * How many groups we have for each sctx.
  53 *
   54 * This amounts to 8MiB per device, the same value as the old scrub in-flight
   55 * bios size limit.
  56 */
  57#define SCRUB_GROUPS_PER_SCTX		16
  58
  59#define SCRUB_TOTAL_STRIPES		(SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
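/*
 * In total: 16 groups * 8 stripes = 128 stripes, i.e. 8MiB of data
 * (128 * 64KiB BTRFS_STRIPE_LEN) in flight per scrub_ctx.
 */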
  60
  61/*
  62 * The following value times PAGE_SIZE needs to be large enough to match the
  63 * largest node/leaf/sector size that shall be supported.
  64 */
  65#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
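/* With the current BTRFS_MAX_METADATA_BLOCKSIZE of 64KiB this evaluates to 16. */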
  66
   67/* Represents one sector and the info needed to verify its content. */
  68struct scrub_sector_verification {
  69	bool is_metadata;
  70
  71	union {
  72		/*
  73		 * Csum pointer for data csum verification.  Should point to a
  74		 * sector csum inside scrub_stripe::csums.
  75		 *
  76		 * NULL if this data sector has no csum.
  77		 */
  78		u8 *csum;
  79
  80		/*
  81		 * Extra info for metadata verification.  All sectors inside a
  82		 * tree block share the same generation.
  83		 */
  84		u64 generation;
  85	};
  86};
  87
  88enum scrub_stripe_flags {
  89	/* Set when @mirror_num, @dev, @physical and @logical are set. */
  90	SCRUB_STRIPE_FLAG_INITIALIZED,
  91
  92	/* Set when the read-repair is finished. */
  93	SCRUB_STRIPE_FLAG_REPAIR_DONE,
  94
  95	/*
   96	 * Set for data stripes if the scrub is triggered from a P/Q stripe.
  97	 * During such scrub, we should not report errors in data stripes, nor
  98	 * update the accounting.
  99	 */
 100	SCRUB_STRIPE_FLAG_NO_REPORT,
 101};
 102
 103#define SCRUB_STRIPE_PAGES		(BTRFS_STRIPE_LEN / PAGE_SIZE)
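/* On systems with 4KiB pages this is 16 pages (64KiB / 4KiB) per stripe. */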
 104
 105/*
 106 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 107 */
 108struct scrub_stripe {
 109	struct scrub_ctx *sctx;
 110	struct btrfs_block_group *bg;
 111
 112	struct page *pages[SCRUB_STRIPE_PAGES];
 113	struct scrub_sector_verification *sectors;
 114
 115	struct btrfs_device *dev;
 116	u64 logical;
 117	u64 physical;
 118
 119	u16 mirror_num;
 120
 121	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
 122	u16 nr_sectors;
 123
 124	/*
 125	 * How many data/meta extents are in this stripe.  Only for scrub status
 126	 * reporting purposes.
 127	 */
 128	u16 nr_data_extents;
 129	u16 nr_meta_extents;
 130
 131	atomic_t pending_io;
 132	wait_queue_head_t io_wait;
 133	wait_queue_head_t repair_wait;
 134
 135	/*
 136	 * Indicate the states of the stripe.  Bits are defined in
 137	 * scrub_stripe_flags enum.
 138	 */
 139	unsigned long state;
 140
 141	/* Indicate which sectors are covered by extent items. */
 142	unsigned long extent_sector_bitmap;
 143
 144	/*
 145	 * The errors hit during the initial read of the stripe.
 146	 *
  147	 * Used for error reporting and repair.
  148	 *
  149	 * The init_nr_* counters record the number of errors hit, and are only
  150	 * used for error reporting.
 151	 */
 152	unsigned long init_error_bitmap;
 153	unsigned int init_nr_io_errors;
 154	unsigned int init_nr_csum_errors;
 155	unsigned int init_nr_meta_errors;
 156
 157	/*
 158	 * The following error bitmaps are all for the current status.
 159	 * Every time we submit a new read, these bitmaps may be updated.
 160	 *
 161	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
 162	 *
 163	 * IO and csum errors can happen for both metadata and data.
 164	 */
 165	unsigned long error_bitmap;
 166	unsigned long io_error_bitmap;
 167	unsigned long csum_error_bitmap;
 168	unsigned long meta_error_bitmap;
 169
 170	/* For writeback (repair or replace) error reporting. */
 171	unsigned long write_error_bitmap;
 172
 173	/* Writeback can be concurrent, thus we need to protect the bitmap. */
 174	spinlock_t write_error_lock;
 175
 176	/*
 177	 * Checksum for the whole stripe if this stripe is inside a data block
 178	 * group.
 179	 */
 180	u8 *csums;
 181
 182	struct work_struct work;
 183};
 184
 185struct scrub_ctx {
 186	struct scrub_stripe	stripes[SCRUB_TOTAL_STRIPES];
 187	struct scrub_stripe	*raid56_data_stripes;
 188	struct btrfs_fs_info	*fs_info;
 189	struct btrfs_path	extent_path;
 190	struct btrfs_path	csum_path;
 191	int			first_free;
 192	int			cur_stripe;
 193	atomic_t		cancel_req;
 194	int			readonly;
 195
 196	/* State of IO submission throttling affecting the associated device */
 197	ktime_t			throttle_deadline;
 198	u64			throttle_sent;
 199
 200	int			is_dev_replace;
 201	u64			write_pointer;
 202
 203	struct mutex            wr_lock;
 204	struct btrfs_device     *wr_tgtdev;
 205
 206	/*
 207	 * statistics
 208	 */
 209	struct btrfs_scrub_progress stat;
 210	spinlock_t		stat_lock;
 211
 212	/*
 213	 * Use a ref counter to avoid use-after-free issues. Scrub workers
 214	 * decrement bios_in_flight and workers_pending and then do a wakeup
 215	 * on the list_wait wait queue. We must ensure the main scrub task
 216	 * doesn't free the scrub context before or while the workers are
 217	 * doing the wakeup() call.
 218	 */
 219	refcount_t              refs;
 220};
 221
 222struct scrub_warning {
 223	struct btrfs_path	*path;
 224	u64			extent_item_size;
 225	const char		*errstr;
 226	u64			physical;
 227	u64			logical;
 228	struct btrfs_device	*dev;
 229};
 230
 231static void release_scrub_stripe(struct scrub_stripe *stripe)
 232{
 233	if (!stripe)
 234		return;
 235
 236	for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
 237		if (stripe->pages[i])
 238			__free_page(stripe->pages[i]);
 239		stripe->pages[i] = NULL;
 240	}
 241	kfree(stripe->sectors);
 242	kfree(stripe->csums);
 243	stripe->sectors = NULL;
 244	stripe->csums = NULL;
 245	stripe->sctx = NULL;
 246	stripe->state = 0;
 247}
 248
 249static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
 250			     struct scrub_stripe *stripe)
 251{
 252	int ret;
 253
 254	memset(stripe, 0, sizeof(*stripe));
 255
 256	stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
 257	stripe->state = 0;
 258
 259	init_waitqueue_head(&stripe->io_wait);
 260	init_waitqueue_head(&stripe->repair_wait);
 261	atomic_set(&stripe->pending_io, 0);
 262	spin_lock_init(&stripe->write_error_lock);
 263
 264	ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, false);
 265	if (ret < 0)
 266		goto error;
 267
 268	stripe->sectors = kcalloc(stripe->nr_sectors,
 269				  sizeof(struct scrub_sector_verification),
 270				  GFP_KERNEL);
 271	if (!stripe->sectors)
 272		goto error;
 273
 274	stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
 275				fs_info->csum_size, GFP_KERNEL);
 276	if (!stripe->csums)
 277		goto error;
 278	return 0;
 279error:
 280	release_scrub_stripe(stripe);
 281	return -ENOMEM;
 282}
 283
 284static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
 285{
 286	wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
 287}
 288
 289static void scrub_put_ctx(struct scrub_ctx *sctx);
 290
 291static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 292{
 293	while (atomic_read(&fs_info->scrub_pause_req)) {
 294		mutex_unlock(&fs_info->scrub_lock);
 295		wait_event(fs_info->scrub_pause_wait,
 296		   atomic_read(&fs_info->scrub_pause_req) == 0);
 297		mutex_lock(&fs_info->scrub_lock);
 298	}
 299}
 300
 301static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 302{
 303	atomic_inc(&fs_info->scrubs_paused);
 304	wake_up(&fs_info->scrub_pause_wait);
 305}
 306
 307static void scrub_pause_off(struct btrfs_fs_info *fs_info)
 308{
 309	mutex_lock(&fs_info->scrub_lock);
 310	__scrub_blocked_if_needed(fs_info);
 311	atomic_dec(&fs_info->scrubs_paused);
 312	mutex_unlock(&fs_info->scrub_lock);
 313
 314	wake_up(&fs_info->scrub_pause_wait);
 315}
 316
 317static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 318{
 319	scrub_pause_on(fs_info);
 320	scrub_pause_off(fs_info);
 321}
 322
 323static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 324{
 325	int i;
 326
 327	if (!sctx)
 328		return;
 329
 330	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
 331		release_scrub_stripe(&sctx->stripes[i]);
 332
 333	kvfree(sctx);
 334}
 335
 336static void scrub_put_ctx(struct scrub_ctx *sctx)
 337{
 338	if (refcount_dec_and_test(&sctx->refs))
 339		scrub_free_ctx(sctx);
 340}
 341
 342static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 343		struct btrfs_fs_info *fs_info, int is_dev_replace)
 344{
 345	struct scrub_ctx *sctx;
 346	int		i;
 347
 348	/* Since sctx has inline 128 stripes, it can go beyond 64K easily.  Use
 349	 * kvzalloc().
 350	 */
 351	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
 352	if (!sctx)
 353		goto nomem;
 354	refcount_set(&sctx->refs, 1);
 355	sctx->is_dev_replace = is_dev_replace;
 356	sctx->fs_info = fs_info;
 357	sctx->extent_path.search_commit_root = 1;
 358	sctx->extent_path.skip_locking = 1;
 359	sctx->csum_path.search_commit_root = 1;
 360	sctx->csum_path.skip_locking = 1;
 361	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
 362		int ret;
 363
 364		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
 365		if (ret < 0)
 366			goto nomem;
 367		sctx->stripes[i].sctx = sctx;
 368	}
 369	sctx->first_free = 0;
 370	atomic_set(&sctx->cancel_req, 0);
 371
 372	spin_lock_init(&sctx->stat_lock);
 373	sctx->throttle_deadline = 0;
 374
 375	mutex_init(&sctx->wr_lock);
 376	if (is_dev_replace) {
 377		WARN_ON(!fs_info->dev_replace.tgtdev);
 378		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
 379	}
 380
 381	return sctx;
 382
 383nomem:
 384	scrub_free_ctx(sctx);
 385	return ERR_PTR(-ENOMEM);
 386}
 387
 388static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
 389				     u64 root, void *warn_ctx)
 390{
 391	u32 nlink;
 392	int ret;
 393	int i;
 394	unsigned nofs_flag;
 395	struct extent_buffer *eb;
 396	struct btrfs_inode_item *inode_item;
 397	struct scrub_warning *swarn = warn_ctx;
 398	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
 399	struct inode_fs_paths *ipath = NULL;
 400	struct btrfs_root *local_root;
 401	struct btrfs_key key;
 402
 403	local_root = btrfs_get_fs_root(fs_info, root, true);
 404	if (IS_ERR(local_root)) {
 405		ret = PTR_ERR(local_root);
 406		goto err;
 407	}
 408
 409	/*
 410	 * this makes the path point to (inum INODE_ITEM ioff)
 411	 */
 412	key.objectid = inum;
 413	key.type = BTRFS_INODE_ITEM_KEY;
 414	key.offset = 0;
 415
 416	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
 417	if (ret) {
 418		btrfs_put_root(local_root);
 419		btrfs_release_path(swarn->path);
 420		goto err;
 421	}
 422
 423	eb = swarn->path->nodes[0];
 424	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 425					struct btrfs_inode_item);
 426	nlink = btrfs_inode_nlink(eb, inode_item);
 427	btrfs_release_path(swarn->path);
 428
 429	/*
  430	 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
 431	 * uses GFP_NOFS in this context, so we keep it consistent but it does
 432	 * not seem to be strictly necessary.
 433	 */
 434	nofs_flag = memalloc_nofs_save();
 435	ipath = init_ipath(4096, local_root, swarn->path);
 436	memalloc_nofs_restore(nofs_flag);
 437	if (IS_ERR(ipath)) {
 438		btrfs_put_root(local_root);
 439		ret = PTR_ERR(ipath);
 440		ipath = NULL;
 441		goto err;
 442	}
 443	ret = paths_from_inode(inum, ipath);
 444
 445	if (ret < 0)
 446		goto err;
 447
 448	/*
  449	 * We deliberately ignore the fact that ipath might have been too small
  450	 * to hold all of the paths here.
 451	 */
 452	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 453		btrfs_warn_in_rcu(fs_info,
 454"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
 455				  swarn->errstr, swarn->logical,
 456				  btrfs_dev_name(swarn->dev),
 457				  swarn->physical,
 458				  root, inum, offset,
 459				  fs_info->sectorsize, nlink,
 460				  (char *)(unsigned long)ipath->fspath->val[i]);
 461
 462	btrfs_put_root(local_root);
 463	free_ipath(ipath);
 464	return 0;
 465
 466err:
 467	btrfs_warn_in_rcu(fs_info,
 468			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
 469			  swarn->errstr, swarn->logical,
 470			  btrfs_dev_name(swarn->dev),
 471			  swarn->physical,
 472			  root, inum, offset, ret);
 473
 474	free_ipath(ipath);
 475	return 0;
 476}
 477
 478static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
 479				       bool is_super, u64 logical, u64 physical)
 480{
 481	struct btrfs_fs_info *fs_info = dev->fs_info;
 482	struct btrfs_path *path;
 483	struct btrfs_key found_key;
 484	struct extent_buffer *eb;
 485	struct btrfs_extent_item *ei;
 486	struct scrub_warning swarn;
 487	u64 flags = 0;
 488	u32 item_size;
 489	int ret;
 490
 491	/* Super block error, no need to search extent tree. */
 492	if (is_super) {
 493		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
 494				  errstr, btrfs_dev_name(dev), physical);
 495		return;
 496	}
 497	path = btrfs_alloc_path();
 498	if (!path)
 499		return;
 500
 501	swarn.physical = physical;
 502	swarn.logical = logical;
 503	swarn.errstr = errstr;
 504	swarn.dev = NULL;
 505
 506	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
 507				  &flags);
 508	if (ret < 0)
 509		goto out;
 510
 511	swarn.extent_item_size = found_key.offset;
 512
 513	eb = path->nodes[0];
 514	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 515	item_size = btrfs_item_size(eb, path->slots[0]);
 516
 517	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 518		unsigned long ptr = 0;
 519		u8 ref_level;
 520		u64 ref_root;
 521
 522		while (true) {
 523			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 524						      item_size, &ref_root,
 525						      &ref_level);
 526			if (ret < 0) {
 527				btrfs_warn(fs_info,
 528				"failed to resolve tree backref for logical %llu: %d",
 529						  swarn.logical, ret);
 530				break;
 531			}
 532			if (ret > 0)
 533				break;
 534			btrfs_warn_in_rcu(fs_info,
 535"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
 536				errstr, swarn.logical, btrfs_dev_name(dev),
 537				swarn.physical, (ref_level ? "node" : "leaf"),
 538				ref_level, ref_root);
 539		}
 540		btrfs_release_path(path);
 541	} else {
 542		struct btrfs_backref_walk_ctx ctx = { 0 };
 543
 544		btrfs_release_path(path);
 545
 546		ctx.bytenr = found_key.objectid;
 547		ctx.extent_item_pos = swarn.logical - found_key.objectid;
 548		ctx.fs_info = fs_info;
 549
 550		swarn.path = path;
 551		swarn.dev = dev;
 552
 553		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
 554	}
 555
 556out:
 557	btrfs_free_path(path);
 558}
 559
 560static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
 561{
 562	int ret = 0;
 563	u64 length;
 564
 565	if (!btrfs_is_zoned(sctx->fs_info))
 566		return 0;
 567
 568	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
 569		return 0;
 570
 571	if (sctx->write_pointer < physical) {
 572		length = physical - sctx->write_pointer;
 573
 574		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
 575						sctx->write_pointer, length);
 576		if (!ret)
 577			sctx->write_pointer = physical;
 578	}
 579	return ret;
 580}
 581
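/*
 * Map a sector number inside the stripe to its backing page (and, below, the
 * offset inside that page).  For example, with 4KiB sectors and 4KiB pages
 * sector N lives at offset 0 of pages[N]; with 64KiB pages all 16 sectors of
 * a stripe share pages[0].
 */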
 582static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
 583{
 584	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 585	int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
 586
 587	return stripe->pages[page_index];
 588}
 589
 590static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
 591						 int sector_nr)
 592{
 593	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 594
 595	return offset_in_page(sector_nr << fs_info->sectorsize_bits);
 596}
 597
 598static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
 599{
 600	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 601	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 602	const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
 603	const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
 604	const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
 605	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 606	u8 on_disk_csum[BTRFS_CSUM_SIZE];
 607	u8 calculated_csum[BTRFS_CSUM_SIZE];
 608	struct btrfs_header *header;
 609
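	/*
	 * The checks below run in order: bytenr, fsid, chunk tree uuid, the
	 * checksum over the whole tree block, and finally the expected
	 * generation for this sector.
	 */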
 610	/*
 611	 * Here we don't have a good way to attach the pages (and subpages)
 612	 * to a dummy extent buffer, thus we have to directly grab the members
 613	 * from pages.
 614	 */
 615	header = (struct btrfs_header *)(page_address(first_page) + first_off);
 616	memcpy(on_disk_csum, header->csum, fs_info->csum_size);
 617
 618	if (logical != btrfs_stack_header_bytenr(header)) {
 619		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 620		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 621		btrfs_warn_rl(fs_info,
 622		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
 623			      logical, stripe->mirror_num,
 624			      btrfs_stack_header_bytenr(header), logical);
 625		return;
 626	}
 627	if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
 628		   BTRFS_FSID_SIZE) != 0) {
 629		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 630		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 631		btrfs_warn_rl(fs_info,
 632		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
 633			      logical, stripe->mirror_num,
 634			      header->fsid, fs_info->fs_devices->fsid);
 635		return;
 636	}
 637	if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
 638		   BTRFS_UUID_SIZE) != 0) {
 639		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 640		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 641		btrfs_warn_rl(fs_info,
 642		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
 643			      logical, stripe->mirror_num,
 644			      header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
 645		return;
 646	}
 647
 648	/* Now check tree block csum. */
 649	shash->tfm = fs_info->csum_shash;
 650	crypto_shash_init(shash);
 651	crypto_shash_update(shash, page_address(first_page) + first_off +
 652			    BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
 653
 654	for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
 655		struct page *page = scrub_stripe_get_page(stripe, i);
 656		unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
 657
 658		crypto_shash_update(shash, page_address(page) + page_off,
 659				    fs_info->sectorsize);
 660	}
 661
 662	crypto_shash_final(shash, calculated_csum);
 663	if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
 664		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 665		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 666		btrfs_warn_rl(fs_info,
 667		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
 668			      logical, stripe->mirror_num,
 669			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
 670			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
 671		return;
 672	}
 673	if (stripe->sectors[sector_nr].generation !=
 674	    btrfs_stack_header_generation(header)) {
 675		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 676		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 677		btrfs_warn_rl(fs_info,
 678		"tree block %llu mirror %u has bad generation, has %llu want %llu",
 679			      logical, stripe->mirror_num,
 680			      btrfs_stack_header_generation(header),
 681			      stripe->sectors[sector_nr].generation);
 682		return;
 683	}
 684	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 685	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 686	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 687}
 688
 689static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
 690{
 691	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 692	struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
 693	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 694	struct page *page = scrub_stripe_get_page(stripe, sector_nr);
 695	unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
 696	u8 csum_buf[BTRFS_CSUM_SIZE];
 697	int ret;
 698
 699	ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
 700
 701	/* Sector not utilized, skip it. */
 702	if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
 703		return;
 704
 705	/* IO error, no need to check. */
 706	if (test_bit(sector_nr, &stripe->io_error_bitmap))
 707		return;
 708
 709	/* Metadata, verify the full tree block. */
 710	if (sector->is_metadata) {
 711		/*
  712		 * Check if the tree block crosses the stripe boundary.  If it
  713		 * crosses the boundary, we cannot verify it and only give a
  714		 * warning.
 715		 *
 716		 * This can only happen on a very old filesystem where chunks
 717		 * are not ensured to be stripe aligned.
 718		 */
 719		if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
 720			btrfs_warn_rl(fs_info,
 721			"tree block at %llu crosses stripe boundary %llu",
 722				      stripe->logical +
 723				      (sector_nr << fs_info->sectorsize_bits),
 724				      stripe->logical);
 725			return;
 726		}
 727		scrub_verify_one_metadata(stripe, sector_nr);
 728		return;
 729	}
 730
 731	/*
 732	 * Data is easier, we just verify the data csum (if we have it).  For
 733	 * cases without csum, we have no other choice but to trust it.
 734	 */
 735	if (!sector->csum) {
 736		clear_bit(sector_nr, &stripe->error_bitmap);
 737		return;
 738	}
 739
 740	ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
 741	if (ret < 0) {
 742		set_bit(sector_nr, &stripe->csum_error_bitmap);
 743		set_bit(sector_nr, &stripe->error_bitmap);
 744	} else {
 745		clear_bit(sector_nr, &stripe->csum_error_bitmap);
 746		clear_bit(sector_nr, &stripe->error_bitmap);
 747	}
 748}
 749
 750/* Verify specified sectors of a stripe. */
 751static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
 752{
 753	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 754	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 755	int sector_nr;
 756
 757	for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
 758		scrub_verify_one_sector(stripe, sector_nr);
 759		if (stripe->sectors[sector_nr].is_metadata)
 760			sector_nr += sectors_per_tree - 1;
 761	}
 762}
 763
 764static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
 765{
 766	int i;
 767
 768	for (i = 0; i < stripe->nr_sectors; i++) {
 769		if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
 770		    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
 771			break;
 772	}
 773	ASSERT(i < stripe->nr_sectors);
 774	return i;
 775}
 776
 777/*
  778 * Repair read is different from the regular read:
 779 *
 780 * - Only reads the failed sectors
 781 * - May have extra blocksize limits
 782 */
 783static void scrub_repair_read_endio(struct btrfs_bio *bbio)
 784{
 785	struct scrub_stripe *stripe = bbio->private;
 786	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 787	struct bio_vec *bvec;
 788	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
 789	u32 bio_size = 0;
 790	int i;
 791
 792	ASSERT(sector_nr < stripe->nr_sectors);
 793
 794	bio_for_each_bvec_all(bvec, &bbio->bio, i)
 795		bio_size += bvec->bv_len;
 796
 797	if (bbio->bio.bi_status) {
 798		bitmap_set(&stripe->io_error_bitmap, sector_nr,
 799			   bio_size >> fs_info->sectorsize_bits);
 800		bitmap_set(&stripe->error_bitmap, sector_nr,
 801			   bio_size >> fs_info->sectorsize_bits);
 802	} else {
 803		bitmap_clear(&stripe->io_error_bitmap, sector_nr,
 804			     bio_size >> fs_info->sectorsize_bits);
 805	}
 806	bio_put(&bbio->bio);
 807	if (atomic_dec_and_test(&stripe->pending_io))
 808		wake_up(&stripe->io_wait);
 809}
 810
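/*
 * Pick the next mirror to try, wrapping back to the first one.  With
 * num_copies == 3 the sequence is 1 -> 2 -> 3 -> 1.
 */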
 811static int calc_next_mirror(int mirror, int num_copies)
 812{
 813	ASSERT(mirror <= num_copies);
 814	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
 815}
 816
 817static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
 818					    int mirror, int blocksize, bool wait)
 819{
 820	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 821	struct btrfs_bio *bbio = NULL;
 822	const unsigned long old_error_bitmap = stripe->error_bitmap;
 823	int i;
 824
 825	ASSERT(stripe->mirror_num >= 1);
 826	ASSERT(atomic_read(&stripe->pending_io) == 0);
 827
 828	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
 829		struct page *page;
 830		int pgoff;
 831		int ret;
 832
 833		page = scrub_stripe_get_page(stripe, i);
 834		pgoff = scrub_stripe_get_page_offset(stripe, i);
 835
 836		/* The current sector cannot be merged, submit the bio. */
 837		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
 838			     bbio->bio.bi_iter.bi_size >= blocksize)) {
 839			ASSERT(bbio->bio.bi_iter.bi_size);
 840			atomic_inc(&stripe->pending_io);
 841			btrfs_submit_bbio(bbio, mirror);
 842			if (wait)
 843				wait_scrub_stripe_io(stripe);
 844			bbio = NULL;
 845		}
 846
 847		if (!bbio) {
 848			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
 849				fs_info, scrub_repair_read_endio, stripe);
 850			bbio->bio.bi_iter.bi_sector = (stripe->logical +
 851				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
 852		}
 853
 854		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
 855		ASSERT(ret == fs_info->sectorsize);
 856	}
 857	if (bbio) {
 858		ASSERT(bbio->bio.bi_iter.bi_size);
 859		atomic_inc(&stripe->pending_io);
 860		btrfs_submit_bbio(bbio, mirror);
 861		if (wait)
 862			wait_scrub_stripe_io(stripe);
 863	}
 864}
 865
 866static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
 867				       struct scrub_stripe *stripe)
 868{
 869	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
 870				      DEFAULT_RATELIMIT_BURST);
 871	struct btrfs_fs_info *fs_info = sctx->fs_info;
 872	struct btrfs_device *dev = NULL;
 873	u64 physical = 0;
 874	int nr_data_sectors = 0;
 875	int nr_meta_sectors = 0;
 876	int nr_nodatacsum_sectors = 0;
 877	int nr_repaired_sectors = 0;
 878	int sector_nr;
 879
 880	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
 881		return;
 882
 883	/*
  884	 * Init the info needed for error reporting.
  885	 *
  886	 * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio(),
  887	 * which needs no dev/physical, error reporting still needs dev and physical.
 888	 */
 889	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
 890		u64 mapped_len = fs_info->sectorsize;
 891		struct btrfs_io_context *bioc = NULL;
 892		int stripe_index = stripe->mirror_num - 1;
 893		int ret;
 894
 895		/* For scrub, our mirror_num should always start at 1. */
 896		ASSERT(stripe->mirror_num >= 1);
 897		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
 898				      stripe->logical, &mapped_len, &bioc,
 899				      NULL, NULL);
 900		/*
 901		 * If we failed, dev will be NULL, and later detailed reports
 902		 * will just be skipped.
 903		 */
 904		if (ret < 0)
 905			goto skip;
 906		physical = bioc->stripes[stripe_index].physical;
 907		dev = bioc->stripes[stripe_index].dev;
 908		btrfs_put_bioc(bioc);
 909	}
 910
 911skip:
 912	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
 913		bool repaired = false;
 914
 915		if (stripe->sectors[sector_nr].is_metadata) {
 916			nr_meta_sectors++;
 917		} else {
 918			nr_data_sectors++;
 919			if (!stripe->sectors[sector_nr].csum)
 920				nr_nodatacsum_sectors++;
 921		}
 922
 923		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
 924		    !test_bit(sector_nr, &stripe->error_bitmap)) {
 925			nr_repaired_sectors++;
 926			repaired = true;
 927		}
 928
  929		/* Good sector from the beginning, nothing needs to be done. */
 930		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
 931			continue;
 932
 933		/*
  934		 * Report errors for the corrupted sectors.  If repaired, just
  935		 * output a message saying it has been repaired.
 936		 */
 937		if (repaired) {
 938			if (dev) {
 939				btrfs_err_rl_in_rcu(fs_info,
 940			"fixed up error at logical %llu on dev %s physical %llu",
 941					    stripe->logical, btrfs_dev_name(dev),
 942					    physical);
 943			} else {
 944				btrfs_err_rl_in_rcu(fs_info,
 945			"fixed up error at logical %llu on mirror %u",
 946					    stripe->logical, stripe->mirror_num);
 947			}
 948			continue;
 949		}
 950
  951		/* The remaining cases are all unrepaired sectors. */
 952		if (dev) {
 953			btrfs_err_rl_in_rcu(fs_info,
 954	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
 955					    stripe->logical, btrfs_dev_name(dev),
 956					    physical);
 957		} else {
 958			btrfs_err_rl_in_rcu(fs_info,
 959	"unable to fixup (regular) error at logical %llu on mirror %u",
 960					    stripe->logical, stripe->mirror_num);
 961		}
 962
 963		if (test_bit(sector_nr, &stripe->io_error_bitmap))
 964			if (__ratelimit(&rs) && dev)
 965				scrub_print_common_warning("i/o error", dev, false,
 966						     stripe->logical, physical);
 967		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
 968			if (__ratelimit(&rs) && dev)
 969				scrub_print_common_warning("checksum error", dev, false,
 970						     stripe->logical, physical);
 971		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
 972			if (__ratelimit(&rs) && dev)
 973				scrub_print_common_warning("header error", dev, false,
 974						     stripe->logical, physical);
 975	}
 976
 977	spin_lock(&sctx->stat_lock);
 978	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
 979	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
 980	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
 981	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
 982	sctx->stat.no_csum += nr_nodatacsum_sectors;
 983	sctx->stat.read_errors += stripe->init_nr_io_errors;
 984	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
 985	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
 986	sctx->stat.uncorrectable_errors +=
 987		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
 988	sctx->stat.corrected_errors += nr_repaired_sectors;
 989	spin_unlock(&sctx->stat_lock);
 990}
 991
 992static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
 993				unsigned long write_bitmap, bool dev_replace);
 994
 995/*
  996 * The main entry point for all read-related scrub work, including:
 997 *
 998 * - Wait for the initial read to finish
 999 * - Verify and locate any bad sectors
 1000 * - Go through the remaining mirrors and try to read in as large a
 1001 *   blocksize as possible
1002 * - Go through all mirrors (including the failed mirror) sector-by-sector
1003 * - Submit writeback for repaired sectors
1004 *
1005 * Writeback for dev-replace does not happen here, it needs extra
1006 * synchronization for zoned devices.
1007 */
1008static void scrub_stripe_read_repair_worker(struct work_struct *work)
1009{
1010	struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
1011	struct scrub_ctx *sctx = stripe->sctx;
1012	struct btrfs_fs_info *fs_info = sctx->fs_info;
1013	int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1014					  stripe->bg->length);
1015	unsigned long repaired;
1016	int mirror;
1017	int i;
1018
1019	ASSERT(stripe->mirror_num > 0);
1020
1021	wait_scrub_stripe_io(stripe);
1022	scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
 1023	/* Save the initial failed bitmap for later repair and error reporting. */
1024	stripe->init_error_bitmap = stripe->error_bitmap;
1025	stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
1026						  stripe->nr_sectors);
1027	stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
1028						    stripe->nr_sectors);
1029	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
1030						    stripe->nr_sectors);
1031
1032	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1033		goto out;
1034
1035	/*
1036	 * Try all remaining mirrors.
1037	 *
 1038	 * Here we still try to read as large a block as possible, as this is
1039	 * faster and we have extra safety nets to rely on.
1040	 */
1041	for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
1042	     mirror != stripe->mirror_num;
1043	     mirror = calc_next_mirror(mirror, num_copies)) {
1044		const unsigned long old_error_bitmap = stripe->error_bitmap;
1045
1046		scrub_stripe_submit_repair_read(stripe, mirror,
1047						BTRFS_STRIPE_LEN, false);
1048		wait_scrub_stripe_io(stripe);
1049		scrub_verify_one_stripe(stripe, old_error_bitmap);
1050		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1051			goto out;
1052	}
1053
1054	/*
1055	 * Last safety net, try re-checking all mirrors, including the failed
1056	 * one, sector-by-sector.
1057	 *
 1058	 * If one sector fails the drive's internal csum, the whole read
 1059	 * containing the offending sector would be marked as an error.
 1060	 * Thus here we do sector-by-sector reads.
1061	 *
1062	 * This can be slow, thus we only try it as the last resort.
1063	 */
1064
1065	for (i = 0, mirror = stripe->mirror_num;
1066	     i < num_copies;
1067	     i++, mirror = calc_next_mirror(mirror, num_copies)) {
1068		const unsigned long old_error_bitmap = stripe->error_bitmap;
1069
1070		scrub_stripe_submit_repair_read(stripe, mirror,
1071						fs_info->sectorsize, true);
1072		wait_scrub_stripe_io(stripe);
1073		scrub_verify_one_stripe(stripe, old_error_bitmap);
1074		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1075			goto out;
1076	}
1077out:
1078	/*
 1079	 * Submit the repaired sectors.  For the zoned case, we cannot do repair
 1080	 * in-place, but instead queue the bg to be relocated.
1081	 */
1082	bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
1083		      stripe->nr_sectors);
1084	if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
1085		if (btrfs_is_zoned(fs_info)) {
1086			btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1087		} else {
1088			scrub_write_sectors(sctx, stripe, repaired, false);
1089			wait_scrub_stripe_io(stripe);
1090		}
1091	}
1092
1093	scrub_stripe_report_errors(sctx, stripe);
1094	set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1095	wake_up(&stripe->repair_wait);
1096}
1097
1098static void scrub_read_endio(struct btrfs_bio *bbio)
1099{
1100	struct scrub_stripe *stripe = bbio->private;
1101	struct bio_vec *bvec;
1102	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1103	int num_sectors;
1104	u32 bio_size = 0;
1105	int i;
1106
1107	ASSERT(sector_nr < stripe->nr_sectors);
1108	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1109		bio_size += bvec->bv_len;
1110	num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
1111
1112	if (bbio->bio.bi_status) {
1113		bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
1114		bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
1115	} else {
1116		bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
1117	}
1118	bio_put(&bbio->bio);
1119	if (atomic_dec_and_test(&stripe->pending_io)) {
1120		wake_up(&stripe->io_wait);
1121		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1122		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1123	}
1124}
1125
1126static void scrub_write_endio(struct btrfs_bio *bbio)
1127{
1128	struct scrub_stripe *stripe = bbio->private;
1129	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1130	struct bio_vec *bvec;
1131	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1132	u32 bio_size = 0;
1133	int i;
1134
1135	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1136		bio_size += bvec->bv_len;
1137
1138	if (bbio->bio.bi_status) {
1139		unsigned long flags;
1140
1141		spin_lock_irqsave(&stripe->write_error_lock, flags);
1142		bitmap_set(&stripe->write_error_bitmap, sector_nr,
1143			   bio_size >> fs_info->sectorsize_bits);
1144		spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1145	}
1146	bio_put(&bbio->bio);
1147
1148	if (atomic_dec_and_test(&stripe->pending_io))
1149		wake_up(&stripe->io_wait);
1150}
1151
1152static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1153				   struct scrub_stripe *stripe,
1154				   struct btrfs_bio *bbio, bool dev_replace)
1155{
1156	struct btrfs_fs_info *fs_info = sctx->fs_info;
1157	u32 bio_len = bbio->bio.bi_iter.bi_size;
1158	u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
1159		      stripe->logical;
1160
1161	fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1162	atomic_inc(&stripe->pending_io);
1163	btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1164	if (!btrfs_is_zoned(fs_info))
1165		return;
1166	/*
1167	 * For zoned writeback, queue depth must be 1, thus we must wait for
1168	 * the write to finish before the next write.
1169	 */
1170	wait_scrub_stripe_io(stripe);
1171
1172	/*
 1173	 * Also update the write pointer if the write finished
 1174	 * successfully.
1175	 */
1176	if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1177		      &stripe->write_error_bitmap))
1178		sctx->write_pointer += bio_len;
1179}
1180
1181/*
1182 * Submit the write bio(s) for the sectors specified by @write_bitmap.
1183 *
1184 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1185 *
1186 * - Only needs logical bytenr and mirror_num
1187 *   Just like the scrub read path
1188 *
1189 * - Would only result in writes to the specified mirror
1190 *   Unlike the regular writeback path, which would write back to all stripes
1191 *
 1192 * - Handles dev-replace and read-repair writeback differently
1193 */
1194static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1195				unsigned long write_bitmap, bool dev_replace)
1196{
1197	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1198	struct btrfs_bio *bbio = NULL;
1199	int sector_nr;
1200
1201	for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1202		struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1203		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1204		int ret;
1205
1206		/* We should only writeback sectors covered by an extent. */
1207		ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1208
1209		/* Cannot merge with previous sector, submit the current one. */
1210		if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
1211			scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1212			bbio = NULL;
1213		}
1214		if (!bbio) {
1215			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1216					       fs_info, scrub_write_endio, stripe);
1217			bbio->bio.bi_iter.bi_sector = (stripe->logical +
1218				(sector_nr << fs_info->sectorsize_bits)) >>
1219				SECTOR_SHIFT;
1220		}
1221		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1222		ASSERT(ret == fs_info->sectorsize);
1223	}
1224	if (bbio)
1225		scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1226}
1227
1228/*
 1229 * Throttling of IO submission, bandwidth-limit based; the timeslice is 1
 1230 * second.  The limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
1231 */
1232static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1233				  unsigned int bio_size)
1234{
1235	const int time_slice = 1000;
1236	s64 delta;
1237	ktime_t now;
1238	u32 div;
1239	u64 bwlimit;
1240
1241	bwlimit = READ_ONCE(device->scrub_speed_max);
1242	if (bwlimit == 0)
1243		return;
1244
1245	/*
 1246	 * The slice is divided into intervals when the IO is submitted; adjust by
 1247	 * bwlimit, with a maximum of 64 intervals.
1248	 */
1249	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
1250	div = min_t(u32, 64, div);
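	/*
	 * Example with hypothetical numbers: scrub_speed_max = 64MiB/s gives
	 * div = 4, i.e. 250ms intervals each allowing up to 16MiB to be
	 * submitted before we sleep until the deadline.
	 */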
1251
1252	/* Start new epoch, set deadline */
1253	now = ktime_get();
1254	if (sctx->throttle_deadline == 0) {
1255		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1256		sctx->throttle_sent = 0;
1257	}
1258
1259	/* Still in the time to send? */
1260	if (ktime_before(now, sctx->throttle_deadline)) {
1261		/* If current bio is within the limit, send it */
1262		sctx->throttle_sent += bio_size;
1263		if (sctx->throttle_sent <= div_u64(bwlimit, div))
1264			return;
1265
1266		/* We're over the limit, sleep until the rest of the slice */
1267		delta = ktime_ms_delta(sctx->throttle_deadline, now);
1268	} else {
1269		/* New request after deadline, start new epoch */
1270		delta = 0;
1271	}
1272
1273	if (delta) {
1274		long timeout;
1275
1276		timeout = div_u64(delta * HZ, 1000);
1277		schedule_timeout_interruptible(timeout);
1278	}
1279
1280	/* Next call will start the deadline period */
1281	sctx->throttle_deadline = 0;
1282}
1283
1284/*
 1285 * Given a physical address, this will calculate its
 1286 * logical offset. If this is a parity stripe, it will return
 1287 * the leftmost data stripe's logical offset.
 1288 *
 1289 * Return 0 if it is a data stripe, and 1 if it is a parity stripe.
1290 */
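/*
 * Example (RAID5 over 3 devices, thus 2 data stripes): a physical offset on
 * one device maps to last_offset = physical_offset * 2 in the chunk's logical
 * space, and the rotation below decides whether this device holds data or
 * parity for each stripe-set.
 */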
1291static int get_raid56_logic_offset(u64 physical, int num,
1292				   struct btrfs_chunk_map *map, u64 *offset,
1293				   u64 *stripe_start)
1294{
1295	int i;
1296	int j = 0;
1297	u64 last_offset;
1298	const int data_stripes = nr_data_stripes(map);
1299
1300	last_offset = (physical - map->stripes[num].physical) * data_stripes;
1301	if (stripe_start)
1302		*stripe_start = last_offset;
1303
1304	*offset = last_offset;
1305	for (i = 0; i < data_stripes; i++) {
1306		u32 stripe_nr;
1307		u32 stripe_index;
1308		u32 rot;
1309
1310		*offset = last_offset + btrfs_stripe_nr_to_offset(i);
1311
1312		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
1313
1314		/* Work out the disk rotation on this stripe-set */
1315		rot = stripe_nr % map->num_stripes;
 1316		/* Calculate which stripe this data is located on */
1317		rot += i;
1318		stripe_index = rot % map->num_stripes;
1319		if (stripe_index == num)
1320			return 0;
1321		if (stripe_index < num)
1322			j++;
1323	}
1324	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
1325	return 1;
1326}
1327
1328/*
1329 * Return 0 if the extent item range covers any byte of the range.
1330 * Return <0 if the extent item is before @search_start.
 1331 * Return >0 if the extent item is after @search_start + @search_len.
1332 */
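/*
 * For example, a METADATA_ITEM_KEY item at objectid 1M with a 16KiB nodesize
 * covers [1M, 1M + 16K) and thus compares as 0 against any search range
 * overlapping that interval.
 */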
1333static int compare_extent_item_range(struct btrfs_path *path,
1334				     u64 search_start, u64 search_len)
1335{
1336	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1337	u64 len;
1338	struct btrfs_key key;
1339
1340	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1341	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
1342	       key.type == BTRFS_METADATA_ITEM_KEY);
1343	if (key.type == BTRFS_METADATA_ITEM_KEY)
1344		len = fs_info->nodesize;
1345	else
1346		len = key.offset;
1347
1348	if (key.objectid + len <= search_start)
1349		return -1;
1350	if (key.objectid >= search_start + search_len)
1351		return 1;
1352	return 0;
1353}
1354
1355/*
1356 * Locate one extent item which covers any byte in range
1357 * [@search_start, @search_start + @search_length)
1358 *
1359 * If the path is not initialized, we will initialize the search by doing
1360 * a btrfs_search_slot().
1361 * If the path is already initialized, we will use the path as the initial
1362 * slot, to avoid duplicated btrfs_search_slot() calls.
1363 *
1364 * NOTE: If an extent item starts before @search_start, we will still
1365 * return the extent item. This is for data extent crossing stripe boundary.
1366 *
1367 * Return 0 if we found such extent item, and @path will point to the extent item.
1368 * Return >0 if no such extent item can be found, and @path will be released.
 1369 * Return <0 if we hit a fatal error, and @path will be released.
1370 */
1371static int find_first_extent_item(struct btrfs_root *extent_root,
1372				  struct btrfs_path *path,
1373				  u64 search_start, u64 search_len)
1374{
1375	struct btrfs_fs_info *fs_info = extent_root->fs_info;
1376	struct btrfs_key key;
1377	int ret;
1378
1379	/* Continue using the existing path */
1380	if (path->nodes[0])
1381		goto search_forward;
1382
1383	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1384		key.type = BTRFS_METADATA_ITEM_KEY;
1385	else
1386		key.type = BTRFS_EXTENT_ITEM_KEY;
1387	key.objectid = search_start;
1388	key.offset = (u64)-1;
1389
1390	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1391	if (ret < 0)
1392		return ret;
1393	if (ret == 0) {
1394		/*
1395		 * Key with offset -1 found, there would have to exist an extent
1396		 * item with such offset, but this is out of the valid range.
1397		 */
1398		btrfs_release_path(path);
1399		return -EUCLEAN;
1400	}
1401
1402	/*
1403	 * Here we intentionally pass 0 as @min_objectid, as there could be
1404	 * an extent item starting before @search_start.
1405	 */
1406	ret = btrfs_previous_extent_item(extent_root, path, 0);
1407	if (ret < 0)
1408		return ret;
1409	/*
1410	 * No matter whether we have found an extent item, the next loop will
1411	 * properly do every check on the key.
1412	 */
1413search_forward:
1414	while (true) {
1415		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1416		if (key.objectid >= search_start + search_len)
1417			break;
1418		if (key.type != BTRFS_METADATA_ITEM_KEY &&
1419		    key.type != BTRFS_EXTENT_ITEM_KEY)
1420			goto next;
1421
1422		ret = compare_extent_item_range(path, search_start, search_len);
1423		if (ret == 0)
1424			return ret;
1425		if (ret > 0)
1426			break;
1427next:
1428		ret = btrfs_next_item(extent_root, path);
1429		if (ret) {
1430			/* Either no more items or a fatal error. */
1431			btrfs_release_path(path);
1432			return ret;
1433		}
1434	}
1435	btrfs_release_path(path);
1436	return 1;
1437}
1438
1439static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1440			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1441{
1442	struct btrfs_key key;
1443	struct btrfs_extent_item *ei;
1444
1445	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1446	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1447	       key.type == BTRFS_EXTENT_ITEM_KEY);
1448	*extent_start_ret = key.objectid;
1449	if (key.type == BTRFS_METADATA_ITEM_KEY)
1450		*size_ret = path->nodes[0]->fs_info->nodesize;
1451	else
1452		*size_ret = key.offset;
1453	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1454	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1455	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1456}
1457
1458static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1459					u64 physical, u64 physical_end)
1460{
1461	struct btrfs_fs_info *fs_info = sctx->fs_info;
1462	int ret = 0;
1463
1464	if (!btrfs_is_zoned(fs_info))
1465		return 0;
1466
1467	mutex_lock(&sctx->wr_lock);
1468	if (sctx->write_pointer < physical_end) {
1469		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1470						    physical,
1471						    sctx->write_pointer);
1472		if (ret)
1473			btrfs_err(fs_info,
1474				  "zoned: failed to recover write pointer");
1475	}
1476	mutex_unlock(&sctx->wr_lock);
1477	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1478
1479	return ret;
1480}
1481
1482static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1483				 struct scrub_stripe *stripe,
1484				 u64 extent_start, u64 extent_len,
1485				 u64 extent_flags, u64 extent_gen)
1486{
1487	for (u64 cur_logical = max(stripe->logical, extent_start);
1488	     cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1489			       extent_start + extent_len);
1490	     cur_logical += fs_info->sectorsize) {
1491		const int nr_sector = (cur_logical - stripe->logical) >>
1492				      fs_info->sectorsize_bits;
1493		struct scrub_sector_verification *sector =
1494						&stripe->sectors[nr_sector];
1495
1496		set_bit(nr_sector, &stripe->extent_sector_bitmap);
1497		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1498			sector->is_metadata = true;
1499			sector->generation = extent_gen;
1500		}
1501	}
1502}
1503
1504static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1505{
1506	stripe->extent_sector_bitmap = 0;
1507	stripe->init_error_bitmap = 0;
1508	stripe->init_nr_io_errors = 0;
1509	stripe->init_nr_csum_errors = 0;
1510	stripe->init_nr_meta_errors = 0;
1511	stripe->error_bitmap = 0;
1512	stripe->io_error_bitmap = 0;
1513	stripe->csum_error_bitmap = 0;
1514	stripe->meta_error_bitmap = 0;
1515}
1516
1517/*
1518 * Locate one stripe which has at least one extent in its range.
1519 *
1520 * Return 0 if found such stripe, and store its info into @stripe.
1521 * Return >0 if there is no such stripe in the specified range.
1522 * Return <0 for error.
1523 */
1524static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1525					struct btrfs_path *extent_path,
1526					struct btrfs_path *csum_path,
1527					struct btrfs_device *dev, u64 physical,
1528					int mirror_num, u64 logical_start,
1529					u32 logical_len,
1530					struct scrub_stripe *stripe)
1531{
1532	struct btrfs_fs_info *fs_info = bg->fs_info;
1533	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1534	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1535	const u64 logical_end = logical_start + logical_len;
1536	u64 cur_logical = logical_start;
1537	u64 stripe_end;
1538	u64 extent_start;
1539	u64 extent_len;
1540	u64 extent_flags;
1541	u64 extent_gen;
1542	int ret;
1543
1544	if (unlikely(!extent_root)) {
1545		btrfs_err(fs_info, "no valid extent root for scrub");
1546		return -EUCLEAN;
1547	}
1548	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1549				   stripe->nr_sectors);
1550	scrub_stripe_reset_bitmaps(stripe);
1551
1552	/* The range must be inside the bg. */
1553	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1554
1555	ret = find_first_extent_item(extent_root, extent_path, logical_start,
1556				     logical_len);
1557	/* Either error or not found. */
1558	if (ret)
1559		goto out;
1560	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
1561			&extent_gen);
1562	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1563		stripe->nr_meta_extents++;
1564	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1565		stripe->nr_data_extents++;
1566	cur_logical = max(extent_start, cur_logical);
1567
1568	/*
1569	 * Round down to stripe boundary.
1570	 *
1571	 * The extra calculation against bg->start is to handle block groups
1572	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1573	 */
1574	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1575			  bg->start;
1576	stripe->physical = physical + stripe->logical - logical_start;
1577	stripe->dev = dev;
1578	stripe->bg = bg;
1579	stripe->mirror_num = mirror_num;
1580	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1581
1582	/* Fill the first extent info into stripe->sectors[] array. */
1583	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1584			     extent_flags, extent_gen);
1585	cur_logical = extent_start + extent_len;
1586
1587	/* Fill the extent info for the remaining sectors. */
1588	while (cur_logical <= stripe_end) {
1589		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
1590					     stripe_end - cur_logical + 1);
1591		if (ret < 0)
1592			goto out;
1593		if (ret > 0) {
1594			ret = 0;
1595			break;
1596		}
1597		get_extent_info(extent_path, &extent_start, &extent_len,
1598				&extent_flags, &extent_gen);
1599		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1600			stripe->nr_meta_extents++;
1601		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1602			stripe->nr_data_extents++;
1603		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1604				     extent_flags, extent_gen);
1605		cur_logical = extent_start + extent_len;
1606	}
1607
1608	/* Now fill the data csum. */
1609	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1610		int sector_nr;
1611		unsigned long csum_bitmap = 0;
1612
1613		/* Csum space should have already been allocated. */
1614		ASSERT(stripe->csums);
1615
1616		/*
1617		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1618		 * should contain at most 16 sectors.
1619		 */
1620		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1621
1622		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
1623						stripe->logical, stripe_end,
1624						stripe->csums, &csum_bitmap);
1625		if (ret < 0)
1626			goto out;
1627		if (ret > 0)
1628			ret = 0;
1629
1630		for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1631			stripe->sectors[sector_nr].csum = stripe->csums +
1632				sector_nr * fs_info->csum_size;
1633		}
1634	}
1635	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1636out:
1637	return ret;
1638}
1639
1640static void scrub_reset_stripe(struct scrub_stripe *stripe)
1641{
1642	scrub_stripe_reset_bitmaps(stripe);
1643
1644	stripe->nr_meta_extents = 0;
1645	stripe->nr_data_extents = 0;
1646	stripe->state = 0;
1647
1648	for (int i = 0; i < stripe->nr_sectors; i++) {
1649		stripe->sectors[i].is_metadata = false;
1650		stripe->sectors[i].csum = NULL;
1651		stripe->sectors[i].generation = 0;
1652	}
1653}
1654
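/*
 * Return the length of the stripe constrained by the block group boundary.
 * The last stripe of a block group can be shorter than BTRFS_STRIPE_LEN.
 */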
1655static u32 stripe_length(const struct scrub_stripe *stripe)
1656{
1657	ASSERT(stripe->bg);
 
 
 
 
1658
1659	return min(BTRFS_STRIPE_LEN,
1660		   stripe->bg->start + stripe->bg->length - stripe->logical);
1661}
1662
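/*
 * Submit reads for the sectors covered by extents, one contiguous run at a
 * time, so that each bio can follow the RAID stripe-tree (RST) boundaries
 * returned by btrfs_map_block().
 */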
1663static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
1664{
1665	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 
 
 
1666	struct btrfs_bio *bbio = NULL;
1667	unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
1668	u64 stripe_len = BTRFS_STRIPE_LEN;
1669	int mirror = stripe->mirror_num;
1670	int i;
1671
1672	atomic_inc(&stripe->pending_io);
1673
1674	for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
1675		struct page *page = scrub_stripe_get_page(stripe, i);
1676		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
1677
1678		/* We're beyond the chunk boundary, no need to read anymore. */
1679		if (i >= nr_sectors)
1680			break;
1681
1682		/* The current sector cannot be merged, submit the bio. */
1683		if (bbio &&
1684		    ((i > 0 &&
1685		      !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
1686		     bbio->bio.bi_iter.bi_size >= stripe_len)) {
1687			ASSERT(bbio->bio.bi_iter.bi_size);
1688			atomic_inc(&stripe->pending_io);
1689			btrfs_submit_bbio(bbio, mirror);
1690			bbio = NULL;
1691		}
1692
1693		if (!bbio) {
1694			struct btrfs_io_stripe io_stripe = {};
1695			struct btrfs_io_context *bioc = NULL;
1696			const u64 logical = stripe->logical +
1697					    (i << fs_info->sectorsize_bits);
1698			int err;
1699
1700			io_stripe.rst_search_commit_root = true;
1701			stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
1702			/*
1703			 * For RST cases, we need to manually split the bbio to
1704			 * follow the RST boundary.
1705			 */
1706			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
1707					      &stripe_len, &bioc, &io_stripe, &mirror);
1708			btrfs_put_bioc(bioc);
1709			if (err < 0) {
1710				if (err != -ENODATA) {
1711					/*
1712					 * -ENODATA means the earlier
1713					 * btrfs_get_raid_extent_offset() found
1714					 * no entry for this range in the stripe
1715					 * tree.  If the range is still in the
1716					 * extent tree, it's a preallocated extent
1717					 * and not an error, so only flag other failures.
1718					 */
1719					set_bit(i, &stripe->io_error_bitmap);
1720					set_bit(i, &stripe->error_bitmap);
1721				}
1722				continue;
1723			}
1724
1725			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
1726					       fs_info, scrub_read_endio, stripe);
1727			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
1728		}
1729
1730		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1731	}
1732
1733	if (bbio) {
1734		ASSERT(bbio->bio.bi_iter.bi_size);
1735		atomic_inc(&stripe->pending_io);
1736		btrfs_submit_bbio(bbio, mirror);
1737	}
1738
1739	if (atomic_dec_and_test(&stripe->pending_io)) {
1740		wake_up(&stripe->io_wait);
1741		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1742		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1743	}
 
 
 
1744}
1745
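/*
 * Submit the initial read for a stripe.
 *
 * For block groups needing RAID stripe-tree updates this delegates to
 * scrub_submit_extent_sector_read(), otherwise the whole range inside the
 * chunk boundary is read with a single bio.  For dev-replace the read may be
 * redirected to the next mirror.
 */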
1746static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1747				      struct scrub_stripe *stripe)
 
 
1748{
1749	struct btrfs_fs_info *fs_info = sctx->fs_info;
1750	struct btrfs_bio *bbio;
1751	unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
1752	int mirror = stripe->mirror_num;
1753
1754	ASSERT(stripe->bg);
1755	ASSERT(stripe->mirror_num > 0);
1756	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
 
 
 
 
1757
1758	if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
1759		scrub_submit_extent_sector_read(stripe);
1760		return;
1761	}
1762
1763	bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1764			       scrub_read_endio, stripe);
1765
1766	bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1767	/* Read the whole range inside the chunk boundary. */
1768	for (unsigned int cur = 0; cur < nr_sectors; cur++) {
1769		struct page *page = scrub_stripe_get_page(stripe, cur);
1770		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
1771		int ret;
1772
1773		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1774		/* We should have allocated enough bio vectors. */
1775		ASSERT(ret == fs_info->sectorsize);
1776	}
1777	atomic_inc(&stripe->pending_io);
1778
1779	/*
1780	 * For dev-replace, if either the user asks to avoid the source dev or
1781	 * the device is missing, we try the next mirror instead.
1782	 */
1783	if (sctx->is_dev_replace &&
1784	    (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1785	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1786	     !stripe->dev->bdev)) {
1787		int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1788						  stripe->bg->length);
1789
1790		mirror = calc_next_mirror(mirror, num_copies);
1791	}
1792	btrfs_submit_bbio(bbio, mirror);
 
 
 
 
 
 
1793}
1794
1795static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1796{
 
 
1797	int i;
1798
1799	for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1800		if (stripe->sectors[i].is_metadata) {
1801			struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 
1802
1803			btrfs_err(fs_info,
1804			"stripe %llu has unrepaired metadata sector at %llu",
1805				  stripe->logical,
1806				  stripe->logical + (i << fs_info->sectorsize_bits));
1807			return true;
1808		}
1809	}
1810	return false;
1811}
1812
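/*
 * Submit the initial reads for one group of stripes under a single blk_plug,
 * after throttling against the per-device I/O limit.
 */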
1813static void submit_initial_group_read(struct scrub_ctx *sctx,
1814				      unsigned int first_slot,
1815				      unsigned int nr_stripes)
1816{
1817	struct blk_plug plug;
1818
1819	ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1820	ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
 
 
1821
1822	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1823			      btrfs_stripe_nr_to_offset(nr_stripes));
1824	blk_start_plug(&plug);
1825	for (int i = 0; i < nr_stripes; i++) {
1826		struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
 
1827
1828		/* Those stripes should be initialized. */
1829		ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1830		scrub_submit_initial_read(sctx, stripe);
 
1831	}
1832	blk_finish_plug(&plug);
 
1833}
1834
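/*
 * Submit any populated-but-unsubmitted stripes, wait for their read-repair to
 * finish, do the dev-replace writeback if needed, then reset all slots so
 * they can be reused.
 */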
1835static int flush_scrub_stripes(struct scrub_ctx *sctx)
 
 
1836{
1837	struct btrfs_fs_info *fs_info = sctx->fs_info;
1838	struct scrub_stripe *stripe;
1839	const int nr_stripes = sctx->cur_stripe;
1840	int ret = 0;
1841
1842	if (!nr_stripes)
1843		return 0;
 
 
1844
1845	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
 
 
 
1846
1847	/* Submit the stripes which are populated but not submitted. */
1848	if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1849		const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1850
1851		submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
 
 
1852	}
1853
1854	for (int i = 0; i < nr_stripes; i++) {
1855		stripe = &sctx->stripes[i];
1856
1857		wait_event(stripe->repair_wait,
1858			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1859	}
1860
1861	/* Submit for dev-replace. */
1862	if (sctx->is_dev_replace) {
1863		/*
1864		 * For dev-replace, if we know there is something wrong with
1865		 * metadata, we should immediately abort.
1866		 */
1867		for (int i = 0; i < nr_stripes; i++) {
1868			if (stripe_has_metadata_error(&sctx->stripes[i])) {
1869				ret = -EIO;
1870				goto out;
1871			}
1872		}
1873		for (int i = 0; i < nr_stripes; i++) {
1874			unsigned long good;
1875
1876			stripe = &sctx->stripes[i];
 
 
 
 
1877
1878			ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
 
 
1879
1880			bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1881				      &stripe->error_bitmap, stripe->nr_sectors);
1882			scrub_write_sectors(sctx, stripe, good, true);
1883		}
1884	}
1885
1886	/* Wait for the above writebacks to finish. */
1887	for (int i = 0; i < nr_stripes; i++) {
1888		stripe = &sctx->stripes[i];
 
1889
1890		wait_scrub_stripe_io(stripe);
1891		spin_lock(&sctx->stat_lock);
1892		sctx->stat.last_physical = stripe->physical + stripe_length(stripe);
1893		spin_unlock(&sctx->stat_lock);
1894		scrub_reset_stripe(stripe);
1895	}
1896out:
1897	sctx->cur_stripe = 0;
1898	return ret;
1899}
1900
1901static void raid56_scrub_wait_endio(struct bio *bio)
1902{
1903	complete(bio->bi_private);
1904}
1905
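/*
 * Queue one stripe covering the first extent found at or after @logical.
 *
 * Return 0 if a stripe was queued (*@found_logical_ret is set to its logical
 * bytenr), >0 if no more extents were found in the range, <0 on error.
 * A fully populated group is submitted immediately, and filling the last slot
 * flushes all queued stripes.
 */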
1906static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1907			      struct btrfs_device *dev, int mirror_num,
1908			      u64 logical, u32 length, u64 physical,
1909			      u64 *found_logical_ret)
 
1910{
1911	struct scrub_stripe *stripe;
1912	int ret;
 
 
1913
1914	/*
1915	 * There should always be one slot left, as the caller that fills the
1916	 * last slot flushes them all.
1917	 */
1918	ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1919
1920	/* @found_logical_ret must be specified. */
1921	ASSERT(found_logical_ret);
 
 
 
 
 
 
1922
1923	stripe = &sctx->stripes[sctx->cur_stripe];
1924	scrub_reset_stripe(stripe);
1925	ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1926					   &sctx->csum_path, dev, physical,
1927					   mirror_num, logical, length, stripe);
1928	/* Either >0 as no more extents or <0 for error. */
1929	if (ret)
1930		return ret;
1931	*found_logical_ret = stripe->logical;
1932	sctx->cur_stripe++;
1933
1934	/* We filled one group, submit it. */
1935	if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1936		const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1937
1938		submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
 
 
 
 
1939	}
1940
1941	/* Last slot used, flush them all. */
1942	if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1943		return flush_scrub_stripes(sctx);
1944	return 0;
1945}
1946
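/*
 * Scrub one RAID56 full stripe: verify and repair all data stripes first,
 * then check and regenerate the P/Q stripes, using the already verified data
 * stripes as cache for the rbio.
 */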
1947static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1948				      struct btrfs_device *scrub_dev,
1949				      struct btrfs_block_group *bg,
1950				      struct btrfs_chunk_map *map,
1951				      u64 full_stripe_start)
1952{
1953	DECLARE_COMPLETION_ONSTACK(io_done);
1954	struct btrfs_fs_info *fs_info = sctx->fs_info;
1955	struct btrfs_raid_bio *rbio;
1956	struct btrfs_io_context *bioc = NULL;
1957	struct btrfs_path extent_path = { 0 };
1958	struct btrfs_path csum_path = { 0 };
1959	struct bio *bio;
1960	struct scrub_stripe *stripe;
1961	bool all_empty = true;
1962	const int data_stripes = nr_data_stripes(map);
1963	unsigned long extent_bitmap = 0;
1964	u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1965	int ret;
 
 
1966
1967	ASSERT(sctx->raid56_data_stripes);
 
 
 
1968
1969	/*
1970	 * For data stripe search, we cannot reuse the same extent/csum paths,
1971	 * as the data stripe bytenr may be smaller than the previous extent's.  Thus
1972	 * we have to use our own extent/csum paths.
1973	 */
1974	extent_path.search_commit_root = 1;
1975	extent_path.skip_locking = 1;
1976	csum_path.search_commit_root = 1;
1977	csum_path.skip_locking = 1;
1978
1979	for (int i = 0; i < data_stripes; i++) {
1980		int stripe_index;
1981		int rot;
1982		u64 physical;
1983
1984		stripe = &sctx->raid56_data_stripes[i];
1985		rot = div_u64(full_stripe_start - bg->start,
1986			      data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1987		stripe_index = (i + rot) % map->num_stripes;
1988		physical = map->stripes[stripe_index].physical +
1989			   btrfs_stripe_nr_to_offset(rot);
1990
1991		scrub_reset_stripe(stripe);
1992		set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1993		ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1994				map->stripes[stripe_index].dev, physical, 1,
1995				full_stripe_start + btrfs_stripe_nr_to_offset(i),
1996				BTRFS_STRIPE_LEN, stripe);
1997		if (ret < 0)
1998			goto out;
1999		/*
2000		 * No extent in this data stripe, we need to manually mark it
2001		 * initialized to make the later read submission happy.
2002		 */
2003		if (ret > 0) {
2004			stripe->logical = full_stripe_start +
2005					  btrfs_stripe_nr_to_offset(i);
2006			stripe->dev = map->stripes[stripe_index].dev;
2007			stripe->mirror_num = 1;
2008			set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
2009		}
2010	}
2011
2012	/* Check if all data stripes are empty. */
2013	for (int i = 0; i < data_stripes; i++) {
2014		stripe = &sctx->raid56_data_stripes[i];
2015		if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
2016			all_empty = false;
2017			break;
 
 
 
2018		}
2019	}
2020	if (all_empty) {
2021		ret = 0;
2022		goto out;
2023	}
2024
2025	for (int i = 0; i < data_stripes; i++) {
2026		stripe = &sctx->raid56_data_stripes[i];
2027		scrub_submit_initial_read(sctx, stripe);
2028	}
2029	for (int i = 0; i < data_stripes; i++) {
2030		stripe = &sctx->raid56_data_stripes[i];
2031
2032		wait_event(stripe->repair_wait,
2033			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
2034	}
2035	/* For now, no zoned support for RAID56. */
2036	ASSERT(!btrfs_is_zoned(sctx->fs_info));
2037
2038	/*
2039	 * Now all data stripes are properly verified. Check if we have any
2040	 * unrepaired sectors; if so, abort immediately or we could further corrupt the
2041	 * P/Q stripes.
2042	 *
2043	 * During the loop, also populate extent_bitmap.
2044	 */
2045	for (int i = 0; i < data_stripes; i++) {
2046		unsigned long error;
2047
2048		stripe = &sctx->raid56_data_stripes[i];
 
2049
2050		/*
2051		 * We should only check the errors where there is an extent, as we
2052		 * may hit an empty data stripe whose device is missing.
2053		 */
2054		bitmap_and(&error, &stripe->error_bitmap,
2055			   &stripe->extent_sector_bitmap, stripe->nr_sectors);
2056		if (!bitmap_empty(&error, stripe->nr_sectors)) {
2057			btrfs_err(fs_info,
2058"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
2059				  full_stripe_start, i, stripe->nr_sectors,
2060				  &error);
2061			ret = -EIO;
2062			goto out;
2063		}
2064		bitmap_or(&extent_bitmap, &extent_bitmap,
2065			  &stripe->extent_sector_bitmap, stripe->nr_sectors);
2066	}
 
 
 
2067
2068	/* Now we can check and regenerate the P/Q stripe. */
2069	bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
2070	bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
2071	bio->bi_private = &io_done;
2072	bio->bi_end_io = raid56_scrub_wait_endio;
2073
2074	btrfs_bio_counter_inc_blocked(fs_info);
2075	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
2076			      &length, &bioc, NULL, NULL);
2077	if (ret < 0) {
2078		btrfs_put_bioc(bioc);
2079		btrfs_bio_counter_dec(fs_info);
2080		goto out;
2081	}
2082	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
2083				BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
2084	btrfs_put_bioc(bioc);
2085	if (!rbio) {
2086		ret = -ENOMEM;
2087		btrfs_bio_counter_dec(fs_info);
2088		goto out;
2089	}
2090	/* Use the recovered stripes as cache to avoid reading them from disk again. */
2091	for (int i = 0; i < data_stripes; i++) {
2092		stripe = &sctx->raid56_data_stripes[i];
2093
2094		raid56_parity_cache_data_pages(rbio, stripe->pages,
2095				full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
 
2096	}
2097	raid56_parity_submit_scrub_rbio(rbio);
2098	wait_for_completion_io(&io_done);
2099	ret = blk_status_to_errno(bio->bi_status);
2100	bio_put(bio);
2101	btrfs_bio_counter_dec(fs_info);
2102
2103	btrfs_release_path(&extent_path);
2104	btrfs_release_path(&csum_path);
2105out:
2106	return ret;
2107}
2108
2109/*
2110 * Scrub one range which can only have a simple mirror based profile.
2111 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2112 *  RAID0/RAID10).
2113 *
2114 * Since we may need to handle a subset of a block group, we need the
2115 * @logical_start and @logical_length parameters.
2116 */
2117static int scrub_simple_mirror(struct scrub_ctx *sctx,
2118			       struct btrfs_block_group *bg,
2119			       u64 logical_start, u64 logical_length,
2120			       struct btrfs_device *device,
2121			       u64 physical, int mirror_num)
2122{
2123	struct btrfs_fs_info *fs_info = sctx->fs_info;
2124	const u64 logical_end = logical_start + logical_length;
2125	u64 cur_logical = logical_start;
2126	int ret = 0;
2127
2128	/* The range must be inside the bg */
2129	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
 
2130
2131	/* Go through each extent item inside the logical range */
2132	while (cur_logical < logical_end) {
2133		u64 found_logical = U64_MAX;
2134		u64 cur_physical = physical + cur_logical - logical_start;
2135
2136		/* Canceled? */
2137		if (atomic_read(&fs_info->scrub_cancel_req) ||
2138		    atomic_read(&sctx->cancel_req)) {
2139			ret = -ECANCELED;
2140			break;
2141		}
2142		/* Paused? */
2143		if (atomic_read(&fs_info->scrub_pause_req)) {
2144			/* Push queued extents */
2145			scrub_blocked_if_needed(fs_info);
2146		}
2147		/* Block group removed? */
2148		spin_lock(&bg->lock);
2149		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2150			spin_unlock(&bg->lock);
2151			ret = 0;
2152			break;
2153		}
2154		spin_unlock(&bg->lock);
2155
2156		ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2157					 cur_logical, logical_end - cur_logical,
2158					 cur_physical, &found_logical);
2159		if (ret > 0) {
2160			/* No more extents, just update the accounting */
2161			spin_lock(&sctx->stat_lock);
2162			sctx->stat.last_physical = physical + logical_length;
2163			spin_unlock(&sctx->stat_lock);
2164			ret = 0;
2165			break;
2166		}
2167		if (ret < 0)
2168			break;
2169
2170		/* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2171		ASSERT(found_logical != U64_MAX);
2172		cur_logical = found_logical + BTRFS_STRIPE_LEN;
2173
2174		/* Don't hold the CPU for too long */
2175		cond_resched();
2176	}
2177	return ret;
2178}
2179
2180/* Calculate the full stripe length for simple stripe based profiles */
2181static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
2182{
2183	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2184			    BTRFS_BLOCK_GROUP_RAID10));
2185
2186	return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2187}
2188
2189/* Get the logical bytenr for the stripe */
2190static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
2191				     struct btrfs_block_group *bg,
2192				     int stripe_index)
2193{
2194	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2195			    BTRFS_BLOCK_GROUP_RAID10));
2196	ASSERT(stripe_index < map->num_stripes);
2197
2198	/*
2199	 * (stripe_index / sub_stripes) gives how many data stripes we need to
2200	 * skip.
2201	 */
2202	return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2203	       bg->start;
2204}
2205
2206/* Get the mirror number for the stripe */
2207static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
2208{
2209	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2210			    BTRFS_BLOCK_GROUP_RAID10));
2211	ASSERT(stripe_index < map->num_stripes);
2212
2213	/* For RAID0 it's fixed to 1, for RAID10 it alternates 1,2,1,2... */
2214	return stripe_index % map->sub_stripes + 1;
2215}
2216
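/*
 * Scrub all the BTRFS_STRIPE_LEN ranges of a RAID0/RAID10 chunk that live on
 * the target device, reusing scrub_simple_mirror() for each of them.
 */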
2217static int scrub_simple_stripe(struct scrub_ctx *sctx,
2218			       struct btrfs_block_group *bg,
2219			       struct btrfs_chunk_map *map,
2220			       struct btrfs_device *device,
2221			       int stripe_index)
 
2222{
2223	const u64 logical_increment = simple_stripe_full_stripe_len(map);
2224	const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2225	const u64 orig_physical = map->stripes[stripe_index].physical;
2226	const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2227	u64 cur_logical = orig_logical;
2228	u64 cur_physical = orig_physical;
2229	int ret = 0;
2230
2231	while (cur_logical < bg->start + bg->length) {
2232		/*
2233		 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2234		 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2235		 * this stripe.
2236		 */
2237		ret = scrub_simple_mirror(sctx, bg, cur_logical,
2238					  BTRFS_STRIPE_LEN, device, cur_physical,
2239					  mirror_num);
2240		if (ret)
2241			return ret;
2242		/* Skip to next stripe which belongs to the target device */
2243		cur_logical += logical_increment;
2244		/* For physical offset, we just go to next stripe */
2245		cur_physical += BTRFS_STRIPE_LEN;
2246	}
2247	return ret;
2248}
2249
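/*
 * Scrub one device stripe of a chunk.
 *
 * Simple profiles (SINGLE/DUP/RAID1/RAID1C*) scrub the whole block group via
 * scrub_simple_mirror(), RAID0/RAID10 go through scrub_simple_stripe(), and
 * only RAID56 uses the physical-offset iteration below.
 */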
2250static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2251					   struct btrfs_block_group *bg,
2252					   struct btrfs_chunk_map *map,
2253					   struct btrfs_device *scrub_dev,
2254					   int stripe_index)
 
2255{
 
2256	struct btrfs_fs_info *fs_info = sctx->fs_info;
2257	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2258	const u64 chunk_logical = bg->start;
 
 
 
2259	int ret;
2260	int ret2;
2261	u64 physical = map->stripes[stripe_index].physical;
2262	const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
2263	const u64 physical_end = physical + dev_stripe_len;
2264	u64 logical;
2265	u64 logic_end;
2266	/* The logical increment after finishing one stripe */
2267	u64 increment;
2268	/* Offset inside the chunk */
 
 
 
 
 
2269	u64 offset;
 
 
 
2270	u64 stripe_logical;
2271
2272	/* Extent_path should be released by now. */
2273	ASSERT(sctx->extent_path.nodes[0] == NULL);
2274
2275	scrub_blocked_if_needed(fs_info);
2276
2277	if (sctx->is_dev_replace &&
2278	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2279		mutex_lock(&sctx->wr_lock);
2280		sctx->write_pointer = physical;
2281		mutex_unlock(&sctx->wr_lock);
2282	}
2283
2284	/* Prepare the extra data stripes used by RAID56. */
2285	if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2286		ASSERT(sctx->raid56_data_stripes == NULL);
2287
2288		sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2289						    sizeof(struct scrub_stripe),
2290						    GFP_KERNEL);
2291		if (!sctx->raid56_data_stripes) {
2292			ret = -ENOMEM;
2293			goto out;
2294		}
2295		for (int i = 0; i < nr_data_stripes(map); i++) {
2296			ret = init_scrub_stripe(fs_info,
2297						&sctx->raid56_data_stripes[i]);
2298			if (ret < 0)
2299				goto out;
2300			sctx->raid56_data_stripes[i].bg = bg;
2301			sctx->raid56_data_stripes[i].sctx = sctx;
2302		}
2303	}
2304	/*
2305	 * There used to be a big double loop to handle all profiles using the
2306	 * same routine, which grew larger and more convoluted over time.
2307	 *
2308	 * So here we handle each profile differently, so that simpler profiles
2309	 * have a simpler scrubbing function.
2310	 */
2311	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2312			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2313		/*
2314		 * The above check rules out all complex profiles, the remaining
2315		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2316		 * mirrored duplication without striping.
2317		 *
2318		 * Only @physical and @mirror_num need to be calculated using
2319		 * @stripe_index.
2320		 */
2321		ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length,
2322				scrub_dev, map->stripes[stripe_index].physical,
2323				stripe_index + 1);
2324		offset = 0;
2325		goto out;
2326	}
2327	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2328		ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2329		offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2330		goto out;
2331	}
 
 
 
2332
2333	/* Only RAID56 goes through the old code */
2334	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2335	ret = 0;
2336
2337	/* Calculate the logical end of the stripe */
2338	get_raid56_logic_offset(physical_end, stripe_index,
2339				map, &logic_end, NULL);
2340	logic_end += chunk_logical;
2341
2342	/* Initialize @offset in case we need to go to out: label */
2343	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2344	increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
2345
2346	/*
2347	 * Due to the rotation, for RAID56 it's better to iterate each stripe
2348	 * using its physical offset.
2349	 */
 
 
 
 
 
 
2350	while (physical < physical_end) {
2351		ret = get_raid56_logic_offset(physical, stripe_index, map,
2352					      &logical, &stripe_logical);
2353		logical += chunk_logical;
2354		if (ret) {
2355			/* It is a parity stripe */
2356			stripe_logical += chunk_logical;
2357			ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2358							 map, stripe_logical);
2359			spin_lock(&sctx->stat_lock);
2360			sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN,
2361						       physical_end);
2362			spin_unlock(&sctx->stat_lock);
2363			if (ret)
2364				goto out;
2365			goto next;
2366		}
2367
2368		/*
2369		 * Now we're at a data stripe, scrub each extent in the range.
2370		 *
2371		 * At this stage, if we ignore the repair part, inside each data
2372		 * stripe it is no different than SINGLE profile.
2373		 * We can reuse scrub_simple_mirror() here, as the repair part
2374		 * is still based on @mirror_num.
2375		 */
2376		ret = scrub_simple_mirror(sctx, bg, logical, BTRFS_STRIPE_LEN,
2377					  scrub_dev, physical, 1);
2378		if (ret < 0)
2379			goto out;
2380next:
 
 
 
 
2381		logical += increment;
2382		physical += BTRFS_STRIPE_LEN;
2383		spin_lock(&sctx->stat_lock);
2384		sctx->stat.last_physical = physical;
 
 
 
 
2385		spin_unlock(&sctx->stat_lock);
 
 
2386	}
2387out:
2388	ret2 = flush_scrub_stripes(sctx);
2389	if (!ret)
2390		ret = ret2;
2391	btrfs_release_path(&sctx->extent_path);
2392	btrfs_release_path(&sctx->csum_path);
2393
2394	if (sctx->raid56_data_stripes) {
2395		for (int i = 0; i < nr_data_stripes(map); i++)
2396			release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2397		kfree(sctx->raid56_data_stripes);
2398		sctx->raid56_data_stripes = NULL;
2399	}
2400
2401	if (sctx->is_dev_replace && ret >= 0) {
2402		int ret2;
2403
2404		ret2 = sync_write_pointer_for_zoned(sctx,
2405				chunk_logical + offset,
2406				map->stripes[stripe_index].physical,
2407				physical_end);
2408		if (ret2)
2409			ret = ret2;
2410	}
2411
 
 
 
2412	return ret < 0 ? ret : 0;
2413}
2414
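/*
 * Scrub one device extent of a chunk: look up the chunk map and scrub every
 * stripe of that chunk which lives on @scrub_dev at @dev_offset.
 */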
2415static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2416					  struct btrfs_block_group *bg,
2417					  struct btrfs_device *scrub_dev,
 
2418					  u64 dev_offset,
2419					  u64 dev_extent_len)
2420{
2421	struct btrfs_fs_info *fs_info = sctx->fs_info;
2422	struct btrfs_chunk_map *map;
 
 
2423	int i;
2424	int ret = 0;
2425
2426	map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
2427	if (!map) {
 
 
 
2428		/*
2429		 * Might have been an unused block group deleted by the cleaner
2430		 * kthread or relocation.
2431		 */
2432		spin_lock(&bg->lock);
2433		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2434			ret = -EINVAL;
2435		spin_unlock(&bg->lock);
2436
2437		return ret;
2438	}
2439	if (map->start != bg->start)
 
 
2440		goto out;
2441	if (map->chunk_len < dev_extent_len)
 
2442		goto out;
2443
2444	for (i = 0; i < map->num_stripes; ++i) {
2445		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2446		    map->stripes[i].physical == dev_offset) {
2447			ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
 
2448			if (ret)
2449				goto out;
2450		}
2451	}
2452out:
2453	btrfs_free_chunk_map(map);
2454
2455	return ret;
2456}
2457
2458static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2459					  struct btrfs_block_group *cache)
2460{
2461	struct btrfs_fs_info *fs_info = cache->fs_info;
2462
2463	if (!btrfs_is_zoned(fs_info))
2464		return 0;
2465
2466	btrfs_wait_block_group_reservations(cache);
2467	btrfs_wait_nocow_writers(cache);
2468	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
2469
2470	return btrfs_commit_current_transaction(root);
2471}
2472
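/*
 * Walk all device extents of @scrub_dev inside [@start, @end) using the
 * commit root and scrub the corresponding block groups one by one.  Each
 * block group is frozen and, when possible, marked read-only while it is
 * being scrubbed.
 */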
2473static noinline_for_stack
2474int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2475			   struct btrfs_device *scrub_dev, u64 start, u64 end)
2476{
2477	struct btrfs_dev_extent *dev_extent = NULL;
2478	struct btrfs_path *path;
2479	struct btrfs_fs_info *fs_info = sctx->fs_info;
2480	struct btrfs_root *root = fs_info->dev_root;
 
2481	u64 chunk_offset;
2482	int ret = 0;
2483	int ro_set;
2484	int slot;
2485	struct extent_buffer *l;
2486	struct btrfs_key key;
2487	struct btrfs_key found_key;
2488	struct btrfs_block_group *cache;
2489	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2490
2491	path = btrfs_alloc_path();
2492	if (!path)
2493		return -ENOMEM;
2494
2495	path->reada = READA_FORWARD;
2496	path->search_commit_root = 1;
2497	path->skip_locking = 1;
2498
2499	key.objectid = scrub_dev->devid;
2500	key.offset = 0ull;
2501	key.type = BTRFS_DEV_EXTENT_KEY;
2502
2503	while (1) {
2504		u64 dev_extent_len;
2505
2506		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2507		if (ret < 0)
2508			break;
2509		if (ret > 0) {
2510			if (path->slots[0] >=
2511			    btrfs_header_nritems(path->nodes[0])) {
2512				ret = btrfs_next_leaf(root, path);
2513				if (ret < 0)
2514					break;
2515				if (ret > 0) {
2516					ret = 0;
2517					break;
2518				}
2519			} else {
2520				ret = 0;
2521			}
2522		}
2523
2524		l = path->nodes[0];
2525		slot = path->slots[0];
2526
2527		btrfs_item_key_to_cpu(l, &found_key, slot);
2528
2529		if (found_key.objectid != scrub_dev->devid)
2530			break;
2531
2532		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2533			break;
2534
2535		if (found_key.offset >= end)
2536			break;
2537
2538		if (found_key.offset < key.offset)
2539			break;
2540
2541		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2542		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2543
2544		if (found_key.offset + dev_extent_len <= start)
2545			goto skip;
2546
2547		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2548
2549		/*
2550		 * get a reference on the corresponding block group to prevent
2551		 * the chunk from going away while we scrub it
2552		 */
2553		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2554
2555		/* Some chunks are removed but not committed to disk yet,
2556		 * continue scrubbing. */
2557		if (!cache)
2558			goto skip;
2559
2560		ASSERT(cache->start <= chunk_offset);
2561		/*
2562		 * We are using the commit root to search for device extents, so
2563		 * that means we could have found a device extent item from a
2564		 * block group that was deleted in the current transaction. The
2565		 * logical start offset of the deleted block group, stored at
2566		 * @chunk_offset, might be part of the logical address range of
2567		 * a new block group (which uses different physical extents).
2568		 * In this case btrfs_lookup_block_group() has returned the new
2569		 * block group, and its start address is less than @chunk_offset.
2570		 *
2571		 * We skip such new block groups, because it's pointless to
2572		 * process them, as we won't find their extents because we search
2573		 * for them using the commit root of the extent tree. For a device
2574		 * replace it's also fine to skip it, we won't miss copying them
2575		 * to the target device because we have the write duplication
2576		 * setup through the regular write path (by btrfs_map_block()),
2577		 * and we have committed a transaction when we started the device
2578		 * replace, right after setting up the device replace state.
2579		 */
2580		if (cache->start < chunk_offset) {
2581			btrfs_put_block_group(cache);
2582			goto skip;
2583		}
2584
2585		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2586			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2587				btrfs_put_block_group(cache);
2588				goto skip;
2589			}
2590		}
2591
2592		/*
2593		 * Make sure that while we are scrubbing the corresponding block
2594		 * group doesn't get its logical address and its device extents
2595		 * reused for another block group, which can possibly be of a
2596		 * different type and different profile. We do this to prevent
2597		 * false error detections and crashes due to bogus attempts to
2598		 * repair extents.
2599		 */
2600		spin_lock(&cache->lock);
2601		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2602			spin_unlock(&cache->lock);
2603			btrfs_put_block_group(cache);
2604			goto skip;
2605		}
2606		btrfs_freeze_block_group(cache);
2607		spin_unlock(&cache->lock);
2608
2609		/*
2610		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
2611		 * to avoid deadlock caused by:
2612		 * btrfs_inc_block_group_ro()
2613		 * -> btrfs_wait_for_commit()
2614		 * -> btrfs_commit_transaction()
2615		 * -> btrfs_scrub_pause()
2616		 */
2617		scrub_pause_on(fs_info);
2618
2619		/*
2620		 * Don't do chunk preallocation for scrub.
2621		 *
2622		 * This is especially important for SYSTEM bgs, or we can hit
2623		 * -EFBIG from btrfs_finish_chunk_alloc() like:
2624		 * 1. The only SYSTEM bg is marked RO.
2625		 *    Since SYSTEM bg is small, that's pretty common.
2626		 * 2. New SYSTEM bg will be allocated
2627		 *    Because the regular path will allocate a new chunk.
2628		 * 3. New SYSTEM bg is empty and will get cleaned up
2629		 *    Before cleanup really happens, it's marked RO again.
2630		 * 4. Empty SYSTEM bg get scrubbed
2631		 *    We go back to 2.
2632		 *
2633		 * This can easily boost the amount of SYSTEM chunks if the cleaner
2634		 * thread can't be triggered fast enough, and use up all the space
2635		 * of btrfs_super_block::sys_chunk_array.
2636		 *
2637		 * While for dev replace, we need to try our best to mark block
2638		 * group RO, to prevent race between:
2639		 * - Write duplication
2640		 *   Contains latest data
2641		 * - Scrub copy
2642		 *   Contains data from commit tree
2643		 *
2644		 * If target block group is not marked RO, nocow writes can
2645		 * be overwritten by scrub copy, causing data corruption.
2646		 * So for dev-replace, it's not allowed to continue if a block
2647		 * group is not RO.
2648		 */
2649		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2650		if (!ret && sctx->is_dev_replace) {
2651			ret = finish_extent_writes_for_zoned(root, cache);
2652			if (ret) {
2653				btrfs_dec_block_group_ro(cache);
2654				scrub_pause_off(fs_info);
2655				btrfs_put_block_group(cache);
2656				break;
2657			}
2658		}
2659
2660		if (ret == 0) {
2661			ro_set = 1;
2662		} else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2663			   !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2664			/*
2665			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
2666			 * failed to create a new chunk for metadata.
2667			 * It is not a problem for scrub, because
2668			 * metadata is always COWed, and our scrub pauses
2669			 * transaction commits.
2670			 *
2671			 * For RAID56 chunks, we have to mark them read-only
2672			 * for scrub, as later we would use our own cache
2673			 * out of the RAID56 realm.
2674			 * Thus we want the RAID56 bg to be marked RO to
2675			 * prevent RMW from screwing up our cache.
2676			 */
2677			ro_set = 0;
2678		} else if (ret == -ETXTBSY) {
2679			btrfs_warn(fs_info,
2680		   "skipping scrub of block group %llu due to active swapfile",
2681				   cache->start);
2682			scrub_pause_off(fs_info);
2683			ret = 0;
2684			goto skip_unfreeze;
2685		} else {
2686			btrfs_warn(fs_info,
2687				   "failed setting block group ro: %d", ret);
2688			btrfs_unfreeze_block_group(cache);
2689			btrfs_put_block_group(cache);
2690			scrub_pause_off(fs_info);
2691			break;
2692		}
2693
2694		/*
2695		 * Now the target block group is marked RO, wait for nocow writes to
2696		 * finish before dev-replace.
2697		 * COW is fine, as COW never overwrites extents in the commit tree.
2698		 */
2699		if (sctx->is_dev_replace) {
2700			btrfs_wait_nocow_writers(cache);
2701			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
 
2702		}
2703
2704		scrub_pause_off(fs_info);
2705		down_write(&dev_replace->rwsem);
2706		dev_replace->cursor_right = found_key.offset + dev_extent_len;
2707		dev_replace->cursor_left = found_key.offset;
2708		dev_replace->item_needs_writeback = 1;
2709		up_write(&dev_replace->rwsem);
2710
2711		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2712				  dev_extent_len);
2713		if (sctx->is_dev_replace &&
2714		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2715						      cache, found_key.offset))
2716			ro_set = 0;
2717
2718		down_write(&dev_replace->rwsem);
2719		dev_replace->cursor_left = dev_replace->cursor_right;
2720		dev_replace->item_needs_writeback = 1;
2721		up_write(&dev_replace->rwsem);
2722
2723		if (ro_set)
2724			btrfs_dec_block_group_ro(cache);
2725
2726		/*
2727		 * We might have prevented the cleaner kthread from deleting
2728		 * this block group if it was already unused because we raced
2729		 * and set it to RO mode first. So add it back to the unused
2730		 * list, otherwise it might not ever be deleted unless a manual
2731		 * balance is triggered or it becomes used and unused again.
2732		 */
2733		spin_lock(&cache->lock);
2734		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2735		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
2736			spin_unlock(&cache->lock);
2737			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2738				btrfs_discard_queue_work(&fs_info->discard_ctl,
2739							 cache);
2740			else
2741				btrfs_mark_bg_unused(cache);
2742		} else {
2743			spin_unlock(&cache->lock);
2744		}
2745skip_unfreeze:
2746		btrfs_unfreeze_block_group(cache);
2747		btrfs_put_block_group(cache);
2748		if (ret)
2749			break;
2750		if (sctx->is_dev_replace &&
2751		    atomic64_read(&dev_replace->num_write_errors) > 0) {
2752			ret = -EIO;
2753			break;
2754		}
2755		if (sctx->stat.malloc_errors > 0) {
2756			ret = -ENOMEM;
2757			break;
2758		}
2759skip:
2760		key.offset = found_key.offset + dev_extent_len;
2761		btrfs_release_path(path);
2762	}
2763
2764	btrfs_free_path(path);
2765
2766	return ret;
2767}
2768
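/*
 * Read one super block copy from @physical on @dev and verify its checksum,
 * generation and overall validity.
 */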
2769static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2770			   struct page *page, u64 physical, u64 generation)
2771{
2772	struct btrfs_fs_info *fs_info = sctx->fs_info;
2773	struct bio_vec bvec;
2774	struct bio bio;
2775	struct btrfs_super_block *sb = page_address(page);
2776	int ret;
2777
2778	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2779	bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2780	__bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2781	ret = submit_bio_wait(&bio);
2782	bio_uninit(&bio);
2783
2784	if (ret < 0)
2785		return ret;
2786	ret = btrfs_check_super_csum(fs_info, sb);
2787	if (ret != 0) {
2788		btrfs_err_rl(fs_info,
2789			"super block at physical %llu devid %llu has bad csum",
2790			physical, dev->devid);
2791		return -EIO;
2792	}
2793	if (btrfs_super_generation(sb) != generation) {
2794		btrfs_err_rl(fs_info,
2795"super block at physical %llu devid %llu has bad generation %llu expect %llu",
2796			     physical, dev->devid,
2797			     btrfs_super_generation(sb), generation);
2798		return -EUCLEAN;
2799	}
2800
2801	return btrfs_validate_super(fs_info, sb, -1);
2802}
2803
2804static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2805					   struct btrfs_device *scrub_dev)
2806{
2807	int	i;
2808	u64	bytenr;
2809	u64	gen;
2810	int ret = 0;
2811	struct page *page;
2812	struct btrfs_fs_info *fs_info = sctx->fs_info;
2813
2814	if (BTRFS_FS_ERROR(fs_info))
2815		return -EROFS;
2816
2817	page = alloc_page(GFP_KERNEL);
2818	if (!page) {
2819		spin_lock(&sctx->stat_lock);
2820		sctx->stat.malloc_errors++;
2821		spin_unlock(&sctx->stat_lock);
2822		return -ENOMEM;
2823	}
2824
2825	/* Seed devices of a new filesystem have their own generation. */
2826	if (scrub_dev->fs_devices != fs_info->fs_devices)
2827		gen = scrub_dev->generation;
2828	else
2829		gen = btrfs_get_last_trans_committed(fs_info);
2830
2831	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2832		ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
2833		if (ret == -ENOENT)
2834			break;
2835
2836		if (ret) {
2837			spin_lock(&sctx->stat_lock);
2838			sctx->stat.super_errors++;
2839			spin_unlock(&sctx->stat_lock);
2840			continue;
2841		}
2842
2843		if (bytenr + BTRFS_SUPER_INFO_SIZE >
2844		    scrub_dev->commit_total_bytes)
2845			break;
2846		if (!btrfs_check_super_location(scrub_dev, bytenr))
2847			continue;
2848
2849		ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2850		if (ret) {
2851			spin_lock(&sctx->stat_lock);
2852			sctx->stat.super_errors++;
2853			spin_unlock(&sctx->stat_lock);
2854		}
2855	}
2856	__free_page(page);
 
2857	return 0;
2858}
2859
2860static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2861{
2862	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2863					&fs_info->scrub_lock)) {
2864		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
 
 
 
 
 
 
2865
2866		fs_info->scrub_workers = NULL;
 
 
2867		mutex_unlock(&fs_info->scrub_lock);
2868
2869		if (scrub_workers)
2870			destroy_workqueue(scrub_workers);
 
2871	}
2872}
2873
2874/*
2875 * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2876 */
2877static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
 
2878{
2879	struct workqueue_struct *scrub_workers = NULL;
 
 
2880	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2881	int max_active = fs_info->thread_pool_size;
2882	int ret = -ENOMEM;
2883
2884	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2885		return 0;
2886
2887	scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
 
2888	if (!scrub_workers)
2889		return -ENOMEM;
2890
2891	mutex_lock(&fs_info->scrub_lock);
2892	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2893		ASSERT(fs_info->scrub_workers == NULL);
 
 
2894		fs_info->scrub_workers = scrub_workers;
 
 
2895		refcount_set(&fs_info->scrub_workers_refcnt, 1);
2896		mutex_unlock(&fs_info->scrub_lock);
2897		return 0;
2898	}
2899	/* Other thread raced in and created the workers for us */
2900	refcount_inc(&fs_info->scrub_workers_refcnt);
2901	mutex_unlock(&fs_info->scrub_lock);
2902
2903	ret = 0;
2904
2905	destroy_workqueue(scrub_workers);
 
 
 
 
2906	return ret;
2907}
2908
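/*
 * Entry point for scrub and dev-replace of a single device.
 *
 * The super blocks are verified first (scrub only), then all chunks that have
 * a device extent on @devid within [@start, @end] are scrubbed.
 */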
2909int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2910		    u64 end, struct btrfs_scrub_progress *progress,
2911		    int readonly, int is_dev_replace)
2912{
2913	struct btrfs_dev_lookup_args args = { .devid = devid };
2914	struct scrub_ctx *sctx;
2915	int ret;
2916	struct btrfs_device *dev;
2917	unsigned int nofs_flag;
2918	bool need_commit = false;
2919
2920	if (btrfs_fs_closing(fs_info))
2921		return -EAGAIN;
2922
2923	/* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2924	ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2925
2926	/*
2927	 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2928	 * value (max nodesize / min sectorsize), thus nodesize should always
2929	 * be fine.
2930	 */
2931	ASSERT(fs_info->nodesize <=
2932	       SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2933
2934	/* Allocate outside of device_list_mutex */
2935	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2936	if (IS_ERR(sctx))
2937		return PTR_ERR(sctx);
2938
2939	ret = scrub_workers_get(fs_info);
2940	if (ret)
2941		goto out_free_ctx;
2942
2943	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2944	dev = btrfs_find_device(fs_info->fs_devices, &args);
2945	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2946		     !is_dev_replace)) {
2947		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2948		ret = -ENODEV;
2949		goto out;
2950	}
2951
2952	if (!is_dev_replace && !readonly &&
2953	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2954		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2955		btrfs_err_in_rcu(fs_info,
2956			"scrub on devid %llu: filesystem on %s is not writable",
2957				 devid, btrfs_dev_name(dev));
2958		ret = -EROFS;
2959		goto out;
2960	}
2961
2962	mutex_lock(&fs_info->scrub_lock);
2963	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2964	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2965		mutex_unlock(&fs_info->scrub_lock);
2966		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2967		ret = -EIO;
2968		goto out;
2969	}
2970
2971	down_read(&fs_info->dev_replace.rwsem);
2972	if (dev->scrub_ctx ||
2973	    (!is_dev_replace &&
2974	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2975		up_read(&fs_info->dev_replace.rwsem);
2976		mutex_unlock(&fs_info->scrub_lock);
2977		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2978		ret = -EINPROGRESS;
2979		goto out;
2980	}
2981	up_read(&fs_info->dev_replace.rwsem);
2982
2983	sctx->readonly = readonly;
2984	dev->scrub_ctx = sctx;
2985	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2986
2987	/*
2988	 * By checking @scrub_pause_req here, we can avoid a
2989	 * race between committing a transaction and scrubbing.
2990	 */
2991	__scrub_blocked_if_needed(fs_info);
2992	atomic_inc(&fs_info->scrubs_running);
2993	mutex_unlock(&fs_info->scrub_lock);
2994
2995	/*
2996	 * In order to avoid deadlock with reclaim when there is a transaction
2997	 * trying to pause scrub, make sure we use GFP_NOFS for all the
2998	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2999	 * invoked by our callees. The pausing request is done when the
3000	 * transaction commit starts, and it blocks the transaction until scrub
3001	 * is paused (done at specific points at scrub_stripe() or right above
3002	 * before incrementing fs_info->scrubs_running).
3003	 */
3004	nofs_flag = memalloc_nofs_save();
3005	if (!is_dev_replace) {
3006		u64 old_super_errors;
3007
3008		spin_lock(&sctx->stat_lock);
3009		old_super_errors = sctx->stat.super_errors;
3010		spin_unlock(&sctx->stat_lock);
3011
3012		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
3013		/*
3014		 * by holding device list mutex, we can
3015		 * kick off writing super in log tree sync.
3016		 */
3017		mutex_lock(&fs_info->fs_devices->device_list_mutex);
3018		ret = scrub_supers(sctx, dev);
3019		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3020
3021		spin_lock(&sctx->stat_lock);
3022		/*
3023		 * Super block errors found, but we can not commit the transaction
3024		 * in the current context, since btrfs_commit_transaction() needs
3025		 * to pause the currently running scrub (held by ourselves).
3026		 */
3027		if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
3028			need_commit = true;
3029		spin_unlock(&sctx->stat_lock);
3030	}
3031
3032	if (!ret)
3033		ret = scrub_enumerate_chunks(sctx, dev, start, end);
3034	memalloc_nofs_restore(nofs_flag);
3035
 
3036	atomic_dec(&fs_info->scrubs_running);
3037	wake_up(&fs_info->scrub_pause_wait);
3038
 
 
3039	if (progress)
3040		memcpy(progress, &sctx->stat, sizeof(*progress));
3041
3042	if (!is_dev_replace)
3043		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3044			ret ? "not finished" : "finished", devid, ret);
3045
3046	mutex_lock(&fs_info->scrub_lock);
3047	dev->scrub_ctx = NULL;
3048	mutex_unlock(&fs_info->scrub_lock);
3049
3050	scrub_workers_put(fs_info);
3051	scrub_put_ctx(sctx);
3052
3053	/*
3054	 * We found some super block errors before, now try to force a
3055	 * transaction commit, as scrub has finished.
3056	 */
3057	if (need_commit) {
3058		struct btrfs_trans_handle *trans;
3059
3060		trans = btrfs_start_transaction(fs_info->tree_root, 0);
3061		if (IS_ERR(trans)) {
3062			ret = PTR_ERR(trans);
3063			btrfs_err(fs_info,
3064	"scrub: failed to start transaction to fix super block errors: %d", ret);
3065			return ret;
3066		}
3067		ret = btrfs_commit_transaction(trans);
3068		if (ret < 0)
3069			btrfs_err(fs_info,
3070	"scrub: failed to commit transaction to fix super block errors: %d", ret);
3071	}
3072	return ret;
3073out:
3074	scrub_workers_put(fs_info);
3075out_free_ctx:
3076	scrub_free_ctx(sctx);
3077
3078	return ret;
3079}
3080
3081void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3082{
3083	mutex_lock(&fs_info->scrub_lock);
3084	atomic_inc(&fs_info->scrub_pause_req);
3085	while (atomic_read(&fs_info->scrubs_paused) !=
3086	       atomic_read(&fs_info->scrubs_running)) {
3087		mutex_unlock(&fs_info->scrub_lock);
3088		wait_event(fs_info->scrub_pause_wait,
3089			   atomic_read(&fs_info->scrubs_paused) ==
3090			   atomic_read(&fs_info->scrubs_running));
3091		mutex_lock(&fs_info->scrub_lock);
3092	}
3093	mutex_unlock(&fs_info->scrub_lock);
3094}
3095
3096void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3097{
3098	atomic_dec(&fs_info->scrub_pause_req);
3099	wake_up(&fs_info->scrub_pause_wait);
3100}
3101
3102int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3103{
3104	mutex_lock(&fs_info->scrub_lock);
3105	if (!atomic_read(&fs_info->scrubs_running)) {
3106		mutex_unlock(&fs_info->scrub_lock);
3107		return -ENOTCONN;
3108	}
3109
3110	atomic_inc(&fs_info->scrub_cancel_req);
3111	while (atomic_read(&fs_info->scrubs_running)) {
3112		mutex_unlock(&fs_info->scrub_lock);
3113		wait_event(fs_info->scrub_pause_wait,
3114			   atomic_read(&fs_info->scrubs_running) == 0);
3115		mutex_lock(&fs_info->scrub_lock);
3116	}
3117	atomic_dec(&fs_info->scrub_cancel_req);
3118	mutex_unlock(&fs_info->scrub_lock);
3119
3120	return 0;
3121}
3122
3123int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3124{
3125	struct btrfs_fs_info *fs_info = dev->fs_info;
3126	struct scrub_ctx *sctx;
3127
3128	mutex_lock(&fs_info->scrub_lock);
3129	sctx = dev->scrub_ctx;
3130	if (!sctx) {
3131		mutex_unlock(&fs_info->scrub_lock);
3132		return -ENOTCONN;
3133	}
3134	atomic_inc(&sctx->cancel_req);
3135	while (dev->scrub_ctx) {
3136		mutex_unlock(&fs_info->scrub_lock);
3137		wait_event(fs_info->scrub_pause_wait,
3138			   dev->scrub_ctx == NULL);
3139		mutex_lock(&fs_info->scrub_lock);
3140	}
3141	mutex_unlock(&fs_info->scrub_lock);
3142
3143	return 0;
3144}
3145
3146int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3147			 struct btrfs_scrub_progress *progress)
3148{
3149	struct btrfs_dev_lookup_args args = { .devid = devid };
3150	struct btrfs_device *dev;
3151	struct scrub_ctx *sctx = NULL;
3152
3153	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3154	dev = btrfs_find_device(fs_info->fs_devices, &args);
3155	if (dev)
3156		sctx = dev->scrub_ctx;
3157	if (sctx)
3158		memcpy(progress, &sctx->stat, sizeof(*progress));
3159	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3160
3161	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3162}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/ratelimit.h>
   8#include <linux/sched/mm.h>
   9#include <crypto/hash.h>
  10#include "ctree.h"
  11#include "discard.h"
  12#include "volumes.h"
  13#include "disk-io.h"
  14#include "ordered-data.h"
  15#include "transaction.h"
  16#include "backref.h"
  17#include "extent_io.h"
  18#include "dev-replace.h"
  19#include "check-integrity.h"
  20#include "rcu-string.h"
  21#include "raid56.h"
  22#include "block-group.h"
 
 
 
 
 
 
  23
  24/*
  25 * This is only the first step towards a full-features scrub. It reads all
  26 * extent and super block and verifies the checksums. In case a bad checksum
  27 * is found or the extent cannot be read, good data will be written back if
  28 * any can be found.
  29 *
  30 * Future enhancements:
  31 *  - In case an unrepairable extent is encountered, track which files are
  32 *    affected and report them
  33 *  - track and record media errors, throw out bad devices
  34 *  - add a mode to also read unallocated space
  35 */
  36
  37struct scrub_block;
  38struct scrub_ctx;
  39
  40/*
  41 * the following three values only influence the performance.
  42 * The last one configures the number of parallel and outstanding I/O
  43 * operations. The first two values configure an upper limit for the number
  44 * of (dynamically allocated) pages that are added to a bio.
 
 
 
 
 
 
 
 
  45 */
  46#define SCRUB_PAGES_PER_RD_BIO	32	/* 128k per bio */
  47#define SCRUB_PAGES_PER_WR_BIO	32	/* 128k per bio */
  48#define SCRUB_BIOS_PER_SCTX	64	/* 8MB per device in flight */
  49
  50/*
  51 * the following value times PAGE_SIZE needs to be large enough to match the
  52 * largest node/leaf/sector size that shall be supported.
  53 * Values larger than BTRFS_STRIPE_LEN are not supported.
  54 */
  55#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
 
 
 
 
 
 
 
 
 
 
 
 
 
  56
  57struct scrub_recover {
  58	refcount_t		refs;
  59	struct btrfs_bio	*bbio;
  60	u64			map_length;
 
 
  61};
  62
  63struct scrub_page {
  64	struct scrub_block	*sblock;
  65	struct page		*page;
  66	struct btrfs_device	*dev;
  67	struct list_head	list;
  68	u64			flags;  /* extent flags */
  69	u64			generation;
  70	u64			logical;
  71	u64			physical;
  72	u64			physical_for_dev_replace;
  73	atomic_t		refs;
  74	struct {
  75		unsigned int	mirror_num:8;
  76		unsigned int	have_csum:1;
  77		unsigned int	io_error:1;
  78	};
  79	u8			csum[BTRFS_CSUM_SIZE];
  80
  81	struct scrub_recover	*recover;
 
 
 
 
 
  82};
  83
  84struct scrub_bio {
  85	int			index;
  86	struct scrub_ctx	*sctx;
  87	struct btrfs_device	*dev;
  88	struct bio		*bio;
  89	blk_status_t		status;
  90	u64			logical;
  91	u64			physical;
  92#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
  93	struct scrub_page	*pagev[SCRUB_PAGES_PER_WR_BIO];
  94#else
  95	struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];
  96#endif
  97	int			page_count;
  98	int			next_free;
  99	struct btrfs_work	work;
 100};
 101
 102struct scrub_block {
 103	struct scrub_page	*pagev[SCRUB_MAX_PAGES_PER_BLOCK];
 104	int			page_count;
 105	atomic_t		outstanding_pages;
 106	refcount_t		refs; /* free mem on transition to zero */
 107	struct scrub_ctx	*sctx;
 108	struct scrub_parity	*sparity;
 109	struct {
 110		unsigned int	header_error:1;
 111		unsigned int	checksum_error:1;
 112		unsigned int	no_io_error_seen:1;
 113		unsigned int	generation_error:1; /* also sets header_error */
 114
 115		/* The following is for the data used to check parity */
 116		/* It is for the data with checksum */
 117		unsigned int	data_corrected:1;
 118	};
 119	struct btrfs_work	work;
 120};
 121
 122/* Used for the chunks with parity stripe such RAID5/6 */
 123struct scrub_parity {
 124	struct scrub_ctx	*sctx;
 125
 126	struct btrfs_device	*scrub_dev;
 
 127
 128	u64			logic_start;
 
 
 
 
 
 129
 130	u64			logic_end;
 
 
 131
 132	int			nsectors;
 
 
 
 
 133
 134	u64			stripe_len;
 
 135
 136	refcount_t		refs;
 
 
 
 
 
 
 
 
 
 
 
 137
 138	struct list_head	spages;
 
 
 
 
 
 
 
 
 
 
 
 139
 140	/* Work of parity check and repair */
 141	struct btrfs_work	work;
 142
 143	/* Mark the parity blocks which have data */
 144	unsigned long		*dbitmap;
 145
 146	/*
 147	 * Mark the parity blocks which have data, but errors happen when
 148	 * read data or check data
 149	 */
 150	unsigned long		*ebitmap;
 151
 152	unsigned long		bitmap[];
 153};
 154
 155struct scrub_ctx {
 156	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
 
 157	struct btrfs_fs_info	*fs_info;
 
 
 158	int			first_free;
 159	int			curr;
 160	atomic_t		bios_in_flight;
 161	atomic_t		workers_pending;
 162	spinlock_t		list_lock;
 163	wait_queue_head_t	list_wait;
 164	u16			csum_size;
 165	struct list_head	csum_list;
 166	atomic_t		cancel_req;
 167	int			readonly;
 168	int			pages_per_rd_bio;
 169
 170	int			is_dev_replace;
 171
 172	struct scrub_bio        *wr_curr_bio;
 173	struct mutex            wr_lock;
 174	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
 175	struct btrfs_device     *wr_tgtdev;
 176	bool                    flush_all_writes;
 177
 178	/*
 179	 * statistics
 180	 */
 181	struct btrfs_scrub_progress stat;
 182	spinlock_t		stat_lock;
 183
 184	/*
 185	 * Use a ref counter to avoid use-after-free issues. Scrub workers
 186	 * decrement bios_in_flight and workers_pending and then do a wakeup
 187	 * on the list_wait wait queue. We must ensure the main scrub task
 188	 * doesn't free the scrub context before or while the workers are
 189	 * doing the wakeup() call.
 190	 */
 191	refcount_t              refs;
 192};
 193
 194struct scrub_warning {
 195	struct btrfs_path	*path;
 196	u64			extent_item_size;
 197	const char		*errstr;
 198	u64			physical;
 199	u64			logical;
 200	struct btrfs_device	*dev;
 201};
 202
 203struct full_stripe_lock {
 204	struct rb_node node;
 205	u64 logical;
 206	u64 refs;
 207	struct mutex mutex;
 208};
 209
 210static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
 211static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
 212static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
 213static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 214				     struct scrub_block *sblocks_for_recheck);
 215static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 216				struct scrub_block *sblock,
 217				int retry_failed_mirror);
 218static void scrub_recheck_block_checksum(struct scrub_block *sblock);
 219static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 220					     struct scrub_block *sblock_good);
 221static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 222					    struct scrub_block *sblock_good,
 223					    int page_num, int force_write);
 224static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
 225static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
 226					   int page_num);
 227static int scrub_checksum_data(struct scrub_block *sblock);
 228static int scrub_checksum_tree_block(struct scrub_block *sblock);
 229static int scrub_checksum_super(struct scrub_block *sblock);
 230static void scrub_block_get(struct scrub_block *sblock);
 231static void scrub_block_put(struct scrub_block *sblock);
 232static void scrub_page_get(struct scrub_page *spage);
 233static void scrub_page_put(struct scrub_page *spage);
 234static void scrub_parity_get(struct scrub_parity *sparity);
 235static void scrub_parity_put(struct scrub_parity *sparity);
 236static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
 237				    struct scrub_page *spage);
 238static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 239		       u64 physical, struct btrfs_device *dev, u64 flags,
 240		       u64 gen, int mirror_num, u8 *csum, int force,
 241		       u64 physical_for_dev_replace);
 242static void scrub_bio_end_io(struct bio *bio);
 243static void scrub_bio_end_io_worker(struct btrfs_work *work);
 244static void scrub_block_complete(struct scrub_block *sblock);
 245static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 246			       u64 extent_logical, u64 extent_len,
 247			       u64 *extent_physical,
 248			       struct btrfs_device **extent_dev,
 249			       int *extent_mirror_num);
 250static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 251				    struct scrub_page *spage);
 252static void scrub_wr_submit(struct scrub_ctx *sctx);
 253static void scrub_wr_bio_end_io(struct bio *bio);
 254static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
 255static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 256static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 257static void scrub_put_ctx(struct scrub_ctx *sctx);
 258
 259static inline int scrub_is_page_on_raid56(struct scrub_page *page)
 260{
 261	return page->recover &&
 262	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 263}
 264
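     /*
      * Account one more bio in flight for @sctx and take a context
      * reference so the scrub context outlives the bio completion;
      * scrub_pending_bio_dec() drops both and wakes up list_wait.
      */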
 265static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
 266{
 267	refcount_inc(&sctx->refs);
 268	atomic_inc(&sctx->bios_in_flight);
 269}
 270
 271static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 272{
 273	atomic_dec(&sctx->bios_in_flight);
 274	wake_up(&sctx->list_wait);
 275	scrub_put_ctx(sctx);
 276}
 277
 278static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 279{
 280	while (atomic_read(&fs_info->scrub_pause_req)) {
 281		mutex_unlock(&fs_info->scrub_lock);
 282		wait_event(fs_info->scrub_pause_wait,
 283		   atomic_read(&fs_info->scrub_pause_req) == 0);
 284		mutex_lock(&fs_info->scrub_lock);
 285	}
 286}
 287
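     /*
      * Pause handshake: scrub_pause_on() marks this scrub as paused and
      * wakes any waiter, scrub_pause_off() first blocks while a pause is
      * still requested and then clears the paused state again.
      */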
 288static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 289{
 290	atomic_inc(&fs_info->scrubs_paused);
 291	wake_up(&fs_info->scrub_pause_wait);
 292}
 293
 294static void scrub_pause_off(struct btrfs_fs_info *fs_info)
 295{
 296	mutex_lock(&fs_info->scrub_lock);
 297	__scrub_blocked_if_needed(fs_info);
 298	atomic_dec(&fs_info->scrubs_paused);
 299	mutex_unlock(&fs_info->scrub_lock);
 300
 301	wake_up(&fs_info->scrub_pause_wait);
 302}
 303
 304static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 305{
 306	scrub_pause_on(fs_info);
 307	scrub_pause_off(fs_info);
 308}
 309
 310/*
 311 * Insert new full stripe lock into full stripe locks tree
 312 *
 313 * Return pointer to existing or newly inserted full_stripe_lock structure if
 314 * everything works well.
 315 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 316 *
 317 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 318 * function
 319 */
 320static struct full_stripe_lock *insert_full_stripe_lock(
 321		struct btrfs_full_stripe_locks_tree *locks_root,
 322		u64 fstripe_logical)
 323{
 324	struct rb_node **p;
 325	struct rb_node *parent = NULL;
 326	struct full_stripe_lock *entry;
 327	struct full_stripe_lock *ret;
 328
 329	lockdep_assert_held(&locks_root->lock);
 330
 331	p = &locks_root->root.rb_node;
 332	while (*p) {
 333		parent = *p;
 334		entry = rb_entry(parent, struct full_stripe_lock, node);
 335		if (fstripe_logical < entry->logical) {
 336			p = &(*p)->rb_left;
 337		} else if (fstripe_logical > entry->logical) {
 338			p = &(*p)->rb_right;
 339		} else {
 340			entry->refs++;
 341			return entry;
 342		}
 343	}
 344
 345	/*
 346	 * Insert new lock.
 347	 */
 348	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
 349	if (!ret)
 350		return ERR_PTR(-ENOMEM);
 351	ret->logical = fstripe_logical;
 352	ret->refs = 1;
 353	mutex_init(&ret->mutex);
 354
 355	rb_link_node(&ret->node, parent, p);
 356	rb_insert_color(&ret->node, &locks_root->root);
 357	return ret;
 358}
 359
 360/*
 361 * Search for a full stripe lock of a block group
 362 *
 363 * Return pointer to existing full stripe lock if found
 364 * Return NULL if not found
 365 */
 366static struct full_stripe_lock *search_full_stripe_lock(
 367		struct btrfs_full_stripe_locks_tree *locks_root,
 368		u64 fstripe_logical)
 369{
 370	struct rb_node *node;
 371	struct full_stripe_lock *entry;
 372
 373	lockdep_assert_held(&locks_root->lock);
 374
 375	node = locks_root->root.rb_node;
 376	while (node) {
 377		entry = rb_entry(node, struct full_stripe_lock, node);
 378		if (fstripe_logical < entry->logical)
 379			node = node->rb_left;
 380		else if (fstripe_logical > entry->logical)
 381			node = node->rb_right;
 382		else
 383			return entry;
 384	}
 385	return NULL;
 386}
 387
 388/*
 389 * Helper to get full stripe logical from a normal bytenr.
 390 *
 391 * Caller must ensure @cache is a RAID56 block group.
 392 */
 393static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
 394{
 395	u64 ret;
 396
 397	/*
 398	 * Due to chunk item size limit, full stripe length should not be
 399	 * larger than U32_MAX. Just a sanity check here.
 400	 */
 401	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
 402
 403	/*
 404	 * round_down() can only handle power of 2, while RAID56 full
 405	 * stripe length can be 64KiB * n, so we need to manually round down.
 406	 */
 407	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
 408			cache->full_stripe_len + cache->start;
 409	return ret;
 410}
 411
 412/*
 413 * Lock a full stripe to avoid concurrency of recovery and read
 414 *
 415 * It's only used for profiles with parities (RAID5/6), for other profiles it
 416 * does nothing.
 417 *
 418 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
  419 * So the caller must call unlock_full_stripe() in the same context.
  420 *
  421 * Return <0 if an error is encountered.
 422 */
 423static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
 424			    bool *locked_ret)
 425{
 426	struct btrfs_block_group *bg_cache;
 427	struct btrfs_full_stripe_locks_tree *locks_root;
 428	struct full_stripe_lock *existing;
 429	u64 fstripe_start;
 430	int ret = 0;
 431
 432	*locked_ret = false;
 433	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
 434	if (!bg_cache) {
 435		ASSERT(0);
 436		return -ENOENT;
 437	}
 438
 439	/* Profiles not based on parity don't need full stripe lock */
 440	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
 441		goto out;
 442	locks_root = &bg_cache->full_stripe_locks_root;
 443
 444	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
 445
 446	/* Now insert the full stripe lock */
 447	mutex_lock(&locks_root->lock);
 448	existing = insert_full_stripe_lock(locks_root, fstripe_start);
 449	mutex_unlock(&locks_root->lock);
 450	if (IS_ERR(existing)) {
 451		ret = PTR_ERR(existing);
 452		goto out;
 453	}
 454	mutex_lock(&existing->mutex);
 455	*locked_ret = true;
 456out:
 457	btrfs_put_block_group(bg_cache);
 458	return ret;
 459}
 460
 461/*
 462 * Unlock a full stripe.
 463 *
 464 * NOTE: Caller must ensure it's the same context calling corresponding
 465 * lock_full_stripe().
 466 *
 467 * Return 0 if we unlock full stripe without problem.
 468 * Return <0 for error
 469 */
 470static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
 471			      bool locked)
 472{
 473	struct btrfs_block_group *bg_cache;
 474	struct btrfs_full_stripe_locks_tree *locks_root;
 475	struct full_stripe_lock *fstripe_lock;
 476	u64 fstripe_start;
 477	bool freeit = false;
 478	int ret = 0;
 479
 480	/* If we didn't acquire full stripe lock, no need to continue */
 481	if (!locked)
 482		return 0;
 483
 484	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
 485	if (!bg_cache) {
 486		ASSERT(0);
 487		return -ENOENT;
 488	}
 489	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
 490		goto out;
 491
 492	locks_root = &bg_cache->full_stripe_locks_root;
 493	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
 494
 495	mutex_lock(&locks_root->lock);
 496	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
 497	/* Unpaired unlock_full_stripe() detected */
 498	if (!fstripe_lock) {
 499		WARN_ON(1);
 500		ret = -ENOENT;
 501		mutex_unlock(&locks_root->lock);
 502		goto out;
 503	}
 504
 505	if (fstripe_lock->refs == 0) {
 506		WARN_ON(1);
 507		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
 508			fstripe_lock->logical);
 509	} else {
 510		fstripe_lock->refs--;
 511	}
 512
 513	if (fstripe_lock->refs == 0) {
 514		rb_erase(&fstripe_lock->node, &locks_root->root);
 515		freeit = true;
 516	}
 517	mutex_unlock(&locks_root->lock);
 518
 519	mutex_unlock(&fstripe_lock->mutex);
 520	if (freeit)
 521		kfree(fstripe_lock);
 522out:
 523	btrfs_put_block_group(bg_cache);
 524	return ret;
 525}
 526
 527static void scrub_free_csums(struct scrub_ctx *sctx)
 528{
 529	while (!list_empty(&sctx->csum_list)) {
 530		struct btrfs_ordered_sum *sum;
 531		sum = list_first_entry(&sctx->csum_list,
 532				       struct btrfs_ordered_sum, list);
 533		list_del(&sum->list);
 534		kfree(sum);
 535	}
 536}
 537
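     /*
      * Free a scrub context: drop the blocks still attached to the
      * current read bio (can happen when scrub is cancelled), free all
      * pre-allocated scrub_bios, the current write bio, any queued csums
      * and finally the context itself.
      */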
 538static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 539{
 540	int i;
 541
 542	if (!sctx)
 543		return;
 544
 545	/* this can happen when scrub is cancelled */
 546	if (sctx->curr != -1) {
 547		struct scrub_bio *sbio = sctx->bios[sctx->curr];
 548
 549		for (i = 0; i < sbio->page_count; i++) {
 550			WARN_ON(!sbio->pagev[i]->page);
 551			scrub_block_put(sbio->pagev[i]->sblock);
 552		}
 553		bio_put(sbio->bio);
 554	}
 555
 556	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 557		struct scrub_bio *sbio = sctx->bios[i];
 558
 559		if (!sbio)
 560			break;
 561		kfree(sbio);
 562	}
 563
 564	kfree(sctx->wr_curr_bio);
 565	scrub_free_csums(sctx);
 566	kfree(sctx);
 567}
 568
 569static void scrub_put_ctx(struct scrub_ctx *sctx)
 570{
 571	if (refcount_dec_and_test(&sctx->refs))
 572		scrub_free_ctx(sctx);
 573}
 574
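     /*
      * Allocate and initialize a scrub context: pre-allocate
      * SCRUB_BIOS_PER_SCTX read bios linked into a free list, and for
      * dev-replace also record the write target device.
      */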
 575static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 576		struct btrfs_fs_info *fs_info, int is_dev_replace)
 577{
 578	struct scrub_ctx *sctx;
 579	int		i;
 580
 581	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 582	if (!sctx)
 583		goto nomem;
 584	refcount_set(&sctx->refs, 1);
 585	sctx->is_dev_replace = is_dev_replace;
 586	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
 587	sctx->curr = -1;
 588	sctx->fs_info = fs_info;
 589	INIT_LIST_HEAD(&sctx->csum_list);
 590	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 591		struct scrub_bio *sbio;
 592
 593		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
 594		if (!sbio)
 595			goto nomem;
 596		sctx->bios[i] = sbio;
 597
 598		sbio->index = i;
 599		sbio->sctx = sctx;
 600		sbio->page_count = 0;
 601		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
 602				NULL);
 603
 604		if (i != SCRUB_BIOS_PER_SCTX - 1)
 605			sctx->bios[i]->next_free = i + 1;
 606		else
 607			sctx->bios[i]->next_free = -1;
 608	}
 609	sctx->first_free = 0;
 610	atomic_set(&sctx->bios_in_flight, 0);
 611	atomic_set(&sctx->workers_pending, 0);
 612	atomic_set(&sctx->cancel_req, 0);
 613	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
 614
 615	spin_lock_init(&sctx->list_lock);
 616	spin_lock_init(&sctx->stat_lock);
 617	init_waitqueue_head(&sctx->list_wait);
 618
 619	WARN_ON(sctx->wr_curr_bio != NULL);
 620	mutex_init(&sctx->wr_lock);
 621	sctx->wr_curr_bio = NULL;
 622	if (is_dev_replace) {
 623		WARN_ON(!fs_info->dev_replace.tgtdev);
 624		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
 625		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
 626		sctx->flush_all_writes = false;
 627	}
 628
 629	return sctx;
 630
 631nomem:
 632	scrub_free_ctx(sctx);
 633	return ERR_PTR(-ENOMEM);
 634}
 635
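     /*
      * Backref callback, invoked once for each inode that references the
      * corrupted extent: look up the inode, resolve its file paths and
      * print a warning line for each path.
      */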
 636static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
 637				     void *warn_ctx)
 638{
 639	u64 isize;
 640	u32 nlink;
 641	int ret;
 642	int i;
 643	unsigned nofs_flag;
 644	struct extent_buffer *eb;
 645	struct btrfs_inode_item *inode_item;
 646	struct scrub_warning *swarn = warn_ctx;
 647	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
 648	struct inode_fs_paths *ipath = NULL;
 649	struct btrfs_root *local_root;
 650	struct btrfs_key key;
 651
 652	local_root = btrfs_get_fs_root(fs_info, root, true);
 653	if (IS_ERR(local_root)) {
 654		ret = PTR_ERR(local_root);
 655		goto err;
 656	}
 657
 658	/*
 659	 * this makes the path point to (inum INODE_ITEM ioff)
 660	 */
 661	key.objectid = inum;
 662	key.type = BTRFS_INODE_ITEM_KEY;
 663	key.offset = 0;
 664
 665	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
 666	if (ret) {
 667		btrfs_put_root(local_root);
 668		btrfs_release_path(swarn->path);
 669		goto err;
 670	}
 671
 672	eb = swarn->path->nodes[0];
 673	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 674					struct btrfs_inode_item);
 675	isize = btrfs_inode_size(eb, inode_item);
 676	nlink = btrfs_inode_nlink(eb, inode_item);
 677	btrfs_release_path(swarn->path);
 678
 679	/*
 680	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
 681	 * uses GFP_NOFS in this context, so we keep it consistent but it does
 682	 * not seem to be strictly necessary.
 683	 */
 684	nofs_flag = memalloc_nofs_save();
 685	ipath = init_ipath(4096, local_root, swarn->path);
 686	memalloc_nofs_restore(nofs_flag);
 687	if (IS_ERR(ipath)) {
 688		btrfs_put_root(local_root);
 689		ret = PTR_ERR(ipath);
 690		ipath = NULL;
 691		goto err;
 692	}
 693	ret = paths_from_inode(inum, ipath);
 694
 695	if (ret < 0)
 696		goto err;
 697
 698	/*
  699	 * We deliberately ignore that ipath might have been too small to
  700	 * hold all of the paths here.
 701	 */
 702	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 703		btrfs_warn_in_rcu(fs_info,
 704"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
 705				  swarn->errstr, swarn->logical,
 706				  rcu_str_deref(swarn->dev->name),
 707				  swarn->physical,
 708				  root, inum, offset,
 709				  min(isize - offset, (u64)PAGE_SIZE), nlink,
 710				  (char *)(unsigned long)ipath->fspath->val[i]);
 711
 712	btrfs_put_root(local_root);
 713	free_ipath(ipath);
 714	return 0;
 715
 716err:
 717	btrfs_warn_in_rcu(fs_info,
 718			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
 719			  swarn->errstr, swarn->logical,
 720			  rcu_str_deref(swarn->dev->name),
 721			  swarn->physical,
 722			  root, inum, offset, ret);
 723
 724	free_ipath(ipath);
 725	return 0;
 726}
 727
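     /*
      * Report a scrub error at @sblock's logical address: for metadata,
      * walk the tree backrefs and print the owning tree and level; for
      * data, iterate all inodes referencing the extent and print one
      * warning per reference.
      */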
 728static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 729{
 730	struct btrfs_device *dev;
 731	struct btrfs_fs_info *fs_info;
 732	struct btrfs_path *path;
 733	struct btrfs_key found_key;
 734	struct extent_buffer *eb;
 735	struct btrfs_extent_item *ei;
 736	struct scrub_warning swarn;
 737	unsigned long ptr = 0;
 738	u64 extent_item_pos;
 739	u64 flags = 0;
 740	u64 ref_root;
 741	u32 item_size;
 742	u8 ref_level = 0;
 743	int ret;
 744
 745	WARN_ON(sblock->page_count < 1);
 746	dev = sblock->pagev[0]->dev;
 747	fs_info = sblock->sctx->fs_info;
 748
 749	path = btrfs_alloc_path();
 750	if (!path)
 751		return;
 752
 753	swarn.physical = sblock->pagev[0]->physical;
 754	swarn.logical = sblock->pagev[0]->logical;
 755	swarn.errstr = errstr;
 756	swarn.dev = NULL;
 757
 758	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
 759				  &flags);
 760	if (ret < 0)
 761		goto out;
 762
 763	extent_item_pos = swarn.logical - found_key.objectid;
 764	swarn.extent_item_size = found_key.offset;
 765
 766	eb = path->nodes[0];
 767	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 768	item_size = btrfs_item_size_nr(eb, path->slots[0]);
 769
 770	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 771		do {
 772			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 773						      item_size, &ref_root,
 774						      &ref_level);
 775			btrfs_warn_in_rcu(fs_info,
 776"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
 777				errstr, swarn.logical,
 778				rcu_str_deref(dev->name),
 779				swarn.physical,
 780				ref_level ? "node" : "leaf",
 781				ret < 0 ? -1 : ref_level,
 782				ret < 0 ? -1 : ref_root);
 783		} while (ret != 1);
 784		btrfs_release_path(path);
 785	} else {
 786		btrfs_release_path(path);
 787		swarn.path = path;
 788		swarn.dev = dev;
 789		iterate_extent_inodes(fs_info, found_key.objectid,
 790					extent_item_pos, 1,
 791					scrub_print_warning_inode, &swarn, false);
 792	}
 793
 794out:
 795	btrfs_free_path(path);
 796}
 797
 798static inline void scrub_get_recover(struct scrub_recover *recover)
 799{
 800	refcount_inc(&recover->refs);
 801}
 802
 803static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
 804				     struct scrub_recover *recover)
 805{
 806	if (refcount_dec_and_test(&recover->refs)) {
 807		btrfs_bio_counter_dec(fs_info);
 808		btrfs_put_bbio(recover->bbio);
 809		kfree(recover);
 810	}
 811}
 812
 813/*
 814 * scrub_handle_errored_block gets called when either verification of the
 815 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 816 * case, this function handles all pages in the bio, even though only one
 817 * may be bad.
 818 * The goal of this function is to repair the errored block by using the
 819 * contents of one of the mirrors.
 820 */
 821static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 822{
 823	struct scrub_ctx *sctx = sblock_to_check->sctx;
 824	struct btrfs_device *dev;
 825	struct btrfs_fs_info *fs_info;
 826	u64 logical;
 827	unsigned int failed_mirror_index;
 828	unsigned int is_metadata;
 829	unsigned int have_csum;
 830	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
 831	struct scrub_block *sblock_bad;
 832	int ret;
 833	int mirror_index;
 834	int page_num;
 835	int success;
 836	bool full_stripe_locked;
 837	unsigned int nofs_flag;
 838	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
 839				      DEFAULT_RATELIMIT_BURST);
 840
 841	BUG_ON(sblock_to_check->page_count < 1);
 842	fs_info = sctx->fs_info;
 843	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
 844		/*
 845		 * if we find an error in a super block, we just report it.
 846		 * They will get written with the next transaction commit
 847		 * anyway
 848		 */
 849		spin_lock(&sctx->stat_lock);
 850		++sctx->stat.super_errors;
 851		spin_unlock(&sctx->stat_lock);
 852		return 0;
 853	}
 854	logical = sblock_to_check->pagev[0]->logical;
 855	BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
 856	failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
 857	is_metadata = !(sblock_to_check->pagev[0]->flags &
 858			BTRFS_EXTENT_FLAG_DATA);
 859	have_csum = sblock_to_check->pagev[0]->have_csum;
 860	dev = sblock_to_check->pagev[0]->dev;
 861
 862	/*
 863	 * We must use GFP_NOFS because the scrub task might be waiting for a
 864	 * worker task executing this function and in turn a transaction commit
 865	 * might be waiting the scrub task to pause (which needs to wait for all
 866	 * the worker tasks to complete before pausing).
 867	 * We do allocations in the workers through insert_full_stripe_lock()
 868	 * and scrub_add_page_to_wr_bio(), which happens down the call chain of
 869	 * this function.
 870	 */
 871	nofs_flag = memalloc_nofs_save();
 872	/*
  873	 * For RAID5/6, a race can happen with a different device's scrub thread.
  874	 * For data corruption, the parity and data scrub threads will both try
  875	 * to recover the data.
  876	 * The race can lead to doubly counted csum errors, or even an
  877	 * unrecoverable error.
 878	 */
 879	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
 880	if (ret < 0) {
 881		memalloc_nofs_restore(nofs_flag);
 882		spin_lock(&sctx->stat_lock);
 883		if (ret == -ENOMEM)
 884			sctx->stat.malloc_errors++;
 885		sctx->stat.read_errors++;
 886		sctx->stat.uncorrectable_errors++;
 887		spin_unlock(&sctx->stat_lock);
 888		return ret;
 889	}
 890
 891	/*
  892	 * Read all mirrors one after the other. This includes
  893	 * re-reading the extent or metadata block that failed (the
  894	 * reason this fixup code is called), this time page by page,
  895	 * in order to know which pages caused I/O errors and which
  896	 * ones are good (for all mirrors).
 897	 * It is the goal to handle the situation when more than one
 898	 * mirror contains I/O errors, but the errors do not
 899	 * overlap, i.e. the data can be repaired by selecting the
 900	 * pages from those mirrors without I/O error on the
 901	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
 902	 * would be that mirror #1 has an I/O error on the first page,
 903	 * the second page is good, and mirror #2 has an I/O error on
 904	 * the second page, but the first page is good.
 905	 * Then the first page of the first mirror can be repaired by
 906	 * taking the first page of the second mirror, and the
 907	 * second page of the second mirror can be repaired by
 908	 * copying the contents of the 2nd page of the 1st mirror.
 909	 * One more note: if the pages of one mirror contain I/O
 910	 * errors, the checksum cannot be verified. In order to get
 911	 * the best data for repairing, the first attempt is to find
 912	 * a mirror without I/O errors and with a validated checksum.
 913	 * Only if this is not possible, the pages are picked from
 914	 * mirrors with I/O errors without considering the checksum.
 915	 * If the latter is the case, at the end, the checksum of the
 916	 * repaired area is verified in order to correctly maintain
 917	 * the statistics.
 918	 */
 919
 920	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
 921				      sizeof(*sblocks_for_recheck), GFP_KERNEL);
 922	if (!sblocks_for_recheck) {
 923		spin_lock(&sctx->stat_lock);
 924		sctx->stat.malloc_errors++;
 925		sctx->stat.read_errors++;
 926		sctx->stat.uncorrectable_errors++;
 927		spin_unlock(&sctx->stat_lock);
 928		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
 929		goto out;
 930	}
 931
 932	/* setup the context, map the logical blocks and alloc the pages */
 933	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
 934	if (ret) {
 935		spin_lock(&sctx->stat_lock);
 936		sctx->stat.read_errors++;
 937		sctx->stat.uncorrectable_errors++;
 938		spin_unlock(&sctx->stat_lock);
 939		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
 940		goto out;
 941	}
 942	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
 943	sblock_bad = sblocks_for_recheck + failed_mirror_index;
 944
 945	/* build and submit the bios for the failed mirror, check checksums */
 946	scrub_recheck_block(fs_info, sblock_bad, 1);
 947
 948	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
 949	    sblock_bad->no_io_error_seen) {
 950		/*
 951		 * the error disappeared after reading page by page, or
 952		 * the area was part of a huge bio and other parts of the
 953		 * bio caused I/O errors, or the block layer merged several
 954		 * read requests into one and the error is caused by a
 955		 * different bio (usually one of the two latter cases is
 956		 * the cause)
 957		 */
 958		spin_lock(&sctx->stat_lock);
 959		sctx->stat.unverified_errors++;
 960		sblock_to_check->data_corrected = 1;
 961		spin_unlock(&sctx->stat_lock);
 962
 963		if (sctx->is_dev_replace)
 964			scrub_write_block_to_dev_replace(sblock_bad);
 965		goto out;
 966	}
 967
 968	if (!sblock_bad->no_io_error_seen) {
 969		spin_lock(&sctx->stat_lock);
 970		sctx->stat.read_errors++;
 971		spin_unlock(&sctx->stat_lock);
 972		if (__ratelimit(&_rs))
 973			scrub_print_warning("i/o error", sblock_to_check);
 974		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
 975	} else if (sblock_bad->checksum_error) {
 976		spin_lock(&sctx->stat_lock);
 977		sctx->stat.csum_errors++;
 978		spin_unlock(&sctx->stat_lock);
 979		if (__ratelimit(&_rs))
 980			scrub_print_warning("checksum error", sblock_to_check);
 981		btrfs_dev_stat_inc_and_print(dev,
 982					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
 983	} else if (sblock_bad->header_error) {
 984		spin_lock(&sctx->stat_lock);
 985		sctx->stat.verify_errors++;
 986		spin_unlock(&sctx->stat_lock);
 987		if (__ratelimit(&_rs))
 988			scrub_print_warning("checksum/header error",
 989					    sblock_to_check);
 990		if (sblock_bad->generation_error)
 991			btrfs_dev_stat_inc_and_print(dev,
 992				BTRFS_DEV_STAT_GENERATION_ERRS);
 993		else
 994			btrfs_dev_stat_inc_and_print(dev,
 995				BTRFS_DEV_STAT_CORRUPTION_ERRS);
 996	}
 997
 998	if (sctx->readonly) {
 999		ASSERT(!sctx->is_dev_replace);
1000		goto out;
1001	}
1002
1003	/*
1004	 * now build and submit the bios for the other mirrors, check
1005	 * checksums.
1006	 * First try to pick the mirror which is completely without I/O
1007	 * errors and also does not have a checksum error.
1008	 * If one is found, and if a checksum is present, the full block
1009	 * that is known to contain an error is rewritten. Afterwards
1010	 * the block is known to be corrected.
1011	 * If a mirror is found which is completely correct, and no
1012	 * checksum is present, only those pages are rewritten that had
1013	 * an I/O error in the block to be repaired, since it cannot be
1014	 * determined, which copy of the other pages is better (and it
1015	 * could happen otherwise that a correct page would be
1016	 * overwritten by a bad one).
1017	 */
1018	for (mirror_index = 0; ;mirror_index++) {
1019		struct scrub_block *sblock_other;
1020
1021		if (mirror_index == failed_mirror_index)
1022			continue;
1023
1024		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1025		if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1026			if (mirror_index >= BTRFS_MAX_MIRRORS)
1027				break;
1028			if (!sblocks_for_recheck[mirror_index].page_count)
1029				break;
1030
1031			sblock_other = sblocks_for_recheck + mirror_index;
1032		} else {
1033			struct scrub_recover *r = sblock_bad->pagev[0]->recover;
1034			int max_allowed = r->bbio->num_stripes -
1035						r->bbio->num_tgtdevs;
1036
1037			if (mirror_index >= max_allowed)
1038				break;
1039			if (!sblocks_for_recheck[1].page_count)
1040				break;
1041
1042			ASSERT(failed_mirror_index == 0);
1043			sblock_other = sblocks_for_recheck + 1;
1044			sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
1045		}
1046
1047		/* build and submit the bios, check checksums */
1048		scrub_recheck_block(fs_info, sblock_other, 0);
1049
1050		if (!sblock_other->header_error &&
1051		    !sblock_other->checksum_error &&
1052		    sblock_other->no_io_error_seen) {
1053			if (sctx->is_dev_replace) {
1054				scrub_write_block_to_dev_replace(sblock_other);
1055				goto corrected_error;
1056			} else {
1057				ret = scrub_repair_block_from_good_copy(
1058						sblock_bad, sblock_other);
1059				if (!ret)
1060					goto corrected_error;
1061			}
1062		}
1063	}
1064
1065	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1066		goto did_not_correct_error;
1067
1068	/*
1069	 * In case of I/O errors in the area that is supposed to be
1070	 * repaired, continue by picking good copies of those pages.
1071	 * Select the good pages from mirrors to rewrite bad pages from
1072	 * the area to fix. Afterwards verify the checksum of the block
1073	 * that is supposed to be repaired. This verification step is
1074	 * only done for the purpose of statistic counting and for the
1075	 * final scrub report, whether errors remain.
1076	 * A perfect algorithm could make use of the checksum and try
1077	 * all possible combinations of pages from the different mirrors
1078	 * until the checksum verification succeeds. For example, when
1079	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1080	 * of mirror #2 is readable but the final checksum test fails,
 1081	 * then the 2nd page of mirror #3 could be tried, to see whether
 1082	 * the final checksum then succeeds. But this would be a rare
1083	 * exception and is therefore not implemented. At least it is
1084	 * avoided that the good copy is overwritten.
1085	 * A more useful improvement would be to pick the sectors
1086	 * without I/O error based on sector sizes (512 bytes on legacy
1087	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
1088	 * mirror could be repaired by taking 512 byte of a different
1089	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1090	 * area are unreadable.
1091	 */
1092	success = 1;
1093	for (page_num = 0; page_num < sblock_bad->page_count;
1094	     page_num++) {
1095		struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1096		struct scrub_block *sblock_other = NULL;
1097
1098		/* skip no-io-error page in scrub */
1099		if (!page_bad->io_error && !sctx->is_dev_replace)
1100			continue;
1101
1102		if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1103			/*
1104			 * In case of dev replace, if raid56 rebuild process
1105			 * didn't work out correct data, then copy the content
1106			 * in sblock_bad to make sure target device is identical
1107			 * to source device, instead of writing garbage data in
1108			 * sblock_for_recheck array to target device.
1109			 */
1110			sblock_other = NULL;
1111		} else if (page_bad->io_error) {
1112			/* try to find no-io-error page in mirrors */
1113			for (mirror_index = 0;
1114			     mirror_index < BTRFS_MAX_MIRRORS &&
1115			     sblocks_for_recheck[mirror_index].page_count > 0;
1116			     mirror_index++) {
1117				if (!sblocks_for_recheck[mirror_index].
1118				    pagev[page_num]->io_error) {
1119					sblock_other = sblocks_for_recheck +
1120						       mirror_index;
1121					break;
1122				}
1123			}
1124			if (!sblock_other)
1125				success = 0;
1126		}
1127
1128		if (sctx->is_dev_replace) {
1129			/*
1130			 * did not find a mirror to fetch the page
1131			 * from. scrub_write_page_to_dev_replace()
1132			 * handles this case (page->io_error), by
1133			 * filling the block with zeros before
1134			 * submitting the write request
1135			 */
1136			if (!sblock_other)
1137				sblock_other = sblock_bad;
1138
1139			if (scrub_write_page_to_dev_replace(sblock_other,
1140							    page_num) != 0) {
1141				atomic64_inc(
1142					&fs_info->dev_replace.num_write_errors);
1143				success = 0;
1144			}
1145		} else if (sblock_other) {
1146			ret = scrub_repair_page_from_good_copy(sblock_bad,
1147							       sblock_other,
1148							       page_num, 0);
 1149			if (ret == 0)
1150				page_bad->io_error = 0;
1151			else
1152				success = 0;
1153		}
1154	}
1155
1156	if (success && !sctx->is_dev_replace) {
1157		if (is_metadata || have_csum) {
1158			/*
1159			 * need to verify the checksum now that all
1160			 * sectors on disk are repaired (the write
1161			 * request for data to be repaired is on its way).
1162			 * Just be lazy and use scrub_recheck_block()
1163			 * which re-reads the data before the checksum
1164			 * is verified, but most likely the data comes out
1165			 * of the page cache.
1166			 */
1167			scrub_recheck_block(fs_info, sblock_bad, 1);
1168			if (!sblock_bad->header_error &&
1169			    !sblock_bad->checksum_error &&
1170			    sblock_bad->no_io_error_seen)
1171				goto corrected_error;
1172			else
1173				goto did_not_correct_error;
1174		} else {
1175corrected_error:
1176			spin_lock(&sctx->stat_lock);
1177			sctx->stat.corrected_errors++;
1178			sblock_to_check->data_corrected = 1;
1179			spin_unlock(&sctx->stat_lock);
1180			btrfs_err_rl_in_rcu(fs_info,
1181				"fixed up error at logical %llu on dev %s",
1182				logical, rcu_str_deref(dev->name));
1183		}
1184	} else {
1185did_not_correct_error:
1186		spin_lock(&sctx->stat_lock);
1187		sctx->stat.uncorrectable_errors++;
1188		spin_unlock(&sctx->stat_lock);
1189		btrfs_err_rl_in_rcu(fs_info,
1190			"unable to fixup (regular) error at logical %llu on dev %s",
1191			logical, rcu_str_deref(dev->name));
1192	}
1193
1194out:
1195	if (sblocks_for_recheck) {
1196		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1197		     mirror_index++) {
1198			struct scrub_block *sblock = sblocks_for_recheck +
1199						     mirror_index;
1200			struct scrub_recover *recover;
1201			int page_index;
1202
1203			for (page_index = 0; page_index < sblock->page_count;
1204			     page_index++) {
1205				sblock->pagev[page_index]->sblock = NULL;
1206				recover = sblock->pagev[page_index]->recover;
1207				if (recover) {
1208					scrub_put_recover(fs_info, recover);
1209					sblock->pagev[page_index]->recover =
1210									NULL;
1211				}
1212				scrub_page_put(sblock->pagev[page_index]);
1213			}
1214		}
1215		kfree(sblocks_for_recheck);
1216	}
1217
1218	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1219	memalloc_nofs_restore(nofs_flag);
1220	if (ret < 0)
1221		return ret;
1222	return 0;
1223}
1224
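     /*
      * Number of copies that can be tried for a block: 2 for RAID5 (the
      * data itself plus a rebuild from parity), 3 for RAID6, otherwise
      * the number of stripes of the mapping.
      */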
1225static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1226{
1227	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1228		return 2;
1229	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1230		return 3;
1231	else
1232		return (int)bbio->num_stripes;
1233}
1234
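     /*
      * Translate a logical address and mirror number into the stripe
      * index and offset within the mapping: for RAID5/6 the data stripe
      * is found by scanning @raid_map, for other profiles the mirror
      * number indexes the stripe directly.
      */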
1235static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1236						 u64 *raid_map,
1237						 u64 mapped_length,
1238						 int nstripes, int mirror,
1239						 int *stripe_index,
1240						 u64 *stripe_offset)
1241{
1242	int i;
1243
1244	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1245		/* RAID5/6 */
1246		for (i = 0; i < nstripes; i++) {
1247			if (raid_map[i] == RAID6_Q_STRIPE ||
1248			    raid_map[i] == RAID5_P_STRIPE)
1249				continue;
1250
1251			if (logical >= raid_map[i] &&
1252			    logical < raid_map[i] + mapped_length)
1253				break;
1254		}
1255
1256		*stripe_index = i;
1257		*stripe_offset = logical - raid_map[i];
1258	} else {
1259		/* The other RAID type */
1260		*stripe_index = mirror;
1261		*stripe_offset = 0;
1262	}
1263}
1264
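     /*
      * Build one scrub_block per mirror covering the failed block, page
      * by page, so that every mirror can be re-read and compared
      * independently during the repair procedure.
      */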
1265static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1266				     struct scrub_block *sblocks_for_recheck)
1267{
1268	struct scrub_ctx *sctx = original_sblock->sctx;
1269	struct btrfs_fs_info *fs_info = sctx->fs_info;
1270	u64 length = original_sblock->page_count * PAGE_SIZE;
1271	u64 logical = original_sblock->pagev[0]->logical;
1272	u64 generation = original_sblock->pagev[0]->generation;
1273	u64 flags = original_sblock->pagev[0]->flags;
1274	u64 have_csum = original_sblock->pagev[0]->have_csum;
1275	struct scrub_recover *recover;
1276	struct btrfs_bio *bbio;
1277	u64 sublen;
1278	u64 mapped_length;
1279	u64 stripe_offset;
1280	int stripe_index;
1281	int page_index = 0;
1282	int mirror_index;
1283	int nmirrors;
1284	int ret;
1285
1286	/*
1287	 * note: the two members refs and outstanding_pages
1288	 * are not used (and not set) in the blocks that are used for
1289	 * the recheck procedure
1290	 */
1291
1292	while (length > 0) {
1293		sublen = min_t(u64, length, PAGE_SIZE);
1294		mapped_length = sublen;
1295		bbio = NULL;
1296
1297		/*
1298		 * with a length of PAGE_SIZE, each returned stripe
1299		 * represents one mirror
1300		 */
1301		btrfs_bio_counter_inc_blocked(fs_info);
1302		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1303				logical, &mapped_length, &bbio);
1304		if (ret || !bbio || mapped_length < sublen) {
1305			btrfs_put_bbio(bbio);
1306			btrfs_bio_counter_dec(fs_info);
1307			return -EIO;
1308		}
1309
1310		recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1311		if (!recover) {
1312			btrfs_put_bbio(bbio);
1313			btrfs_bio_counter_dec(fs_info);
1314			return -ENOMEM;
1315		}
1316
1317		refcount_set(&recover->refs, 1);
1318		recover->bbio = bbio;
1319		recover->map_length = mapped_length;
1320
1321		BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1322
1323		nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1324
1325		for (mirror_index = 0; mirror_index < nmirrors;
1326		     mirror_index++) {
1327			struct scrub_block *sblock;
1328			struct scrub_page *page;
1329
1330			sblock = sblocks_for_recheck + mirror_index;
1331			sblock->sctx = sctx;
1332
1333			page = kzalloc(sizeof(*page), GFP_NOFS);
1334			if (!page) {
1335leave_nomem:
1336				spin_lock(&sctx->stat_lock);
1337				sctx->stat.malloc_errors++;
1338				spin_unlock(&sctx->stat_lock);
1339				scrub_put_recover(fs_info, recover);
1340				return -ENOMEM;
1341			}
1342			scrub_page_get(page);
1343			sblock->pagev[page_index] = page;
1344			page->sblock = sblock;
1345			page->flags = flags;
1346			page->generation = generation;
1347			page->logical = logical;
1348			page->have_csum = have_csum;
1349			if (have_csum)
1350				memcpy(page->csum,
1351				       original_sblock->pagev[0]->csum,
1352				       sctx->csum_size);
1353
1354			scrub_stripe_index_and_offset(logical,
1355						      bbio->map_type,
1356						      bbio->raid_map,
1357						      mapped_length,
1358						      bbio->num_stripes -
1359						      bbio->num_tgtdevs,
1360						      mirror_index,
1361						      &stripe_index,
1362						      &stripe_offset);
1363			page->physical = bbio->stripes[stripe_index].physical +
1364					 stripe_offset;
1365			page->dev = bbio->stripes[stripe_index].dev;
1366
1367			BUG_ON(page_index >= original_sblock->page_count);
1368			page->physical_for_dev_replace =
1369				original_sblock->pagev[page_index]->
1370				physical_for_dev_replace;
1371			/* for missing devices, dev->bdev is NULL */
1372			page->mirror_num = mirror_index + 1;
1373			sblock->page_count++;
1374			page->page = alloc_page(GFP_NOFS);
1375			if (!page->page)
1376				goto leave_nomem;
1377
1378			scrub_get_recover(recover);
1379			page->recover = recover;
1380		}
1381		scrub_put_recover(fs_info, recover);
1382		length -= sublen;
1383		logical += sublen;
1384		page_index++;
1385	}
1386
1387	return 0;
1388}
1389
1390static void scrub_bio_wait_endio(struct bio *bio)
1391{
1392	complete(bio->bi_private);
1393}
1394
1395static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1396					struct bio *bio,
1397					struct scrub_page *page)
1398{
1399	DECLARE_COMPLETION_ONSTACK(done);
1400	int ret;
1401	int mirror_num;
1402
1403	bio->bi_iter.bi_sector = page->logical >> 9;
1404	bio->bi_private = &done;
1405	bio->bi_end_io = scrub_bio_wait_endio;
1406
1407	mirror_num = page->sblock->pagev[0]->mirror_num;
1408	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
1409				    page->recover->map_length,
1410				    mirror_num, 0);
1411	if (ret)
1412		return ret;
1413
1414	wait_for_completion_io(&done);
1415	return blk_status_to_errno(bio->bi_status);
1416}
1417
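     /*
      * Re-read a block that lives on a RAID5/6 stripe through the parity
      * recovery path and verify its checksums; on failure mark all pages
      * of the block as errored.
      */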
1418static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1419					  struct scrub_block *sblock)
1420{
1421	struct scrub_page *first_page = sblock->pagev[0];
1422	struct bio *bio;
1423	int page_num;
1424
1425	/* All pages in sblock belong to the same stripe on the same device. */
1426	ASSERT(first_page->dev);
1427	if (!first_page->dev->bdev)
1428		goto out;
1429
1430	bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
1431	bio_set_dev(bio, first_page->dev->bdev);
1432
1433	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1434		struct scrub_page *page = sblock->pagev[page_num];
1435
1436		WARN_ON(!page->page);
1437		bio_add_page(bio, page->page, PAGE_SIZE, 0);
1438	}
1439
1440	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
1441		bio_put(bio);
1442		goto out;
1443	}
1444
1445	bio_put(bio);
1446
1447	scrub_recheck_block_checksum(sblock);
1448
1449	return;
1450out:
1451	for (page_num = 0; page_num < sblock->page_count; page_num++)
1452		sblock->pagev[page_num]->io_error = 1;
1453
1454	sblock->no_io_error_seen = 0;
1455}
1456
1457/*
1458 * this function will check the on disk data for checksum errors, header
1459 * errors and read I/O errors. If any I/O errors happen, the exact pages
1460 * which are errored are marked as being bad. The goal is to enable scrub
1461 * to take those pages that are not errored from all the mirrors so that
1462 * the pages that are errored in the just handled mirror can be repaired.
1463 */
1464static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1465				struct scrub_block *sblock,
1466				int retry_failed_mirror)
1467{
1468	int page_num;
1469
1470	sblock->no_io_error_seen = 1;
1471
1472	/* short cut for raid56 */
1473	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
1474		return scrub_recheck_block_on_raid56(fs_info, sblock);
1475
1476	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1477		struct bio *bio;
1478		struct scrub_page *page = sblock->pagev[page_num];
1479
1480		if (page->dev->bdev == NULL) {
1481			page->io_error = 1;
1482			sblock->no_io_error_seen = 0;
1483			continue;
1484		}
1485
1486		WARN_ON(!page->page);
1487		bio = btrfs_io_bio_alloc(1);
1488		bio_set_dev(bio, page->dev->bdev);
1489
1490		bio_add_page(bio, page->page, PAGE_SIZE, 0);
1491		bio->bi_iter.bi_sector = page->physical >> 9;
1492		bio->bi_opf = REQ_OP_READ;
1493
1494		if (btrfsic_submit_bio_wait(bio)) {
1495			page->io_error = 1;
1496			sblock->no_io_error_seen = 0;
1497		}
1498
1499		bio_put(bio);
1500	}
1501
1502	if (sblock->no_io_error_seen)
1503		scrub_recheck_block_checksum(sblock);
1504}
1505
1506static inline int scrub_check_fsid(u8 fsid[],
1507				   struct scrub_page *spage)
1508{
1509	struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1510	int ret;
1511
1512	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1513	return !ret;
1514}
1515
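     /*
      * Verify the checksums (and, for metadata, the header fields) of a
      * freshly re-read block; results are recorded in the sblock error
      * flags.
      */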
1516static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1517{
1518	sblock->header_error = 0;
1519	sblock->checksum_error = 0;
1520	sblock->generation_error = 0;
1521
1522	if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1523		scrub_checksum_data(sblock);
1524	else
1525		scrub_checksum_tree_block(sblock);
1526}
1527
1528static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1529					     struct scrub_block *sblock_good)
1530{
1531	int page_num;
1532	int ret = 0;
1533
1534	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1535		int ret_sub;
1536
1537		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1538							   sblock_good,
1539							   page_num, 1);
1540		if (ret_sub)
1541			ret = ret_sub;
1542	}
1543
1544	return ret;
1545}
1546
1547static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1548					    struct scrub_block *sblock_good,
1549					    int page_num, int force_write)
1550{
1551	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1552	struct scrub_page *page_good = sblock_good->pagev[page_num];
1553	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1554
1555	BUG_ON(page_bad->page == NULL);
1556	BUG_ON(page_good->page == NULL);
1557	if (force_write || sblock_bad->header_error ||
1558	    sblock_bad->checksum_error || page_bad->io_error) {
1559		struct bio *bio;
1560		int ret;
1561
1562		if (!page_bad->dev->bdev) {
1563			btrfs_warn_rl(fs_info,
1564				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1565			return -EIO;
1566		}
1567
1568		bio = btrfs_io_bio_alloc(1);
1569		bio_set_dev(bio, page_bad->dev->bdev);
1570		bio->bi_iter.bi_sector = page_bad->physical >> 9;
1571		bio->bi_opf = REQ_OP_WRITE;
1572
1573		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1574		if (PAGE_SIZE != ret) {
1575			bio_put(bio);
1576			return -EIO;
1577		}
1578
1579		if (btrfsic_submit_bio_wait(bio)) {
1580			btrfs_dev_stat_inc_and_print(page_bad->dev,
1581				BTRFS_DEV_STAT_WRITE_ERRS);
1582			atomic64_inc(&fs_info->dev_replace.num_write_errors);
1583			bio_put(bio);
1584			return -EIO;
1585		}
1586		bio_put(bio);
1587	}
1588
1589	return 0;
1590}
1591
1592static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1593{
1594	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1595	int page_num;
1596
1597	/*
1598	 * This block is used for the check of the parity on the source device,
1599	 * so the data needn't be written into the destination device.
1600	 */
1601	if (sblock->sparity)
1602		return;
1603
1604	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1605		int ret;
1606
1607		ret = scrub_write_page_to_dev_replace(sblock, page_num);
1608		if (ret)
1609			atomic64_inc(&fs_info->dev_replace.num_write_errors);
1610	}
1611}
1612
1613static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1614					   int page_num)
1615{
1616	struct scrub_page *spage = sblock->pagev[page_num];
1617
1618	BUG_ON(spage->page == NULL);
1619	if (spage->io_error)
1620		clear_page(page_address(spage->page));
1621
1622	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1623}
1624
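     /*
      * Queue @spage for writing to the dev-replace target. Pages must be
      * physically and logically contiguous within one write bio,
      * otherwise the current bio is submitted and a new one is started;
      * a full bio is submitted as well.
      */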
1625static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1626				    struct scrub_page *spage)
1627{
1628	struct scrub_bio *sbio;
1629	int ret;
1630
1631	mutex_lock(&sctx->wr_lock);
1632again:
1633	if (!sctx->wr_curr_bio) {
1634		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1635					      GFP_KERNEL);
1636		if (!sctx->wr_curr_bio) {
1637			mutex_unlock(&sctx->wr_lock);
1638			return -ENOMEM;
1639		}
1640		sctx->wr_curr_bio->sctx = sctx;
1641		sctx->wr_curr_bio->page_count = 0;
1642	}
1643	sbio = sctx->wr_curr_bio;
1644	if (sbio->page_count == 0) {
1645		struct bio *bio;
1646
1647		sbio->physical = spage->physical_for_dev_replace;
1648		sbio->logical = spage->logical;
1649		sbio->dev = sctx->wr_tgtdev;
1650		bio = sbio->bio;
1651		if (!bio) {
1652			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1653			sbio->bio = bio;
1654		}
1655
1656		bio->bi_private = sbio;
1657		bio->bi_end_io = scrub_wr_bio_end_io;
1658		bio_set_dev(bio, sbio->dev->bdev);
1659		bio->bi_iter.bi_sector = sbio->physical >> 9;
1660		bio->bi_opf = REQ_OP_WRITE;
1661		sbio->status = 0;
1662	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1663		   spage->physical_for_dev_replace ||
1664		   sbio->logical + sbio->page_count * PAGE_SIZE !=
1665		   spage->logical) {
1666		scrub_wr_submit(sctx);
1667		goto again;
1668	}
1669
1670	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1671	if (ret != PAGE_SIZE) {
1672		if (sbio->page_count < 1) {
1673			bio_put(sbio->bio);
1674			sbio->bio = NULL;
1675			mutex_unlock(&sctx->wr_lock);
1676			return -EIO;
1677		}
1678		scrub_wr_submit(sctx);
1679		goto again;
1680	}
1681
1682	sbio->pagev[sbio->page_count] = spage;
1683	scrub_page_get(spage);
1684	sbio->page_count++;
1685	if (sbio->page_count == sctx->pages_per_wr_bio)
1686		scrub_wr_submit(sctx);
1687	mutex_unlock(&sctx->wr_lock);
1688
1689	return 0;
1690}
1691
1692static void scrub_wr_submit(struct scrub_ctx *sctx)
1693{
1694	struct scrub_bio *sbio;
1695
1696	if (!sctx->wr_curr_bio)
1697		return;
1698
1699	sbio = sctx->wr_curr_bio;
1700	sctx->wr_curr_bio = NULL;
1701	WARN_ON(!sbio->bio->bi_disk);
1702	scrub_pending_bio_inc(sctx);
 1703	/* Process all writes in a single worker thread. Then the block layer
 1704	 * orders the requests before sending them to the driver, which
1705	 * doubled the write performance on spinning disks when measured
1706	 * with Linux 3.5 */
1707	btrfsic_submit_bio(sbio->bio);
1708}
1709
1710static void scrub_wr_bio_end_io(struct bio *bio)
1711{
1712	struct scrub_bio *sbio = bio->bi_private;
1713	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1714
1715	sbio->status = bio->bi_status;
1716	sbio->bio = bio;
1717
1718	btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
1719	btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1720}
1721
1722static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1723{
1724	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1725	struct scrub_ctx *sctx = sbio->sctx;
1726	int i;
1727
1728	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1729	if (sbio->status) {
1730		struct btrfs_dev_replace *dev_replace =
1731			&sbio->sctx->fs_info->dev_replace;
1732
1733		for (i = 0; i < sbio->page_count; i++) {
1734			struct scrub_page *spage = sbio->pagev[i];
1735
1736			spage->io_error = 1;
1737			atomic64_inc(&dev_replace->num_write_errors);
1738		}
1739	}
1740
1741	for (i = 0; i < sbio->page_count; i++)
1742		scrub_page_put(sbio->pagev[i]);
1743
1744	bio_put(sbio->bio);
1745	kfree(sbio);
1746	scrub_pending_bio_dec(sctx);
1747}
1748
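     /*
      * Dispatch checksum verification based on the extent type (data,
      * tree block or super block) and kick off repair through
      * scrub_handle_errored_block() if verification failed.
      */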
1749static int scrub_checksum(struct scrub_block *sblock)
1750{
1751	u64 flags;
1752	int ret;
1753
1754	/*
1755	 * No need to initialize these stats currently,
 1756	 * because this function only uses the return value
 1757	 * instead of these stats values.
1758	 *
1759	 * Todo:
1760	 * always use stats
1761	 */
1762	sblock->header_error = 0;
1763	sblock->generation_error = 0;
1764	sblock->checksum_error = 0;
1765
1766	WARN_ON(sblock->page_count < 1);
1767	flags = sblock->pagev[0]->flags;
1768	ret = 0;
1769	if (flags & BTRFS_EXTENT_FLAG_DATA)
1770		ret = scrub_checksum_data(sblock);
1771	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1772		ret = scrub_checksum_tree_block(sblock);
1773	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1774		(void)scrub_checksum_super(sblock);
1775	else
1776		WARN_ON(1);
1777	if (ret)
1778		scrub_handle_errored_block(sblock);
1779
1780	return ret;
1781}
1782
1783static int scrub_checksum_data(struct scrub_block *sblock)
1784{
1785	struct scrub_ctx *sctx = sblock->sctx;
1786	struct btrfs_fs_info *fs_info = sctx->fs_info;
1787	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1788	u8 csum[BTRFS_CSUM_SIZE];
1789	struct scrub_page *spage;
1790	char *kaddr;
1791
1792	BUG_ON(sblock->page_count < 1);
1793	spage = sblock->pagev[0];
1794	if (!spage->have_csum)
1795		return 0;
1796
1797	kaddr = page_address(spage->page);
1798
1799	shash->tfm = fs_info->csum_shash;
1800	crypto_shash_init(shash);
1801	crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
1802
1803	if (memcmp(csum, spage->csum, sctx->csum_size))
1804		sblock->checksum_error = 1;
1805
1806	return sblock->checksum_error;
1807}
1808
1809static int scrub_checksum_tree_block(struct scrub_block *sblock)
1810{
1811	struct scrub_ctx *sctx = sblock->sctx;
1812	struct btrfs_header *h;
1813	struct btrfs_fs_info *fs_info = sctx->fs_info;
1814	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1815	u8 calculated_csum[BTRFS_CSUM_SIZE];
1816	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1817	const int num_pages = sctx->fs_info->nodesize >> PAGE_SHIFT;
1818	int i;
1819	struct scrub_page *spage;
1820	char *kaddr;
1821
1822	BUG_ON(sblock->page_count < 1);
1823	spage = sblock->pagev[0];
1824	kaddr = page_address(spage->page);
1825	h = (struct btrfs_header *)kaddr;
1826	memcpy(on_disk_csum, h->csum, sctx->csum_size);
1827
1828	/*
1829	 * we don't use the getter functions here, as we
1830	 * a) don't have an extent buffer and
1831	 * b) the page is already kmapped
1832	 */
1833	if (spage->logical != btrfs_stack_header_bytenr(h))
1834		sblock->header_error = 1;
1835
1836	if (spage->generation != btrfs_stack_header_generation(h)) {
1837		sblock->header_error = 1;
1838		sblock->generation_error = 1;
1839	}
1840
1841	if (!scrub_check_fsid(h->fsid, spage))
1842		sblock->header_error = 1;
1843
1844	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1845		   BTRFS_UUID_SIZE))
1846		sblock->header_error = 1;
1847
1848	shash->tfm = fs_info->csum_shash;
1849	crypto_shash_init(shash);
1850	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
1851			    PAGE_SIZE - BTRFS_CSUM_SIZE);
1852
1853	for (i = 1; i < num_pages; i++) {
1854		kaddr = page_address(sblock->pagev[i]->page);
1855		crypto_shash_update(shash, kaddr, PAGE_SIZE);
1856	}
1857
1858	crypto_shash_final(shash, calculated_csum);
1859	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1860		sblock->checksum_error = 1;
1861
1862	return sblock->header_error || sblock->checksum_error;
1863}
1864
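     /*
      * Verify a super block copy: check bytenr, generation, fsid and the
      * checksum. Errors are only recorded in the statistics and device
      * stats, since the super block is rewritten by the next transaction
      * commit anyway.
      */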
1865static int scrub_checksum_super(struct scrub_block *sblock)
1866{
1867	struct btrfs_super_block *s;
1868	struct scrub_ctx *sctx = sblock->sctx;
1869	struct btrfs_fs_info *fs_info = sctx->fs_info;
1870	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1871	u8 calculated_csum[BTRFS_CSUM_SIZE];
1872	struct scrub_page *spage;
1873	char *kaddr;
1874	int fail_gen = 0;
1875	int fail_cor = 0;
1876
1877	BUG_ON(sblock->page_count < 1);
1878	spage = sblock->pagev[0];
1879	kaddr = page_address(spage->page);
1880	s = (struct btrfs_super_block *)kaddr;
1881
1882	if (spage->logical != btrfs_super_bytenr(s))
1883		++fail_cor;
1884
1885	if (spage->generation != btrfs_super_generation(s))
1886		++fail_gen;
1887
1888	if (!scrub_check_fsid(s->fsid, spage))
1889		++fail_cor;
1890
1891	shash->tfm = fs_info->csum_shash;
1892	crypto_shash_init(shash);
1893	crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
1894			BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
1895
1896	if (memcmp(calculated_csum, s->csum, sctx->csum_size))
1897		++fail_cor;
1898
1899	if (fail_cor + fail_gen) {
1900		/*
1901		 * if we find an error in a super block, we just report it.
1902		 * They will get written with the next transaction commit
1903		 * anyway
1904		 */
1905		spin_lock(&sctx->stat_lock);
1906		++sctx->stat.super_errors;
1907		spin_unlock(&sctx->stat_lock);
1908		if (fail_cor)
1909			btrfs_dev_stat_inc_and_print(spage->dev,
1910				BTRFS_DEV_STAT_CORRUPTION_ERRS);
1911		else
1912			btrfs_dev_stat_inc_and_print(spage->dev,
1913				BTRFS_DEV_STAT_GENERATION_ERRS);
1914	}
1915
1916	return fail_cor + fail_gen;
1917}
1918
1919static void scrub_block_get(struct scrub_block *sblock)
1920{
1921	refcount_inc(&sblock->refs);
1922}
1923
1924static void scrub_block_put(struct scrub_block *sblock)
1925{
1926	if (refcount_dec_and_test(&sblock->refs)) {
1927		int i;
1928
1929		if (sblock->sparity)
1930			scrub_parity_put(sblock->sparity);
1931
1932		for (i = 0; i < sblock->page_count; i++)
1933			scrub_page_put(sblock->pagev[i]);
1934		kfree(sblock);
1935	}
1936}
1937
1938static void scrub_page_get(struct scrub_page *spage)
1939{
1940	atomic_inc(&spage->refs);
1941}
1942
1943static void scrub_page_put(struct scrub_page *spage)
1944{
1945	if (atomic_dec_and_test(&spage->refs)) {
1946		if (spage->page)
1947			__free_page(spage->page);
1948		kfree(spage);
1949	}
1950}
1951
1952static void scrub_submit(struct scrub_ctx *sctx)
1953{
1954	struct scrub_bio *sbio;
1955
1956	if (sctx->curr == -1)
1957		return;
1958
1959	sbio = sctx->bios[sctx->curr];
1960	sctx->curr = -1;
1961	scrub_pending_bio_inc(sctx);
1962	btrfsic_submit_bio(sbio->bio);
1963}
1964
1965static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
1966				    struct scrub_page *spage)
1967{
1968	struct scrub_block *sblock = spage->sblock;
1969	struct scrub_bio *sbio;
1970	int ret;
1971
1972again:
1973	/*
1974	 * grab a fresh bio or wait for one to become available
1975	 */
1976	while (sctx->curr == -1) {
1977		spin_lock(&sctx->list_lock);
1978		sctx->curr = sctx->first_free;
1979		if (sctx->curr != -1) {
1980			sctx->first_free = sctx->bios[sctx->curr]->next_free;
1981			sctx->bios[sctx->curr]->next_free = -1;
1982			sctx->bios[sctx->curr]->page_count = 0;
1983			spin_unlock(&sctx->list_lock);
1984		} else {
1985			spin_unlock(&sctx->list_lock);
1986			wait_event(sctx->list_wait, sctx->first_free != -1);
1987		}
1988	}
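	/*
	 * Append the page to the current bio only if it is physically and
	 * logically contiguous with it and on the same device; otherwise
	 * submit the bio and start over with a fresh one.
	 */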
1989	sbio = sctx->bios[sctx->curr];
1990	if (sbio->page_count == 0) {
1991		struct bio *bio;
1992
1993		sbio->physical = spage->physical;
1994		sbio->logical = spage->logical;
1995		sbio->dev = spage->dev;
1996		bio = sbio->bio;
1997		if (!bio) {
1998			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
1999			sbio->bio = bio;
2000		}
2001
2002		bio->bi_private = sbio;
2003		bio->bi_end_io = scrub_bio_end_io;
2004		bio_set_dev(bio, sbio->dev->bdev);
2005		bio->bi_iter.bi_sector = sbio->physical >> 9;
2006		bio->bi_opf = REQ_OP_READ;
2007		sbio->status = 0;
2008	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2009		   spage->physical ||
2010		   sbio->logical + sbio->page_count * PAGE_SIZE !=
2011		   spage->logical ||
2012		   sbio->dev != spage->dev) {
2013		scrub_submit(sctx);
2014		goto again;
2015	}
2016
2017	sbio->pagev[sbio->page_count] = spage;
2018	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2019	if (ret != PAGE_SIZE) {
2020		if (sbio->page_count < 1) {
2021			bio_put(sbio->bio);
2022			sbio->bio = NULL;
2023			return -EIO;
2024		}
2025		scrub_submit(sctx);
2026		goto again;
2027	}
2028
2029	scrub_block_get(sblock); /* one for the page added to the bio */
2030	atomic_inc(&sblock->outstanding_pages);
2031	sbio->page_count++;
2032	if (sbio->page_count == sctx->pages_per_rd_bio)
2033		scrub_submit(sctx);
2034
2035	return 0;
2036}
2037
2038static void scrub_missing_raid56_end_io(struct bio *bio)
2039{
2040	struct scrub_block *sblock = bio->bi_private;
2041	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2042
2043	if (bio->bi_status)
2044		sblock->no_io_error_seen = 0;
2045
2046	bio_put(bio);
2047
2048	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2049}
2050
2051static void scrub_missing_raid56_worker(struct btrfs_work *work)
2052{
2053	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2054	struct scrub_ctx *sctx = sblock->sctx;
2055	struct btrfs_fs_info *fs_info = sctx->fs_info;
2056	u64 logical;
2057	struct btrfs_device *dev;
2058
2059	logical = sblock->pagev[0]->logical;
2060	dev = sblock->pagev[0]->dev;
2061
2062	if (sblock->no_io_error_seen)
2063		scrub_recheck_block_checksum(sblock);
2064
2065	if (!sblock->no_io_error_seen) {
2066		spin_lock(&sctx->stat_lock);
2067		sctx->stat.read_errors++;
2068		spin_unlock(&sctx->stat_lock);
2069		btrfs_err_rl_in_rcu(fs_info,
2070			"IO error rebuilding logical %llu for dev %s",
2071			logical, rcu_str_deref(dev->name));
2072	} else if (sblock->header_error || sblock->checksum_error) {
2073		spin_lock(&sctx->stat_lock);
2074		sctx->stat.uncorrectable_errors++;
2075		spin_unlock(&sctx->stat_lock);
2076		btrfs_err_rl_in_rcu(fs_info,
2077			"failed to rebuild valid logical %llu for dev %s",
2078			logical, rcu_str_deref(dev->name));
2079	} else {
2080		scrub_write_block_to_dev_replace(sblock);
2081	}
2082
2083	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2084		mutex_lock(&sctx->wr_lock);
2085		scrub_wr_submit(sctx);
2086		mutex_unlock(&sctx->wr_lock);
2087	}
2088
2089	scrub_block_put(sblock);
2090	scrub_pending_bio_dec(sctx);
2091}
2092
2093static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2094{
2095	struct scrub_ctx *sctx = sblock->sctx;
2096	struct btrfs_fs_info *fs_info = sctx->fs_info;
2097	u64 length = sblock->page_count * PAGE_SIZE;
2098	u64 logical = sblock->pagev[0]->logical;
2099	struct btrfs_bio *bbio = NULL;
2100	struct bio *bio;
2101	struct btrfs_raid_bio *rbio;
2102	int ret;
2103	int i;
2104
2105	btrfs_bio_counter_inc_blocked(fs_info);
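	/*
	 * Map the full stripe so the RAID56 code can rebuild the data of
	 * the missing device from the remaining stripes.
	 */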
2106	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2107			&length, &bbio);
2108	if (ret || !bbio || !bbio->raid_map)
2109		goto bbio_out;
2110
2111	if (WARN_ON(!sctx->is_dev_replace ||
2112		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2113		/*
2114		 * We shouldn't be scrubbing a missing device. Even for dev
2115		 * replace, we should only get here for RAID 5/6. We either
2116		 * managed to mount something with no mirrors remaining or
2117		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2118		 */
2119		goto bbio_out;
2120	}
2121
2122	bio = btrfs_io_bio_alloc(0);
2123	bio->bi_iter.bi_sector = logical >> 9;
2124	bio->bi_private = sblock;
2125	bio->bi_end_io = scrub_missing_raid56_end_io;
2126
2127	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2128	if (!rbio)
2129		goto rbio_out;
2130
2131	for (i = 0; i < sblock->page_count; i++) {
2132		struct scrub_page *spage = sblock->pagev[i];
2133
2134		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2135	}
2136
2137	btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
2138	scrub_block_get(sblock);
2139	scrub_pending_bio_inc(sctx);
2140	raid56_submit_missing_rbio(rbio);
2141	return;
2142
2143rbio_out:
2144	bio_put(bio);
2145bbio_out:
2146	btrfs_bio_counter_dec(fs_info);
2147	btrfs_put_bbio(bbio);
2148	spin_lock(&sctx->stat_lock);
2149	sctx->stat.malloc_errors++;
2150	spin_unlock(&sctx->stat_lock);
2151}
2152
2153static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2154		       u64 physical, struct btrfs_device *dev, u64 flags,
2155		       u64 gen, int mirror_num, u8 *csum, int force,
2156		       u64 physical_for_dev_replace)
2157{
2158	struct scrub_block *sblock;
2159	int index;
2160
2161	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2162	if (!sblock) {
2163		spin_lock(&sctx->stat_lock);
2164		sctx->stat.malloc_errors++;
2165		spin_unlock(&sctx->stat_lock);
2166		return -ENOMEM;
2167	}
2168
2169	/* one ref inside this function, plus one for each page added to
2170	 * a bio later on */
2171	refcount_set(&sblock->refs, 1);
2172	sblock->sctx = sctx;
2173	sblock->no_io_error_seen = 1;
2174
2175	for (index = 0; len > 0; index++) {
2176		struct scrub_page *spage;
2177		u64 l = min_t(u64, len, PAGE_SIZE);
2178
2179		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2180		if (!spage) {
2181leave_nomem:
2182			spin_lock(&sctx->stat_lock);
2183			sctx->stat.malloc_errors++;
2184			spin_unlock(&sctx->stat_lock);
2185			scrub_block_put(sblock);
2186			return -ENOMEM;
2187		}
2188		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2189		scrub_page_get(spage);
2190		sblock->pagev[index] = spage;
2191		spage->sblock = sblock;
2192		spage->dev = dev;
2193		spage->flags = flags;
2194		spage->generation = gen;
2195		spage->logical = logical;
2196		spage->physical = physical;
2197		spage->physical_for_dev_replace = physical_for_dev_replace;
2198		spage->mirror_num = mirror_num;
2199		if (csum) {
2200			spage->have_csum = 1;
2201			memcpy(spage->csum, csum, sctx->csum_size);
2202		} else {
2203			spage->have_csum = 0;
2204		}
2205		sblock->page_count++;
2206		spage->page = alloc_page(GFP_KERNEL);
2207		if (!spage->page)
2208			goto leave_nomem;
2209		len -= l;
2210		logical += l;
2211		physical += l;
2212		physical_for_dev_replace += l;
2213	}
2214
2215	WARN_ON(sblock->page_count == 0);
2216	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2217		/*
2218		 * This case should only be hit for RAID 5/6 device replace. See
2219		 * the comment in scrub_missing_raid56_pages() for details.
2220		 */
2221		scrub_missing_raid56_pages(sblock);
2222	} else {
2223		for (index = 0; index < sblock->page_count; index++) {
2224			struct scrub_page *spage = sblock->pagev[index];
2225			int ret;
2226
2227			ret = scrub_add_page_to_rd_bio(sctx, spage);
2228			if (ret) {
2229				scrub_block_put(sblock);
2230				return ret;
2231			}
2232		}
2233
2234		if (force)
2235			scrub_submit(sctx);
2236	}
2237
2238	/* last one frees, either here or in bio completion for last page */
2239	scrub_block_put(sblock);
2240	return 0;
2241}
2242
2243static void scrub_bio_end_io(struct bio *bio)
2244{
2245	struct scrub_bio *sbio = bio->bi_private;
2246	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2247
2248	sbio->status = bio->bi_status;
2249	sbio->bio = bio;
2250
2251	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2252}
2253
2254static void scrub_bio_end_io_worker(struct btrfs_work *work)
2255{
2256	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2257	struct scrub_ctx *sctx = sbio->sctx;
2258	int i;
2259
2260	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2261	if (sbio->status) {
2262		for (i = 0; i < sbio->page_count; i++) {
2263			struct scrub_page *spage = sbio->pagev[i];
2264
2265			spage->io_error = 1;
2266			spage->sblock->no_io_error_seen = 0;
2267		}
2268	}
2269
2270	/* now complete the scrub_block items that have all pages completed */
2271	for (i = 0; i < sbio->page_count; i++) {
2272		struct scrub_page *spage = sbio->pagev[i];
2273		struct scrub_block *sblock = spage->sblock;
2274
2275		if (atomic_dec_and_test(&sblock->outstanding_pages))
2276			scrub_block_complete(sblock);
2277		scrub_block_put(sblock);
2278	}
2279
2280	bio_put(sbio->bio);
2281	sbio->bio = NULL;
2282	spin_lock(&sctx->list_lock);
2283	sbio->next_free = sctx->first_free;
2284	sctx->first_free = sbio->index;
2285	spin_unlock(&sctx->list_lock);
2286
2287	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2288		mutex_lock(&sctx->wr_lock);
2289		scrub_wr_submit(sctx);
2290		mutex_unlock(&sctx->wr_lock);
2291	}
2292
2293	scrub_pending_bio_dec(sctx);
2294}
2295
2296static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2297				       unsigned long *bitmap,
2298				       u64 start, u64 len)
2299{
2300	u64 offset;
2301	u64 nsectors64;
2302	u32 nsectors;
2303	int sectorsize = sparity->sctx->fs_info->sectorsize;
2304
2305	if (len >= sparity->stripe_len) {
2306		bitmap_set(bitmap, 0, sparity->nsectors);
2307		return;
2308	}
2309
2310	start -= sparity->logic_start;
2311	start = div64_u64_rem(start, sparity->stripe_len, &offset);
2312	offset = div_u64(offset, sectorsize);
2313	nsectors64 = div_u64(len, sectorsize);
2314
2315	ASSERT(nsectors64 < UINT_MAX);
2316	nsectors = (u32)nsectors64;
2317
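	/*
	 * The sector range may wrap past the end of the per-stripe bitmap;
	 * if so, set it in two chunks below.
	 */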
2318	if (offset + nsectors <= sparity->nsectors) {
2319		bitmap_set(bitmap, offset, nsectors);
2320		return;
2321	}
2322
2323	bitmap_set(bitmap, offset, sparity->nsectors - offset);
2324	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2325}
2326
2327static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2328						   u64 start, u64 len)
2329{
2330	__scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2331}
2332
2333static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2334						  u64 start, u64 len)
2335{
2336	__scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2337}
2338
2339static void scrub_block_complete(struct scrub_block *sblock)
2340{
2341	int corrupted = 0;
2342
2343	if (!sblock->no_io_error_seen) {
2344		corrupted = 1;
2345		scrub_handle_errored_block(sblock);
2346	} else {
2347		/*
2348		 * In the dev-replace case: if the block has a checksum
2349		 * error, it is written via the repair mechanism; otherwise
2350		 * write it to the target device here.
2351		 */
2352		corrupted = scrub_checksum(sblock);
2353		if (!corrupted && sblock->sctx->is_dev_replace)
2354			scrub_write_block_to_dev_replace(sblock);
2355	}
2356
2357	if (sblock->sparity && corrupted && !sblock->data_corrected) {
2358		u64 start = sblock->pagev[0]->logical;
2359		u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2360			  PAGE_SIZE;
2361
2362		scrub_parity_mark_sectors_error(sblock->sparity,
2363						start, end - start);
2364	}
2365}
2366
2367static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2368{
2369	struct btrfs_ordered_sum *sum = NULL;
2370	unsigned long index;
2371	unsigned long num_sectors;
2372
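	/*
	 * The csum list is kept sorted by bytenr: drop sums that end at or
	 * before @logical and stop at the first one that could cover it.
	 */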
2373	while (!list_empty(&sctx->csum_list)) {
2374		sum = list_first_entry(&sctx->csum_list,
2375				       struct btrfs_ordered_sum, list);
2376		if (sum->bytenr > logical)
2377			return 0;
2378		if (sum->bytenr + sum->len > logical)
2379			break;
2380
2381		++sctx->stat.csum_discards;
2382		list_del(&sum->list);
2383		kfree(sum);
2384		sum = NULL;
2385	}
2386	if (!sum)
2387		return 0;
2388
2389	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2390	ASSERT(index < UINT_MAX);
2391
2392	num_sectors = sum->len / sctx->fs_info->sectorsize;
2393	memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
2394	if (index == num_sectors - 1) {
2395		list_del(&sum->list);
2396		kfree(sum);
2397	}
2398	return 1;
2399}
2400
2401/* scrub extent tries to collect up to 64 kB for each bio */
2402static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2403			u64 logical, u64 len,
2404			u64 physical, struct btrfs_device *dev, u64 flags,
2405			u64 gen, int mirror_num, u64 physical_for_dev_replace)
2406{
2407	int ret;
2408	u8 csum[BTRFS_CSUM_SIZE];
2409	u32 blocksize;
2410
2411	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2412		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2413			blocksize = map->stripe_len;
2414		else
2415			blocksize = sctx->fs_info->sectorsize;
2416		spin_lock(&sctx->stat_lock);
2417		sctx->stat.data_extents_scrubbed++;
2418		sctx->stat.data_bytes_scrubbed += len;
2419		spin_unlock(&sctx->stat_lock);
2420	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2421		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2422			blocksize = map->stripe_len;
2423		else
2424			blocksize = sctx->fs_info->nodesize;
2425		spin_lock(&sctx->stat_lock);
2426		sctx->stat.tree_extents_scrubbed++;
2427		sctx->stat.tree_bytes_scrubbed += len;
2428		spin_unlock(&sctx->stat_lock);
2429	} else {
2430		blocksize = sctx->fs_info->sectorsize;
2431		WARN_ON(1);
2432	}
2433
2434	while (len) {
2435		u64 l = min_t(u64, len, blocksize);
2436		int have_csum = 0;
2437
2438		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2439			/* push csums to sbio */
2440			have_csum = scrub_find_csum(sctx, logical, csum);
2441			if (have_csum == 0)
2442				++sctx->stat.no_csum;
2443		}
2444		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2445				  mirror_num, have_csum ? csum : NULL, 0,
2446				  physical_for_dev_replace);
2447		if (ret)
2448			return ret;
2449		len -= l;
2450		logical += l;
2451		physical += l;
2452		physical_for_dev_replace += l;
2453	}
2454	return 0;
2455}
2456
2457static int scrub_pages_for_parity(struct scrub_parity *sparity,
2458				  u64 logical, u64 len,
2459				  u64 physical, struct btrfs_device *dev,
2460				  u64 flags, u64 gen, int mirror_num, u8 *csum)
2461{
2462	struct scrub_ctx *sctx = sparity->sctx;
2463	struct scrub_block *sblock;
2464	int index;
2465
2466	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2467	if (!sblock) {
2468		spin_lock(&sctx->stat_lock);
2469		sctx->stat.malloc_errors++;
2470		spin_unlock(&sctx->stat_lock);
2471		return -ENOMEM;
2472	}
2473
2474	/* one ref inside this function, plus one for each page added to
2475	 * a bio later on */
2476	refcount_set(&sblock->refs, 1);
2477	sblock->sctx = sctx;
2478	sblock->no_io_error_seen = 1;
2479	sblock->sparity = sparity;
2480	scrub_parity_get(sparity);
2481
2482	for (index = 0; len > 0; index++) {
2483		struct scrub_page *spage;
2484		u64 l = min_t(u64, len, PAGE_SIZE);
2485
2486		spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2487		if (!spage) {
2488leave_nomem:
2489			spin_lock(&sctx->stat_lock);
2490			sctx->stat.malloc_errors++;
2491			spin_unlock(&sctx->stat_lock);
2492			scrub_block_put(sblock);
2493			return -ENOMEM;
2494		}
2495		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2496		/* For scrub block */
2497		scrub_page_get(spage);
2498		sblock->pagev[index] = spage;
2499		/* For scrub parity */
2500		scrub_page_get(spage);
2501		list_add_tail(&spage->list, &sparity->spages);
2502		spage->sblock = sblock;
2503		spage->dev = dev;
2504		spage->flags = flags;
2505		spage->generation = gen;
2506		spage->logical = logical;
2507		spage->physical = physical;
2508		spage->mirror_num = mirror_num;
2509		if (csum) {
2510			spage->have_csum = 1;
2511			memcpy(spage->csum, csum, sctx->csum_size);
2512		} else {
2513			spage->have_csum = 0;
2514		}
2515		sblock->page_count++;
2516		spage->page = alloc_page(GFP_KERNEL);
2517		if (!spage->page)
2518			goto leave_nomem;
2519		len -= l;
2520		logical += l;
2521		physical += l;
2522	}
2523
2524	WARN_ON(sblock->page_count == 0);
2525	for (index = 0; index < sblock->page_count; index++) {
2526		struct scrub_page *spage = sblock->pagev[index];
2527		int ret;
2528
2529		ret = scrub_add_page_to_rd_bio(sctx, spage);
2530		if (ret) {
2531			scrub_block_put(sblock);
2532			return ret;
2533		}
2534	}
2535
2536	/* last one frees, either here or in bio completion for last page */
2537	scrub_block_put(sblock);
2538	return 0;
2539}
2540
2541static int scrub_extent_for_parity(struct scrub_parity *sparity,
2542				   u64 logical, u64 len,
2543				   u64 physical, struct btrfs_device *dev,
2544				   u64 flags, u64 gen, int mirror_num)
2545{
2546	struct scrub_ctx *sctx = sparity->sctx;
2547	int ret;
2548	u8 csum[BTRFS_CSUM_SIZE];
2549	u32 blocksize;
2550
2551	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2552		scrub_parity_mark_sectors_error(sparity, logical, len);
2553		return 0;
2554	}
2555
2556	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2557		blocksize = sparity->stripe_len;
2558	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2559		blocksize = sparity->stripe_len;
2560	} else {
2561		blocksize = sctx->fs_info->sectorsize;
2562		WARN_ON(1);
2563	}
2564
2565	while (len) {
2566		u64 l = min_t(u64, len, blocksize);
2567		int have_csum = 0;
2568
2569		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2570			/* push csums to sbio */
2571			have_csum = scrub_find_csum(sctx, logical, csum);
2572			if (have_csum == 0)
2573				goto skip;
2574		}
2575		ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2576					     flags, gen, mirror_num,
2577					     have_csum ? csum : NULL);
2578		if (ret)
2579			return ret;
2580skip:
2581		len -= l;
2582		logical += l;
2583		physical += l;
2584	}
2585	return 0;
2586}
2587
2588 /*
2589  * Given a physical address, this will calculate its
2590  * logical offset. If this is a parity stripe, it will return
2591  * the leftmost data stripe's logical offset.
2592  *
2593  * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2594  */
2595static int get_raid56_logic_offset(u64 physical, int num,
2596				   struct map_lookup *map, u64 *offset,
2597				   u64 *stripe_start)
2598{
2599	int i;
2600	int j = 0;
2601	u64 stripe_nr;
2602	u64 last_offset;
2603	u32 stripe_index;
2604	u32 rot;
2605	const int data_stripes = nr_data_stripes(map);
2606
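	/*
	 * Each stripe on this device belongs to a full stripe covering
	 * data_stripes * stripe_len bytes of logical address space.
	 */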
2607	last_offset = (physical - map->stripes[num].physical) * data_stripes;
2608	if (stripe_start)
2609		*stripe_start = last_offset;
2610
2611	*offset = last_offset;
2612	for (i = 0; i < data_stripes; i++) {
2613		*offset = last_offset + i * map->stripe_len;
2614
2615		stripe_nr = div64_u64(*offset, map->stripe_len);
2616		stripe_nr = div_u64(stripe_nr, data_stripes);
2617
2618		/* Work out the disk rotation on this stripe-set */
2619		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2620		/* Calculate which stripe this data is located on */
2621		rot += i;
2622		stripe_index = rot % map->num_stripes;
2623		if (stripe_index == num)
2624			return 0;
2625		if (stripe_index < num)
2626			j++;
2627	}
2628	*offset = last_offset + j * map->stripe_len;
2629	return 1;
2630}
2631
2632static void scrub_free_parity(struct scrub_parity *sparity)
2633{
2634	struct scrub_ctx *sctx = sparity->sctx;
2635	struct scrub_page *curr, *next;
2636	int nbits;
2637
2638	nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2639	if (nbits) {
2640		spin_lock(&sctx->stat_lock);
2641		sctx->stat.read_errors += nbits;
2642		sctx->stat.uncorrectable_errors += nbits;
2643		spin_unlock(&sctx->stat_lock);
2644	}
2645
2646	list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2647		list_del_init(&curr->list);
2648		scrub_page_put(curr);
2649	}
2650
2651	kfree(sparity);
2652}
2653
2654static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2655{
2656	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2657						    work);
2658	struct scrub_ctx *sctx = sparity->sctx;
2659
2660	scrub_free_parity(sparity);
2661	scrub_pending_bio_dec(sctx);
2662}
2663
2664static void scrub_parity_bio_endio(struct bio *bio)
2665{
2666	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2667	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2668
2669	if (bio->bi_status)
2670		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2671			  sparity->nsectors);
2672
2673	bio_put(bio);
2674
2675	btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
2676			NULL);
2677	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
2678}
2679
2680static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2681{
2682	struct scrub_ctx *sctx = sparity->sctx;
2683	struct btrfs_fs_info *fs_info = sctx->fs_info;
2684	struct bio *bio;
2685	struct btrfs_raid_bio *rbio;
2686	struct btrfs_bio *bbio = NULL;
2687	u64 length;
2688	int ret;
2689
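	/*
	 * Clear sectors that already hit errors from the data bitmap; if
	 * nothing is left to verify, skip the parity check entirely.
	 */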
2690	if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2691			   sparity->nsectors))
2692		goto out;
2693
2694	length = sparity->logic_end - sparity->logic_start;
2695
2696	btrfs_bio_counter_inc_blocked(fs_info);
2697	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2698			       &length, &bbio);
2699	if (ret || !bbio || !bbio->raid_map)
2700		goto bbio_out;
2701
2702	bio = btrfs_io_bio_alloc(0);
2703	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2704	bio->bi_private = sparity;
2705	bio->bi_end_io = scrub_parity_bio_endio;
2706
2707	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
2708					      length, sparity->scrub_dev,
2709					      sparity->dbitmap,
2710					      sparity->nsectors);
2711	if (!rbio)
2712		goto rbio_out;
2713
2714	scrub_pending_bio_inc(sctx);
2715	raid56_parity_submit_scrub_rbio(rbio);
2716	return;
2717
2718rbio_out:
2719	bio_put(bio);
2720bbio_out:
2721	btrfs_bio_counter_dec(fs_info);
2722	btrfs_put_bbio(bbio);
2723	bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2724		  sparity->nsectors);
2725	spin_lock(&sctx->stat_lock);
2726	sctx->stat.malloc_errors++;
2727	spin_unlock(&sctx->stat_lock);
2728out:
2729	scrub_free_parity(sparity);
2730}
2731
2732static inline int scrub_calc_parity_bitmap_len(int nsectors)
2733{
2734	return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2735}
2736
2737static void scrub_parity_get(struct scrub_parity *sparity)
2738{
2739	refcount_inc(&sparity->refs);
2740}
2741
2742static void scrub_parity_put(struct scrub_parity *sparity)
2743{
2744	if (!refcount_dec_and_test(&sparity->refs))
2745		return;
2746
2747	scrub_parity_check_and_repair(sparity);
2748}
2749
2750static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2751						  struct map_lookup *map,
2752						  struct btrfs_device *sdev,
2753						  struct btrfs_path *path,
2754						  u64 logic_start,
2755						  u64 logic_end)
2756{
2757	struct btrfs_fs_info *fs_info = sctx->fs_info;
2758	struct btrfs_root *root = fs_info->extent_root;
2759	struct btrfs_root *csum_root = fs_info->csum_root;
2760	struct btrfs_extent_item *extent;
2761	struct btrfs_bio *bbio = NULL;
2762	u64 flags;
2763	int ret;
2764	int slot;
2765	struct extent_buffer *l;
2766	struct btrfs_key key;
2767	u64 generation;
2768	u64 extent_logical;
2769	u64 extent_physical;
2770	u64 extent_len;
2771	u64 mapped_length;
2772	struct btrfs_device *extent_dev;
2773	struct scrub_parity *sparity;
2774	int nsectors;
2775	int bitmap_len;
2776	int extent_mirror_num;
2777	int stop_loop = 0;
2778
2779	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
2780	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2781	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2782			  GFP_NOFS);
2783	if (!sparity) {
2784		spin_lock(&sctx->stat_lock);
2785		sctx->stat.malloc_errors++;
2786		spin_unlock(&sctx->stat_lock);
2787		return -ENOMEM;
2788	}
2789
2790	sparity->stripe_len = map->stripe_len;
2791	sparity->nsectors = nsectors;
2792	sparity->sctx = sctx;
2793	sparity->scrub_dev = sdev;
2794	sparity->logic_start = logic_start;
2795	sparity->logic_end = logic_end;
2796	refcount_set(&sparity->refs, 1);
2797	INIT_LIST_HEAD(&sparity->spages);
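	/*
	 * The two bitmaps share one allocation: dbitmap tracks sectors that
	 * contain data to verify, ebitmap tracks sectors that hit errors.
	 */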
2798	sparity->dbitmap = sparity->bitmap;
2799	sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2800
2801	ret = 0;
2802	while (logic_start < logic_end) {
2803		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2804			key.type = BTRFS_METADATA_ITEM_KEY;
2805		else
2806			key.type = BTRFS_EXTENT_ITEM_KEY;
2807		key.objectid = logic_start;
2808		key.offset = (u64)-1;
2809
2810		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2811		if (ret < 0)
2812			goto out;
2813
2814		if (ret > 0) {
2815			ret = btrfs_previous_extent_item(root, path, 0);
2816			if (ret < 0)
2817				goto out;
2818			if (ret > 0) {
2819				btrfs_release_path(path);
2820				ret = btrfs_search_slot(NULL, root, &key,
2821							path, 0, 0);
2822				if (ret < 0)
2823					goto out;
2824			}
2825		}
2826
2827		stop_loop = 0;
2828		while (1) {
2829			u64 bytes;
2830
2831			l = path->nodes[0];
2832			slot = path->slots[0];
2833			if (slot >= btrfs_header_nritems(l)) {
2834				ret = btrfs_next_leaf(root, path);
2835				if (ret == 0)
2836					continue;
2837				if (ret < 0)
2838					goto out;
2839
2840				stop_loop = 1;
2841				break;
2842			}
2843			btrfs_item_key_to_cpu(l, &key, slot);
2844
2845			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2846			    key.type != BTRFS_METADATA_ITEM_KEY)
2847				goto next;
2848
2849			if (key.type == BTRFS_METADATA_ITEM_KEY)
2850				bytes = fs_info->nodesize;
2851			else
2852				bytes = key.offset;
2853
2854			if (key.objectid + bytes <= logic_start)
2855				goto next;
2856
2857			if (key.objectid >= logic_end) {
2858				stop_loop = 1;
2859				break;
2860			}
2861
2862			while (key.objectid >= logic_start + map->stripe_len)
2863				logic_start += map->stripe_len;
2864
2865			extent = btrfs_item_ptr(l, slot,
2866						struct btrfs_extent_item);
2867			flags = btrfs_extent_flags(l, extent);
2868			generation = btrfs_extent_generation(l, extent);
2869
2870			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2871			    (key.objectid < logic_start ||
2872			     key.objectid + bytes >
2873			     logic_start + map->stripe_len)) {
2874				btrfs_err(fs_info,
2875					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2876					  key.objectid, logic_start);
2877				spin_lock(&sctx->stat_lock);
2878				sctx->stat.uncorrectable_errors++;
2879				spin_unlock(&sctx->stat_lock);
2880				goto next;
2881			}
2882again:
2883			extent_logical = key.objectid;
2884			extent_len = bytes;
2885
2886			if (extent_logical < logic_start) {
2887				extent_len -= logic_start - extent_logical;
2888				extent_logical = logic_start;
2889			}
2890
2891			if (extent_logical + extent_len >
2892			    logic_start + map->stripe_len)
2893				extent_len = logic_start + map->stripe_len -
2894					     extent_logical;
2895
2896			scrub_parity_mark_sectors_data(sparity, extent_logical,
2897						       extent_len);
2898
2899			mapped_length = extent_len;
2900			bbio = NULL;
2901			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
2902					extent_logical, &mapped_length, &bbio,
2903					0);
2904			if (!ret) {
2905				if (!bbio || mapped_length < extent_len)
2906					ret = -EIO;
2907			}
2908			if (ret) {
2909				btrfs_put_bbio(bbio);
2910				goto out;
2911			}
2912			extent_physical = bbio->stripes[0].physical;
2913			extent_mirror_num = bbio->mirror_num;
2914			extent_dev = bbio->stripes[0].dev;
2915			btrfs_put_bbio(bbio);
2916
2917			ret = btrfs_lookup_csums_range(csum_root,
2918						extent_logical,
2919						extent_logical + extent_len - 1,
2920						&sctx->csum_list, 1);
2921			if (ret)
2922				goto out;
2923
2924			ret = scrub_extent_for_parity(sparity, extent_logical,
2925						      extent_len,
2926						      extent_physical,
2927						      extent_dev, flags,
2928						      generation,
2929						      extent_mirror_num);
2930
2931			scrub_free_csums(sctx);
2932
2933			if (ret)
2934				goto out;
2935
2936			if (extent_logical + extent_len <
2937			    key.objectid + bytes) {
2938				logic_start += map->stripe_len;
2939
2940				if (logic_start >= logic_end) {
2941					stop_loop = 1;
2942					break;
2943				}
2944
2945				if (logic_start < key.objectid + bytes) {
2946					cond_resched();
2947					goto again;
2948				}
2949			}
2950next:
2951			path->slots[0]++;
2952		}
2953
2954		btrfs_release_path(path);
2955
2956		if (stop_loop)
2957			break;
2958
2959		logic_start += map->stripe_len;
2960	}
2961out:
2962	if (ret < 0)
2963		scrub_parity_mark_sectors_error(sparity, logic_start,
2964						logic_end - logic_start);
2965	scrub_parity_put(sparity);
2966	scrub_submit(sctx);
2967	mutex_lock(&sctx->wr_lock);
2968	scrub_wr_submit(sctx);
2969	mutex_unlock(&sctx->wr_lock);
2970
2971	btrfs_release_path(path);
2972	return ret < 0 ? ret : 0;
2973}
2974
2975static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2976					   struct map_lookup *map,
2977					   struct btrfs_device *scrub_dev,
2978					   int num, u64 base, u64 length,
2979					   struct btrfs_block_group *cache)
2980{
2981	struct btrfs_path *path, *ppath;
2982	struct btrfs_fs_info *fs_info = sctx->fs_info;
2983	struct btrfs_root *root = fs_info->extent_root;
2984	struct btrfs_root *csum_root = fs_info->csum_root;
2985	struct btrfs_extent_item *extent;
2986	struct blk_plug plug;
2987	u64 flags;
2988	int ret;
2989	int slot;
2990	u64 nstripes;
2991	struct extent_buffer *l;
2992	u64 physical;
2993	u64 logical;
2994	u64 logic_end;
2995	u64 physical_end;
2996	u64 generation;
2997	int mirror_num;
2998	struct reada_control *reada1;
2999	struct reada_control *reada2;
3000	struct btrfs_key key;
3001	struct btrfs_key key_end;
3002	u64 increment = map->stripe_len;
3003	u64 offset;
3004	u64 extent_logical;
3005	u64 extent_physical;
3006	u64 extent_len;
3007	u64 stripe_logical;
3008	u64 stripe_end;
3009	struct btrfs_device *extent_dev;
3010	int extent_mirror_num;
3011	int stop_loop = 0;
3012
3013	physical = map->stripes[num].physical;
3014	offset = 0;
3015	nstripes = div64_u64(length, map->stripe_len);
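	/*
	 * Work out the logical offset of this device's first stripe within
	 * the chunk and the logical distance (increment) between two
	 * consecutive stripes stored on this device.
	 */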
3016	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3017		offset = map->stripe_len * num;
3018		increment = map->stripe_len * map->num_stripes;
3019		mirror_num = 1;
3020	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3021		int factor = map->num_stripes / map->sub_stripes;
3022		offset = map->stripe_len * (num / map->sub_stripes);
3023		increment = map->stripe_len * factor;
3024		mirror_num = num % map->sub_stripes + 1;
3025	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
3026		increment = map->stripe_len;
3027		mirror_num = num % map->num_stripes + 1;
3028	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3029		increment = map->stripe_len;
3030		mirror_num = num % map->num_stripes + 1;
3031	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3032		get_raid56_logic_offset(physical, num, map, &offset, NULL);
3033		increment = map->stripe_len * nr_data_stripes(map);
3034		mirror_num = 1;
3035	} else {
3036		increment = map->stripe_len;
3037		mirror_num = 1;
3038	}
3039
3040	path = btrfs_alloc_path();
3041	if (!path)
3042		return -ENOMEM;
3043
3044	ppath = btrfs_alloc_path();
3045	if (!ppath) {
3046		btrfs_free_path(path);
3047		return -ENOMEM;
3048	}
3049
3050	/*
3051	 * Work on the commit root. The related disk blocks are static as
3052	 * long as COW is applied. This means it is safe to rewrite
3053	 * them to repair disk errors without any race conditions.
3054	 */
3055	path->search_commit_root = 1;
3056	path->skip_locking = 1;
3057
3058	ppath->search_commit_root = 1;
3059	ppath->skip_locking = 1;
3060	/*
3061	 * Trigger the readahead for the extent tree and csum tree and
3062	 * wait for completion. During readahead, the scrub is officially
3063	 * paused so it does not hold off transaction commits.
3064	 */
3065	logical = base + offset;
3066	physical_end = physical + nstripes * map->stripe_len;
3067	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3068		get_raid56_logic_offset(physical_end, num,
3069					map, &logic_end, NULL);
3070		logic_end += base;
3071	} else {
3072		logic_end = logical + increment * nstripes;
3073	}
3074	wait_event(sctx->list_wait,
3075		   atomic_read(&sctx->bios_in_flight) == 0);
3076	scrub_blocked_if_needed(fs_info);
3077
3078	/* FIXME it might be better to start readahead at commit root */
3079	key.objectid = logical;
3080	key.type = BTRFS_EXTENT_ITEM_KEY;
3081	key.offset = (u64)0;
3082	key_end.objectid = logic_end;
3083	key_end.type = BTRFS_METADATA_ITEM_KEY;
3084	key_end.offset = (u64)-1;
3085	reada1 = btrfs_reada_add(root, &key, &key_end);
3086
3087	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3088	key.type = BTRFS_EXTENT_CSUM_KEY;
3089	key.offset = logical;
3090	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3091	key_end.type = BTRFS_EXTENT_CSUM_KEY;
3092	key_end.offset = logic_end;
3093	reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3094
3095	if (!IS_ERR(reada1))
3096		btrfs_reada_wait(reada1);
3097	if (!IS_ERR(reada2))
3098		btrfs_reada_wait(reada2);
3099
3100
3101	/*
3102	 * Collect all data csums for the stripe to avoid seeking during
3103	 * the scrub. This might currently (crc32) end up being about 1MB.
3104	 */
3105	blk_start_plug(&plug);
3106
3107	/*
3108	 * now find all extents for each stripe and scrub them
3109	 */
3110	ret = 0;
3111	while (physical < physical_end) {
3112		/*
3113		 * canceled?
3114		 */
3115		if (atomic_read(&fs_info->scrub_cancel_req) ||
3116		    atomic_read(&sctx->cancel_req)) {
3117			ret = -ECANCELED;
3118			goto out;
3119		}
3120		/*
3121		 * check to see if we have to pause
3122		 */
3123		if (atomic_read(&fs_info->scrub_pause_req)) {
3124			/* push queued extents */
3125			sctx->flush_all_writes = true;
3126			scrub_submit(sctx);
3127			mutex_lock(&sctx->wr_lock);
3128			scrub_wr_submit(sctx);
3129			mutex_unlock(&sctx->wr_lock);
3130			wait_event(sctx->list_wait,
3131				   atomic_read(&sctx->bios_in_flight) == 0);
3132			sctx->flush_all_writes = false;
3133			scrub_blocked_if_needed(fs_info);
3134		}
3135
3136		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3137			ret = get_raid56_logic_offset(physical, num, map,
3138						      &logical,
3139						      &stripe_logical);
3140			logical += base;
3141			if (ret) {
3142				/* it is a parity stripe */
3143				stripe_logical += base;
3144				stripe_end = stripe_logical + increment;
3145				ret = scrub_raid56_parity(sctx, map, scrub_dev,
3146							  ppath, stripe_logical,
3147							  stripe_end);
3148				if (ret)
3149					goto out;
3150				goto skip;
3151			}
3152		}
3153
3154		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3155			key.type = BTRFS_METADATA_ITEM_KEY;
3156		else
3157			key.type = BTRFS_EXTENT_ITEM_KEY;
3158		key.objectid = logical;
3159		key.offset = (u64)-1;
3160
3161		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3162		if (ret < 0)
3163			goto out;
3164
3165		if (ret > 0) {
3166			ret = btrfs_previous_extent_item(root, path, 0);
3167			if (ret < 0)
3168				goto out;
3169			if (ret > 0) {
3170				/* there's no smaller item, so stick with the
3171				 * larger one */
3172				btrfs_release_path(path);
3173				ret = btrfs_search_slot(NULL, root, &key,
3174							path, 0, 0);
3175				if (ret < 0)
3176					goto out;
3177			}
3178		}
3179
3180		stop_loop = 0;
3181		while (1) {
3182			u64 bytes;
3183
3184			l = path->nodes[0];
3185			slot = path->slots[0];
3186			if (slot >= btrfs_header_nritems(l)) {
3187				ret = btrfs_next_leaf(root, path);
3188				if (ret == 0)
3189					continue;
3190				if (ret < 0)
3191					goto out;
3192
3193				stop_loop = 1;
3194				break;
3195			}
3196			btrfs_item_key_to_cpu(l, &key, slot);
3197
3198			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3199			    key.type != BTRFS_METADATA_ITEM_KEY)
3200				goto next;
3201
3202			if (key.type == BTRFS_METADATA_ITEM_KEY)
3203				bytes = fs_info->nodesize;
3204			else
3205				bytes = key.offset;
3206
3207			if (key.objectid + bytes <= logical)
3208				goto next;
3209
3210			if (key.objectid >= logical + map->stripe_len) {
3211				/* out of this device extent */
3212				if (key.objectid >= logic_end)
3213					stop_loop = 1;
3214				break;
3215			}
3216
3217			/*
3218			 * If our block group was removed in the meantime, just
3219			 * stop scrubbing since there is no point in continuing.
3220			 * Continuing would prevent reusing its device extents
3221			 * for new block groups for a long time.
3222			 */
3223			spin_lock(&cache->lock);
3224			if (cache->removed) {
3225				spin_unlock(&cache->lock);
3226				ret = 0;
3227				goto out;
3228			}
3229			spin_unlock(&cache->lock);
3230
3231			extent = btrfs_item_ptr(l, slot,
3232						struct btrfs_extent_item);
3233			flags = btrfs_extent_flags(l, extent);
3234			generation = btrfs_extent_generation(l, extent);
3235
3236			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3237			    (key.objectid < logical ||
3238			     key.objectid + bytes >
3239			     logical + map->stripe_len)) {
3240				btrfs_err(fs_info,
3241					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3242				       key.objectid, logical);
3243				spin_lock(&sctx->stat_lock);
3244				sctx->stat.uncorrectable_errors++;
3245				spin_unlock(&sctx->stat_lock);
3246				goto next;
3247			}
3248
3249again:
3250			extent_logical = key.objectid;
3251			extent_len = bytes;
3252
3253			/*
3254			 * trim extent to this stripe
3255			 */
3256			if (extent_logical < logical) {
3257				extent_len -= logical - extent_logical;
3258				extent_logical = logical;
3259			}
3260			if (extent_logical + extent_len >
3261			    logical + map->stripe_len) {
3262				extent_len = logical + map->stripe_len -
3263					     extent_logical;
3264			}
3265
3266			extent_physical = extent_logical - logical + physical;
3267			extent_dev = scrub_dev;
3268			extent_mirror_num = mirror_num;
3269			if (sctx->is_dev_replace)
3270				scrub_remap_extent(fs_info, extent_logical,
3271						   extent_len, &extent_physical,
3272						   &extent_dev,
3273						   &extent_mirror_num);
3274
3275			if (flags & BTRFS_EXTENT_FLAG_DATA) {
3276				ret = btrfs_lookup_csums_range(csum_root,
3277						extent_logical,
3278						extent_logical + extent_len - 1,
3279						&sctx->csum_list, 1);
3280				if (ret)
3281					goto out;
3282			}
3283
3284			ret = scrub_extent(sctx, map, extent_logical, extent_len,
3285					   extent_physical, extent_dev, flags,
3286					   generation, extent_mirror_num,
3287					   extent_logical - logical + physical);
3288
3289			scrub_free_csums(sctx);
3290
3291			if (ret)
3292				goto out;
3293
3294			if (extent_logical + extent_len <
3295			    key.objectid + bytes) {
3296				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3297					/*
3298					 * loop until we find next data stripe
3299					 * or we have finished all stripes.
3300					 */
3301loop:
3302					physical += map->stripe_len;
3303					ret = get_raid56_logic_offset(physical,
3304							num, map, &logical,
3305							&stripe_logical);
3306					logical += base;
3307
3308					if (ret && physical < physical_end) {
3309						stripe_logical += base;
3310						stripe_end = stripe_logical +
3311								increment;
3312						ret = scrub_raid56_parity(sctx,
3313							map, scrub_dev, ppath,
3314							stripe_logical,
3315							stripe_end);
3316						if (ret)
3317							goto out;
3318						goto loop;
3319					}
3320				} else {
3321					physical += map->stripe_len;
3322					logical += increment;
3323				}
3324				if (logical < key.objectid + bytes) {
3325					cond_resched();
3326					goto again;
3327				}
3328
3329				if (physical >= physical_end) {
3330					stop_loop = 1;
3331					break;
3332				}
3333			}
3334next:
3335			path->slots[0]++;
3336		}
3337		btrfs_release_path(path);
3338skip:
3339		logical += increment;
3340		physical += map->stripe_len;
3341		spin_lock(&sctx->stat_lock);
3342		if (stop_loop)
3343			sctx->stat.last_physical = map->stripes[num].physical +
3344						   length;
3345		else
3346			sctx->stat.last_physical = physical;
3347		spin_unlock(&sctx->stat_lock);
3348		if (stop_loop)
3349			break;
3350	}
3351out:
3352	/* push queued extents */
3353	scrub_submit(sctx);
3354	mutex_lock(&sctx->wr_lock);
3355	scrub_wr_submit(sctx);
3356	mutex_unlock(&sctx->wr_lock);
3357
3358	blk_finish_plug(&plug);
3359	btrfs_free_path(path);
3360	btrfs_free_path(ppath);
3361	return ret < 0 ? ret : 0;
3362}
3363
3364static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3365					  struct btrfs_device *scrub_dev,
3366					  u64 chunk_offset, u64 length,
3367					  u64 dev_offset,
3368					  struct btrfs_block_group *cache)
3369{
3370	struct btrfs_fs_info *fs_info = sctx->fs_info;
3371	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
3372	struct map_lookup *map;
3373	struct extent_map *em;
3374	int i;
3375	int ret = 0;
3376
3377	read_lock(&map_tree->lock);
3378	em = lookup_extent_mapping(map_tree, chunk_offset, 1);
3379	read_unlock(&map_tree->lock);
3380
3381	if (!em) {
3382		/*
3383		 * Might have been an unused block group deleted by the cleaner
3384		 * kthread or relocation.
3385		 */
3386		spin_lock(&cache->lock);
3387		if (!cache->removed)
3388			ret = -EINVAL;
3389		spin_unlock(&cache->lock);
3390
3391		return ret;
3392	}
3393
3394	map = em->map_lookup;
3395	if (em->start != chunk_offset)
3396		goto out;
3397
3398	if (em->len < length)
3399		goto out;
3400
3401	for (i = 0; i < map->num_stripes; ++i) {
3402		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3403		    map->stripes[i].physical == dev_offset) {
3404			ret = scrub_stripe(sctx, map, scrub_dev, i,
3405					   chunk_offset, length, cache);
3406			if (ret)
3407				goto out;
3408		}
3409	}
3410out:
3411	free_extent_map(em);
3412
3413	return ret;
3414}
3415
3416static noinline_for_stack
3417int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3418			   struct btrfs_device *scrub_dev, u64 start, u64 end)
3419{
3420	struct btrfs_dev_extent *dev_extent = NULL;
3421	struct btrfs_path *path;
3422	struct btrfs_fs_info *fs_info = sctx->fs_info;
3423	struct btrfs_root *root = fs_info->dev_root;
3424	u64 length;
3425	u64 chunk_offset;
3426	int ret = 0;
3427	int ro_set;
3428	int slot;
3429	struct extent_buffer *l;
3430	struct btrfs_key key;
3431	struct btrfs_key found_key;
3432	struct btrfs_block_group *cache;
3433	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3434
3435	path = btrfs_alloc_path();
3436	if (!path)
3437		return -ENOMEM;
3438
3439	path->reada = READA_FORWARD;
3440	path->search_commit_root = 1;
3441	path->skip_locking = 1;
3442
3443	key.objectid = scrub_dev->devid;
3444	key.offset = 0ull;
3445	key.type = BTRFS_DEV_EXTENT_KEY;
3446
3447	while (1) {
3448		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3449		if (ret < 0)
3450			break;
3451		if (ret > 0) {
3452			if (path->slots[0] >=
3453			    btrfs_header_nritems(path->nodes[0])) {
3454				ret = btrfs_next_leaf(root, path);
3455				if (ret < 0)
3456					break;
3457				if (ret > 0) {
3458					ret = 0;
3459					break;
3460				}
3461			} else {
3462				ret = 0;
3463			}
3464		}
3465
3466		l = path->nodes[0];
3467		slot = path->slots[0];
3468
3469		btrfs_item_key_to_cpu(l, &found_key, slot);
3470
3471		if (found_key.objectid != scrub_dev->devid)
3472			break;
3473
3474		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3475			break;
3476
3477		if (found_key.offset >= end)
3478			break;
3479
3480		if (found_key.offset < key.offset)
3481			break;
3482
3483		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3484		length = btrfs_dev_extent_length(l, dev_extent);
3485
3486		if (found_key.offset + length <= start)
3487			goto skip;
3488
3489		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3490
3491		/*
3492		 * get a reference on the corresponding block group to prevent
3493		 * the chunk from going away while we scrub it
3494		 */
3495		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3496
3497		/* some chunks are removed but not committed to disk yet,
3498		 * continue scrubbing */
3499		if (!cache)
3500			goto skip;
3501
3502		/*
3503		 * Make sure that while we are scrubbing the corresponding block
3504		 * group doesn't get its logical address and its device extents
3505		 * reused for another block group, which can possibly be of a
3506		 * different type and different profile. We do this to prevent
3507		 * false error detections and crashes due to bogus attempts to
3508		 * repair extents.
3509		 */
3510		spin_lock(&cache->lock);
3511		if (cache->removed) {
3512			spin_unlock(&cache->lock);
3513			btrfs_put_block_group(cache);
3514			goto skip;
3515		}
3516		btrfs_freeze_block_group(cache);
3517		spin_unlock(&cache->lock);
3518
3519		/*
3520		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
3521		 * to avoid deadlock caused by:
3522		 * btrfs_inc_block_group_ro()
3523		 * -> btrfs_wait_for_commit()
3524		 * -> btrfs_commit_transaction()
3525		 * -> btrfs_scrub_pause()
3526		 */
3527		scrub_pause_on(fs_info);
3528
3529		/*
3530		 * Don't do chunk preallocation for scrub.
3531		 *
3532		 * This is especially important for SYSTEM bgs, or we can hit
3533		 * -EFBIG from btrfs_finish_chunk_alloc() like:
3534		 * 1. The only SYSTEM bg is marked RO.
3535		 *    Since SYSTEM bg is small, that's pretty common.
3536		 * 2. New SYSTEM bg will be allocated
3537		 *    Due to regular version will allocate new chunk.
3538		 * 3. New SYSTEM bg is empty and will get cleaned up
3539		 *    Before cleanup really happens, it's marked RO again.
3540		 * 4. Empty SYSTEM bg get scrubbed
3541		 *    We go back to 2.
3542		 *
3543		 * This can easily boost the amount of SYSTEM chunks if cleaner
3544		 * thread can't be triggered fast enough, and use up all space
3545		 * of btrfs_super_block::sys_chunk_array
3546		 *
3547		 * While for dev replace, we need to try our best to mark block
3548		 * group RO, to prevent race between:
3549		 * - Write duplication
3550		 *   Contains latest data
3551		 * - Scrub copy
3552		 *   Contains data from commit tree
3553		 *
3554		 * If target block group is not marked RO, nocow writes can
3555		 * be overwritten by scrub copy, causing data corruption.
3556		 * So for dev-replace, it's not allowed to continue if a block
3557		 * group is not RO.
3558		 */
3559		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
3560		if (ret == 0) {
3561			ro_set = 1;
3562		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
3563			/*
3564			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
3565			 * fails to create a new chunk for metadata. This is
3566			 * not a problem for scrub, because metadata is always
3567			 * COWed and our scrub has paused transaction
3568			 * commits.
3569			 */
3570			ro_set = 0;
3571		} else {
3572			btrfs_warn(fs_info,
3573				   "failed setting block group ro: %d", ret);
3574			btrfs_unfreeze_block_group(cache);
3575			btrfs_put_block_group(cache);
3576			scrub_pause_off(fs_info);
3577			break;
3578		}
3579
3580		/*
3581		 * Now the target block is marked RO, wait for nocow writes to
3582		 * finish before dev-replace.
3583		 * COW is fine, as COW never overwrites extents in commit tree.
3584		 */
3585		if (sctx->is_dev_replace) {
3586			btrfs_wait_nocow_writers(cache);
3587			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
3588					cache->length);
3589		}
3590
3591		scrub_pause_off(fs_info);
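		/*
		 * Record the cursor window for this chunk in the dev-replace
		 * item; it is marked for writeback so progress is persisted.
		 */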
3592		down_write(&dev_replace->rwsem);
3593		dev_replace->cursor_right = found_key.offset + length;
3594		dev_replace->cursor_left = found_key.offset;
3595		dev_replace->item_needs_writeback = 1;
3596		up_write(&dev_replace->rwsem);
3597
3598		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3599				  found_key.offset, cache);
3600
3601		/*
3602		 * Flush and submit all pending read and write bios, and
3603		 * afterwards wait for them.
3604		 * Note that in the dev replace case, a read request causes
3605		 * write requests that are submitted in the read completion
3606		 * worker. Therefore in the current situation, it is required
3607		 * that all write requests are flushed, so that all read and
3608		 * write requests are really completed when bios_in_flight
3609		 * changes to 0.
3610		 */
3611		sctx->flush_all_writes = true;
3612		scrub_submit(sctx);
3613		mutex_lock(&sctx->wr_lock);
3614		scrub_wr_submit(sctx);
3615		mutex_unlock(&sctx->wr_lock);
3616
3617		wait_event(sctx->list_wait,
3618			   atomic_read(&sctx->bios_in_flight) == 0);
3619
3620		scrub_pause_on(fs_info);
3621
3622		/*
3623		 * Must be called before we decrease @scrub_paused.
3624		 * Make sure we don't block transaction commits while
3625		 * we are waiting for pending workers to finish.
3626		 */
3627		wait_event(sctx->list_wait,
3628			   atomic_read(&sctx->workers_pending) == 0);
3629		sctx->flush_all_writes = false;
3630
3631		scrub_pause_off(fs_info);
3632
3633		down_write(&dev_replace->rwsem);
3634		dev_replace->cursor_left = dev_replace->cursor_right;
3635		dev_replace->item_needs_writeback = 1;
3636		up_write(&dev_replace->rwsem);
3637
3638		if (ro_set)
3639			btrfs_dec_block_group_ro(cache);
3640
3641		/*
3642		 * We might have prevented the cleaner kthread from deleting
3643		 * this block group if it was already unused because we raced
3644		 * and set it to RO mode first. So add it back to the unused
3645		 * list, otherwise it might not ever be deleted unless a manual
3646		 * balance is triggered or it becomes used and unused again.
3647		 */
3648		spin_lock(&cache->lock);
3649		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3650		    cache->used == 0) {
3651			spin_unlock(&cache->lock);
3652			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
3653				btrfs_discard_queue_work(&fs_info->discard_ctl,
3654							 cache);
3655			else
3656				btrfs_mark_bg_unused(cache);
3657		} else {
3658			spin_unlock(&cache->lock);
3659		}
3660
3661		btrfs_unfreeze_block_group(cache);
3662		btrfs_put_block_group(cache);
3663		if (ret)
3664			break;
3665		if (sctx->is_dev_replace &&
3666		    atomic64_read(&dev_replace->num_write_errors) > 0) {
3667			ret = -EIO;
3668			break;
3669		}
3670		if (sctx->stat.malloc_errors > 0) {
3671			ret = -ENOMEM;
3672			break;
3673		}
3674skip:
3675		key.offset = found_key.offset + length;
3676		btrfs_release_path(path);
3677	}
3678
3679	btrfs_free_path(path);
3680
3681	return ret;
3682}
3683
3684static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3685					   struct btrfs_device *scrub_dev)
3686{
3687	int	i;
3688	u64	bytenr;
3689	u64	gen;
3690	int	ret;
3691	struct btrfs_fs_info *fs_info = sctx->fs_info;
3692
3693	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3694		return -EROFS;
3695
3696	/* Seed devices of a new filesystem have their own generation. */
3697	if (scrub_dev->fs_devices != fs_info->fs_devices)
3698		gen = scrub_dev->generation;
3699	else
3700		gen = fs_info->last_trans_committed;
3701
3702	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3703		bytenr = btrfs_sb_offset(i);
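		/*
		 * Stop once a super block copy would lie beyond the size of
		 * the device committed to disk.
		 */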
3704		if (bytenr + BTRFS_SUPER_INFO_SIZE >
3705		    scrub_dev->commit_total_bytes)
3706			break;
3707
3708		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3709				  scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3710				  NULL, 1, bytenr);
3711		if (ret)
3712			return ret;
3713	}
3714	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3715
3716	return 0;
3717}
3718
3719static void scrub_workers_put(struct btrfs_fs_info *fs_info)
3720{
3721	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
3722					&fs_info->scrub_lock)) {
3723		struct btrfs_workqueue *scrub_workers = NULL;
3724		struct btrfs_workqueue *scrub_wr_comp = NULL;
3725		struct btrfs_workqueue *scrub_parity = NULL;
3726
3727		scrub_workers = fs_info->scrub_workers;
3728		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
3729		scrub_parity = fs_info->scrub_parity_workers;
3730
3731		fs_info->scrub_workers = NULL;
3732		fs_info->scrub_wr_completion_workers = NULL;
3733		fs_info->scrub_parity_workers = NULL;
3734		mutex_unlock(&fs_info->scrub_lock);
3735
3736		btrfs_destroy_workqueue(scrub_workers);
3737		btrfs_destroy_workqueue(scrub_wr_comp);
3738		btrfs_destroy_workqueue(scrub_parity);
3739	}
3740}
3741
3742/*
3743  * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
3744 */
3745static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3746						int is_dev_replace)
3747{
3748	struct btrfs_workqueue *scrub_workers = NULL;
3749	struct btrfs_workqueue *scrub_wr_comp = NULL;
3750	struct btrfs_workqueue *scrub_parity = NULL;
3751	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3752	int max_active = fs_info->thread_pool_size;
3753	int ret = -ENOMEM;
3754
3755	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
3756		return 0;
3757
3758	scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
3759					      is_dev_replace ? 1 : max_active, 4);
3760	if (!scrub_workers)
3761		goto fail_scrub_workers;
3762
3763	scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
3764					      max_active, 2);
3765	if (!scrub_wr_comp)
3766		goto fail_scrub_wr_completion_workers;
3767
3768	scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
3769					     max_active, 2);
3770	if (!scrub_parity)
3771		goto fail_scrub_parity_workers;
3772
3773	mutex_lock(&fs_info->scrub_lock);
3774	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
3775		ASSERT(fs_info->scrub_workers == NULL &&
3776		       fs_info->scrub_wr_completion_workers == NULL &&
3777		       fs_info->scrub_parity_workers == NULL);
3778		fs_info->scrub_workers = scrub_workers;
3779		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
3780		fs_info->scrub_parity_workers = scrub_parity;
3781		refcount_set(&fs_info->scrub_workers_refcnt, 1);
3782		mutex_unlock(&fs_info->scrub_lock);
3783		return 0;
3784	}
3785	/* Other thread raced in and created the workers for us */
3786	refcount_inc(&fs_info->scrub_workers_refcnt);
3787	mutex_unlock(&fs_info->scrub_lock);
3788
3789	ret = 0;
3790	btrfs_destroy_workqueue(scrub_parity);
3791fail_scrub_parity_workers:
3792	btrfs_destroy_workqueue(scrub_wr_comp);
3793fail_scrub_wr_completion_workers:
3794	btrfs_destroy_workqueue(scrub_workers);
3795fail_scrub_workers:
3796	return ret;
3797}
3798
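/*
 * Entry point for scrubbing a single device (also used for the scrub half of
 * dev-replace): validate the size assumptions, set up the scrub context and
 * workers, look up and sanity-check the device, then scrub the super blocks
 * (regular scrub only) followed by the chunks in the [start, end) range.
 */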
3799int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3800		    u64 end, struct btrfs_scrub_progress *progress,
3801		    int readonly, int is_dev_replace)
3802{
3803	struct scrub_ctx *sctx;
3804	int ret;
3805	struct btrfs_device *dev;
3806	unsigned int nofs_flag;
3807
3808	if (btrfs_fs_closing(fs_info))
3809		return -EAGAIN;
3810
3811	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
3812		/*
3813		 * In this case scrub is unable to calculate the checksum
3814		 * the way scrub is implemented.  Do not handle this
3815		 * situation at all because it should never happen.
3816		 */
3817		btrfs_err(fs_info,
3818			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3819		       fs_info->nodesize,
3820		       BTRFS_STRIPE_LEN);
3821		return -EINVAL;
3822	}
3823
3824	if (fs_info->sectorsize != PAGE_SIZE) {
3825		/* Not supported for data without checksums. */
3826		btrfs_err_rl(fs_info,
3827			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
3828		       fs_info->sectorsize, PAGE_SIZE);
3829		return -EINVAL;
3830	}
3831
3832	if (fs_info->nodesize >
3833	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3834	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3835		/*
3836		 * This would exhaust the array bounds of the pagev member
3837		 * in struct scrub_block.
3838		 */
3839		btrfs_err(fs_info,
3840			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3841		       fs_info->nodesize,
3842		       SCRUB_MAX_PAGES_PER_BLOCK,
3843		       fs_info->sectorsize,
3844		       SCRUB_MAX_PAGES_PER_BLOCK);
3845		return -EINVAL;
3846	}
3847
3848	/* Allocate outside of device_list_mutex */
3849	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
3850	if (IS_ERR(sctx))
3851		return PTR_ERR(sctx);
3852
3853	ret = scrub_workers_get(fs_info, is_dev_replace);
3854	if (ret)
3855		goto out_free_ctx;
3856
3857	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3858	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
3859	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
3860		     !is_dev_replace)) {
3861		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3862		ret = -ENODEV;
3863		goto out;
3864	}
3865
3866	if (!is_dev_replace && !readonly &&
3867	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
3868		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3869		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
3870				rcu_str_deref(dev->name));
3871		ret = -EROFS;
3872		goto out;
3873	}
3874
3875	mutex_lock(&fs_info->scrub_lock);
3876	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3877	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
3878		mutex_unlock(&fs_info->scrub_lock);
3879		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3880		ret = -EIO;
3881		goto out;
3882	}
3883
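	/*
	 * Only one scrub may run per device at a time, and a regular scrub
	 * cannot run while a device replace is in progress.
	 */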
3884	down_read(&fs_info->dev_replace.rwsem);
3885	if (dev->scrub_ctx ||
3886	    (!is_dev_replace &&
3887	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3888		up_read(&fs_info->dev_replace.rwsem);
3889		mutex_unlock(&fs_info->scrub_lock);
3890		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3891		ret = -EINPROGRESS;
3892		goto out;
3893	}
3894	up_read(&fs_info->dev_replace.rwsem);
3895
3896	sctx->readonly = readonly;
3897	dev->scrub_ctx = sctx;
3898	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3899
3900	/*
3901	 * By checking @scrub_pause_req here, we can avoid racing between
3902	 * transaction commit and scrubbing.
3903	 */
3904	__scrub_blocked_if_needed(fs_info);
3905	atomic_inc(&fs_info->scrubs_running);
3906	mutex_unlock(&fs_info->scrub_lock);
3907
3908	/*
3909	 * In order to avoid deadlock with reclaim when there is a transaction
3910	 * trying to pause scrub, make sure we use GFP_NOFS for all the
3911	 * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
3912	 * invoked by our callees. The pausing request is done when the
3913	 * transaction commit starts, and it blocks the transaction until scrub
3914	 * is paused (done at specific points in scrub_stripe(), or right
3915	 * above, before incrementing fs_info->scrubs_running).
3916	 */
3917	nofs_flag = memalloc_nofs_save();
3918	if (!is_dev_replace) {
3919		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
3920		/*
3921		 * By holding the device list mutex, we serialize against the
3922		 * super block writes that can be kicked off by log tree sync.
3923		 */
3924		mutex_lock(&fs_info->fs_devices->device_list_mutex);
3925		ret = scrub_supers(sctx, dev);
3926		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3927	}
3928
3929	if (!ret)
3930		ret = scrub_enumerate_chunks(sctx, dev, start, end);
3931	memalloc_nofs_restore(nofs_flag);
3932
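	/*
	 * Wait for all in-flight bios and pending worker items to complete
	 * before reporting progress and tearing everything down.
	 */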
3933	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3934	atomic_dec(&fs_info->scrubs_running);
3935	wake_up(&fs_info->scrub_pause_wait);
3936
3937	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3938
3939	if (progress)
3940		memcpy(progress, &sctx->stat, sizeof(*progress));
3941
3942	if (!is_dev_replace)
3943		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3944			ret ? "not finished" : "finished", devid, ret);
3945
3946	mutex_lock(&fs_info->scrub_lock);
3947	dev->scrub_ctx = NULL;
3948	mutex_unlock(&fs_info->scrub_lock);
3949
3950	scrub_workers_put(fs_info);
3951	scrub_put_ctx(sctx);
3952
3953	return ret;
3954out:
3955	scrub_workers_put(fs_info);
3956out_free_ctx:
3957	scrub_free_ctx(sctx);
3958
3959	return ret;
3960}
3961
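/*
 * Ask all running scrubs to pause and wait until each of them has reached a
 * pause point (scrubs_paused == scrubs_running), e.g. while a transaction
 * commit is running.
 */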
3962void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3963{
3964	mutex_lock(&fs_info->scrub_lock);
3965	atomic_inc(&fs_info->scrub_pause_req);
3966	while (atomic_read(&fs_info->scrubs_paused) !=
3967	       atomic_read(&fs_info->scrubs_running)) {
3968		mutex_unlock(&fs_info->scrub_lock);
3969		wait_event(fs_info->scrub_pause_wait,
3970			   atomic_read(&fs_info->scrubs_paused) ==
3971			   atomic_read(&fs_info->scrubs_running));
3972		mutex_lock(&fs_info->scrub_lock);
3973	}
3974	mutex_unlock(&fs_info->scrub_lock);
3975}
3976
3977void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3978{
3979	atomic_dec(&fs_info->scrub_pause_req);
3980	wake_up(&fs_info->scrub_pause_wait);
3981}
3982
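/*
 * Request cancellation of all running scrubs and wait for them to finish.
 * Returns -ENOTCONN if no scrub is currently running.
 */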
3983int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3984{
3985	mutex_lock(&fs_info->scrub_lock);
3986	if (!atomic_read(&fs_info->scrubs_running)) {
3987		mutex_unlock(&fs_info->scrub_lock);
3988		return -ENOTCONN;
3989	}
3990
3991	atomic_inc(&fs_info->scrub_cancel_req);
3992	while (atomic_read(&fs_info->scrubs_running)) {
3993		mutex_unlock(&fs_info->scrub_lock);
3994		wait_event(fs_info->scrub_pause_wait,
3995			   atomic_read(&fs_info->scrubs_running) == 0);
3996		mutex_lock(&fs_info->scrub_lock);
3997	}
3998	atomic_dec(&fs_info->scrub_cancel_req);
3999	mutex_unlock(&fs_info->scrub_lock);
4000
4001	return 0;
4002}
4003
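/*
 * Cancel the scrub running on a single device and wait until its scrub
 * context is gone.  Returns -ENOTCONN if the device is not being scrubbed.
 */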
4004int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
4005{
4006	struct btrfs_fs_info *fs_info = dev->fs_info;
4007	struct scrub_ctx *sctx;
4008
4009	mutex_lock(&fs_info->scrub_lock);
4010	sctx = dev->scrub_ctx;
4011	if (!sctx) {
4012		mutex_unlock(&fs_info->scrub_lock);
4013		return -ENOTCONN;
4014	}
4015	atomic_inc(&sctx->cancel_req);
4016	while (dev->scrub_ctx) {
4017		mutex_unlock(&fs_info->scrub_lock);
4018		wait_event(fs_info->scrub_pause_wait,
4019			   dev->scrub_ctx == NULL);
4020		mutex_lock(&fs_info->scrub_lock);
4021	}
4022	mutex_unlock(&fs_info->scrub_lock);
4023
4024	return 0;
4025}
4026
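/*
 * Copy the current scrub statistics for the given devid into @progress.
 * Returns -ENODEV if the device cannot be found, or -ENOTCONN if it is not
 * being scrubbed.
 */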
4027int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4028			 struct btrfs_scrub_progress *progress)
4029{
4030	struct btrfs_device *dev;
4031	struct scrub_ctx *sctx = NULL;
4032
4033	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4034	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
4035	if (dev)
4036		sctx = dev->scrub_ctx;
4037	if (sctx)
4038		memcpy(progress, &sctx->stat, sizeof(*progress));
4039	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4040
4041	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4042}
4043
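/*
 * Map a logical extent to the physical offset, device and mirror number of
 * its first stripe.  The out parameters are left untouched if the mapping
 * fails or does not cover the whole extent.
 */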
4044static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4045			       u64 extent_logical, u64 extent_len,
4046			       u64 *extent_physical,
4047			       struct btrfs_device **extent_dev,
4048			       int *extent_mirror_num)
4049{
4050	u64 mapped_length;
4051	struct btrfs_bio *bbio = NULL;
4052	int ret;
4053
4054	mapped_length = extent_len;
4055	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4056			      &mapped_length, &bbio, 0);
4057	if (ret || !bbio || mapped_length < extent_len ||
4058	    !bbio->stripes[0].dev->bdev) {
4059		btrfs_put_bbio(bbio);
4060		return;
4061	}
4062
4063	*extent_physical = bbio->stripes[0].physical;
4064	*extent_mirror_num = bbio->mirror_num;
4065	*extent_dev = bbio->stripes[0].dev;
4066	btrfs_put_bbio(bbio);
4067}