   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/ratelimit.h>
   8#include <linux/sched/mm.h>
   9#include <crypto/hash.h>
  10#include "ctree.h"
  11#include "discard.h"
  12#include "volumes.h"
  13#include "disk-io.h"
  14#include "ordered-data.h"
  15#include "transaction.h"
  16#include "backref.h"
  17#include "extent_io.h"
  18#include "dev-replace.h"
  19#include "raid56.h"
  20#include "block-group.h"
  21#include "zoned.h"
  22#include "fs.h"
  23#include "accessors.h"
  24#include "file-item.h"
  25#include "scrub.h"
  26#include "raid-stripe-tree.h"
  27
  28/*
   29 * This is only the first step towards a full-featured scrub. It reads all
   30 * extents and super blocks and verifies the checksums. In case a bad checksum
  31 * is found or the extent cannot be read, good data will be written back if
  32 * any can be found.
  33 *
  34 * Future enhancements:
  35 *  - In case an unrepairable extent is encountered, track which files are
  36 *    affected and report them
  37 *  - track and record media errors, throw out bad devices
  38 *  - add a mode to also read unallocated space
  39 */
  40
  41struct scrub_ctx;
  42
  43/*
  44 * The following value only influences the performance.
  45 *
  46 * This determines how many stripes would be submitted in one go,
  47 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
  48 */
  49#define SCRUB_STRIPES_PER_GROUP		8
  50
  51/*
  52 * How many groups we have for each sctx.
  53 *
  54 * This would be 8M per device, the same value as the old scrub in-flight bios
  55 * size limit.
  56 */
  57#define SCRUB_GROUPS_PER_SCTX		16
  58
  59#define SCRUB_TOTAL_STRIPES		(SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
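/*
 * Worked example, assuming BTRFS_STRIPE_LEN is the usual 64KiB:
 *
 *   one group = SCRUB_STRIPES_PER_GROUP * 64KiB = 8 * 64KiB   = 512KiB per submission
 *   one sctx  = SCRUB_GROUPS_PER_SCTX * 512KiB  = 16 * 512KiB = 8MiB per device
 *   SCRUB_TOTAL_STRIPES = 16 * 8 = 128 stripes embedded in each scrub_ctx
 */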
  60
  61/*
  62 * The following value times PAGE_SIZE needs to be large enough to match the
  63 * largest node/leaf/sector size that shall be supported.
  64 */
  65#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
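/*
 * For example, assuming BTRFS_MAX_METADATA_BLOCKSIZE is 64KiB, this evaluates
 * to 16 sectors, i.e. a 64KiB nodesize on a 4KiB sectorsize filesystem is the
 * largest block one verification has to cover.
 */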
  66
  67/* Represent one sector and its needed info to verify the content. */
  68struct scrub_sector_verification {
  69	bool is_metadata;
  70
  71	union {
  72		/*
  73		 * Csum pointer for data csum verification.  Should point to a
  74		 * sector csum inside scrub_stripe::csums.
  75		 *
  76		 * NULL if this data sector has no csum.
  77		 */
  78		u8 *csum;
  79
  80		/*
  81		 * Extra info for metadata verification.  All sectors inside a
  82		 * tree block share the same generation.
  83		 */
  84		u64 generation;
  85	};
  86};
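/*
 * Illustrative sketch of how the union is filled (see fill_one_extent_info()
 * and scrub_find_fill_first_stripe() later in this file):
 *
 *   data sector:     sector->csum = stripe->csums + sector_nr * csum_size;
 *   metadata sector: sector->is_metadata = true;
 *                    sector->generation  = extent_gen;
 */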
  87
  88enum scrub_stripe_flags {
  89	/* Set when @mirror_num, @dev, @physical and @logical are set. */
  90	SCRUB_STRIPE_FLAG_INITIALIZED,
  91
  92	/* Set when the read-repair is finished. */
  93	SCRUB_STRIPE_FLAG_REPAIR_DONE,
  94
  95	/*
  96	 * Set for data stripes if it's triggered from P/Q stripe.
  97	 * During such scrub, we should not report errors in data stripes, nor
  98	 * update the accounting.
  99	 */
 100	SCRUB_STRIPE_FLAG_NO_REPORT,
 101};
 102
 103#define SCRUB_STRIPE_PAGES		(BTRFS_STRIPE_LEN / PAGE_SIZE)
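/* With the common 4KiB PAGE_SIZE this is 64KiB / 4KiB = 16 pages per stripe. */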
 104
 105/*
 106 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 107 */
 108struct scrub_stripe {
 109	struct scrub_ctx *sctx;
 110	struct btrfs_block_group *bg;
 111
 112	struct page *pages[SCRUB_STRIPE_PAGES];
 113	struct scrub_sector_verification *sectors;
 114
 115	struct btrfs_device *dev;
 116	u64 logical;
 117	u64 physical;
 118
 119	u16 mirror_num;
 120
 121	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
 122	u16 nr_sectors;
 123
 124	/*
 125	 * How many data/meta extents are in this stripe.  Only for scrub status
 126	 * reporting purposes.
 127	 */
 128	u16 nr_data_extents;
 129	u16 nr_meta_extents;
 130
 131	atomic_t pending_io;
 132	wait_queue_head_t io_wait;
 133	wait_queue_head_t repair_wait;
 134
 135	/*
 136	 * Indicate the states of the stripe.  Bits are defined in
 137	 * scrub_stripe_flags enum.
 138	 */
 139	unsigned long state;
 140
 141	/* Indicate which sectors are covered by extent items. */
 142	unsigned long extent_sector_bitmap;
 143
 144	/*
 145	 * The errors hit during the initial read of the stripe.
 146	 *
 147	 * Would be utilized for error reporting and repair.
 148	 *
 149	 * The remaining init_nr_* records the number of errors hit, only used
 150	 * by error reporting.
 151	 */
 152	unsigned long init_error_bitmap;
 153	unsigned int init_nr_io_errors;
 154	unsigned int init_nr_csum_errors;
 155	unsigned int init_nr_meta_errors;
 156
 157	/*
 158	 * The following error bitmaps are all for the current status.
 159	 * Every time we submit a new read, these bitmaps may be updated.
 160	 *
 161	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
 162	 *
 163	 * IO and csum errors can happen for both metadata and data.
 164	 */
 165	unsigned long error_bitmap;
 166	unsigned long io_error_bitmap;
 167	unsigned long csum_error_bitmap;
 168	unsigned long meta_error_bitmap;
 169
 170	/* For writeback (repair or replace) error reporting. */
 171	unsigned long write_error_bitmap;
 172
 173	/* Writeback can be concurrent, thus we need to protect the bitmap. */
 174	spinlock_t write_error_lock;
 175
 176	/*
 177	 * Checksum for the whole stripe if this stripe is inside a data block
 178	 * group.
 179	 */
 180	u8 *csums;
 181
 182	struct work_struct work;
 183};
 184
 185struct scrub_ctx {
 186	struct scrub_stripe	stripes[SCRUB_TOTAL_STRIPES];
 187	struct scrub_stripe	*raid56_data_stripes;
 188	struct btrfs_fs_info	*fs_info;
 189	struct btrfs_path	extent_path;
 190	struct btrfs_path	csum_path;
 191	int			first_free;
 192	int			cur_stripe;
 193	atomic_t		cancel_req;
 194	int			readonly;
 195
 196	/* State of IO submission throttling affecting the associated device */
 197	ktime_t			throttle_deadline;
 198	u64			throttle_sent;
 199
 200	int			is_dev_replace;
 201	u64			write_pointer;
 202
 203	struct mutex            wr_lock;
 204	struct btrfs_device     *wr_tgtdev;
 205
 206	/*
 207	 * statistics
 208	 */
 209	struct btrfs_scrub_progress stat;
 210	spinlock_t		stat_lock;
 211
 212	/*
 213	 * Use a ref counter to avoid use-after-free issues. Scrub workers
 214	 * decrement bios_in_flight and workers_pending and then do a wakeup
 215	 * on the list_wait wait queue. We must ensure the main scrub task
 216	 * doesn't free the scrub context before or while the workers are
 217	 * doing the wakeup() call.
 218	 */
 219	refcount_t              refs;
 220};
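/*
 * Rough footprint sketch, assuming 4KiB sectors and pages: each scrub_stripe
 * pins SCRUB_STRIPE_PAGES pages (64KiB of data), so the 128 embedded stripes
 * account for roughly 8MiB of page data per scrub_ctx, plus the per-sector
 * verification array and csum buffer allocated in init_scrub_stripe().
 */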
 221
 222struct scrub_warning {
 223	struct btrfs_path	*path;
 224	u64			extent_item_size;
 225	const char		*errstr;
 226	u64			physical;
 227	u64			logical;
 228	struct btrfs_device	*dev;
 229};
 230
 231static void release_scrub_stripe(struct scrub_stripe *stripe)
 232{
 233	if (!stripe)
 234		return;
 235
 236	for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
 237		if (stripe->pages[i])
 238			__free_page(stripe->pages[i]);
 239		stripe->pages[i] = NULL;
 240	}
 241	kfree(stripe->sectors);
 242	kfree(stripe->csums);
 243	stripe->sectors = NULL;
 244	stripe->csums = NULL;
 245	stripe->sctx = NULL;
 246	stripe->state = 0;
 247}
 248
 249static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
 250			     struct scrub_stripe *stripe)
 251{
 252	int ret;
 253
 254	memset(stripe, 0, sizeof(*stripe));
 255
 256	stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
 257	stripe->state = 0;
 258
 259	init_waitqueue_head(&stripe->io_wait);
 260	init_waitqueue_head(&stripe->repair_wait);
 261	atomic_set(&stripe->pending_io, 0);
 262	spin_lock_init(&stripe->write_error_lock);
 263
 264	ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, 0);
 265	if (ret < 0)
 266		goto error;
 267
 268	stripe->sectors = kcalloc(stripe->nr_sectors,
 269				  sizeof(struct scrub_sector_verification),
 270				  GFP_KERNEL);
 271	if (!stripe->sectors)
 272		goto error;
 273
 274	stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
 275				fs_info->csum_size, GFP_KERNEL);
 276	if (!stripe->csums)
 277		goto error;
 278	return 0;
 279error:
 280	release_scrub_stripe(stripe);
 281	return -ENOMEM;
 282}
 283
 284static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
 285{
 286	wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
 287}
 288
 289static void scrub_put_ctx(struct scrub_ctx *sctx);
 290
 291static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 292{
 293	while (atomic_read(&fs_info->scrub_pause_req)) {
 294		mutex_unlock(&fs_info->scrub_lock);
 295		wait_event(fs_info->scrub_pause_wait,
 296		   atomic_read(&fs_info->scrub_pause_req) == 0);
 297		mutex_lock(&fs_info->scrub_lock);
 298	}
 299}
 300
 301static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 302{
 303	atomic_inc(&fs_info->scrubs_paused);
 304	wake_up(&fs_info->scrub_pause_wait);
 305}
 306
 307static void scrub_pause_off(struct btrfs_fs_info *fs_info)
 308{
 309	mutex_lock(&fs_info->scrub_lock);
 310	__scrub_blocked_if_needed(fs_info);
 311	atomic_dec(&fs_info->scrubs_paused);
 312	mutex_unlock(&fs_info->scrub_lock);
 313
 314	wake_up(&fs_info->scrub_pause_wait);
 315}
 316
 317static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 318{
 319	scrub_pause_on(fs_info);
 320	scrub_pause_off(fs_info);
 321}
 322
 323static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 324{
 325	int i;
 326
 327	if (!sctx)
 328		return;
 329
 330	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
 331		release_scrub_stripe(&sctx->stripes[i]);
 332
 333	kvfree(sctx);
 334}
 335
 336static void scrub_put_ctx(struct scrub_ctx *sctx)
 337{
 338	if (refcount_dec_and_test(&sctx->refs))
 339		scrub_free_ctx(sctx);
 340}
 341
 342static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 343		struct btrfs_fs_info *fs_info, int is_dev_replace)
 344{
 345	struct scrub_ctx *sctx;
 346	int		i;
 347
 348	/* Since sctx has inline 128 stripes, it can go beyond 64K easily.  Use
 349	 * kvzalloc().
 350	 */
 351	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
 352	if (!sctx)
 353		goto nomem;
 354	refcount_set(&sctx->refs, 1);
 355	sctx->is_dev_replace = is_dev_replace;
 356	sctx->fs_info = fs_info;
 357	sctx->extent_path.search_commit_root = 1;
 358	sctx->extent_path.skip_locking = 1;
 359	sctx->csum_path.search_commit_root = 1;
 360	sctx->csum_path.skip_locking = 1;
 361	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
 362		int ret;
 363
 364		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
 365		if (ret < 0)
 366			goto nomem;
 367		sctx->stripes[i].sctx = sctx;
 368	}
 369	sctx->first_free = 0;
 370	atomic_set(&sctx->cancel_req, 0);
 371
 372	spin_lock_init(&sctx->stat_lock);
 373	sctx->throttle_deadline = 0;
 374
 375	mutex_init(&sctx->wr_lock);
 376	if (is_dev_replace) {
 377		WARN_ON(!fs_info->dev_replace.tgtdev);
 378		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
 379	}
 380
 381	return sctx;
 382
 383nomem:
 384	scrub_free_ctx(sctx);
 385	return ERR_PTR(-ENOMEM);
 386}
 387
 388static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
 389				     u64 root, void *warn_ctx)
 390{
 391	u32 nlink;
 392	int ret;
 393	int i;
 394	unsigned nofs_flag;
 395	struct extent_buffer *eb;
 396	struct btrfs_inode_item *inode_item;
 397	struct scrub_warning *swarn = warn_ctx;
 398	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
 399	struct inode_fs_paths *ipath = NULL;
 400	struct btrfs_root *local_root;
 401	struct btrfs_key key;
 402
 403	local_root = btrfs_get_fs_root(fs_info, root, true);
 404	if (IS_ERR(local_root)) {
 405		ret = PTR_ERR(local_root);
 406		goto err;
 407	}
 408
 409	/*
 410	 * this makes the path point to (inum INODE_ITEM ioff)
 411	 */
 412	key.objectid = inum;
 413	key.type = BTRFS_INODE_ITEM_KEY;
 414	key.offset = 0;
 415
 416	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
 417	if (ret) {
 418		btrfs_put_root(local_root);
 419		btrfs_release_path(swarn->path);
 420		goto err;
 421	}
 422
 423	eb = swarn->path->nodes[0];
 424	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 425					struct btrfs_inode_item);
 426	nlink = btrfs_inode_nlink(eb, inode_item);
 427	btrfs_release_path(swarn->path);
 428
 429	/*
 430	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
 431	 * uses GFP_NOFS in this context, so we keep it consistent but it does
 432	 * not seem to be strictly necessary.
 433	 */
 434	nofs_flag = memalloc_nofs_save();
 435	ipath = init_ipath(4096, local_root, swarn->path);
 436	memalloc_nofs_restore(nofs_flag);
 437	if (IS_ERR(ipath)) {
 438		btrfs_put_root(local_root);
 439		ret = PTR_ERR(ipath);
 440		ipath = NULL;
 441		goto err;
 442	}
 443	ret = paths_from_inode(inum, ipath);
 444
 445	if (ret < 0)
 446		goto err;
 447
 448	/*
  449	 * We deliberately ignore the fact that ipath might have been too small to
 450	 * hold all of the paths here
 451	 */
 452	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 453		btrfs_warn_in_rcu(fs_info,
 454"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
 455				  swarn->errstr, swarn->logical,
 456				  btrfs_dev_name(swarn->dev),
 457				  swarn->physical,
 458				  root, inum, offset,
 459				  fs_info->sectorsize, nlink,
 460				  (char *)(unsigned long)ipath->fspath->val[i]);
 461
 462	btrfs_put_root(local_root);
 463	free_ipath(ipath);
 464	return 0;
 465
 466err:
 467	btrfs_warn_in_rcu(fs_info,
 468			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
 469			  swarn->errstr, swarn->logical,
 470			  btrfs_dev_name(swarn->dev),
 471			  swarn->physical,
 472			  root, inum, offset, ret);
 473
 474	free_ipath(ipath);
 475	return 0;
 476}
 477
 478static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
 479				       bool is_super, u64 logical, u64 physical)
 480{
 481	struct btrfs_fs_info *fs_info = dev->fs_info;
 482	struct btrfs_path *path;
 483	struct btrfs_key found_key;
 484	struct extent_buffer *eb;
 485	struct btrfs_extent_item *ei;
 486	struct scrub_warning swarn;
 487	u64 flags = 0;
 488	u32 item_size;
 489	int ret;
 490
 491	/* Super block error, no need to search extent tree. */
 492	if (is_super) {
 493		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
 494				  errstr, btrfs_dev_name(dev), physical);
 495		return;
 496	}
 497	path = btrfs_alloc_path();
 498	if (!path)
 499		return;
 500
 501	swarn.physical = physical;
 502	swarn.logical = logical;
 503	swarn.errstr = errstr;
 504	swarn.dev = NULL;
 505
 506	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
 507				  &flags);
 508	if (ret < 0)
 509		goto out;
 510
 511	swarn.extent_item_size = found_key.offset;
 512
 513	eb = path->nodes[0];
 514	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 515	item_size = btrfs_item_size(eb, path->slots[0]);
 516
 517	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 518		unsigned long ptr = 0;
 519		u8 ref_level;
 520		u64 ref_root;
 521
 522		while (true) {
 523			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 524						      item_size, &ref_root,
 525						      &ref_level);
 526			if (ret < 0) {
 527				btrfs_warn(fs_info,
 528				"failed to resolve tree backref for logical %llu: %d",
 529						  swarn.logical, ret);
 530				break;
 531			}
 532			if (ret > 0)
 533				break;
 534			btrfs_warn_in_rcu(fs_info,
 535"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
 536				errstr, swarn.logical, btrfs_dev_name(dev),
 537				swarn.physical, (ref_level ? "node" : "leaf"),
 538				ref_level, ref_root);
 539		}
 540		btrfs_release_path(path);
 541	} else {
 542		struct btrfs_backref_walk_ctx ctx = { 0 };
 543
 544		btrfs_release_path(path);
 545
 546		ctx.bytenr = found_key.objectid;
 547		ctx.extent_item_pos = swarn.logical - found_key.objectid;
 548		ctx.fs_info = fs_info;
 549
 550		swarn.path = path;
 551		swarn.dev = dev;
 552
 553		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
 554	}
 555
 556out:
 557	btrfs_free_path(path);
 558}
 559
 560static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
 561{
 562	int ret = 0;
 563	u64 length;
 564
 565	if (!btrfs_is_zoned(sctx->fs_info))
 566		return 0;
 567
 568	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
 569		return 0;
 570
 571	if (sctx->write_pointer < physical) {
 572		length = physical - sctx->write_pointer;
 573
 574		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
 575						sctx->write_pointer, length);
 576		if (!ret)
 577			sctx->write_pointer = physical;
 578	}
 579	return ret;
 580}
 581
 582static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
 583{
 584	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 585	int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
 586
 587	return stripe->pages[page_index];
 588}
 589
 590static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
 591						 int sector_nr)
 592{
 593	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 594
 595	return offset_in_page(sector_nr << fs_info->sectorsize_bits);
 596}
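/*
 * Example of the sector -> page mapping above, assuming a 4KiB sectorsize:
 *
 *   4KiB pages:            sector 5 -> byte 20480 -> pages[5], offset 0
 *   64KiB pages (subpage): sector 5 -> byte 20480 -> pages[0], offset 20480
 */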
 597
 598static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
 599{
 600	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 601	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 602	const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
 603	const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
 604	const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
 605	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 606	u8 on_disk_csum[BTRFS_CSUM_SIZE];
 607	u8 calculated_csum[BTRFS_CSUM_SIZE];
 608	struct btrfs_header *header;
 609
 610	/*
 611	 * Here we don't have a good way to attach the pages (and subpages)
 612	 * to a dummy extent buffer, thus we have to directly grab the members
 613	 * from pages.
 614	 */
 615	header = (struct btrfs_header *)(page_address(first_page) + first_off);
 616	memcpy(on_disk_csum, header->csum, fs_info->csum_size);
 617
 618	if (logical != btrfs_stack_header_bytenr(header)) {
 619		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 620		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 621		btrfs_warn_rl(fs_info,
 622		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
 623			      logical, stripe->mirror_num,
 624			      btrfs_stack_header_bytenr(header), logical);
 625		return;
 626	}
 627	if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
 628		   BTRFS_FSID_SIZE) != 0) {
 629		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 630		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 631		btrfs_warn_rl(fs_info,
 632		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
 633			      logical, stripe->mirror_num,
 634			      header->fsid, fs_info->fs_devices->fsid);
 635		return;
 636	}
 637	if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
 638		   BTRFS_UUID_SIZE) != 0) {
 639		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 640		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 641		btrfs_warn_rl(fs_info,
 642		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
 643			      logical, stripe->mirror_num,
 644			      header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
 645		return;
 646	}
 647
 648	/* Now check tree block csum. */
 649	shash->tfm = fs_info->csum_shash;
 650	crypto_shash_init(shash);
 651	crypto_shash_update(shash, page_address(first_page) + first_off +
 652			    BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
 653
 654	for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
 655		struct page *page = scrub_stripe_get_page(stripe, i);
 656		unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
 657
 658		crypto_shash_update(shash, page_address(page) + page_off,
 659				    fs_info->sectorsize);
 660	}
 661
 662	crypto_shash_final(shash, calculated_csum);
 663	if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
 664		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 665		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 666		btrfs_warn_rl(fs_info,
 667		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
 668			      logical, stripe->mirror_num,
 669			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
 670			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
 671		return;
 672	}
 673	if (stripe->sectors[sector_nr].generation !=
 674	    btrfs_stack_header_generation(header)) {
 675		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 676		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 677		btrfs_warn_rl(fs_info,
 678		"tree block %llu mirror %u has bad generation, has %llu want %llu",
 679			      logical, stripe->mirror_num,
 680			      btrfs_stack_header_generation(header),
 681			      stripe->sectors[sector_nr].generation);
 682		return;
 683	}
 684	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 685	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 686	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 687}
 688
 689static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
 690{
 691	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 692	struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
 693	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 694	struct page *page = scrub_stripe_get_page(stripe, sector_nr);
 695	unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
 696	u8 csum_buf[BTRFS_CSUM_SIZE];
 697	int ret;
 698
 699	ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
 700
 701	/* Sector not utilized, skip it. */
 702	if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
 703		return;
 704
 705	/* IO error, no need to check. */
 706	if (test_bit(sector_nr, &stripe->io_error_bitmap))
 707		return;
 708
 709	/* Metadata, verify the full tree block. */
 710	if (sector->is_metadata) {
 711		/*
 712		 * Check if the tree block crosses the stripe boundary.  If
  713		 * it crosses the boundary, we cannot verify it and can only give a
 714		 * warning.
 715		 *
 716		 * This can only happen on a very old filesystem where chunks
 717		 * are not ensured to be stripe aligned.
 718		 */
 719		if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
 720			btrfs_warn_rl(fs_info,
 721			"tree block at %llu crosses stripe boundary %llu",
 722				      stripe->logical +
 723				      (sector_nr << fs_info->sectorsize_bits),
 724				      stripe->logical);
 725			return;
 726		}
 727		scrub_verify_one_metadata(stripe, sector_nr);
 728		return;
 729	}
 730
 731	/*
 732	 * Data is easier, we just verify the data csum (if we have it).  For
 733	 * cases without csum, we have no other choice but to trust it.
 734	 */
 735	if (!sector->csum) {
 736		clear_bit(sector_nr, &stripe->error_bitmap);
 737		return;
 738	}
 739
 740	ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
 741	if (ret < 0) {
 742		set_bit(sector_nr, &stripe->csum_error_bitmap);
 743		set_bit(sector_nr, &stripe->error_bitmap);
 744	} else {
 745		clear_bit(sector_nr, &stripe->csum_error_bitmap);
 746		clear_bit(sector_nr, &stripe->error_bitmap);
 747	}
 748}
 749
 750/* Verify specified sectors of a stripe. */
 751static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
 752{
 753	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 754	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 755	int sector_nr;
 756
 757	for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
 758		scrub_verify_one_sector(stripe, sector_nr);
 759		if (stripe->sectors[sector_nr].is_metadata)
 760			sector_nr += sectors_per_tree - 1;
 761	}
 762}
 763
 764static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
 765{
 766	int i;
 767
 768	for (i = 0; i < stripe->nr_sectors; i++) {
 769		if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
 770		    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
 771			break;
 772	}
 773	ASSERT(i < stripe->nr_sectors);
 774	return i;
 775}
 776
 777/*
 778 * Repair read is different to the regular read:
 779 *
 780 * - Only reads the failed sectors
 781 * - May have extra blocksize limits
 782 */
 783static void scrub_repair_read_endio(struct btrfs_bio *bbio)
 784{
 785	struct scrub_stripe *stripe = bbio->private;
 786	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 787	struct bio_vec *bvec;
 788	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
 789	u32 bio_size = 0;
 790	int i;
 791
 792	ASSERT(sector_nr < stripe->nr_sectors);
 793
 794	bio_for_each_bvec_all(bvec, &bbio->bio, i)
 795		bio_size += bvec->bv_len;
 796
 797	if (bbio->bio.bi_status) {
 798		bitmap_set(&stripe->io_error_bitmap, sector_nr,
 799			   bio_size >> fs_info->sectorsize_bits);
 800		bitmap_set(&stripe->error_bitmap, sector_nr,
 801			   bio_size >> fs_info->sectorsize_bits);
 802	} else {
 803		bitmap_clear(&stripe->io_error_bitmap, sector_nr,
 804			     bio_size >> fs_info->sectorsize_bits);
 805	}
 806	bio_put(&bbio->bio);
 807	if (atomic_dec_and_test(&stripe->pending_io))
 808		wake_up(&stripe->io_wait);
 809}
 810
 811static int calc_next_mirror(int mirror, int num_copies)
 812{
 813	ASSERT(mirror <= num_copies);
 814	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
 815}
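/* E.g. with num_copies == 3 the mirrors are tried in the cycle 1 -> 2 -> 3 -> 1. */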
 816
 817static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
 818					    int mirror, int blocksize, bool wait)
 819{
 820	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 821	struct btrfs_bio *bbio = NULL;
 822	const unsigned long old_error_bitmap = stripe->error_bitmap;
 823	int i;
 824
 825	ASSERT(stripe->mirror_num >= 1);
 826	ASSERT(atomic_read(&stripe->pending_io) == 0);
 827
 828	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
 829		struct page *page;
 830		int pgoff;
 831		int ret;
 832
 833		page = scrub_stripe_get_page(stripe, i);
 834		pgoff = scrub_stripe_get_page_offset(stripe, i);
 835
 836		/* The current sector cannot be merged, submit the bio. */
 837		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
 838			     bbio->bio.bi_iter.bi_size >= blocksize)) {
 839			ASSERT(bbio->bio.bi_iter.bi_size);
 840			atomic_inc(&stripe->pending_io);
 841			btrfs_submit_bio(bbio, mirror);
 842			if (wait)
 843				wait_scrub_stripe_io(stripe);
 844			bbio = NULL;
 845		}
 846
 847		if (!bbio) {
 848			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
 849				fs_info, scrub_repair_read_endio, stripe);
 850			bbio->bio.bi_iter.bi_sector = (stripe->logical +
 851				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
 852		}
 853
 854		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
 855		ASSERT(ret == fs_info->sectorsize);
 856	}
 857	if (bbio) {
 858		ASSERT(bbio->bio.bi_iter.bi_size);
 859		atomic_inc(&stripe->pending_io);
 860		btrfs_submit_bio(bbio, mirror);
 861		if (wait)
 862			wait_scrub_stripe_io(stripe);
 863	}
 864}
 865
 866static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
 867				       struct scrub_stripe *stripe)
 868{
 869	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
 870				      DEFAULT_RATELIMIT_BURST);
 871	struct btrfs_fs_info *fs_info = sctx->fs_info;
 872	struct btrfs_device *dev = NULL;
 873	u64 physical = 0;
 874	int nr_data_sectors = 0;
 875	int nr_meta_sectors = 0;
 876	int nr_nodatacsum_sectors = 0;
 877	int nr_repaired_sectors = 0;
 878	int sector_nr;
 879
 880	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
 881		return;
 882
 883	/*
  884	 * Initialize the info needed for error reporting.
  885	 *
  886	 * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio() and
  887	 * thus does not need dev/physical itself, error reporting still needs them.
 888	 */
 889	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
 890		u64 mapped_len = fs_info->sectorsize;
 891		struct btrfs_io_context *bioc = NULL;
 892		int stripe_index = stripe->mirror_num - 1;
 893		int ret;
 894
 895		/* For scrub, our mirror_num should always start at 1. */
 896		ASSERT(stripe->mirror_num >= 1);
 897		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
 898				      stripe->logical, &mapped_len, &bioc,
 899				      NULL, NULL);
 900		/*
 901		 * If we failed, dev will be NULL, and later detailed reports
 902		 * will just be skipped.
 903		 */
 904		if (ret < 0)
 905			goto skip;
 906		physical = bioc->stripes[stripe_index].physical;
 907		dev = bioc->stripes[stripe_index].dev;
 908		btrfs_put_bioc(bioc);
 909	}
 910
 911skip:
 912	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
 913		bool repaired = false;
 914
 915		if (stripe->sectors[sector_nr].is_metadata) {
 916			nr_meta_sectors++;
 917		} else {
 918			nr_data_sectors++;
 919			if (!stripe->sectors[sector_nr].csum)
 920				nr_nodatacsum_sectors++;
 921		}
 922
 923		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
 924		    !test_bit(sector_nr, &stripe->error_bitmap)) {
 925			nr_repaired_sectors++;
 926			repaired = true;
 927		}
 928
 929		/* Good sector from the beginning, nothing need to be done. */
 930		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
 931			continue;
 932
 933		/*
  934	 * Report errors for the corrupted sectors.  If a sector was repaired,
  935	 * just output a message saying it was fixed up.
 936		 */
 937		if (repaired) {
 938			if (dev) {
 939				btrfs_err_rl_in_rcu(fs_info,
 940			"fixed up error at logical %llu on dev %s physical %llu",
 941					    stripe->logical, btrfs_dev_name(dev),
 942					    physical);
 943			} else {
 944				btrfs_err_rl_in_rcu(fs_info,
 945			"fixed up error at logical %llu on mirror %u",
 946					    stripe->logical, stripe->mirror_num);
 947			}
 948			continue;
 949		}
 950
 951		/* The remaining are all for unrepaired. */
 952		if (dev) {
 953			btrfs_err_rl_in_rcu(fs_info,
 954	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
 955					    stripe->logical, btrfs_dev_name(dev),
 956					    physical);
 957		} else {
 958			btrfs_err_rl_in_rcu(fs_info,
 959	"unable to fixup (regular) error at logical %llu on mirror %u",
 960					    stripe->logical, stripe->mirror_num);
 961		}
 962
 963		if (test_bit(sector_nr, &stripe->io_error_bitmap))
 964			if (__ratelimit(&rs) && dev)
 965				scrub_print_common_warning("i/o error", dev, false,
 966						     stripe->logical, physical);
 967		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
 968			if (__ratelimit(&rs) && dev)
 969				scrub_print_common_warning("checksum error", dev, false,
 970						     stripe->logical, physical);
 971		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
 972			if (__ratelimit(&rs) && dev)
 973				scrub_print_common_warning("header error", dev, false,
 974						     stripe->logical, physical);
 975	}
 976
 977	spin_lock(&sctx->stat_lock);
 978	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
 979	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
 980	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
 981	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
 982	sctx->stat.no_csum += nr_nodatacsum_sectors;
 983	sctx->stat.read_errors += stripe->init_nr_io_errors;
 984	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
 985	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
 986	sctx->stat.uncorrectable_errors +=
 987		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
 988	sctx->stat.corrected_errors += nr_repaired_sectors;
 989	spin_unlock(&sctx->stat_lock);
 990}
 991
 992static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
 993				unsigned long write_bitmap, bool dev_replace);
 994
 995/*
 996 * The main entrance for all read related scrub work, including:
 997 *
 998 * - Wait for the initial read to finish
 999 * - Verify and locate any bad sectors
1000 * - Go through the remaining mirrors and try to read as large blocksize as
1001 *   possible
1002 * - Go through all mirrors (including the failed mirror) sector-by-sector
1003 * - Submit writeback for repaired sectors
1004 *
1005 * Writeback for dev-replace does not happen here, it needs extra
1006 * synchronization for zoned devices.
1007 */
1008static void scrub_stripe_read_repair_worker(struct work_struct *work)
1009{
1010	struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
1011	struct scrub_ctx *sctx = stripe->sctx;
1012	struct btrfs_fs_info *fs_info = sctx->fs_info;
1013	int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1014					  stripe->bg->length);
1015	int mirror;
1016	int i;
1017
1018	ASSERT(stripe->mirror_num > 0);
1019
1020	wait_scrub_stripe_io(stripe);
1021	scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
1022	/* Save the initial failed bitmap for later repair and report usage. */
1023	stripe->init_error_bitmap = stripe->error_bitmap;
1024	stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
1025						  stripe->nr_sectors);
1026	stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
1027						    stripe->nr_sectors);
1028	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
1029						    stripe->nr_sectors);
1030
1031	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1032		goto out;
1033
1034	/*
1035	 * Try all remaining mirrors.
1036	 *
1037	 * Here we still try to read as large block as possible, as this is
1038	 * faster and we have extra safety nets to rely on.
1039	 */
1040	for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
1041	     mirror != stripe->mirror_num;
1042	     mirror = calc_next_mirror(mirror, num_copies)) {
1043		const unsigned long old_error_bitmap = stripe->error_bitmap;
1044
1045		scrub_stripe_submit_repair_read(stripe, mirror,
1046						BTRFS_STRIPE_LEN, false);
1047		wait_scrub_stripe_io(stripe);
1048		scrub_verify_one_stripe(stripe, old_error_bitmap);
1049		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1050			goto out;
1051	}
1052
1053	/*
1054	 * Last safety net, try re-checking all mirrors, including the failed
1055	 * one, sector-by-sector.
1056	 *
 1057	 * If one sector fails the drive's internal checksum, the whole read
 1058	 * containing the offending sector would be marked as an error.
 1059	 * Thus here we do sector-by-sector reads.
1060	 *
1061	 * This can be slow, thus we only try it as the last resort.
1062	 */
1063
1064	for (i = 0, mirror = stripe->mirror_num;
1065	     i < num_copies;
1066	     i++, mirror = calc_next_mirror(mirror, num_copies)) {
1067		const unsigned long old_error_bitmap = stripe->error_bitmap;
1068
1069		scrub_stripe_submit_repair_read(stripe, mirror,
1070						fs_info->sectorsize, true);
1071		wait_scrub_stripe_io(stripe);
1072		scrub_verify_one_stripe(stripe, old_error_bitmap);
1073		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1074			goto out;
1075	}
1076out:
1077	/*
1078	 * Submit the repaired sectors.  For zoned case, we cannot do repair
1079	 * in-place, but queue the bg to be relocated.
1080	 */
1081	if (btrfs_is_zoned(fs_info)) {
1082		if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1083			btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1084	} else if (!sctx->readonly) {
1085		unsigned long repaired;
1086
1087		bitmap_andnot(&repaired, &stripe->init_error_bitmap,
1088			      &stripe->error_bitmap, stripe->nr_sectors);
1089		scrub_write_sectors(sctx, stripe, repaired, false);
1090		wait_scrub_stripe_io(stripe);
1091	}
1092
1093	scrub_stripe_report_errors(sctx, stripe);
1094	set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1095	wake_up(&stripe->repair_wait);
1096}
1097
1098static void scrub_read_endio(struct btrfs_bio *bbio)
1099{
1100	struct scrub_stripe *stripe = bbio->private;
1101	struct bio_vec *bvec;
1102	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1103	int num_sectors;
1104	u32 bio_size = 0;
1105	int i;
1106
1107	ASSERT(sector_nr < stripe->nr_sectors);
1108	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1109		bio_size += bvec->bv_len;
1110	num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
1111
1112	if (bbio->bio.bi_status) {
1113		bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
1114		bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
1115	} else {
1116		bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
1117	}
1118	bio_put(&bbio->bio);
1119	if (atomic_dec_and_test(&stripe->pending_io)) {
1120		wake_up(&stripe->io_wait);
1121		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1122		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1123	}
1124}
1125
1126static void scrub_write_endio(struct btrfs_bio *bbio)
1127{
1128	struct scrub_stripe *stripe = bbio->private;
1129	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1130	struct bio_vec *bvec;
1131	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1132	u32 bio_size = 0;
1133	int i;
1134
1135	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1136		bio_size += bvec->bv_len;
1137
1138	if (bbio->bio.bi_status) {
1139		unsigned long flags;
1140
1141		spin_lock_irqsave(&stripe->write_error_lock, flags);
1142		bitmap_set(&stripe->write_error_bitmap, sector_nr,
1143			   bio_size >> fs_info->sectorsize_bits);
1144		spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1145	}
1146	bio_put(&bbio->bio);
1147
1148	if (atomic_dec_and_test(&stripe->pending_io))
1149		wake_up(&stripe->io_wait);
1150}
1151
1152static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1153				   struct scrub_stripe *stripe,
1154				   struct btrfs_bio *bbio, bool dev_replace)
1155{
1156	struct btrfs_fs_info *fs_info = sctx->fs_info;
1157	u32 bio_len = bbio->bio.bi_iter.bi_size;
1158	u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
1159		      stripe->logical;
1160
1161	fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1162	atomic_inc(&stripe->pending_io);
1163	btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1164	if (!btrfs_is_zoned(fs_info))
1165		return;
1166	/*
1167	 * For zoned writeback, queue depth must be 1, thus we must wait for
1168	 * the write to finish before the next write.
1169	 */
1170	wait_scrub_stripe_io(stripe);
1171
1172	/*
1173	 * And also need to update the write pointer if write finished
1174	 * successfully.
1175	 */
1176	if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1177		      &stripe->write_error_bitmap))
1178		sctx->write_pointer += bio_len;
1179}
1180
1181/*
1182 * Submit the write bio(s) for the sectors specified by @write_bitmap.
1183 *
1184 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1185 *
1186 * - Only needs logical bytenr and mirror_num
1187 *   Just like the scrub read path
1188 *
1189 * - Would only result in writes to the specified mirror
1190 *   Unlike the regular writeback path, which would write back to all stripes
1191 *
1192 * - Handle dev-replace and read-repair writeback differently
1193 */
1194static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1195				unsigned long write_bitmap, bool dev_replace)
1196{
1197	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1198	struct btrfs_bio *bbio = NULL;
1199	int sector_nr;
1200
1201	for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1202		struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1203		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1204		int ret;
1205
1206		/* We should only writeback sectors covered by an extent. */
1207		ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1208
1209		/* Cannot merge with previous sector, submit the current one. */
1210		if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
1211			scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1212			bbio = NULL;
1213		}
1214		if (!bbio) {
1215			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1216					       fs_info, scrub_write_endio, stripe);
1217			bbio->bio.bi_iter.bi_sector = (stripe->logical +
1218				(sector_nr << fs_info->sectorsize_bits)) >>
1219				SECTOR_SHIFT;
1220		}
1221		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1222		ASSERT(ret == fs_info->sectorsize);
1223	}
1224	if (bbio)
1225		scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1226}
1227
1228/*
1229 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 1230 * second.  The limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
1231 */
1232static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1233				  unsigned int bio_size)
1234{
1235	const int time_slice = 1000;
1236	s64 delta;
1237	ktime_t now;
1238	u32 div;
1239	u64 bwlimit;
1240
1241	bwlimit = READ_ONCE(device->scrub_speed_max);
1242	if (bwlimit == 0)
1243		return;
1244
1245	/*
1246	 * Slice is divided into intervals when the IO is submitted, adjust by
1247	 * bwlimit and maximum of 64 intervals.
1248	 */
1249	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
1250	div = min_t(u32, 64, div);
1251
1252	/* Start new epoch, set deadline */
1253	now = ktime_get();
1254	if (sctx->throttle_deadline == 0) {
1255		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1256		sctx->throttle_sent = 0;
1257	}
1258
1259	/* Still in the time to send? */
1260	if (ktime_before(now, sctx->throttle_deadline)) {
1261		/* If current bio is within the limit, send it */
1262		sctx->throttle_sent += bio_size;
1263		if (sctx->throttle_sent <= div_u64(bwlimit, div))
1264			return;
1265
1266		/* We're over the limit, sleep until the rest of the slice */
1267		delta = ktime_ms_delta(sctx->throttle_deadline, now);
1268	} else {
1269		/* New request after deadline, start new epoch */
1270		delta = 0;
1271	}
1272
1273	if (delta) {
1274		long timeout;
1275
1276		timeout = div_u64(delta * HZ, 1000);
1277		schedule_timeout_interruptible(timeout);
1278	}
1279
1280	/* Next call will start the deadline period */
1281	sctx->throttle_deadline = 0;
1282}
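/*
 * Worked example of the throttling above, assuming scrub_speed_max is set to
 * 64MiB/s: div = min(64, 64MiB / 16MiB) = 4, so each epoch lasts
 * 1000ms / 4 = 250ms and allows 64MiB / 4 = 16MiB of IO to be submitted before
 * the caller sleeps until the deadline.
 */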
1283
1284/*
 1285 * Given a physical address, this will calculate its
 1286 * logical offset. If this is a parity stripe, it will return
 1287 * the left-most data stripe's logical offset.
1288 *
1289 * return 0 if it is a data stripe, 1 means parity stripe.
1290 */
1291static int get_raid56_logic_offset(u64 physical, int num,
1292				   struct btrfs_chunk_map *map, u64 *offset,
1293				   u64 *stripe_start)
1294{
1295	int i;
1296	int j = 0;
1297	u64 last_offset;
1298	const int data_stripes = nr_data_stripes(map);
1299
1300	last_offset = (physical - map->stripes[num].physical) * data_stripes;
1301	if (stripe_start)
1302		*stripe_start = last_offset;
1303
1304	*offset = last_offset;
1305	for (i = 0; i < data_stripes; i++) {
1306		u32 stripe_nr;
1307		u32 stripe_index;
1308		u32 rot;
1309
1310		*offset = last_offset + btrfs_stripe_nr_to_offset(i);
1311
1312		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
1313
1314		/* Work out the disk rotation on this stripe-set */
1315		rot = stripe_nr % map->num_stripes;
1316		/* calculate which stripe this data locates */
1317		rot += i;
1318		stripe_index = rot % map->num_stripes;
1319		if (stripe_index == num)
1320			return 0;
1321		if (stripe_index < num)
1322			j++;
1323	}
1324	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
1325	return 1;
1326}
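/*
 * Worked example, assuming a 3-device RAID5 chunk (2 data stripes + parity,
 * 64KiB BTRFS_STRIPE_LEN) and asking about device index num = 0:
 *
 *   physical offset 0 into the chunk: i = 0 already yields stripe_index == 0,
 *     so this is a data stripe and *offset = 0 (return 0).
 *   physical offset 64KiB into the chunk: both data stripes of that row map
 *     to devices 1 and 2 (rotation by stripe_nr = 1), so this is the parity
 *     stripe and *offset = 128KiB, the left-most data stripe's logical offset
 *     (return 1).
 */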
1327
1328/*
1329 * Return 0 if the extent item range covers any byte of the range.
1330 * Return <0 if the extent item is before @search_start.
 1331 * Return >0 if the extent item is after @search_start + @search_len.
1332 */
1333static int compare_extent_item_range(struct btrfs_path *path,
1334				     u64 search_start, u64 search_len)
1335{
1336	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1337	u64 len;
1338	struct btrfs_key key;
1339
1340	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1341	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
1342	       key.type == BTRFS_METADATA_ITEM_KEY);
1343	if (key.type == BTRFS_METADATA_ITEM_KEY)
1344		len = fs_info->nodesize;
1345	else
1346		len = key.offset;
1347
1348	if (key.objectid + len <= search_start)
1349		return -1;
1350	if (key.objectid >= search_start + search_len)
1351		return 1;
1352	return 0;
1353}
1354
1355/*
1356 * Locate one extent item which covers any byte in range
1357 * [@search_start, @search_start + @search_length)
1358 *
1359 * If the path is not initialized, we will initialize the search by doing
1360 * a btrfs_search_slot().
1361 * If the path is already initialized, we will use the path as the initial
1362 * slot, to avoid duplicated btrfs_search_slot() calls.
1363 *
1364 * NOTE: If an extent item starts before @search_start, we will still
1365 * return the extent item. This is for data extent crossing stripe boundary.
1366 *
1367 * Return 0 if we found such extent item, and @path will point to the extent item.
1368 * Return >0 if no such extent item can be found, and @path will be released.
1369 * Return <0 if hit fatal error, and @path will be released.
1370 */
1371static int find_first_extent_item(struct btrfs_root *extent_root,
1372				  struct btrfs_path *path,
1373				  u64 search_start, u64 search_len)
1374{
1375	struct btrfs_fs_info *fs_info = extent_root->fs_info;
1376	struct btrfs_key key;
1377	int ret;
1378
1379	/* Continue using the existing path */
1380	if (path->nodes[0])
1381		goto search_forward;
1382
1383	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1384		key.type = BTRFS_METADATA_ITEM_KEY;
1385	else
1386		key.type = BTRFS_EXTENT_ITEM_KEY;
1387	key.objectid = search_start;
1388	key.offset = (u64)-1;
1389
1390	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1391	if (ret < 0)
1392		return ret;
1393
1394	ASSERT(ret > 0);
1395	/*
1396	 * Here we intentionally pass 0 as @min_objectid, as there could be
1397	 * an extent item starting before @search_start.
1398	 */
1399	ret = btrfs_previous_extent_item(extent_root, path, 0);
1400	if (ret < 0)
1401		return ret;
1402	/*
1403	 * No matter whether we have found an extent item, the next loop will
1404	 * properly do every check on the key.
1405	 */
1406search_forward:
1407	while (true) {
1408		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1409		if (key.objectid >= search_start + search_len)
1410			break;
1411		if (key.type != BTRFS_METADATA_ITEM_KEY &&
1412		    key.type != BTRFS_EXTENT_ITEM_KEY)
1413			goto next;
1414
1415		ret = compare_extent_item_range(path, search_start, search_len);
1416		if (ret == 0)
1417			return ret;
1418		if (ret > 0)
1419			break;
1420next:
1421		ret = btrfs_next_item(extent_root, path);
1422		if (ret) {
1423			/* Either no more items or a fatal error. */
1424			btrfs_release_path(path);
1425			return ret;
1426		}
1427	}
1428	btrfs_release_path(path);
1429	return 1;
1430}
1431
1432static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1433			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1434{
1435	struct btrfs_key key;
1436	struct btrfs_extent_item *ei;
1437
1438	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1439	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1440	       key.type == BTRFS_EXTENT_ITEM_KEY);
1441	*extent_start_ret = key.objectid;
1442	if (key.type == BTRFS_METADATA_ITEM_KEY)
1443		*size_ret = path->nodes[0]->fs_info->nodesize;
1444	else
1445		*size_ret = key.offset;
1446	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1447	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1448	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1449}
1450
1451static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1452					u64 physical, u64 physical_end)
1453{
1454	struct btrfs_fs_info *fs_info = sctx->fs_info;
1455	int ret = 0;
1456
1457	if (!btrfs_is_zoned(fs_info))
1458		return 0;
1459
1460	mutex_lock(&sctx->wr_lock);
1461	if (sctx->write_pointer < physical_end) {
1462		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1463						    physical,
1464						    sctx->write_pointer);
1465		if (ret)
1466			btrfs_err(fs_info,
1467				  "zoned: failed to recover write pointer");
1468	}
1469	mutex_unlock(&sctx->wr_lock);
1470	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1471
1472	return ret;
1473}
1474
1475static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1476				 struct scrub_stripe *stripe,
1477				 u64 extent_start, u64 extent_len,
1478				 u64 extent_flags, u64 extent_gen)
1479{
1480	for (u64 cur_logical = max(stripe->logical, extent_start);
1481	     cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1482			       extent_start + extent_len);
1483	     cur_logical += fs_info->sectorsize) {
1484		const int nr_sector = (cur_logical - stripe->logical) >>
1485				      fs_info->sectorsize_bits;
1486		struct scrub_sector_verification *sector =
1487						&stripe->sectors[nr_sector];
1488
1489		set_bit(nr_sector, &stripe->extent_sector_bitmap);
1490		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1491			sector->is_metadata = true;
1492			sector->generation = extent_gen;
1493		}
1494	}
1495}
1496
1497static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1498{
1499	stripe->extent_sector_bitmap = 0;
1500	stripe->init_error_bitmap = 0;
1501	stripe->init_nr_io_errors = 0;
1502	stripe->init_nr_csum_errors = 0;
1503	stripe->init_nr_meta_errors = 0;
1504	stripe->error_bitmap = 0;
1505	stripe->io_error_bitmap = 0;
1506	stripe->csum_error_bitmap = 0;
1507	stripe->meta_error_bitmap = 0;
1508}
1509
1510/*
1511 * Locate one stripe which has at least one extent in its range.
1512 *
1513 * Return 0 if found such stripe, and store its info into @stripe.
1514 * Return >0 if there is no such stripe in the specified range.
1515 * Return <0 for error.
1516 */
1517static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1518					struct btrfs_path *extent_path,
1519					struct btrfs_path *csum_path,
1520					struct btrfs_device *dev, u64 physical,
1521					int mirror_num, u64 logical_start,
1522					u32 logical_len,
1523					struct scrub_stripe *stripe)
1524{
1525	struct btrfs_fs_info *fs_info = bg->fs_info;
1526	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1527	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1528	const u64 logical_end = logical_start + logical_len;
1529	u64 cur_logical = logical_start;
1530	u64 stripe_end;
1531	u64 extent_start;
1532	u64 extent_len;
1533	u64 extent_flags;
1534	u64 extent_gen;
1535	int ret;
1536
1537	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1538				   stripe->nr_sectors);
1539	scrub_stripe_reset_bitmaps(stripe);
1540
1541	/* The range must be inside the bg. */
1542	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1543
1544	ret = find_first_extent_item(extent_root, extent_path, logical_start,
1545				     logical_len);
1546	/* Either error or not found. */
1547	if (ret)
1548		goto out;
1549	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
1550			&extent_gen);
1551	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1552		stripe->nr_meta_extents++;
1553	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1554		stripe->nr_data_extents++;
1555	cur_logical = max(extent_start, cur_logical);
1556
1557	/*
1558	 * Round down to stripe boundary.
1559	 *
1560	 * The extra calculation against bg->start is to handle block groups
1561	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1562	 */
1563	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1564			  bg->start;
1565	stripe->physical = physical + stripe->logical - logical_start;
1566	stripe->dev = dev;
1567	stripe->bg = bg;
1568	stripe->mirror_num = mirror_num;
1569	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1570
1571	/* Fill the first extent info into stripe->sectors[] array. */
1572	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1573			     extent_flags, extent_gen);
1574	cur_logical = extent_start + extent_len;
1575
1576	/* Fill the extent info for the remaining sectors. */
1577	while (cur_logical <= stripe_end) {
1578		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
1579					     stripe_end - cur_logical + 1);
1580		if (ret < 0)
1581			goto out;
1582		if (ret > 0) {
1583			ret = 0;
1584			break;
1585		}
1586		get_extent_info(extent_path, &extent_start, &extent_len,
1587				&extent_flags, &extent_gen);
1588		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1589			stripe->nr_meta_extents++;
1590		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1591			stripe->nr_data_extents++;
1592		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1593				     extent_flags, extent_gen);
1594		cur_logical = extent_start + extent_len;
1595	}
1596
1597	/* Now fill the data csum. */
1598	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1599		int sector_nr;
1600		unsigned long csum_bitmap = 0;
1601
1602		/* Csum space should have already been allocated. */
1603		ASSERT(stripe->csums);
1604
1605		/*
1606		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1607		 * should contain at most 16 sectors.
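		 * (BTRFS_STRIPE_LEN is 64KiB and the smallest sectorsize is
		 *  4KiB, so at most 64KiB / 4KiB = 16 bits are needed, which
		 *  fits in a single unsigned long.)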
1608		 */
1609		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1610
1611		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
1612						stripe->logical, stripe_end,
1613						stripe->csums, &csum_bitmap);
1614		if (ret < 0)
1615			goto out;
1616		if (ret > 0)
1617			ret = 0;
1618
1619		for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1620			stripe->sectors[sector_nr].csum = stripe->csums +
1621				sector_nr * fs_info->csum_size;
1622		}
1623	}
1624	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1625out:
1626	return ret;
1627}
1628
1629static void scrub_reset_stripe(struct scrub_stripe *stripe)
1630{
1631	scrub_stripe_reset_bitmaps(stripe);
1632
1633	stripe->nr_meta_extents = 0;
1634	stripe->nr_data_extents = 0;
1635	stripe->state = 0;
1636
1637	for (int i = 0; i < stripe->nr_sectors; i++) {
1638		stripe->sectors[i].is_metadata = false;
1639		stripe->sectors[i].csum = NULL;
1640		stripe->sectors[i].generation = 0;
1641	}
1642}
1643
1644static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
1645					    struct scrub_stripe *stripe)
1646{
1647	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1648	struct btrfs_bio *bbio = NULL;
1649	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
1650				      stripe->bg->length - stripe->logical) >>
1651				  fs_info->sectorsize_bits;
1652	u64 stripe_len = BTRFS_STRIPE_LEN;
1653	int mirror = stripe->mirror_num;
1654	int i;
1655
1656	atomic_inc(&stripe->pending_io);
1657
1658	for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
1659		struct page *page = scrub_stripe_get_page(stripe, i);
1660		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
1661
1662		/* We're beyond the chunk boundary, no need to read anymore. */
1663		if (i >= nr_sectors)
1664			break;
1665
1666		/* The current sector cannot be merged, submit the bio. */
1667		if (bbio &&
1668		    ((i > 0 &&
1669		      !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
1670		     bbio->bio.bi_iter.bi_size >= stripe_len)) {
1671			ASSERT(bbio->bio.bi_iter.bi_size);
1672			atomic_inc(&stripe->pending_io);
1673			btrfs_submit_bio(bbio, mirror);
1674			bbio = NULL;
1675		}
1676
1677		if (!bbio) {
1678			struct btrfs_io_stripe io_stripe = {};
1679			struct btrfs_io_context *bioc = NULL;
1680			const u64 logical = stripe->logical +
1681					    (i << fs_info->sectorsize_bits);
1682			int err;
1683
1684			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
1685					       fs_info, scrub_read_endio, stripe);
1686			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
1687
1688			io_stripe.is_scrub = true;
1689			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
1690					      &stripe_len, &bioc, &io_stripe,
1691					      &mirror);
1692			btrfs_put_bioc(bioc);
1693			if (err) {
1694				btrfs_bio_end_io(bbio,
1695						 errno_to_blk_status(err));
1696				return;
1697			}
1698		}
1699
1700		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1701	}
1702
1703	if (bbio) {
1704		ASSERT(bbio->bio.bi_iter.bi_size);
1705		atomic_inc(&stripe->pending_io);
1706		btrfs_submit_bio(bbio, mirror);
1707	}
1708
1709	if (atomic_dec_and_test(&stripe->pending_io)) {
1710		wake_up(&stripe->io_wait);
1711		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1712		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1713	}
1714}
1715
1716static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1717				      struct scrub_stripe *stripe)
1718{
1719	struct btrfs_fs_info *fs_info = sctx->fs_info;
1720	struct btrfs_bio *bbio;
1721	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
1722				      stripe->bg->length - stripe->logical) >>
1723				  fs_info->sectorsize_bits;
1724	int mirror = stripe->mirror_num;
1725
1726	ASSERT(stripe->bg);
1727	ASSERT(stripe->mirror_num > 0);
1728	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1729
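	/*
	 * With a RAID stripe tree, only ranges covered by extents have
	 * stripe tree entries to map them, so read this stripe extent by
	 * extent instead of as one contiguous read.
	 */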
1730	if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
1731		scrub_submit_extent_sector_read(sctx, stripe);
1732		return;
1733	}
1734
1735	bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1736			       scrub_read_endio, stripe);
1737
1738	bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1739	/* Read the whole range inside the chunk boundary. */
1740	for (unsigned int cur = 0; cur < nr_sectors; cur++) {
1741		struct page *page = scrub_stripe_get_page(stripe, cur);
1742		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
1743		int ret;
1744
1745		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1746		/* We should have allocated enough bio vectors. */
1747		ASSERT(ret == fs_info->sectorsize);
1748	}
1749	atomic_inc(&stripe->pending_io);
1750
1751	/*
1752	 * For dev-replace, if the user asked to avoid the source device, or
1753	 * if the device is missing, try the next mirror instead.
1754	 */
1755	if (sctx->is_dev_replace &&
1756	    (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1757	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1758	     !stripe->dev->bdev)) {
1759		int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1760						  stripe->bg->length);
1761
1762		mirror = calc_next_mirror(mirror, num_copies);
1763	}
1764	btrfs_submit_bio(bbio, mirror);
1765}
1766
1767static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1768{
1769	int i;
1770
1771	for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1772		if (stripe->sectors[i].is_metadata) {
1773			struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1774
1775			btrfs_err(fs_info,
1776			"stripe %llu has unrepaired metadata sector at %llu",
1777				  stripe->logical,
1778				  stripe->logical + (i << fs_info->sectorsize_bits));
1779			return true;
1780		}
1781	}
1782	return false;
1783}
1784
1785static void submit_initial_group_read(struct scrub_ctx *sctx,
1786				      unsigned int first_slot,
1787				      unsigned int nr_stripes)
1788{
1789	struct blk_plug plug;
1790
1791	ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1792	ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1793
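	/*
	 * Throttle against the amount of data this group is going to read,
	 * i.e. nr_stripes * BTRFS_STRIPE_LEN bytes.
	 */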
1794	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1795			      btrfs_stripe_nr_to_offset(nr_stripes));
1796	blk_start_plug(&plug);
1797	for (int i = 0; i < nr_stripes; i++) {
1798		struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1799
1800		/* Those stripes should be initialized. */
1801		ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1802		scrub_submit_initial_read(sctx, stripe);
1803	}
1804	blk_finish_plug(&plug);
1805}
1806
1807static int flush_scrub_stripes(struct scrub_ctx *sctx)
1808{
1809	struct btrfs_fs_info *fs_info = sctx->fs_info;
1810	struct scrub_stripe *stripe;
1811	const int nr_stripes = sctx->cur_stripe;
1812	int ret = 0;
1813
1814	if (!nr_stripes)
1815		return 0;
1816
1817	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1818
1819	/* Submit the stripes which are populated but not submitted. */
1820	if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1821		const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1822
1823		submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1824	}
1825
1826	for (int i = 0; i < nr_stripes; i++) {
1827		stripe = &sctx->stripes[i];
1828
1829		wait_event(stripe->repair_wait,
1830			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1831	}
1832
1833	/* Submit for dev-replace. */
1834	if (sctx->is_dev_replace) {
1835		/*
1836		 * For dev-replace, if we know there is something wrong with
1837		 * metadata, we should immediately abort.
1838		 */
1839		for (int i = 0; i < nr_stripes; i++) {
1840			if (stripe_has_metadata_error(&sctx->stripes[i])) {
1841				ret = -EIO;
1842				goto out;
1843			}
1844		}
1845		for (int i = 0; i < nr_stripes; i++) {
1846			unsigned long good;
1847
1848			stripe = &sctx->stripes[i];
1849
1850			ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1851
1852			bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1853				      &stripe->error_bitmap, stripe->nr_sectors);
1854			scrub_write_sectors(sctx, stripe, good, true);
1855		}
1856	}
1857
1858	/* Wait for the above writebacks to finish. */
1859	for (int i = 0; i < nr_stripes; i++) {
1860		stripe = &sctx->stripes[i];
1861
1862		wait_scrub_stripe_io(stripe);
1863		scrub_reset_stripe(stripe);
1864	}
1865out:
1866	sctx->cur_stripe = 0;
1867	return ret;
1868}
1869
1870static void raid56_scrub_wait_endio(struct bio *bio)
1871{
1872	complete(bio->bi_private);
1873}
1874
1875static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1876			      struct btrfs_device *dev, int mirror_num,
1877			      u64 logical, u32 length, u64 physical,
1878			      u64 *found_logical_ret)
1879{
1880	struct scrub_stripe *stripe;
1881	int ret;
1882
1883	/*
1884	 * There should always be one slot left, as the caller who fills the last
1885	 * slot must flush all the queued stripes.
1886	 */
1887	ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1888
1889	/* @found_logical_ret must be specified. */
1890	ASSERT(found_logical_ret);
1891
1892	stripe = &sctx->stripes[sctx->cur_stripe];
1893	scrub_reset_stripe(stripe);
1894	ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1895					   &sctx->csum_path, dev, physical,
1896					   mirror_num, logical, length, stripe);
1897	/* Either >0 as no more extents or <0 for error. */
1898	if (ret)
1899		return ret;
1900	*found_logical_ret = stripe->logical;
1901	sctx->cur_stripe++;
1902
1903	/* We filled one group, submit it. */
1904	if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1905		const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1906
1907		submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1908	}
1909
1910	/* Last slot used, flush them all. */
1911	if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1912		return flush_scrub_stripes(sctx);
1913	return 0;
1914}
1915
1916static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1917				      struct btrfs_device *scrub_dev,
1918				      struct btrfs_block_group *bg,
1919				      struct btrfs_chunk_map *map,
1920				      u64 full_stripe_start)
1921{
1922	DECLARE_COMPLETION_ONSTACK(io_done);
1923	struct btrfs_fs_info *fs_info = sctx->fs_info;
1924	struct btrfs_raid_bio *rbio;
1925	struct btrfs_io_context *bioc = NULL;
1926	struct btrfs_path extent_path = { 0 };
1927	struct btrfs_path csum_path = { 0 };
1928	struct bio *bio;
1929	struct scrub_stripe *stripe;
1930	bool all_empty = true;
1931	const int data_stripes = nr_data_stripes(map);
1932	unsigned long extent_bitmap = 0;
1933	u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1934	int ret;
1935
1936	ASSERT(sctx->raid56_data_stripes);
1937
1938	/*
1939	 * For data stripe search, we cannot re-use the same extent/csum paths,
1940	 * as the data stripe bytenr may be smaller than the previous extent's.  Thus
1941	 * we have to use our own extent/csum paths.
1942	 */
1943	extent_path.search_commit_root = 1;
1944	extent_path.skip_locking = 1;
1945	csum_path.search_commit_root = 1;
1946	csum_path.skip_locking = 1;
1947
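	/*
	 * Map each data stripe of this full stripe to its device and
	 * physical offset, taking the per-full-stripe rotation into account.
	 */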
1948	for (int i = 0; i < data_stripes; i++) {
1949		int stripe_index;
1950		int rot;
1951		u64 physical;
1952
1953		stripe = &sctx->raid56_data_stripes[i];
1954		rot = div_u64(full_stripe_start - bg->start,
1955			      data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1956		stripe_index = (i + rot) % map->num_stripes;
1957		physical = map->stripes[stripe_index].physical +
1958			   btrfs_stripe_nr_to_offset(rot);
1959
1960		scrub_reset_stripe(stripe);
1961		set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1962		ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1963				map->stripes[stripe_index].dev, physical, 1,
1964				full_stripe_start + btrfs_stripe_nr_to_offset(i),
1965				BTRFS_STRIPE_LEN, stripe);
1966		if (ret < 0)
1967			goto out;
1968		/*
1969		 * No extent in this data stripe, we need to manually mark it
1970		 * initialized to make later read submission happy.
1971		 */
1972		if (ret > 0) {
1973			stripe->logical = full_stripe_start +
1974					  btrfs_stripe_nr_to_offset(i);
1975			stripe->dev = map->stripes[stripe_index].dev;
1976			stripe->mirror_num = 1;
1977			set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1978		}
1979	}
1980
1981	/* Check if all data stripes are empty. */
1982	for (int i = 0; i < data_stripes; i++) {
1983		stripe = &sctx->raid56_data_stripes[i];
1984		if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1985			all_empty = false;
1986			break;
1987		}
1988	}
1989	if (all_empty) {
1990		ret = 0;
1991		goto out;
1992	}
1993
1994	for (int i = 0; i < data_stripes; i++) {
1995		stripe = &sctx->raid56_data_stripes[i];
1996		scrub_submit_initial_read(sctx, stripe);
1997	}
1998	for (int i = 0; i < data_stripes; i++) {
1999		stripe = &sctx->raid56_data_stripes[i];
2000
2001		wait_event(stripe->repair_wait,
2002			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
2003	}
2004	/* For now, no zoned support for RAID56. */
2005	ASSERT(!btrfs_is_zoned(sctx->fs_info));
2006
2007	/*
2008	 * Now all data stripes are properly verified.  Check if we have any
2009	 * unrepaired sectors; if so, abort immediately or we could further
2010	 * corrupt the P/Q stripes.
2011	 *
2012	 * During the loop, also populate extent_bitmap.
2013	 */
2014	for (int i = 0; i < data_stripes; i++) {
2015		unsigned long error;
2016
2017		stripe = &sctx->raid56_data_stripes[i];
2018
2019		/*
2020		 * We should only check errors where there is an extent, as an
2021		 * empty data stripe (e.g. on a missing device) may report bogus errors.
2022		 */
2023		bitmap_and(&error, &stripe->error_bitmap,
2024			   &stripe->extent_sector_bitmap, stripe->nr_sectors);
2025		if (!bitmap_empty(&error, stripe->nr_sectors)) {
2026			btrfs_err(fs_info,
2027"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
2028				  full_stripe_start, i, stripe->nr_sectors,
2029				  &error);
2030			ret = -EIO;
2031			goto out;
2032		}
2033		bitmap_or(&extent_bitmap, &extent_bitmap,
2034			  &stripe->extent_sector_bitmap, stripe->nr_sectors);
2035	}
2036
2037	/* Now we can check and regenerate the P/Q stripe. */
2038	bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
2039	bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
2040	bio->bi_private = &io_done;
2041	bio->bi_end_io = raid56_scrub_wait_endio;
2042
2043	btrfs_bio_counter_inc_blocked(fs_info);
2044	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
2045			      &length, &bioc, NULL, NULL);
2046	if (ret < 0) {
2047		btrfs_put_bioc(bioc);
2048		btrfs_bio_counter_dec(fs_info);
2049		goto out;
2050	}
2051	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
2052				BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
2053	btrfs_put_bioc(bioc);
2054	if (!rbio) {
2055		ret = -ENOMEM;
2056		btrfs_bio_counter_dec(fs_info);
2057		goto out;
2058	}
2059	/* Use the recovered stripes as cache to avoid reading them from disk again. */
2060	for (int i = 0; i < data_stripes; i++) {
2061		stripe = &sctx->raid56_data_stripes[i];
2062
2063		raid56_parity_cache_data_pages(rbio, stripe->pages,
2064				full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
2065	}
2066	raid56_parity_submit_scrub_rbio(rbio);
2067	wait_for_completion_io(&io_done);
2068	ret = blk_status_to_errno(bio->bi_status);
2069	bio_put(bio);
2070	btrfs_bio_counter_dec(fs_info);
2071
2072	btrfs_release_path(&extent_path);
2073	btrfs_release_path(&csum_path);
2074out:
2075	return ret;
2076}
2077
2078/*
2079 * Scrub one range which can only have a simple mirror based profile.
2080 * (This includes all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2081 *  RAID0/RAID10.)
2082 *
2083 * Since we may need to handle a subset of a block group, we need the
2084 * @logical_start and @logical_length parameters.
2085 */
2086static int scrub_simple_mirror(struct scrub_ctx *sctx,
2087			       struct btrfs_block_group *bg,
2088			       struct btrfs_chunk_map *map,
2089			       u64 logical_start, u64 logical_length,
2090			       struct btrfs_device *device,
2091			       u64 physical, int mirror_num)
2092{
2093	struct btrfs_fs_info *fs_info = sctx->fs_info;
2094	const u64 logical_end = logical_start + logical_length;
2095	u64 cur_logical = logical_start;
2096	int ret;
2097
2098	/* The range must be inside the bg */
2099	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2100
2101	/* Go through each extent item inside the logical range */
2102	while (cur_logical < logical_end) {
2103		u64 found_logical = U64_MAX;
2104		u64 cur_physical = physical + cur_logical - logical_start;
2105
2106		/* Canceled? */
2107		if (atomic_read(&fs_info->scrub_cancel_req) ||
2108		    atomic_read(&sctx->cancel_req)) {
2109			ret = -ECANCELED;
2110			break;
2111		}
2112		/* Paused? */
2113		if (atomic_read(&fs_info->scrub_pause_req)) {
2114			/* Push queued extents */
2115			scrub_blocked_if_needed(fs_info);
2116		}
2117		/* Block group removed? */
2118		spin_lock(&bg->lock);
2119		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2120			spin_unlock(&bg->lock);
2121			ret = 0;
2122			break;
2123		}
2124		spin_unlock(&bg->lock);
2125
2126		ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2127					 cur_logical, logical_end - cur_logical,
2128					 cur_physical, &found_logical);
2129		if (ret > 0) {
2130			/* No more extents, just update the accounting */
2131			sctx->stat.last_physical = physical + logical_length;
2132			ret = 0;
2133			break;
2134		}
2135		if (ret < 0)
2136			break;
2137
2138		/* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2139		ASSERT(found_logical != U64_MAX);
2140		cur_logical = found_logical + BTRFS_STRIPE_LEN;
2141
2142		/* Don't hold the CPU for too long */
2143		cond_resched();
2144	}
2145	return ret;
2146}
2147
2148/* Calculate the full stripe length for simple stripe based profiles */
2149static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
2150{
2151	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2152			    BTRFS_BLOCK_GROUP_RAID10));
2153
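	/*
	 * E.g. RAID0 on 4 devices: 4 * 64KiB = 256KiB; RAID10 on 4 devices
	 * (sub_stripes == 2): 2 * 64KiB = 128KiB.
	 */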
2154	return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2155}
2156
2157/* Get the logical bytenr for the stripe */
2158static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
2159				     struct btrfs_block_group *bg,
2160				     int stripe_index)
2161{
2162	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2163			    BTRFS_BLOCK_GROUP_RAID10));
2164	ASSERT(stripe_index < map->num_stripes);
2165
2166	/*
2167	 * (stripe_index / sub_stripes) gives how many data stripes we need to
2168	 * skip.
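	 *
	 * E.g. for RAID10 (sub_stripes == 2), stripe_index 0 and 1 are the
	 * two copies of the first data stripe, 2 and 3 of the second, so
	 * stripe_index / 2 data stripes are skipped.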
2169	 */
2170	return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2171	       bg->start;
2172}
2173
2174/* Get the mirror number for the stripe */
2175static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
2176{
2177	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2178			    BTRFS_BLOCK_GROUP_RAID10));
2179	ASSERT(stripe_index < map->num_stripes);
2180
2181	/* For RAID0 it's always 1; for RAID10 the mirror number alternates 1,2,1,2... */
2182	return stripe_index % map->sub_stripes + 1;
2183}
2184
2185static int scrub_simple_stripe(struct scrub_ctx *sctx,
2186			       struct btrfs_block_group *bg,
2187			       struct btrfs_chunk_map *map,
2188			       struct btrfs_device *device,
2189			       int stripe_index)
2190{
2191	const u64 logical_increment = simple_stripe_full_stripe_len(map);
2192	const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2193	const u64 orig_physical = map->stripes[stripe_index].physical;
2194	const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2195	u64 cur_logical = orig_logical;
2196	u64 cur_physical = orig_physical;
2197	int ret = 0;
2198
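	/*
	 * Each iteration scrubs the 64KiB stripe this device holds, then
	 * jumps over the stripes on the other devices: the logical address
	 * advances by a full stripe length while the physical address only
	 * advances by BTRFS_STRIPE_LEN.
	 */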
2199	while (cur_logical < bg->start + bg->length) {
2200		/*
2201		 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2202		 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2203		 * this stripe.
2204		 */
2205		ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2206					  BTRFS_STRIPE_LEN, device, cur_physical,
2207					  mirror_num);
2208		if (ret)
2209			return ret;
2210		/* Skip to next stripe which belongs to the target device */
2211		cur_logical += logical_increment;
2212		/* For physical offset, we just go to next stripe */
2213		cur_physical += BTRFS_STRIPE_LEN;
2214	}
2215	return ret;
2216}
2217
2218static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2219					   struct btrfs_block_group *bg,
2220					   struct btrfs_chunk_map *map,
2221					   struct btrfs_device *scrub_dev,
2222					   int stripe_index)
2223{
2224	struct btrfs_fs_info *fs_info = sctx->fs_info;
2225	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2226	const u64 chunk_logical = bg->start;
2227	int ret;
2228	int ret2;
2229	u64 physical = map->stripes[stripe_index].physical;
2230	const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
2231	const u64 physical_end = physical + dev_stripe_len;
2232	u64 logical;
2233	u64 logic_end;
2234	/* The logical increment after finishing one stripe */
2235	u64 increment;
2236	/* Offset inside the chunk */
2237	u64 offset;
2238	u64 stripe_logical;
2239	int stop_loop = 0;
2240
2241	/* Extent_path should be released by now. */
2242	ASSERT(sctx->extent_path.nodes[0] == NULL);
2243
2244	scrub_blocked_if_needed(fs_info);
2245
2246	if (sctx->is_dev_replace &&
2247	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2248		mutex_lock(&sctx->wr_lock);
2249		sctx->write_pointer = physical;
2250		mutex_unlock(&sctx->wr_lock);
2251	}
2252
2253	/* Prepare the extra data stripes used by RAID56. */
2254	if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2255		ASSERT(sctx->raid56_data_stripes == NULL);
2256
2257		sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2258						    sizeof(struct scrub_stripe),
2259						    GFP_KERNEL);
2260		if (!sctx->raid56_data_stripes) {
2261			ret = -ENOMEM;
2262			goto out;
2263		}
2264		for (int i = 0; i < nr_data_stripes(map); i++) {
2265			ret = init_scrub_stripe(fs_info,
2266						&sctx->raid56_data_stripes[i]);
2267			if (ret < 0)
2268				goto out;
2269			sctx->raid56_data_stripes[i].bg = bg;
2270			sctx->raid56_data_stripes[i].sctx = sctx;
2271		}
2272	}
2273	/*
2274	 * There used to be a big double loop to handle all profiles using the
2275	 * same routine, which grew larger and messier over time.
2276	 *
2277	 * So here we handle each profile differently, so that simpler profiles
2278	 * have a simpler scrubbing function.
2279	 */
2280	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2281			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2282		/*
2283		 * The above check rules out all complex profiles, the remaining
2284		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2285		 * mirrored duplication without striping.
2286		 *
2287		 * Only @physical and @mirror_num need to be calculated using
2288		 * @stripe_index.
2289		 */
2290		ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2291				scrub_dev, map->stripes[stripe_index].physical,
2292				stripe_index + 1);
2293		offset = 0;
2294		goto out;
2295	}
2296	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2297		ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2298		offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2299		goto out;
2300	}
2301
2302	/* Only RAID56 goes through the old code */
2303	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2304	ret = 0;
2305
2306	/* Calculate the logical end of the stripe */
2307	get_raid56_logic_offset(physical_end, stripe_index,
2308				map, &logic_end, NULL);
2309	logic_end += chunk_logical;
2310
2311	/* Initialize @offset in case we need to go to out: label */
2312	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2313	increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
2314
2315	/*
2316	 * Due to the rotation, for RAID56 it's better to iterate each stripe
2317	 * using its physical offset.
2318	 */
2319	while (physical < physical_end) {
2320		ret = get_raid56_logic_offset(physical, stripe_index, map,
2321					      &logical, &stripe_logical);
2322		logical += chunk_logical;
2323		if (ret) {
2324			/* It is a parity stripe */
2325			stripe_logical += chunk_logical;
2326			ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2327							 map, stripe_logical);
2328			if (ret)
2329				goto out;
2330			goto next;
2331		}
2332
2333		/*
2334		 * Now we're at a data stripe, scrub each extent in the range.
2335		 *
2336		 * At this stage, if we ignore the repair part, inside each data
2337		 * stripe it is no different than SINGLE profile.
2338		 * We can reuse scrub_simple_mirror() here, as the repair part
2339		 * is still based on @mirror_num.
2340		 */
2341		ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2342					  scrub_dev, physical, 1);
2343		if (ret < 0)
2344			goto out;
2345next:
2346		logical += increment;
2347		physical += BTRFS_STRIPE_LEN;
2348		spin_lock(&sctx->stat_lock);
2349		if (stop_loop)
2350			sctx->stat.last_physical =
2351				map->stripes[stripe_index].physical + dev_stripe_len;
2352		else
2353			sctx->stat.last_physical = physical;
2354		spin_unlock(&sctx->stat_lock);
2355		if (stop_loop)
2356			break;
2357	}
2358out:
2359	ret2 = flush_scrub_stripes(sctx);
2360	if (!ret)
2361		ret = ret2;
2362	btrfs_release_path(&sctx->extent_path);
2363	btrfs_release_path(&sctx->csum_path);
2364
2365	if (sctx->raid56_data_stripes) {
2366		for (int i = 0; i < nr_data_stripes(map); i++)
2367			release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2368		kfree(sctx->raid56_data_stripes);
2369		sctx->raid56_data_stripes = NULL;
2370	}
2371
2372	if (sctx->is_dev_replace && ret >= 0) {
2373		int ret2;
2374
2375		ret2 = sync_write_pointer_for_zoned(sctx,
2376				chunk_logical + offset,
2377				map->stripes[stripe_index].physical,
2378				physical_end);
2379		if (ret2)
2380			ret = ret2;
2381	}
2382
2383	return ret < 0 ? ret : 0;
2384}
2385
2386static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2387					  struct btrfs_block_group *bg,
2388					  struct btrfs_device *scrub_dev,
2389					  u64 dev_offset,
2390					  u64 dev_extent_len)
2391{
2392	struct btrfs_fs_info *fs_info = sctx->fs_info;
2393	struct btrfs_chunk_map *map;
2394	int i;
2395	int ret = 0;
2396
2397	map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
2398	if (!map) {
2399		/*
2400		 * Might have been an unused block group deleted by the cleaner
2401		 * kthread or relocation.
2402		 */
2403		spin_lock(&bg->lock);
2404		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2405			ret = -EINVAL;
2406		spin_unlock(&bg->lock);
2407
2408		return ret;
2409	}
2410	if (map->start != bg->start)
2411		goto out;
2412	if (map->chunk_len < dev_extent_len)
2413		goto out;
2414
2415	for (i = 0; i < map->num_stripes; ++i) {
2416		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2417		    map->stripes[i].physical == dev_offset) {
2418			ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
2419			if (ret)
2420				goto out;
2421		}
2422	}
2423out:
2424	btrfs_free_chunk_map(map);
2425
2426	return ret;
2427}
2428
2429static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2430					  struct btrfs_block_group *cache)
2431{
2432	struct btrfs_fs_info *fs_info = cache->fs_info;
2433	struct btrfs_trans_handle *trans;
2434
2435	if (!btrfs_is_zoned(fs_info))
2436		return 0;
2437
2438	btrfs_wait_block_group_reservations(cache);
2439	btrfs_wait_nocow_writers(cache);
2440	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2441
2442	trans = btrfs_join_transaction(root);
2443	if (IS_ERR(trans))
2444		return PTR_ERR(trans);
2445	return btrfs_commit_transaction(trans);
2446}
2447
2448static noinline_for_stack
2449int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2450			   struct btrfs_device *scrub_dev, u64 start, u64 end)
2451{
2452	struct btrfs_dev_extent *dev_extent = NULL;
2453	struct btrfs_path *path;
2454	struct btrfs_fs_info *fs_info = sctx->fs_info;
2455	struct btrfs_root *root = fs_info->dev_root;
2456	u64 chunk_offset;
2457	int ret = 0;
2458	int ro_set;
2459	int slot;
2460	struct extent_buffer *l;
2461	struct btrfs_key key;
2462	struct btrfs_key found_key;
2463	struct btrfs_block_group *cache;
2464	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2465
2466	path = btrfs_alloc_path();
2467	if (!path)
2468		return -ENOMEM;
2469
2470	path->reada = READA_FORWARD;
2471	path->search_commit_root = 1;
2472	path->skip_locking = 1;
2473
2474	key.objectid = scrub_dev->devid;
2475	key.offset = 0ull;
2476	key.type = BTRFS_DEV_EXTENT_KEY;
2477
2478	while (1) {
2479		u64 dev_extent_len;
2480
2481		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2482		if (ret < 0)
2483			break;
2484		if (ret > 0) {
2485			if (path->slots[0] >=
2486			    btrfs_header_nritems(path->nodes[0])) {
2487				ret = btrfs_next_leaf(root, path);
2488				if (ret < 0)
2489					break;
2490				if (ret > 0) {
2491					ret = 0;
2492					break;
2493				}
2494			} else {
2495				ret = 0;
2496			}
2497		}
2498
2499		l = path->nodes[0];
2500		slot = path->slots[0];
2501
2502		btrfs_item_key_to_cpu(l, &found_key, slot);
2503
2504		if (found_key.objectid != scrub_dev->devid)
2505			break;
2506
2507		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2508			break;
2509
2510		if (found_key.offset >= end)
2511			break;
2512
2513		if (found_key.offset < key.offset)
2514			break;
2515
2516		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2517		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2518
2519		if (found_key.offset + dev_extent_len <= start)
2520			goto skip;
2521
2522		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2523
2524		/*
2525		 * get a reference on the corresponding block group to prevent
2526		 * the chunk from going away while we scrub it
2527		 */
2528		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2529
2530		/* some chunks are removed but not committed to disk yet,
2531		 * continue scrubbing */
2532		if (!cache)
2533			goto skip;
2534
2535		ASSERT(cache->start <= chunk_offset);
2536		/*
2537		 * We are using the commit root to search for device extents, so
2538		 * that means we could have found a device extent item from a
2539		 * block group that was deleted in the current transaction. The
2540		 * logical start offset of the deleted block group, stored at
2541		 * @chunk_offset, might be part of the logical address range of
2542		 * a new block group (which uses different physical extents).
2543		 * In this case btrfs_lookup_block_group() has returned the new
2544		 * block group, and its start address is less than @chunk_offset.
2545		 *
2546		 * We skip such new block groups, because it's pointless to
2547		 * process them, as we won't find their extents because we search
2548		 * for them using the commit root of the extent tree. For a device
2549		 * replace it's also fine to skip it, we won't miss copying them
2550		 * to the target device because we have the write duplication
2551		 * setup through the regular write path (by btrfs_map_block()),
2552		 * and we have committed a transaction when we started the device
2553		 * replace, right after setting up the device replace state.
2554		 */
2555		if (cache->start < chunk_offset) {
2556			btrfs_put_block_group(cache);
2557			goto skip;
2558		}
2559
2560		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2561			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2562				btrfs_put_block_group(cache);
2563				goto skip;
2564			}
2565		}
2566
2567		/*
2568		 * Make sure that while we are scrubbing the corresponding block
2569		 * group doesn't get its logical address and its device extents
2570		 * reused for another block group, which can possibly be of a
2571		 * different type and different profile. We do this to prevent
2572		 * false error detections and crashes due to bogus attempts to
2573		 * repair extents.
2574		 */
2575		spin_lock(&cache->lock);
2576		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2577			spin_unlock(&cache->lock);
2578			btrfs_put_block_group(cache);
2579			goto skip;
2580		}
2581		btrfs_freeze_block_group(cache);
2582		spin_unlock(&cache->lock);
2583
2584		/*
2585		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
2586		 * to avoid deadlock caused by:
2587		 * btrfs_inc_block_group_ro()
2588		 * -> btrfs_wait_for_commit()
2589		 * -> btrfs_commit_transaction()
2590		 * -> btrfs_scrub_pause()
2591		 */
2592		scrub_pause_on(fs_info);
2593
2594		/*
2595		 * Don't do chunk preallocation for scrub.
2596		 *
2597		 * This is especially important for SYSTEM bgs, or we can hit
2598		 * -EFBIG from btrfs_finish_chunk_alloc() like:
2599		 * 1. The only SYSTEM bg is marked RO.
2600		 *    Since SYSTEM bg is small, that's pretty common.
2601		 * 2. New SYSTEM bg will be allocated
2602		 *    Because the regular code path will allocate a new chunk.
2603		 * 3. New SYSTEM bg is empty and will get cleaned up
2604		 *    Before cleanup really happens, it's marked RO again.
2605		 * 4. Empty SYSTEM bg get scrubbed
2606		 *    We go back to 2.
2607		 *
2608		 * This can easily boost the amount of SYSTEM chunks if the cleaner
2609		 * thread can't be triggered fast enough, and use up all the space
2610		 * of btrfs_super_block::sys_chunk_array.
2611		 *
2612		 * While for dev replace, we need to try our best to mark block
2613		 * group RO, to prevent race between:
2614		 * - Write duplication
2615		 *   Contains latest data
2616		 * - Scrub copy
2617		 *   Contains data from commit tree
2618		 *
2619		 * If target block group is not marked RO, nocow writes can
2620		 * be overwritten by scrub copy, causing data corruption.
2621		 * So for dev-replace, it's not allowed to continue if a block
2622		 * group is not RO.
2623		 */
2624		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2625		if (!ret && sctx->is_dev_replace) {
2626			ret = finish_extent_writes_for_zoned(root, cache);
2627			if (ret) {
2628				btrfs_dec_block_group_ro(cache);
2629				scrub_pause_off(fs_info);
2630				btrfs_put_block_group(cache);
2631				break;
2632			}
2633		}
2634
2635		if (ret == 0) {
2636			ro_set = 1;
2637		} else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2638			   !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2639			/*
2640			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
2641			 * fails to create a new chunk for metadata.
2642			 * It is not a problem for scrub, because
2643			 * metadata is always COWed, and transaction commits
2644			 * pause the running scrub.
2645			 *
2646			 * For RAID56 chunks, we have to mark them read-only
2647			 * for scrub, as later we would use our own cache
2648			 * outside of the RAID56 realm.
2649			 * Thus we want the RAID56 bg to be marked RO to
2650			 * prevent RMW from screwing up our cache.
2651			 */
2652			ro_set = 0;
2653		} else if (ret == -ETXTBSY) {
2654			btrfs_warn(fs_info,
2655		   "skipping scrub of block group %llu due to active swapfile",
2656				   cache->start);
2657			scrub_pause_off(fs_info);
2658			ret = 0;
2659			goto skip_unfreeze;
2660		} else {
2661			btrfs_warn(fs_info,
2662				   "failed setting block group ro: %d", ret);
2663			btrfs_unfreeze_block_group(cache);
2664			btrfs_put_block_group(cache);
2665			scrub_pause_off(fs_info);
2666			break;
2667		}
2668
2669		/*
2670		 * Now the target block is marked RO, wait for nocow writes to
2671		 * finish before dev-replace.
2672		 * COW is fine, as COW never overwrites extents in commit tree.
2673		 */
2674		if (sctx->is_dev_replace) {
2675			btrfs_wait_nocow_writers(cache);
2676			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2677					cache->length);
2678		}
2679
2680		scrub_pause_off(fs_info);
2681		down_write(&dev_replace->rwsem);
2682		dev_replace->cursor_right = found_key.offset + dev_extent_len;
2683		dev_replace->cursor_left = found_key.offset;
2684		dev_replace->item_needs_writeback = 1;
2685		up_write(&dev_replace->rwsem);
2686
2687		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2688				  dev_extent_len);
2689		if (sctx->is_dev_replace &&
2690		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2691						      cache, found_key.offset))
2692			ro_set = 0;
2693
2694		down_write(&dev_replace->rwsem);
2695		dev_replace->cursor_left = dev_replace->cursor_right;
2696		dev_replace->item_needs_writeback = 1;
2697		up_write(&dev_replace->rwsem);
2698
2699		if (ro_set)
2700			btrfs_dec_block_group_ro(cache);
2701
2702		/*
2703		 * We might have prevented the cleaner kthread from deleting
2704		 * this block group if it was already unused because we raced
2705		 * and set it to RO mode first. So add it back to the unused
2706		 * list, otherwise it might not ever be deleted unless a manual
2707		 * balance is triggered or it becomes used and unused again.
2708		 */
2709		spin_lock(&cache->lock);
2710		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2711		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
2712			spin_unlock(&cache->lock);
2713			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2714				btrfs_discard_queue_work(&fs_info->discard_ctl,
2715							 cache);
2716			else
2717				btrfs_mark_bg_unused(cache);
2718		} else {
2719			spin_unlock(&cache->lock);
2720		}
2721skip_unfreeze:
2722		btrfs_unfreeze_block_group(cache);
2723		btrfs_put_block_group(cache);
2724		if (ret)
2725			break;
2726		if (sctx->is_dev_replace &&
2727		    atomic64_read(&dev_replace->num_write_errors) > 0) {
2728			ret = -EIO;
2729			break;
2730		}
2731		if (sctx->stat.malloc_errors > 0) {
2732			ret = -ENOMEM;
2733			break;
2734		}
2735skip:
2736		key.offset = found_key.offset + dev_extent_len;
2737		btrfs_release_path(path);
2738	}
2739
2740	btrfs_free_path(path);
2741
2742	return ret;
2743}
2744
2745static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2746			   struct page *page, u64 physical, u64 generation)
2747{
2748	struct btrfs_fs_info *fs_info = sctx->fs_info;
2749	struct bio_vec bvec;
2750	struct bio bio;
2751	struct btrfs_super_block *sb = page_address(page);
2752	int ret;
2753
2754	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2755	bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2756	__bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2757	ret = submit_bio_wait(&bio);
2758	bio_uninit(&bio);
2759
2760	if (ret < 0)
2761		return ret;
2762	ret = btrfs_check_super_csum(fs_info, sb);
2763	if (ret != 0) {
2764		btrfs_err_rl(fs_info,
2765			"super block at physical %llu devid %llu has bad csum",
2766			physical, dev->devid);
2767		return -EIO;
2768	}
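	/*
	 * A generation mismatch usually means this copy missed the latest
	 * super block write (e.g. a lost write), report it as an error.
	 */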
2769	if (btrfs_super_generation(sb) != generation) {
2770		btrfs_err_rl(fs_info,
2771"super block at physical %llu devid %llu has bad generation %llu expect %llu",
2772			     physical, dev->devid,
2773			     btrfs_super_generation(sb), generation);
2774		return -EUCLEAN;
2775	}
2776
2777	return btrfs_validate_super(fs_info, sb, -1);
2778}
2779
2780static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2781					   struct btrfs_device *scrub_dev)
2782{
2783	int	i;
2784	u64	bytenr;
2785	u64	gen;
2786	int ret = 0;
2787	struct page *page;
2788	struct btrfs_fs_info *fs_info = sctx->fs_info;
2789
2790	if (BTRFS_FS_ERROR(fs_info))
2791		return -EROFS;
2792
2793	page = alloc_page(GFP_KERNEL);
2794	if (!page) {
2795		spin_lock(&sctx->stat_lock);
2796		sctx->stat.malloc_errors++;
2797		spin_unlock(&sctx->stat_lock);
2798		return -ENOMEM;
2799	}
2800
2801	/* Seed devices of a new filesystem have their own generation. */
2802	if (scrub_dev->fs_devices != fs_info->fs_devices)
2803		gen = scrub_dev->generation;
2804	else
2805		gen = btrfs_get_last_trans_committed(fs_info);
2806
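	/*
	 * Check every super block copy that fits on the device.  The copies
	 * sit at fixed offsets (64KiB, 64MiB and 256GiB).
	 */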
2807	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2808		bytenr = btrfs_sb_offset(i);
2809		if (bytenr + BTRFS_SUPER_INFO_SIZE >
2810		    scrub_dev->commit_total_bytes)
2811			break;
2812		if (!btrfs_check_super_location(scrub_dev, bytenr))
2813			continue;
2814
2815		ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2816		if (ret) {
2817			spin_lock(&sctx->stat_lock);
2818			sctx->stat.super_errors++;
2819			spin_unlock(&sctx->stat_lock);
2820		}
2821	}
2822	__free_page(page);
2823	return 0;
2824}
2825
2826static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2827{
2828	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2829					&fs_info->scrub_lock)) {
2830		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2831
2832		fs_info->scrub_workers = NULL;
2833		mutex_unlock(&fs_info->scrub_lock);
2834
2835		if (scrub_workers)
2836			destroy_workqueue(scrub_workers);
2837	}
2838}
2839
2840/*
2841 * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
2842 */
2843static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2844{
2845	struct workqueue_struct *scrub_workers = NULL;
2846	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2847	int max_active = fs_info->thread_pool_size;
2848	int ret = -ENOMEM;
2849
2850	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2851		return 0;
2852
2853	scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2854	if (!scrub_workers)
2855		return -ENOMEM;
2856
2857	mutex_lock(&fs_info->scrub_lock);
2858	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2859		ASSERT(fs_info->scrub_workers == NULL);
2860		fs_info->scrub_workers = scrub_workers;
2861		refcount_set(&fs_info->scrub_workers_refcnt, 1);
2862		mutex_unlock(&fs_info->scrub_lock);
2863		return 0;
2864	}
2865	/* Another thread raced in and created the workers for us */
2866	refcount_inc(&fs_info->scrub_workers_refcnt);
2867	mutex_unlock(&fs_info->scrub_lock);
2868
2869	ret = 0;
2870
2871	destroy_workqueue(scrub_workers);
2872	return ret;
2873}
2874
2875int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2876		    u64 end, struct btrfs_scrub_progress *progress,
2877		    int readonly, int is_dev_replace)
2878{
2879	struct btrfs_dev_lookup_args args = { .devid = devid };
2880	struct scrub_ctx *sctx;
2881	int ret;
2882	struct btrfs_device *dev;
2883	unsigned int nofs_flag;
2884	bool need_commit = false;
2885
2886	if (btrfs_fs_closing(fs_info))
2887		return -EAGAIN;
2888
2889	/* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2890	ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2891
2892	/*
2893	 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2894	 * value (max nodesize / min sectorsize), thus nodesize should always
2895	 * be fine.
2896	 */
2897	ASSERT(fs_info->nodesize <=
2898	       SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2899
2900	/* Allocate outside of device_list_mutex */
2901	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2902	if (IS_ERR(sctx))
2903		return PTR_ERR(sctx);
2904
2905	ret = scrub_workers_get(fs_info);
2906	if (ret)
2907		goto out_free_ctx;
2908
2909	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2910	dev = btrfs_find_device(fs_info->fs_devices, &args);
2911	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2912		     !is_dev_replace)) {
2913		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2914		ret = -ENODEV;
2915		goto out;
2916	}
2917
2918	if (!is_dev_replace && !readonly &&
2919	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2920		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2921		btrfs_err_in_rcu(fs_info,
2922			"scrub on devid %llu: filesystem on %s is not writable",
2923				 devid, btrfs_dev_name(dev));
2924		ret = -EROFS;
2925		goto out;
2926	}
2927
2928	mutex_lock(&fs_info->scrub_lock);
2929	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2930	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2931		mutex_unlock(&fs_info->scrub_lock);
2932		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2933		ret = -EIO;
2934		goto out;
2935	}
2936
2937	down_read(&fs_info->dev_replace.rwsem);
2938	if (dev->scrub_ctx ||
2939	    (!is_dev_replace &&
2940	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2941		up_read(&fs_info->dev_replace.rwsem);
2942		mutex_unlock(&fs_info->scrub_lock);
2943		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2944		ret = -EINPROGRESS;
2945		goto out;
2946	}
2947	up_read(&fs_info->dev_replace.rwsem);
2948
2949	sctx->readonly = readonly;
2950	dev->scrub_ctx = sctx;
2951	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2952
2953	/*
2954	 * By checking @scrub_pause_req here, we can avoid a
2955	 * race between committing a transaction and scrubbing.
2956	 */
2957	__scrub_blocked_if_needed(fs_info);
2958	atomic_inc(&fs_info->scrubs_running);
2959	mutex_unlock(&fs_info->scrub_lock);
2960
2961	/*
2962	 * In order to avoid deadlock with reclaim when there is a transaction
2963	 * trying to pause scrub, make sure we use GFP_NOFS for all the
2964	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2965	 * invoked by our callees. The pausing request is done when the
2966	 * transaction commit starts, and it blocks the transaction until scrub
2967	 * is paused (done at specific points at scrub_stripe() or right above
2968	 * before incrementing fs_info->scrubs_running).
2969	 */
2970	nofs_flag = memalloc_nofs_save();
2971	if (!is_dev_replace) {
2972		u64 old_super_errors;
2973
2974		spin_lock(&sctx->stat_lock);
2975		old_super_errors = sctx->stat.super_errors;
2976		spin_unlock(&sctx->stat_lock);
2977
2978		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2979		/*
2980		 * by holding device list mutex, we can
2981		 * kick off writing super in log tree sync.
2982		 */
2983		mutex_lock(&fs_info->fs_devices->device_list_mutex);
2984		ret = scrub_supers(sctx, dev);
2985		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2986
2987		spin_lock(&sctx->stat_lock);
2988		/*
2989		 * Super block errors found, but we can not commit transaction
2990		 * Super block errors found, but we can not commit a transaction
2991		 * in the current context, since btrfs_commit_transaction() needs
2992		 * to pause the currently running scrub (held by ourselves).
2993		if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2994			need_commit = true;
2995		spin_unlock(&sctx->stat_lock);
2996	}
2997
2998	if (!ret)
2999		ret = scrub_enumerate_chunks(sctx, dev, start, end);
3000	memalloc_nofs_restore(nofs_flag);
3001
3002	atomic_dec(&fs_info->scrubs_running);
3003	wake_up(&fs_info->scrub_pause_wait);
3004
3005	if (progress)
3006		memcpy(progress, &sctx->stat, sizeof(*progress));
3007
3008	if (!is_dev_replace)
3009		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3010			ret ? "not finished" : "finished", devid, ret);
3011
3012	mutex_lock(&fs_info->scrub_lock);
3013	dev->scrub_ctx = NULL;
3014	mutex_unlock(&fs_info->scrub_lock);
3015
3016	scrub_workers_put(fs_info);
3017	scrub_put_ctx(sctx);
3018
3019	/*
3020	 * We found some super block errors before, now try to force a
3021	 * transaction commit, as scrub has finished.
3022	 */
3023	if (need_commit) {
3024		struct btrfs_trans_handle *trans;
3025
3026		trans = btrfs_start_transaction(fs_info->tree_root, 0);
3027		if (IS_ERR(trans)) {
3028			ret = PTR_ERR(trans);
3029			btrfs_err(fs_info,
3030	"scrub: failed to start transaction to fix super block errors: %d", ret);
3031			return ret;
3032		}
3033		ret = btrfs_commit_transaction(trans);
3034		if (ret < 0)
3035			btrfs_err(fs_info,
3036	"scrub: failed to commit transaction to fix super block errors: %d", ret);
3037	}
3038	return ret;
3039out:
3040	scrub_workers_put(fs_info);
3041out_free_ctx:
3042	scrub_free_ctx(sctx);
3043
3044	return ret;
3045}
3046
3047void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3048{
3049	mutex_lock(&fs_info->scrub_lock);
3050	atomic_inc(&fs_info->scrub_pause_req);
3051	while (atomic_read(&fs_info->scrubs_paused) !=
3052	       atomic_read(&fs_info->scrubs_running)) {
3053		mutex_unlock(&fs_info->scrub_lock);
3054		wait_event(fs_info->scrub_pause_wait,
3055			   atomic_read(&fs_info->scrubs_paused) ==
3056			   atomic_read(&fs_info->scrubs_running));
3057		mutex_lock(&fs_info->scrub_lock);
3058	}
3059	mutex_unlock(&fs_info->scrub_lock);
3060}
3061
3062void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3063{
3064	atomic_dec(&fs_info->scrub_pause_req);
3065	wake_up(&fs_info->scrub_pause_wait);
3066}
3067
3068int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3069{
3070	mutex_lock(&fs_info->scrub_lock);
3071	if (!atomic_read(&fs_info->scrubs_running)) {
3072		mutex_unlock(&fs_info->scrub_lock);
3073		return -ENOTCONN;
3074	}
3075
3076	atomic_inc(&fs_info->scrub_cancel_req);
3077	while (atomic_read(&fs_info->scrubs_running)) {
3078		mutex_unlock(&fs_info->scrub_lock);
3079		wait_event(fs_info->scrub_pause_wait,
3080			   atomic_read(&fs_info->scrubs_running) == 0);
3081		mutex_lock(&fs_info->scrub_lock);
3082	}
3083	atomic_dec(&fs_info->scrub_cancel_req);
3084	mutex_unlock(&fs_info->scrub_lock);
3085
3086	return 0;
3087}
3088
3089int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3090{
3091	struct btrfs_fs_info *fs_info = dev->fs_info;
3092	struct scrub_ctx *sctx;
3093
3094	mutex_lock(&fs_info->scrub_lock);
3095	sctx = dev->scrub_ctx;
3096	if (!sctx) {
3097		mutex_unlock(&fs_info->scrub_lock);
3098		return -ENOTCONN;
3099	}
3100	atomic_inc(&sctx->cancel_req);
3101	while (dev->scrub_ctx) {
3102		mutex_unlock(&fs_info->scrub_lock);
3103		wait_event(fs_info->scrub_pause_wait,
3104			   dev->scrub_ctx == NULL);
3105		mutex_lock(&fs_info->scrub_lock);
3106	}
3107	mutex_unlock(&fs_info->scrub_lock);
3108
3109	return 0;
3110}
3111
3112int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3113			 struct btrfs_scrub_progress *progress)
3114{
3115	struct btrfs_dev_lookup_args args = { .devid = devid };
3116	struct btrfs_device *dev;
3117	struct scrub_ctx *sctx = NULL;
3118
3119	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3120	dev = btrfs_find_device(fs_info->fs_devices, &args);
3121	if (dev)
3122		sctx = dev->scrub_ctx;
3123	if (sctx)
3124		memcpy(progress, &sctx->stat, sizeof(*progress));
3125	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3126
3127	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3128}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/ratelimit.h>
   8#include <linux/sched/mm.h>
   9#include <crypto/hash.h>
  10#include "ctree.h"
  11#include "discard.h"
  12#include "volumes.h"
  13#include "disk-io.h"
  14#include "ordered-data.h"
  15#include "transaction.h"
  16#include "backref.h"
  17#include "extent_io.h"
  18#include "dev-replace.h"
  19#include "check-integrity.h"
  20#include "raid56.h"
  21#include "block-group.h"
  22#include "zoned.h"
  23#include "fs.h"
  24#include "accessors.h"
  25#include "file-item.h"
  26#include "scrub.h"
 
  27
  28/*
  29 * This is only the first step towards a full-features scrub. It reads all
  30 * extent and super block and verifies the checksums. In case a bad checksum
  31 * is found or the extent cannot be read, good data will be written back if
  32 * any can be found.
  33 *
  34 * Future enhancements:
  35 *  - In case an unrepairable extent is encountered, track which files are
  36 *    affected and report them
  37 *  - track and record media errors, throw out bad devices
  38 *  - add a mode to also read unallocated space
  39 */
  40
  41struct scrub_block;
  42struct scrub_ctx;
  43
  44/*
  45 * The following three values only influence the performance.
  46 *
  47 * The last one configures the number of parallel and outstanding I/O
  48 * operations. The first one configures an upper limit for the number
  49 * of (dynamically allocated) pages that are added to a bio.
  50 */
  51#define SCRUB_SECTORS_PER_BIO	32	/* 128KiB per bio for 4KiB pages */
  52#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for 4KiB pages */
 
 
 
 
 
 
 
 
 
  53
  54/*
  55 * The following value times PAGE_SIZE needs to be large enough to match the
  56 * largest node/leaf/sector size that shall be supported.
  57 */
  58#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
  59
  60#define SCRUB_MAX_PAGES			(DIV_ROUND_UP(BTRFS_MAX_METADATA_BLOCKSIZE, PAGE_SIZE))
 
 
  61
  62/*
  63 * Maximum number of mirrors that can be available for all profiles counting
  64 * the target device of dev-replace as one. During an active device replace
  65 * procedure, the target device of the copy operation is a mirror for the
  66 * filesystem data as well that can be used to read data in order to repair
  67 * read errors on other disks.
  68 *
  69 * Current value is derived from RAID1C4 with 4 copies.
  70 */
  71#define BTRFS_MAX_MIRRORS (4 + 1)
  72
  73struct scrub_recover {
  74	refcount_t		refs;
  75	struct btrfs_io_context	*bioc;
  76	u64			map_length;
 
 
  77};
  78
  79struct scrub_sector {
  80	struct scrub_block	*sblock;
  81	struct list_head	list;
  82	u64			flags;  /* extent flags */
  83	u64			generation;
  84	/* Offset in bytes to @sblock. */
  85	u32			offset;
  86	atomic_t		refs;
  87	unsigned int		have_csum:1;
  88	unsigned int		io_error:1;
  89	u8			csum[BTRFS_CSUM_SIZE];
  90
  91	struct scrub_recover	*recover;
  92};
  93
  94struct scrub_bio {
  95	int			index;
  96	struct scrub_ctx	*sctx;
  97	struct btrfs_device	*dev;
  98	struct bio		*bio;
  99	blk_status_t		status;
 100	u64			logical;
 101	u64			physical;
 102	struct scrub_sector	*sectors[SCRUB_SECTORS_PER_BIO];
 103	int			sector_count;
 104	int			next_free;
 105	struct work_struct	work;
 106};
 107
 108struct scrub_block {
 109	/*
 110	 * Each page will have its page::private used to record the logical
 111	 * bytenr.
 
 112	 */
 113	struct page		*pages[SCRUB_MAX_PAGES];
 114	struct scrub_sector	*sectors[SCRUB_MAX_SECTORS_PER_BLOCK];
 115	struct btrfs_device	*dev;
 116	/* Logical bytenr of the sblock */
 117	u64			logical;
 118	u64			physical;
 119	u64			physical_for_dev_replace;
 120	/* Length of sblock in bytes */
 121	u32			len;
 122	int			sector_count;
 123	int			mirror_num;
 124
 125	atomic_t		outstanding_sectors;
 126	refcount_t		refs; /* free mem on transition to zero */
 127	struct scrub_ctx	*sctx;
 128	struct scrub_parity	*sparity;
 129	struct {
 130		unsigned int	header_error:1;
 131		unsigned int	checksum_error:1;
 132		unsigned int	no_io_error_seen:1;
 133		unsigned int	generation_error:1; /* also sets header_error */
 134
 135		/* The following is for the data used to check parity */
 136		/* It is for the data with checksum */
 137		unsigned int	data_corrected:1;
 138	};
 139	struct work_struct	work;
 140};
 141
 142/* Used for the chunks with parity stripe such RAID5/6 */
 143struct scrub_parity {
 144	struct scrub_ctx	*sctx;
 
 
 
 
 
 145
 146	struct btrfs_device	*scrub_dev;
 
 147
 148	u64			logic_start;
 
 
 149
 150	u64			logic_end;
 151
 152	int			nsectors;
 
 153
 154	u32			stripe_len;
 
 
 
 
 
 155
 156	refcount_t		refs;
 
 
 157
 158	struct list_head	sectors_list;
 
 
 
 
 159
 160	/* Work of parity check and repair */
 161	struct work_struct	work;
 162
 163	/* Mark the parity blocks which have data */
 164	unsigned long		dbitmap;
 
 
 
 
 
 
 
 
 
 
 165
 166	/*
 167	 * Mark the parity blocks which have data, but errors happen when
 168	 * read data or check data
 
 
 
 
 169	 */
 170	unsigned long		ebitmap;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 171};
 172
 173struct scrub_ctx {
 174	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
 
 175	struct btrfs_fs_info	*fs_info;
 176	int			first_free;
 177	int			curr;
 178	atomic_t		bios_in_flight;
 179	atomic_t		workers_pending;
 180	spinlock_t		list_lock;
 181	wait_queue_head_t	list_wait;
 182	struct list_head	csum_list;
 183	atomic_t		cancel_req;
 184	int			readonly;
 185	int			sectors_per_bio;
 186
 187	/* State of IO submission throttling affecting the associated device */
 188	ktime_t			throttle_deadline;
 189	u64			throttle_sent;
 190
 191	int			is_dev_replace;
 192	u64			write_pointer;
 193
 194	struct scrub_bio        *wr_curr_bio;
 195	struct mutex            wr_lock;
 196	struct btrfs_device     *wr_tgtdev;
 197	bool                    flush_all_writes;
 198
 199	/*
 200	 * statistics
 201	 */
 202	struct btrfs_scrub_progress stat;
 203	spinlock_t		stat_lock;
 204
 205	/*
 206	 * Use a ref counter to avoid use-after-free issues. Scrub workers
 207	 * decrement bios_in_flight and workers_pending and then do a wakeup
 208	 * on the list_wait wait queue. We must ensure the main scrub task
 209	 * doesn't free the scrub context before or while the workers are
 210	 * doing the wakeup() call.
 211	 */
 212	refcount_t              refs;
 213};
 214
 215struct scrub_warning {
 216	struct btrfs_path	*path;
 217	u64			extent_item_size;
 218	const char		*errstr;
 219	u64			physical;
 220	u64			logical;
 221	struct btrfs_device	*dev;
 222};
 223
 224struct full_stripe_lock {
 225	struct rb_node node;
 226	u64 logical;
 227	u64 refs;
 228	struct mutex mutex;
 229};
 230
 231#ifndef CONFIG_64BIT
 232/* This structure is for architectures whose (void *) is smaller than u64 */
 233struct scrub_page_private {
 234	u64 logical;
 235};
 236#endif
 237
 238static int attach_scrub_page_private(struct page *page, u64 logical)
 239{
 240#ifdef CONFIG_64BIT
 241	attach_page_private(page, (void *)logical);
 242	return 0;
 243#else
 244	struct scrub_page_private *spp;
 245
 246	spp = kmalloc(sizeof(*spp), GFP_KERNEL);
 247	if (!spp)
 248		return -ENOMEM;
 249	spp->logical = logical;
 250	attach_page_private(page, (void *)spp);
 251	return 0;
 252#endif
 253}
 254
 255static void detach_scrub_page_private(struct page *page)
 256{
 257#ifdef CONFIG_64BIT
 258	detach_page_private(page);
 259	return;
 260#else
 261	struct scrub_page_private *spp;
 262
 263	spp = detach_page_private(page);
 264	kfree(spp);
 265	return;
 266#endif
 267}
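
    /*
     * A minimal sketch of how the stored logical bytenr could be read back,
     * mirroring the two cases above.  The helper name is hypothetical and is
     * not defined in this file:
     *
     *	static u64 scrub_page_private_logical(struct page *page)
     *	{
     *	#ifdef CONFIG_64BIT
     *		return (u64)(uintptr_t)page_private(page);
     *	#else
     *		struct scrub_page_private *spp;
     *
     *		spp = (struct scrub_page_private *)page_private(page);
     *		return spp->logical;
     *	#endif
     *	}
     */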
 268
 269static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
 270					     struct btrfs_device *dev,
 271					     u64 logical, u64 physical,
 272					     u64 physical_for_dev_replace,
 273					     int mirror_num)
 274{
 275	struct scrub_block *sblock;
 276
 277	sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 278	if (!sblock)
 279		return NULL;
 280	refcount_set(&sblock->refs, 1);
 281	sblock->sctx = sctx;
 282	sblock->logical = logical;
 283	sblock->physical = physical;
 284	sblock->physical_for_dev_replace = physical_for_dev_replace;
 285	sblock->dev = dev;
 286	sblock->mirror_num = mirror_num;
 287	sblock->no_io_error_seen = 1;
 288	/*
 289	 * Scrub_block::pages will be allocated by alloc_scrub_sector() when
 290	 * the corresponding page is not yet allocated.
 291	 */
 292	return sblock;
 293}
 294
 295/*
 296 * Allocate a new scrub sector and attach it to @sblock.
 297 *
 298 * Will also allocate new pages for @sblock if needed.
 299 */
 300static struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock,
 301					       u64 logical)
 302{
 303	const pgoff_t page_index = (logical - sblock->logical) >> PAGE_SHIFT;
 304	struct scrub_sector *ssector;
 305
 306	/* We must never have scrub_block exceed U32_MAX in size. */
 307	ASSERT(logical - sblock->logical < U32_MAX);
 308
 309	ssector = kzalloc(sizeof(*ssector), GFP_KERNEL);
 310	if (!ssector)
 311		return NULL;
 312
 313	/* Allocate a new page if the slot is not allocated */
 314	if (!sblock->pages[page_index]) {
 315		int ret;
 316
 317		sblock->pages[page_index] = alloc_page(GFP_KERNEL);
 318		if (!sblock->pages[page_index]) {
 319			kfree(ssector);
 320			return NULL;
 321		}
 322		ret = attach_scrub_page_private(sblock->pages[page_index],
 323				sblock->logical + (page_index << PAGE_SHIFT));
 324		if (ret < 0) {
 325			kfree(ssector);
 326			__free_page(sblock->pages[page_index]);
 327			sblock->pages[page_index] = NULL;
 328			return NULL;
 329		}
 330	}
 331
 332	atomic_set(&ssector->refs, 1);
 333	ssector->sblock = sblock;
 334	/* The sector to be added should not be used */
 335	ASSERT(sblock->sectors[sblock->sector_count] == NULL);
 336	ssector->offset = logical - sblock->logical;
 337
 338	/* The sector count must be smaller than the limit */
 339	ASSERT(sblock->sector_count < SCRUB_MAX_SECTORS_PER_BLOCK);
 340
 341	sblock->sectors[sblock->sector_count] = ssector;
 342	sblock->sector_count++;
 343	sblock->len += sblock->sctx->fs_info->sectorsize;
 344
 345	return ssector;
 346}
 347
 348static struct page *scrub_sector_get_page(struct scrub_sector *ssector)
 349{
 350	struct scrub_block *sblock = ssector->sblock;
 351	pgoff_t index;
 352	/*
 353	 * When calling this function, ssector must be already attached to the
 354	 * parent sblock.
 355	 */
 356	ASSERT(sblock);
 357
 358	/* The range should be inside the sblock range */
 359	ASSERT(ssector->offset < sblock->len);
 360
 361	index = ssector->offset >> PAGE_SHIFT;
 362	ASSERT(index < SCRUB_MAX_PAGES);
 363	ASSERT(sblock->pages[index]);
 364	ASSERT(PagePrivate(sblock->pages[index]));
 365	return sblock->pages[index];
 366}
 367
 368static unsigned int scrub_sector_get_page_offset(struct scrub_sector *ssector)
 369{
 370	struct scrub_block *sblock = ssector->sblock;
 371
 372	/*
 373	 * When calling this function, ssector must be already attached to the
 374	 * parent sblock.
 375	 */
 376	ASSERT(sblock);
 377
 378	/* The range should be inside the sblock range */
 379	ASSERT(ssector->offset < sblock->len);
 380
 381	return offset_in_page(ssector->offset);
 382}
 383
 384static char *scrub_sector_get_kaddr(struct scrub_sector *ssector)
 385{
 386	return page_address(scrub_sector_get_page(ssector)) +
 387	       scrub_sector_get_page_offset(ssector);
 388}
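
    /*
     * A short worked example for the three helpers above, assuming a 4KiB
     * sectorsize: a sector at ssector->offset == 20KiB lives in
     * pages[20KiB >> PAGE_SHIFT].  With 4KiB pages that is pages[5] at
     * in-page offset 0; with 64KiB pages it is pages[0] at in-page offset
     * 20KiB.  Only the byte offset is stored, the page/offset pair is always
     * derived on demand.
     */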
 389
 390static int bio_add_scrub_sector(struct bio *bio, struct scrub_sector *ssector,
 391				unsigned int len)
 392{
 393	return bio_add_page(bio, scrub_sector_get_page(ssector), len,
 394			    scrub_sector_get_page_offset(ssector));
 395}
 396
 397static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 398				     struct scrub_block *sblocks_for_recheck[]);
 399static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
 400				struct scrub_block *sblock,
 401				int retry_failed_mirror);
 402static void scrub_recheck_block_checksum(struct scrub_block *sblock);
 403static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 404					     struct scrub_block *sblock_good);
 405static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
 406					    struct scrub_block *sblock_good,
 407					    int sector_num, int force_write);
 408static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
 409static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock,
 410					     int sector_num);
 411static int scrub_checksum_data(struct scrub_block *sblock);
 412static int scrub_checksum_tree_block(struct scrub_block *sblock);
 413static int scrub_checksum_super(struct scrub_block *sblock);
 414static void scrub_block_put(struct scrub_block *sblock);
 415static void scrub_sector_get(struct scrub_sector *sector);
 416static void scrub_sector_put(struct scrub_sector *sector);
 417static void scrub_parity_get(struct scrub_parity *sparity);
 418static void scrub_parity_put(struct scrub_parity *sparity);
 419static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
 420			 u64 physical, struct btrfs_device *dev, u64 flags,
 421			 u64 gen, int mirror_num, u8 *csum,
 422			 u64 physical_for_dev_replace);
 423static void scrub_bio_end_io(struct bio *bio);
 424static void scrub_bio_end_io_worker(struct work_struct *work);
 425static void scrub_block_complete(struct scrub_block *sblock);
 426static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
 427				 u64 extent_logical, u32 extent_len,
 428				 u64 *extent_physical,
 429				 struct btrfs_device **extent_dev,
 430				 int *extent_mirror_num);
 431static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 432				      struct scrub_sector *sector);
 433static void scrub_wr_submit(struct scrub_ctx *sctx);
 434static void scrub_wr_bio_end_io(struct bio *bio);
 435static void scrub_wr_bio_end_io_worker(struct work_struct *work);
 436static void scrub_put_ctx(struct scrub_ctx *sctx);
 437
 438static inline int scrub_is_page_on_raid56(struct scrub_sector *sector)
 439{
 440	return sector->recover &&
 441	       (sector->recover->bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 442}
 443
 444static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
 445{
 446	refcount_inc(&sctx->refs);
 447	atomic_inc(&sctx->bios_in_flight);
 448}
 449
 450static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 451{
 452	atomic_dec(&sctx->bios_in_flight);
 453	wake_up(&sctx->list_wait);
 454	scrub_put_ctx(sctx);
 455}
 456
 457static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 458{
 459	while (atomic_read(&fs_info->scrub_pause_req)) {
 460		mutex_unlock(&fs_info->scrub_lock);
 461		wait_event(fs_info->scrub_pause_wait,
 462		   atomic_read(&fs_info->scrub_pause_req) == 0);
 463		mutex_lock(&fs_info->scrub_lock);
 464	}
 465}
 466
 467static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 468{
 469	atomic_inc(&fs_info->scrubs_paused);
 470	wake_up(&fs_info->scrub_pause_wait);
 471}
 472
 473static void scrub_pause_off(struct btrfs_fs_info *fs_info)
 474{
 475	mutex_lock(&fs_info->scrub_lock);
 476	__scrub_blocked_if_needed(fs_info);
 477	atomic_dec(&fs_info->scrubs_paused);
 478	mutex_unlock(&fs_info->scrub_lock);
 479
 480	wake_up(&fs_info->scrub_pause_wait);
 481}
 482
 483static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 484{
 485	scrub_pause_on(fs_info);
 486	scrub_pause_off(fs_info);
 487}
 488
 489/*
 490 * Insert new full stripe lock into full stripe locks tree
 491 *
 492 * Return pointer to existing or newly inserted full_stripe_lock structure if
 493 * everything works well.
 494 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
 495 *
 496 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
 497 * function
 498 */
 499static struct full_stripe_lock *insert_full_stripe_lock(
 500		struct btrfs_full_stripe_locks_tree *locks_root,
 501		u64 fstripe_logical)
 502{
 503	struct rb_node **p;
 504	struct rb_node *parent = NULL;
 505	struct full_stripe_lock *entry;
 506	struct full_stripe_lock *ret;
 507
 508	lockdep_assert_held(&locks_root->lock);
 509
 510	p = &locks_root->root.rb_node;
 511	while (*p) {
 512		parent = *p;
 513		entry = rb_entry(parent, struct full_stripe_lock, node);
 514		if (fstripe_logical < entry->logical) {
 515			p = &(*p)->rb_left;
 516		} else if (fstripe_logical > entry->logical) {
 517			p = &(*p)->rb_right;
 518		} else {
 519			entry->refs++;
 520			return entry;
 521		}
 522	}
 523
 524	/*
 525	 * Insert new lock.
 526	 */
 527	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
 528	if (!ret)
 529		return ERR_PTR(-ENOMEM);
 530	ret->logical = fstripe_logical;
 531	ret->refs = 1;
 532	mutex_init(&ret->mutex);
 533
 534	rb_link_node(&ret->node, parent, p);
 535	rb_insert_color(&ret->node, &locks_root->root);
 536	return ret;
 537}
 538
 539/*
 540 * Search for a full stripe lock of a block group
 541 *
 542 * Return pointer to existing full stripe lock if found
 543 * Return NULL if not found
 544 */
 545static struct full_stripe_lock *search_full_stripe_lock(
 546		struct btrfs_full_stripe_locks_tree *locks_root,
 547		u64 fstripe_logical)
 548{
 549	struct rb_node *node;
 550	struct full_stripe_lock *entry;
 551
 552	lockdep_assert_held(&locks_root->lock);
 553
 554	node = locks_root->root.rb_node;
 555	while (node) {
 556		entry = rb_entry(node, struct full_stripe_lock, node);
 557		if (fstripe_logical < entry->logical)
 558			node = node->rb_left;
 559		else if (fstripe_logical > entry->logical)
 560			node = node->rb_right;
 561		else
 562			return entry;
 563	}
 564	return NULL;
 565}
 566
 567/*
 568 * Helper to get full stripe logical from a normal bytenr.
 569 *
 570 * Caller must ensure @cache is a RAID56 block group.
 571 */
 572static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
 573{
 574	u64 ret;
 575
 576	/*
 577	 * Due to chunk item size limit, full stripe length should not be
 578	 * larger than U32_MAX. Just a sanity check here.
 579	 */
 580	WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
 581
 582	/*
 583	 * round_down() can only handle a power of 2, while the RAID56 full
 584	 * stripe length can be 64KiB * n, so we need to round down manually.
 585	 */
 586	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
 587			cache->full_stripe_len + cache->start;
 588	return ret;
 589}
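
    /*
     * Worked example for the rounding above, assuming a 64KiB stripe length:
     * a RAID5 chunk with 3 data stripes has full_stripe_len = 3 * 64KiB =
     * 192KiB, which is not a power of two.  For bytenr == cache->start +
     * 200KiB, div64_u64(200KiB, 192KiB) == 1, so the full stripe containing
     * it starts at cache->start + 192KiB.
     */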
 590
 591/*
 592 * Lock a full stripe to avoid concurrency of recovery and read
 593 *
 594 * It's only used for profiles with parities (RAID5/6); for other profiles it
 595 * does nothing.
 596 *
 597 * Return 0 if we locked the full stripe covering @bytenr, with a mutex held,
 598 * so the caller must call unlock_full_stripe() in the same context.
 599 *
 600 * Return <0 if an error is encountered.
 601 */
 602static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
 603			    bool *locked_ret)
 604{
 605	struct btrfs_block_group *bg_cache;
 606	struct btrfs_full_stripe_locks_tree *locks_root;
 607	struct full_stripe_lock *existing;
 608	u64 fstripe_start;
 609	int ret = 0;
 610
 611	*locked_ret = false;
 612	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
 613	if (!bg_cache) {
 614		ASSERT(0);
 615		return -ENOENT;
 616	}
 617
 618	/* Profiles not based on parity don't need full stripe lock */
 619	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
 620		goto out;
 621	locks_root = &bg_cache->full_stripe_locks_root;
 622
 623	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
 624
 625	/* Now insert the full stripe lock */
 626	mutex_lock(&locks_root->lock);
 627	existing = insert_full_stripe_lock(locks_root, fstripe_start);
 628	mutex_unlock(&locks_root->lock);
 629	if (IS_ERR(existing)) {
 630		ret = PTR_ERR(existing);
 631		goto out;
 632	}
 633	mutex_lock(&existing->mutex);
 634	*locked_ret = true;
 635out:
 636	btrfs_put_block_group(bg_cache);
 637	return ret;
 638}
 639
 640/*
 641 * Unlock a full stripe.
 642 *
 643 * NOTE: The caller must ensure this is called in the same context as the
 644 * corresponding lock_full_stripe().
 645 *
 646 * Return 0 if we unlocked the full stripe without problem.
 647 * Return <0 for error.
 648 */
 649static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
 650			      bool locked)
 651{
 652	struct btrfs_block_group *bg_cache;
 653	struct btrfs_full_stripe_locks_tree *locks_root;
 654	struct full_stripe_lock *fstripe_lock;
 655	u64 fstripe_start;
 656	bool freeit = false;
 657	int ret = 0;
 658
 659	/* If we didn't acquire full stripe lock, no need to continue */
 660	if (!locked)
 661		return 0;
 662
 663	bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
 664	if (!bg_cache) {
 665		ASSERT(0);
 666		return -ENOENT;
 667	}
 668	if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
 669		goto out;
 670
 671	locks_root = &bg_cache->full_stripe_locks_root;
 672	fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
 673
 674	mutex_lock(&locks_root->lock);
 675	fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
 676	/* Unpaired unlock_full_stripe() detected */
 677	if (!fstripe_lock) {
 678		WARN_ON(1);
 679		ret = -ENOENT;
 680		mutex_unlock(&locks_root->lock);
 681		goto out;
 682	}
 683
 684	if (fstripe_lock->refs == 0) {
 685		WARN_ON(1);
 686		btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
 687			fstripe_lock->logical);
 688	} else {
 689		fstripe_lock->refs--;
 690	}
 691
 692	if (fstripe_lock->refs == 0) {
 693		rb_erase(&fstripe_lock->node, &locks_root->root);
 694		freeit = true;
 695	}
 696	mutex_unlock(&locks_root->lock);
 697
 698	mutex_unlock(&fstripe_lock->mutex);
 699	if (freeit)
 700		kfree(fstripe_lock);
 701out:
 702	btrfs_put_block_group(bg_cache);
 703	return ret;
 704}
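
    /*
     * A minimal usage sketch for the two helpers above, following the
     * pattern used by scrub_handle_errored_block() below:
     *
     *	bool locked = false;
     *	int ret;
     *
     *	ret = lock_full_stripe(fs_info, logical, &locked);
     *	if (ret < 0)
     *		return ret;
     *
     *	(repair work on the full stripe covering @logical)
     *
     *	ret = unlock_full_stripe(fs_info, logical, locked);
     *
     * Passing @locked back makes the unlock a no-op for non-RAID56 block
     * groups, where lock_full_stripe() never took the mutex.
     */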
 705
 706static void scrub_free_csums(struct scrub_ctx *sctx)
 707{
 708	while (!list_empty(&sctx->csum_list)) {
 709		struct btrfs_ordered_sum *sum;
 710		sum = list_first_entry(&sctx->csum_list,
 711				       struct btrfs_ordered_sum, list);
 712		list_del(&sum->list);
 713		kfree(sum);
 714	}
 715}
 716
 717static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 718{
 719	int i;
 720
 721	if (!sctx)
 722		return;
 723
 724	/* this can happen when scrub is cancelled */
 725	if (sctx->curr != -1) {
 726		struct scrub_bio *sbio = sctx->bios[sctx->curr];
 727
 728		for (i = 0; i < sbio->sector_count; i++)
 729			scrub_block_put(sbio->sectors[i]->sblock);
 730		bio_put(sbio->bio);
 731	}
 732
 733	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 734		struct scrub_bio *sbio = sctx->bios[i];
 735
 736		if (!sbio)
 737			break;
 738		kfree(sbio);
 739	}
 740
 741	kfree(sctx->wr_curr_bio);
 742	scrub_free_csums(sctx);
 743	kfree(sctx);
 744}
 745
 746static void scrub_put_ctx(struct scrub_ctx *sctx)
 747{
 748	if (refcount_dec_and_test(&sctx->refs))
 749		scrub_free_ctx(sctx);
 750}
 751
 752static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 753		struct btrfs_fs_info *fs_info, int is_dev_replace)
 754{
 755	struct scrub_ctx *sctx;
 756	int		i;
 757
 758	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 759	if (!sctx)
 760		goto nomem;
 761	refcount_set(&sctx->refs, 1);
 762	sctx->is_dev_replace = is_dev_replace;
 763	sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
 764	sctx->curr = -1;
 765	sctx->fs_info = fs_info;
 766	INIT_LIST_HEAD(&sctx->csum_list);
 767	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 768		struct scrub_bio *sbio;
 769
 770		sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
 771		if (!sbio)
 772			goto nomem;
 773		sctx->bios[i] = sbio;
 774
 775		sbio->index = i;
 776		sbio->sctx = sctx;
 777		sbio->sector_count = 0;
 778		INIT_WORK(&sbio->work, scrub_bio_end_io_worker);
 779
 780		if (i != SCRUB_BIOS_PER_SCTX - 1)
 781			sctx->bios[i]->next_free = i + 1;
 782		else
 783			sctx->bios[i]->next_free = -1;
 784	}
 785	sctx->first_free = 0;
 786	atomic_set(&sctx->bios_in_flight, 0);
 787	atomic_set(&sctx->workers_pending, 0);
 788	atomic_set(&sctx->cancel_req, 0);
 789
 790	spin_lock_init(&sctx->list_lock);
 791	spin_lock_init(&sctx->stat_lock);
 792	init_waitqueue_head(&sctx->list_wait);
 793	sctx->throttle_deadline = 0;
 794
 795	WARN_ON(sctx->wr_curr_bio != NULL);
 796	mutex_init(&sctx->wr_lock);
 797	sctx->wr_curr_bio = NULL;
 798	if (is_dev_replace) {
 799		WARN_ON(!fs_info->dev_replace.tgtdev);
 800		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
 801		sctx->flush_all_writes = false;
 802	}
 803
 804	return sctx;
 805
 806nomem:
 807	scrub_free_ctx(sctx);
 808	return ERR_PTR(-ENOMEM);
 809}
 810
 811static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
 812				     u64 root, void *warn_ctx)
 813{
 814	u32 nlink;
 815	int ret;
 816	int i;
 817	unsigned nofs_flag;
 818	struct extent_buffer *eb;
 819	struct btrfs_inode_item *inode_item;
 820	struct scrub_warning *swarn = warn_ctx;
 821	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
 822	struct inode_fs_paths *ipath = NULL;
 823	struct btrfs_root *local_root;
 824	struct btrfs_key key;
 825
 826	local_root = btrfs_get_fs_root(fs_info, root, true);
 827	if (IS_ERR(local_root)) {
 828		ret = PTR_ERR(local_root);
 829		goto err;
 830	}
 831
 832	/*
 833	 * this makes the path point to (inum INODE_ITEM ioff)
 834	 */
 835	key.objectid = inum;
 836	key.type = BTRFS_INODE_ITEM_KEY;
 837	key.offset = 0;
 838
 839	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
 840	if (ret) {
 841		btrfs_put_root(local_root);
 842		btrfs_release_path(swarn->path);
 843		goto err;
 844	}
 845
 846	eb = swarn->path->nodes[0];
 847	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 848					struct btrfs_inode_item);
 849	nlink = btrfs_inode_nlink(eb, inode_item);
 850	btrfs_release_path(swarn->path);
 851
 852	/*
 853	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
 854	 * uses GFP_NOFS in this context, so we keep it consistent but it does
 855	 * not seem to be strictly necessary.
 856	 */
 857	nofs_flag = memalloc_nofs_save();
 858	ipath = init_ipath(4096, local_root, swarn->path);
 859	memalloc_nofs_restore(nofs_flag);
 860	if (IS_ERR(ipath)) {
 861		btrfs_put_root(local_root);
 862		ret = PTR_ERR(ipath);
 863		ipath = NULL;
 864		goto err;
 865	}
 866	ret = paths_from_inode(inum, ipath);
 867
 868	if (ret < 0)
 869		goto err;
 870
 871	/*
 872	 * We deliberately ignore the fact that ipath might have been too small
 873	 * to hold all of the paths here.
 874	 */
 875	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 876		btrfs_warn_in_rcu(fs_info,
 877"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
 878				  swarn->errstr, swarn->logical,
 879				  btrfs_dev_name(swarn->dev),
 880				  swarn->physical,
 881				  root, inum, offset,
 882				  fs_info->sectorsize, nlink,
 883				  (char *)(unsigned long)ipath->fspath->val[i]);
 884
 885	btrfs_put_root(local_root);
 886	free_ipath(ipath);
 887	return 0;
 888
 889err:
 890	btrfs_warn_in_rcu(fs_info,
 891			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
 892			  swarn->errstr, swarn->logical,
 893			  btrfs_dev_name(swarn->dev),
 894			  swarn->physical,
 895			  root, inum, offset, ret);
 896
 897	free_ipath(ipath);
 898	return 0;
 899}
 900
 901static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 902{
 903	struct btrfs_device *dev;
 904	struct btrfs_fs_info *fs_info;
 905	struct btrfs_path *path;
 906	struct btrfs_key found_key;
 907	struct extent_buffer *eb;
 908	struct btrfs_extent_item *ei;
 909	struct scrub_warning swarn;
 910	unsigned long ptr = 0;
 911	u64 flags = 0;
 912	u64 ref_root;
 913	u32 item_size;
 914	u8 ref_level = 0;
 915	int ret;
 916
 917	WARN_ON(sblock->sector_count < 1);
 918	dev = sblock->dev;
 919	fs_info = sblock->sctx->fs_info;
 920
 921	/* Super block error, no need to search extent tree. */
 922	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
 923		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
 924			errstr, btrfs_dev_name(dev), sblock->physical);
 925		return;
 926	}
 927	path = btrfs_alloc_path();
 928	if (!path)
 929		return;
 930
 931	swarn.physical = sblock->physical;
 932	swarn.logical = sblock->logical;
 933	swarn.errstr = errstr;
 934	swarn.dev = NULL;
 935
 936	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
 937				  &flags);
 938	if (ret < 0)
 939		goto out;
 940
 941	swarn.extent_item_size = found_key.offset;
 942
 943	eb = path->nodes[0];
 944	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 945	item_size = btrfs_item_size(eb, path->slots[0]);
 946
 947	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 948		do {
 949			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 950						      item_size, &ref_root,
 951						      &ref_level);
 952			btrfs_warn_in_rcu(fs_info,
 953"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
 954				errstr, swarn.logical,
 955				btrfs_dev_name(dev),
 956				swarn.physical,
 957				ref_level ? "node" : "leaf",
 958				ret < 0 ? -1 : ref_level,
 959				ret < 0 ? -1 : ref_root);
 960		} while (ret != 1);
 961		btrfs_release_path(path);
 962	} else {
 963		struct btrfs_backref_walk_ctx ctx = { 0 };
 964
 965		btrfs_release_path(path);
 966
 967		ctx.bytenr = found_key.objectid;
 968		ctx.extent_item_pos = swarn.logical - found_key.objectid;
 969		ctx.fs_info = fs_info;
 970
 971		swarn.path = path;
 972		swarn.dev = dev;
 973
 974		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
 975	}
 976
 977out:
 978	btrfs_free_path(path);
 979}
 980
 981static inline void scrub_get_recover(struct scrub_recover *recover)
 982{
 983	refcount_inc(&recover->refs);
 984}
 985
 986static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
 987				     struct scrub_recover *recover)
 988{
 989	if (refcount_dec_and_test(&recover->refs)) {
 990		btrfs_bio_counter_dec(fs_info);
 991		btrfs_put_bioc(recover->bioc);
 992		kfree(recover);
 993	}
 994}
 995
 996/*
 997 * scrub_handle_errored_block gets called when either verification of the
 998 * sectors failed or the bio failed to read, e.g. with EIO. In the latter
 999 * case, this function handles all sectors in the bio, even though only one
1000 * may be bad.
1001 * The goal of this function is to repair the errored block by using the
1002 * contents of one of the mirrors.
1003 */
1004static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
1005{
1006	struct scrub_ctx *sctx = sblock_to_check->sctx;
1007	struct btrfs_device *dev = sblock_to_check->dev;
1008	struct btrfs_fs_info *fs_info;
1009	u64 logical;
1010	unsigned int failed_mirror_index;
1011	unsigned int is_metadata;
1012	unsigned int have_csum;
1013	/* One scrub_block for each mirror */
1014	struct scrub_block *sblocks_for_recheck[BTRFS_MAX_MIRRORS] = { 0 };
1015	struct scrub_block *sblock_bad;
1016	int ret;
1017	int mirror_index;
1018	int sector_num;
1019	int success;
1020	bool full_stripe_locked;
1021	unsigned int nofs_flag;
1022	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
1023				      DEFAULT_RATELIMIT_BURST);
1024
1025	BUG_ON(sblock_to_check->sector_count < 1);
1026	fs_info = sctx->fs_info;
1027	if (sblock_to_check->sectors[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
1028		/*
1029		 * If we find an error in a super block, we just report it.
1030		 * The super blocks will get rewritten with the next transaction
1031		 * commit anyway.
1032		 */
1033		scrub_print_warning("super block error", sblock_to_check);
1034		spin_lock(&sctx->stat_lock);
1035		++sctx->stat.super_errors;
1036		spin_unlock(&sctx->stat_lock);
1037		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
1038		return 0;
1039	}
1040	logical = sblock_to_check->logical;
1041	ASSERT(sblock_to_check->mirror_num);
1042	failed_mirror_index = sblock_to_check->mirror_num - 1;
1043	is_metadata = !(sblock_to_check->sectors[0]->flags &
1044			BTRFS_EXTENT_FLAG_DATA);
1045	have_csum = sblock_to_check->sectors[0]->have_csum;
1046
1047	if (!sctx->is_dev_replace && btrfs_repair_one_zone(fs_info, logical))
1048		return 0;
1049
1050	/*
1051	 * We must use GFP_NOFS because the scrub task might be waiting for a
1052	 * worker task executing this function and in turn a transaction commit
1053	 * might be waiting the scrub task to pause (which needs to wait for all
1054	 * the worker tasks to complete before pausing).
1055	 * We do allocations in the workers through insert_full_stripe_lock()
1056	 * and scrub_add_sector_to_wr_bio(), which happens down the call chain of
1057	 * this function.
1058	 */
1059	nofs_flag = memalloc_nofs_save();
1060	/*
1061	 * For RAID5/6, races can happen between scrub threads of different devices.
1062	 * On data corruption, the parity and data threads will both try
1063	 * to recover the data.
1064	 * Such a race can lead to doubly counted csum errors, or even an
1065	 * unrecoverable error.
1066	 */
1067	ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
1068	if (ret < 0) {
1069		memalloc_nofs_restore(nofs_flag);
1070		spin_lock(&sctx->stat_lock);
1071		if (ret == -ENOMEM)
1072			sctx->stat.malloc_errors++;
1073		sctx->stat.read_errors++;
1074		sctx->stat.uncorrectable_errors++;
1075		spin_unlock(&sctx->stat_lock);
1076		return ret;
1077	}
1078
1079	/*
1080	 * Read all mirrors one after the other. This includes re-reading
1081	 * the extent or metadata block that failed (which is what caused
1082	 * this fixup code to be called), this time sector by sector,
1083	 * in order to know which sectors
1084	 * caused I/O errors and which ones are good (for all mirrors).
1085	 * It is the goal to handle the situation when more than one
1086	 * mirror contains I/O errors, but the errors do not
1087	 * overlap, i.e. the data can be repaired by selecting the
1088	 * sectors from those mirrors without I/O error on the
1089	 * particular sectors. One example (with blocks >= 2 * sectorsize)
1090	 * would be that mirror #1 has an I/O error on the first sector,
1091	 * the second sector is good, and mirror #2 has an I/O error on
1092	 * the second sector, but the first sector is good.
1093	 * Then the first sector of the first mirror can be repaired by
1094	 * taking the first sector of the second mirror, and the
1095	 * second sector of the second mirror can be repaired by
1096	 * copying the contents of the 2nd sector of the 1st mirror.
1097	 * One more note: if the sectors of one mirror contain I/O
1098	 * errors, the checksum cannot be verified. In order to get
1099	 * the best data for repairing, the first attempt is to find
1100	 * a mirror without I/O errors and with a validated checksum.
1101	 * Only if this is not possible, the sectors are picked from
1102	 * mirrors with I/O errors without considering the checksum.
1103	 * If the latter is the case, at the end, the checksum of the
1104	 * repaired area is verified in order to correctly maintain
1105	 * the statistics.
1106	 */
1107	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
1108		/*
1109		 * Note: the two members refs and outstanding_sectors are not
1110		 * used in the blocks that are used for the recheck procedure.
1111		 *
1112		 * But alloc_scrub_block() will initialize sblock::ref anyway,
1113		 * so we can use scrub_block_put() to clean them up.
1114		 *
1115		 * And here we don't setup the physical/dev for the sblock yet,
1116		 * they will be correctly initialized in scrub_setup_recheck_block().
1117		 */
1118		sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx, NULL,
1119							logical, 0, 0, mirror_index);
1120		if (!sblocks_for_recheck[mirror_index]) {
1121			spin_lock(&sctx->stat_lock);
1122			sctx->stat.malloc_errors++;
1123			sctx->stat.read_errors++;
1124			sctx->stat.uncorrectable_errors++;
1125			spin_unlock(&sctx->stat_lock);
1126			btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1127			goto out;
1128		}
1129	}
1130
1131	/* Setup the context, map the logical blocks and alloc the sectors */
1132	ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
1133	if (ret) {
1134		spin_lock(&sctx->stat_lock);
1135		sctx->stat.read_errors++;
1136		sctx->stat.uncorrectable_errors++;
1137		spin_unlock(&sctx->stat_lock);
1138		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1139		goto out;
1140	}
1141	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
1142	sblock_bad = sblocks_for_recheck[failed_mirror_index];
1143
1144	/* build and submit the bios for the failed mirror, check checksums */
1145	scrub_recheck_block(fs_info, sblock_bad, 1);
1146
1147	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
1148	    sblock_bad->no_io_error_seen) {
1149		/*
1150		 * The error disappeared after reading sector by sector, or
1151		 * the area was part of a huge bio and other parts of the
1152		 * bio caused I/O errors, or the block layer merged several
1153		 * read requests into one and the error is caused by a
1154		 * different bio (usually one of the two latter cases is
1155		 * the cause)
1156		 */
1157		spin_lock(&sctx->stat_lock);
1158		sctx->stat.unverified_errors++;
1159		sblock_to_check->data_corrected = 1;
1160		spin_unlock(&sctx->stat_lock);
1161
1162		if (sctx->is_dev_replace)
1163			scrub_write_block_to_dev_replace(sblock_bad);
1164		goto out;
1165	}
1166
1167	if (!sblock_bad->no_io_error_seen) {
1168		spin_lock(&sctx->stat_lock);
1169		sctx->stat.read_errors++;
1170		spin_unlock(&sctx->stat_lock);
1171		if (__ratelimit(&rs))
1172			scrub_print_warning("i/o error", sblock_to_check);
1173		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1174	} else if (sblock_bad->checksum_error) {
1175		spin_lock(&sctx->stat_lock);
1176		sctx->stat.csum_errors++;
1177		spin_unlock(&sctx->stat_lock);
1178		if (__ratelimit(&rs))
1179			scrub_print_warning("checksum error", sblock_to_check);
1180		btrfs_dev_stat_inc_and_print(dev,
1181					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
1182	} else if (sblock_bad->header_error) {
1183		spin_lock(&sctx->stat_lock);
1184		sctx->stat.verify_errors++;
1185		spin_unlock(&sctx->stat_lock);
1186		if (__ratelimit(&rs))
1187			scrub_print_warning("checksum/header error",
1188					    sblock_to_check);
1189		if (sblock_bad->generation_error)
1190			btrfs_dev_stat_inc_and_print(dev,
1191				BTRFS_DEV_STAT_GENERATION_ERRS);
1192		else
1193			btrfs_dev_stat_inc_and_print(dev,
1194				BTRFS_DEV_STAT_CORRUPTION_ERRS);
1195	}
1196
1197	if (sctx->readonly) {
1198		ASSERT(!sctx->is_dev_replace);
1199		goto out;
1200	}
1201
1202	/*
1203	 * now build and submit the bios for the other mirrors, check
1204	 * checksums.
1205	 * First try to pick the mirror which is completely without I/O
1206	 * errors and also does not have a checksum error.
1207	 * If one is found, and if a checksum is present, the full block
1208	 * that is known to contain an error is rewritten. Afterwards
1209	 * the block is known to be corrected.
1210	 * If a mirror is found which is completely correct, and no
1211	 * checksum is present, only those sectors are rewritten that had
1212	 * an I/O error in the block to be repaired, since it cannot be
1213	 * determined, which copy of the other sectors is better (and it
1214	 * could happen otherwise that a correct sector would be
1215	 * overwritten by a bad one).
1216	 */
1217	for (mirror_index = 0; ;mirror_index++) {
1218		struct scrub_block *sblock_other;
1219
1220		if (mirror_index == failed_mirror_index)
1221			continue;
1222
1223		/* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1224		if (!scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
1225			if (mirror_index >= BTRFS_MAX_MIRRORS)
1226				break;
1227			if (!sblocks_for_recheck[mirror_index]->sector_count)
1228				break;
1229
1230			sblock_other = sblocks_for_recheck[mirror_index];
1231		} else {
1232			struct scrub_recover *r = sblock_bad->sectors[0]->recover;
1233			int max_allowed = r->bioc->num_stripes - r->bioc->num_tgtdevs;
1234
1235			if (mirror_index >= max_allowed)
1236				break;
1237			if (!sblocks_for_recheck[1]->sector_count)
1238				break;
1239
1240			ASSERT(failed_mirror_index == 0);
1241			sblock_other = sblocks_for_recheck[1];
1242			sblock_other->mirror_num = 1 + mirror_index;
1243		}
1244
1245		/* build and submit the bios, check checksums */
1246		scrub_recheck_block(fs_info, sblock_other, 0);
1247
1248		if (!sblock_other->header_error &&
1249		    !sblock_other->checksum_error &&
1250		    sblock_other->no_io_error_seen) {
1251			if (sctx->is_dev_replace) {
1252				scrub_write_block_to_dev_replace(sblock_other);
1253				goto corrected_error;
1254			} else {
1255				ret = scrub_repair_block_from_good_copy(
1256						sblock_bad, sblock_other);
1257				if (!ret)
1258					goto corrected_error;
1259			}
1260		}
1261	}
1262
1263	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1264		goto did_not_correct_error;
1265
1266	/*
1267	 * In case of I/O errors in the area that is supposed to be
1268	 * repaired, continue by picking good copies of those sectors.
1269	 * Select the good sectors from mirrors to rewrite bad sectors from
1270	 * the area to fix. Afterwards verify the checksum of the block
1271	 * that is supposed to be repaired. This verification step is
1272	 * only done for the purpose of statistics counting and for the
1273	 * final scrub report on whether errors remain.
1274	 * A perfect algorithm could make use of the checksum and try
1275	 * all possible combinations of sectors from the different mirrors
1276	 * until the checksum verification succeeds. For example, when
1277	 * the 2nd sector of mirror #1 faces I/O errors, and the 2nd sector
1278	 * of mirror #2 is readable but the final checksum test fails,
1279	 * then the 2nd sector of mirror #3 could be tried, to see whether
1280	 * the final checksum then succeeds. But this would be a rare
1281	 * exception and is therefore not implemented. At least it is
1282	 * avoided that the good copy is overwritten.
1283	 * A more useful improvement would be to pick the sectors
1284	 * without I/O error based on sector sizes (512 bytes on legacy
1285	 * disks) instead of on sectorsize. Then maybe 512 bytes of one
1286	 * mirror could be repaired by taking 512 bytes of a different
1287	 * mirror, even if other 512-byte sectors in the same sectorsize
1288	 * area are unreadable.
1289	 */
1290	success = 1;
1291	for (sector_num = 0; sector_num < sblock_bad->sector_count;
1292	     sector_num++) {
1293		struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
1294		struct scrub_block *sblock_other = NULL;
1295
1296		/* Skip no-io-error sectors in scrub */
1297		if (!sector_bad->io_error && !sctx->is_dev_replace)
1298			continue;
1299
1300		if (scrub_is_page_on_raid56(sblock_bad->sectors[0])) {
1301			/*
1302			 * In case of dev replace, if raid56 rebuild process
1303			 * didn't work out correct data, then copy the content
1304			 * in sblock_bad to make sure target device is identical
1305			 * to source device, instead of writing garbage data in
1306			 * sblock_for_recheck array to target device.
1307			 */
1308			sblock_other = NULL;
1309		} else if (sector_bad->io_error) {
1310			/* Try to find no-io-error sector in mirrors */
1311			for (mirror_index = 0;
1312			     mirror_index < BTRFS_MAX_MIRRORS &&
1313			     sblocks_for_recheck[mirror_index]->sector_count > 0;
1314			     mirror_index++) {
1315				if (!sblocks_for_recheck[mirror_index]->
1316				    sectors[sector_num]->io_error) {
1317					sblock_other = sblocks_for_recheck[mirror_index];
1318					break;
1319				}
1320			}
1321			if (!sblock_other)
1322				success = 0;
1323		}
1324
1325		if (sctx->is_dev_replace) {
1326			/*
1327			 * Did not find a mirror to fetch the sector from.
1328			 * scrub_write_sector_to_dev_replace() handles this
1329			 * case (sector->io_error), by filling the block with
1330			 * zeros before submitting the write request
1331			 */
1332			if (!sblock_other)
1333				sblock_other = sblock_bad;
1334
1335			if (scrub_write_sector_to_dev_replace(sblock_other,
1336							      sector_num) != 0) {
1337				atomic64_inc(
1338					&fs_info->dev_replace.num_write_errors);
1339				success = 0;
1340			}
1341		} else if (sblock_other) {
1342			ret = scrub_repair_sector_from_good_copy(sblock_bad,
1343								 sblock_other,
1344								 sector_num, 0);
1345			if (0 == ret)
1346				sector_bad->io_error = 0;
1347			else
1348				success = 0;
1349		}
1350	}
1351
1352	if (success && !sctx->is_dev_replace) {
1353		if (is_metadata || have_csum) {
1354			/*
1355			 * need to verify the checksum now that all
1356			 * sectors on disk are repaired (the write
1357			 * request for data to be repaired is on its way).
1358			 * Just be lazy and use scrub_recheck_block()
1359			 * which re-reads the data before the checksum
1360			 * is verified, but most likely the data comes out
1361			 * of the page cache.
1362			 */
1363			scrub_recheck_block(fs_info, sblock_bad, 1);
1364			if (!sblock_bad->header_error &&
1365			    !sblock_bad->checksum_error &&
1366			    sblock_bad->no_io_error_seen)
1367				goto corrected_error;
1368			else
1369				goto did_not_correct_error;
1370		} else {
1371corrected_error:
1372			spin_lock(&sctx->stat_lock);
1373			sctx->stat.corrected_errors++;
1374			sblock_to_check->data_corrected = 1;
1375			spin_unlock(&sctx->stat_lock);
1376			btrfs_err_rl_in_rcu(fs_info,
1377				"fixed up error at logical %llu on dev %s",
1378				logical, btrfs_dev_name(dev));
1379		}
1380	} else {
1381did_not_correct_error:
1382		spin_lock(&sctx->stat_lock);
1383		sctx->stat.uncorrectable_errors++;
1384		spin_unlock(&sctx->stat_lock);
1385		btrfs_err_rl_in_rcu(fs_info,
1386			"unable to fixup (regular) error at logical %llu on dev %s",
1387			logical, btrfs_dev_name(dev));
1388	}
1389
1390out:
1391	for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
1392		struct scrub_block *sblock = sblocks_for_recheck[mirror_index];
1393		struct scrub_recover *recover;
1394		int sector_index;
1395
1396		/* Not allocated, continue checking the next mirror */
1397		if (!sblock)
1398			continue;
1399
1400		for (sector_index = 0; sector_index < sblock->sector_count;
1401		     sector_index++) {
1402			/*
1403			 * Here we just cleanup the recover, each sector will be
1404			 * properly cleaned up by later scrub_block_put()
1405			 */
1406			recover = sblock->sectors[sector_index]->recover;
1407			if (recover) {
1408				scrub_put_recover(fs_info, recover);
1409				sblock->sectors[sector_index]->recover = NULL;
1410			}
1411		}
1412		scrub_block_put(sblock);
1413	}
1414
1415	ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1416	memalloc_nofs_restore(nofs_flag);
1417	if (ret < 0)
1418		return ret;
1419	return 0;
1420}
1421
1422static inline int scrub_nr_raid_mirrors(struct btrfs_io_context *bioc)
1423{
1424	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID5)
1425		return 2;
1426	else if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID6)
1427		return 3;
1428	else
1429		return (int)bioc->num_stripes;
1430}
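
    /*
     * The counts above reflect how many ways a RAID56 sector can be read:
     * for RAID5 the data can be read directly or rebuilt from parity (2),
     * for RAID6 the rebuild can additionally use the Q stripe (3).  For the
     * other profiles every stripe in the io context is a real copy, hence
     * num_stripes.
     */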
1431
1432static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1433						 u64 *raid_map,
1434						 int nstripes, int mirror,
1435						 int *stripe_index,
1436						 u64 *stripe_offset)
1437{
1438	int i;
1439
1440	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1441		/* RAID5/6 */
1442		for (i = 0; i < nstripes; i++) {
1443			if (raid_map[i] == RAID6_Q_STRIPE ||
1444			    raid_map[i] == RAID5_P_STRIPE)
1445				continue;
1446
1447			if (logical >= raid_map[i] &&
1448			    logical < raid_map[i] + BTRFS_STRIPE_LEN)
1449				break;
1450		}
1451
1452		*stripe_index = i;
1453		*stripe_offset = logical - raid_map[i];
1454	} else {
1455		/* The other RAID type */
1456		*stripe_index = mirror;
1457		*stripe_offset = 0;
1458	}
1459}
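
    /*
     * Worked example for the RAID56 branch above, assuming a 64KiB
     * BTRFS_STRIPE_LEN: with raid_map[] = { L, L + 64KiB, RAID5_P_STRIPE }
     * and logical == L + 70000, the P stripe is skipped and the match is
     * raid_map[1] (L + 65536 <= logical < L + 131072), so *stripe_index
     * becomes 1 and *stripe_offset becomes 70000 - 65536 = 4464.
     */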
1460
1461static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1462				     struct scrub_block *sblocks_for_recheck[])
1463{
1464	struct scrub_ctx *sctx = original_sblock->sctx;
1465	struct btrfs_fs_info *fs_info = sctx->fs_info;
1466	u64 logical = original_sblock->logical;
1467	u64 length = original_sblock->sector_count << fs_info->sectorsize_bits;
1468	u64 generation = original_sblock->sectors[0]->generation;
1469	u64 flags = original_sblock->sectors[0]->flags;
1470	u64 have_csum = original_sblock->sectors[0]->have_csum;
1471	struct scrub_recover *recover;
1472	struct btrfs_io_context *bioc;
1473	u64 sublen;
1474	u64 mapped_length;
1475	u64 stripe_offset;
1476	int stripe_index;
1477	int sector_index = 0;
1478	int mirror_index;
1479	int nmirrors;
1480	int ret;
1481
1482	while (length > 0) {
1483		sublen = min_t(u64, length, fs_info->sectorsize);
1484		mapped_length = sublen;
1485		bioc = NULL;
1486
1487		/*
1488		 * With a length of sectorsize, each returned stripe represents
1489		 * one mirror
1490		 */
1491		btrfs_bio_counter_inc_blocked(fs_info);
1492		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1493				       logical, &mapped_length, &bioc);
1494		if (ret || !bioc || mapped_length < sublen) {
1495			btrfs_put_bioc(bioc);
1496			btrfs_bio_counter_dec(fs_info);
1497			return -EIO;
1498		}
1499
1500		recover = kzalloc(sizeof(struct scrub_recover), GFP_KERNEL);
1501		if (!recover) {
1502			btrfs_put_bioc(bioc);
1503			btrfs_bio_counter_dec(fs_info);
1504			return -ENOMEM;
1505		}
1506
1507		refcount_set(&recover->refs, 1);
1508		recover->bioc = bioc;
1509		recover->map_length = mapped_length;
1510
1511		ASSERT(sector_index < SCRUB_MAX_SECTORS_PER_BLOCK);
1512
1513		nmirrors = min(scrub_nr_raid_mirrors(bioc), BTRFS_MAX_MIRRORS);
1514
1515		for (mirror_index = 0; mirror_index < nmirrors;
1516		     mirror_index++) {
1517			struct scrub_block *sblock;
1518			struct scrub_sector *sector;
1519
1520			sblock = sblocks_for_recheck[mirror_index];
1521			sblock->sctx = sctx;
1522
1523			sector = alloc_scrub_sector(sblock, logical);
1524			if (!sector) {
1525				spin_lock(&sctx->stat_lock);
1526				sctx->stat.malloc_errors++;
1527				spin_unlock(&sctx->stat_lock);
1528				scrub_put_recover(fs_info, recover);
1529				return -ENOMEM;
1530			}
1531			sector->flags = flags;
1532			sector->generation = generation;
1533			sector->have_csum = have_csum;
1534			if (have_csum)
1535				memcpy(sector->csum,
1536				       original_sblock->sectors[0]->csum,
1537				       sctx->fs_info->csum_size);
1538
1539			scrub_stripe_index_and_offset(logical,
1540						      bioc->map_type,
1541						      bioc->raid_map,
1542						      bioc->num_stripes -
1543						      bioc->num_tgtdevs,
1544						      mirror_index,
1545						      &stripe_index,
1546						      &stripe_offset);
1547			/*
1548			 * We're at the first sector, also populate @sblock
1549			 * physical and dev.
1550			 */
1551			if (sector_index == 0) {
1552				sblock->physical =
1553					bioc->stripes[stripe_index].physical +
1554					stripe_offset;
1555				sblock->dev = bioc->stripes[stripe_index].dev;
1556				sblock->physical_for_dev_replace =
1557					original_sblock->physical_for_dev_replace;
1558			}
1559
1560			BUG_ON(sector_index >= original_sblock->sector_count);
1561			scrub_get_recover(recover);
1562			sector->recover = recover;
1563		}
1564		scrub_put_recover(fs_info, recover);
1565		length -= sublen;
1566		logical += sublen;
1567		sector_index++;
1568	}
1569
1570	return 0;
1571}
1572
1573static void scrub_bio_wait_endio(struct bio *bio)
1574{
1575	complete(bio->bi_private);
1576}
1577
1578static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1579					struct bio *bio,
1580					struct scrub_sector *sector)
1581{
1582	DECLARE_COMPLETION_ONSTACK(done);
1583
1584	bio->bi_iter.bi_sector = (sector->offset + sector->sblock->logical) >>
1585				 SECTOR_SHIFT;
1586	bio->bi_private = &done;
1587	bio->bi_end_io = scrub_bio_wait_endio;
1588	raid56_parity_recover(bio, sector->recover->bioc, sector->sblock->mirror_num);
1589
1590	wait_for_completion_io(&done);
1591	return blk_status_to_errno(bio->bi_status);
1592}
1593
1594static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1595					  struct scrub_block *sblock)
1596{
1597	struct scrub_sector *first_sector = sblock->sectors[0];
1598	struct bio *bio;
1599	int i;
1600
1601	/* All sectors in sblock belong to the same stripe on the same device. */
1602	ASSERT(sblock->dev);
1603	if (!sblock->dev->bdev)
1604		goto out;
1605
1606	bio = bio_alloc(sblock->dev->bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
1607
1608	for (i = 0; i < sblock->sector_count; i++) {
1609		struct scrub_sector *sector = sblock->sectors[i];
1610
1611		bio_add_scrub_sector(bio, sector, fs_info->sectorsize);
1612	}
1613
1614	if (scrub_submit_raid56_bio_wait(fs_info, bio, first_sector)) {
1615		bio_put(bio);
1616		goto out;
1617	}
1618
1619	bio_put(bio);
1620
1621	scrub_recheck_block_checksum(sblock);
1622
1623	return;
1624out:
1625	for (i = 0; i < sblock->sector_count; i++)
1626		sblock->sectors[i]->io_error = 1;
1627
1628	sblock->no_io_error_seen = 0;
1629}
1630
1631/*
1632 * This function will check the on disk data for checksum errors, header errors
1633 * and read I/O errors. If any I/O errors happen, the exact sectors which are
1634 * errored are marked as being bad. The goal is to enable scrub to take those
1635 * sectors that are not errored from all the mirrors so that the sectors that
1636 * are errored in the just handled mirror can be repaired.
1637 */
1638static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1639				struct scrub_block *sblock,
1640				int retry_failed_mirror)
1641{
1642	int i;
1643
1644	sblock->no_io_error_seen = 1;
1645
1646	/* short cut for raid56 */
1647	if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->sectors[0]))
1648		return scrub_recheck_block_on_raid56(fs_info, sblock);
1649
1650	for (i = 0; i < sblock->sector_count; i++) {
1651		struct scrub_sector *sector = sblock->sectors[i];
1652		struct bio bio;
1653		struct bio_vec bvec;
1654
1655		if (sblock->dev->bdev == NULL) {
1656			sector->io_error = 1;
1657			sblock->no_io_error_seen = 0;
1658			continue;
1659		}
1660
1661		bio_init(&bio, sblock->dev->bdev, &bvec, 1, REQ_OP_READ);
1662		bio_add_scrub_sector(&bio, sector, fs_info->sectorsize);
1663		bio.bi_iter.bi_sector = (sblock->physical + sector->offset) >>
1664					SECTOR_SHIFT;
1665
1666		btrfsic_check_bio(&bio);
1667		if (submit_bio_wait(&bio)) {
1668			sector->io_error = 1;
1669			sblock->no_io_error_seen = 0;
1670		}
1671
1672		bio_uninit(&bio);
1673	}
1674
1675	if (sblock->no_io_error_seen)
1676		scrub_recheck_block_checksum(sblock);
1677}
1678
1679static inline int scrub_check_fsid(u8 fsid[], struct scrub_sector *sector)
1680{
1681	struct btrfs_fs_devices *fs_devices = sector->sblock->dev->fs_devices;
1682	int ret;
1683
1684	ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1685	return !ret;
1686}
1687
1688static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1689{
1690	sblock->header_error = 0;
1691	sblock->checksum_error = 0;
1692	sblock->generation_error = 0;
1693
1694	if (sblock->sectors[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1695		scrub_checksum_data(sblock);
1696	else
1697		scrub_checksum_tree_block(sblock);
1698}
1699
1700static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1701					     struct scrub_block *sblock_good)
1702{
1703	int i;
1704	int ret = 0;
1705
1706	for (i = 0; i < sblock_bad->sector_count; i++) {
1707		int ret_sub;
1708
1709		ret_sub = scrub_repair_sector_from_good_copy(sblock_bad,
1710							     sblock_good, i, 1);
1711		if (ret_sub)
1712			ret = ret_sub;
1713	}
1714
1715	return ret;
1716}
1717
1718static int scrub_repair_sector_from_good_copy(struct scrub_block *sblock_bad,
1719					      struct scrub_block *sblock_good,
1720					      int sector_num, int force_write)
1721{
1722	struct scrub_sector *sector_bad = sblock_bad->sectors[sector_num];
1723	struct scrub_sector *sector_good = sblock_good->sectors[sector_num];
1724	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1725	const u32 sectorsize = fs_info->sectorsize;
1726
1727	if (force_write || sblock_bad->header_error ||
1728	    sblock_bad->checksum_error || sector_bad->io_error) {
1729		struct bio bio;
1730		struct bio_vec bvec;
1731		int ret;
1732
1733		if (!sblock_bad->dev->bdev) {
1734			btrfs_warn_rl(fs_info,
1735				"scrub_repair_sector_from_good_copy(bdev == NULL) is unexpected");
1736			return -EIO;
1737		}
1738
1739		bio_init(&bio, sblock_bad->dev->bdev, &bvec, 1, REQ_OP_WRITE);
1740		bio.bi_iter.bi_sector = (sblock_bad->physical +
1741					 sector_bad->offset) >> SECTOR_SHIFT;
1742		ret = bio_add_scrub_sector(&bio, sector_good, sectorsize);
1743
1744		btrfsic_check_bio(&bio);
1745		ret = submit_bio_wait(&bio);
1746		bio_uninit(&bio);
1747
1748		if (ret) {
1749			btrfs_dev_stat_inc_and_print(sblock_bad->dev,
1750				BTRFS_DEV_STAT_WRITE_ERRS);
1751			atomic64_inc(&fs_info->dev_replace.num_write_errors);
1752			return -EIO;
1753		}
1754	}
1755
1756	return 0;
1757}
1758
1759static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1760{
1761	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1762	int i;
1763
1764	/*
1765	 * This block is used for the check of the parity on the source device,
1766	 * so the data needn't be written into the destination device.
1767	 */
1768	if (sblock->sparity)
1769		return;
1770
1771	for (i = 0; i < sblock->sector_count; i++) {
1772		int ret;
1773
1774		ret = scrub_write_sector_to_dev_replace(sblock, i);
1775		if (ret)
1776			atomic64_inc(&fs_info->dev_replace.num_write_errors);
1777	}
1778}
1779
1780static int scrub_write_sector_to_dev_replace(struct scrub_block *sblock, int sector_num)
1781{
1782	const u32 sectorsize = sblock->sctx->fs_info->sectorsize;
1783	struct scrub_sector *sector = sblock->sectors[sector_num];
1784
1785	if (sector->io_error)
1786		memset(scrub_sector_get_kaddr(sector), 0, sectorsize);
1787
1788	return scrub_add_sector_to_wr_bio(sblock->sctx, sector);
1789}
1790
1791static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
1792{
1793	int ret = 0;
1794	u64 length;
1795
1796	if (!btrfs_is_zoned(sctx->fs_info))
1797		return 0;
1798
1799	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
1800		return 0;
1801
1802	if (sctx->write_pointer < physical) {
1803		length = physical - sctx->write_pointer;
1804
1805		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
1806						sctx->write_pointer, length);
1807		if (!ret)
1808			sctx->write_pointer = physical;
1809	}
1810	return ret;
1811}
1812
1813static void scrub_block_get(struct scrub_block *sblock)
1814{
1815	refcount_inc(&sblock->refs);
1816}
1817
1818static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
1819				      struct scrub_sector *sector)
1820{
1821	struct scrub_block *sblock = sector->sblock;
1822	struct scrub_bio *sbio;
1823	int ret;
1824	const u32 sectorsize = sctx->fs_info->sectorsize;
1825
1826	mutex_lock(&sctx->wr_lock);
1827again:
1828	if (!sctx->wr_curr_bio) {
1829		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1830					      GFP_KERNEL);
1831		if (!sctx->wr_curr_bio) {
1832			mutex_unlock(&sctx->wr_lock);
1833			return -ENOMEM;
1834		}
1835		sctx->wr_curr_bio->sctx = sctx;
1836		sctx->wr_curr_bio->sector_count = 0;
1837	}
1838	sbio = sctx->wr_curr_bio;
1839	if (sbio->sector_count == 0) {
1840		ret = fill_writer_pointer_gap(sctx, sector->offset +
1841					      sblock->physical_for_dev_replace);
1842		if (ret) {
1843			mutex_unlock(&sctx->wr_lock);
1844			return ret;
1845		}
1846
1847		sbio->physical = sblock->physical_for_dev_replace + sector->offset;
1848		sbio->logical = sblock->logical + sector->offset;
1849		sbio->dev = sctx->wr_tgtdev;
1850		if (!sbio->bio) {
1851			sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
1852					      REQ_OP_WRITE, GFP_NOFS);
1853		}
1854		sbio->bio->bi_private = sbio;
1855		sbio->bio->bi_end_io = scrub_wr_bio_end_io;
1856		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
1857		sbio->status = 0;
1858	} else if (sbio->physical + sbio->sector_count * sectorsize !=
1859		   sblock->physical_for_dev_replace + sector->offset ||
1860		   sbio->logical + sbio->sector_count * sectorsize !=
1861		   sblock->logical + sector->offset) {
1862		scrub_wr_submit(sctx);
1863		goto again;
1864	}
1865
1866	ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
1867	if (ret != sectorsize) {
1868		if (sbio->sector_count < 1) {
1869			bio_put(sbio->bio);
1870			sbio->bio = NULL;
1871			mutex_unlock(&sctx->wr_lock);
1872			return -EIO;
1873		}
1874		scrub_wr_submit(sctx);
1875		goto again;
1876	}
1877
1878	sbio->sectors[sbio->sector_count] = sector;
1879	scrub_sector_get(sector);
1880	/*
1881	 * Since ssector no longer holds a page, but uses sblock::pages, we
1882	 * have to ensure the sblock is not freed before our write bio
1883	 * finishes.
1884	 */
1885	scrub_block_get(sector->sblock);
1886
1887	sbio->sector_count++;
1888	if (sbio->sector_count == sctx->sectors_per_bio)
1889		scrub_wr_submit(sctx);
1890	mutex_unlock(&sctx->wr_lock);
1891
1892	return 0;
1893}
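
    /*
     * Note on the "again:" loop above: a sector is only appended to the
     * current write bio while it stays physically and logically contiguous.
     * For example, if sbio already holds 3 sectors starting at physical P,
     * the next sector must start at P + 3 * sectorsize (with the matching
     * logical); otherwise the bio is submitted via scrub_wr_submit() and a
     * fresh one is started at the new position.
     */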
1894
1895static void scrub_wr_submit(struct scrub_ctx *sctx)
1896{
1897	struct scrub_bio *sbio;
1898
1899	if (!sctx->wr_curr_bio)
1900		return;
1901
1902	sbio = sctx->wr_curr_bio;
1903	sctx->wr_curr_bio = NULL;
1904	scrub_pending_bio_inc(sctx);
1905	/* process all writes in a single worker thread. Then the block layer
1906	 * orders the requests before sending them to the driver which
1907	 * doubled the write performance on spinning disks when measured
1908	 * with Linux 3.5 */
1909	btrfsic_check_bio(sbio->bio);
1910	submit_bio(sbio->bio);
1911
1912	if (btrfs_is_zoned(sctx->fs_info))
1913		sctx->write_pointer = sbio->physical + sbio->sector_count *
1914			sctx->fs_info->sectorsize;
1915}
1916
1917static void scrub_wr_bio_end_io(struct bio *bio)
1918{
1919	struct scrub_bio *sbio = bio->bi_private;
1920	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1921
1922	sbio->status = bio->bi_status;
1923	sbio->bio = bio;
1924
1925	INIT_WORK(&sbio->work, scrub_wr_bio_end_io_worker);
1926	queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1927}
1928
1929static void scrub_wr_bio_end_io_worker(struct work_struct *work)
1930{
1931	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1932	struct scrub_ctx *sctx = sbio->sctx;
1933	int i;
1934
1935	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
1936	if (sbio->status) {
1937		struct btrfs_dev_replace *dev_replace =
1938			&sbio->sctx->fs_info->dev_replace;
1939
1940		for (i = 0; i < sbio->sector_count; i++) {
1941			struct scrub_sector *sector = sbio->sectors[i];
1942
1943			sector->io_error = 1;
1944			atomic64_inc(&dev_replace->num_write_errors);
1945		}
1946	}
1947
1948	/*
1949	 * In scrub_add_sector_to_wr_bio() we grabbed an extra ref on the
1950	 * sblock; now that the write bio has completed, put that ref again.
1951	 */
1952	for (i = 0; i < sbio->sector_count; i++) {
1953		scrub_block_put(sbio->sectors[i]->sblock);
1954		scrub_sector_put(sbio->sectors[i]);
1955	}
1956
1957	bio_put(sbio->bio);
1958	kfree(sbio);
1959	scrub_pending_bio_dec(sctx);
1960}
1961
1962static int scrub_checksum(struct scrub_block *sblock)
1963{
1964	u64 flags;
1965	int ret;
1966
1967	/*
1968	 * No need to initialize these stats currently,
1969	 * because this function only uses the return value
1970	 * instead of the stats values.
1971	 *
1972	 * Todo:
1973	 * always use stats
1974	 */
1975	sblock->header_error = 0;
1976	sblock->generation_error = 0;
1977	sblock->checksum_error = 0;
1978
1979	WARN_ON(sblock->sector_count < 1);
1980	flags = sblock->sectors[0]->flags;
1981	ret = 0;
1982	if (flags & BTRFS_EXTENT_FLAG_DATA)
1983		ret = scrub_checksum_data(sblock);
1984	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1985		ret = scrub_checksum_tree_block(sblock);
1986	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1987		ret = scrub_checksum_super(sblock);
1988	else
1989		WARN_ON(1);
1990	if (ret)
1991		scrub_handle_errored_block(sblock);
1992
1993	return ret;
1994}
1995
1996static int scrub_checksum_data(struct scrub_block *sblock)
1997{
1998	struct scrub_ctx *sctx = sblock->sctx;
1999	struct btrfs_fs_info *fs_info = sctx->fs_info;
2000	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2001	u8 csum[BTRFS_CSUM_SIZE];
2002	struct scrub_sector *sector;
2003	char *kaddr;
2004
2005	BUG_ON(sblock->sector_count < 1);
2006	sector = sblock->sectors[0];
2007	if (!sector->have_csum)
2008		return 0;
2009
2010	kaddr = scrub_sector_get_kaddr(sector);
2011
2012	shash->tfm = fs_info->csum_shash;
2013	crypto_shash_init(shash);
2014
2015	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
2016
2017	if (memcmp(csum, sector->csum, fs_info->csum_size))
2018		sblock->checksum_error = 1;
2019	return sblock->checksum_error;
2020}
2021
2022static int scrub_checksum_tree_block(struct scrub_block *sblock)
2023{
2024	struct scrub_ctx *sctx = sblock->sctx;
2025	struct btrfs_header *h;
2026	struct btrfs_fs_info *fs_info = sctx->fs_info;
2027	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2028	u8 calculated_csum[BTRFS_CSUM_SIZE];
2029	u8 on_disk_csum[BTRFS_CSUM_SIZE];
2030	/*
2031	 * This is done in sectorsize steps even for metadata as there's a
2032	 * constraint for nodesize to be aligned to sectorsize. This will need
2033	 * to change so we don't misuse data and metadata units like that.
2034	 */
2035	const u32 sectorsize = sctx->fs_info->sectorsize;
2036	const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
2037	int i;
2038	struct scrub_sector *sector;
2039	char *kaddr;
2040
2041	BUG_ON(sblock->sector_count < 1);
2042
2043	/* Each member in sectors is just one sector */
2044	ASSERT(sblock->sector_count == num_sectors);
2045
2046	sector = sblock->sectors[0];
2047	kaddr = scrub_sector_get_kaddr(sector);
2048	h = (struct btrfs_header *)kaddr;
2049	memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
2050
2051	/*
2052	 * we don't use the getter functions here, as we
2053	 * a) don't have an extent buffer and
2054	 * b) the page is already kmapped
2055	 */
2056	if (sblock->logical != btrfs_stack_header_bytenr(h))
2057		sblock->header_error = 1;
2058
2059	if (sector->generation != btrfs_stack_header_generation(h)) {
2060		sblock->header_error = 1;
2061		sblock->generation_error = 1;
2062	}
2063
2064	if (!scrub_check_fsid(h->fsid, sector))
2065		sblock->header_error = 1;
2066
2067	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
2068		   BTRFS_UUID_SIZE))
2069		sblock->header_error = 1;
2070
2071	shash->tfm = fs_info->csum_shash;
2072	crypto_shash_init(shash);
2073	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
2074			    sectorsize - BTRFS_CSUM_SIZE);
2075
2076	for (i = 1; i < num_sectors; i++) {
2077		kaddr = scrub_sector_get_kaddr(sblock->sectors[i]);
2078		crypto_shash_update(shash, kaddr, sectorsize);
2079	}
2080
2081	crypto_shash_final(shash, calculated_csum);
2082	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
2083		sblock->checksum_error = 1;
2084
2085	return sblock->header_error || sblock->checksum_error;
2086}
2087
2088static int scrub_checksum_super(struct scrub_block *sblock)
2089{
2090	struct btrfs_super_block *s;
2091	struct scrub_ctx *sctx = sblock->sctx;
2092	struct btrfs_fs_info *fs_info = sctx->fs_info;
2093	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
2094	u8 calculated_csum[BTRFS_CSUM_SIZE];
2095	struct scrub_sector *sector;
2096	char *kaddr;
2097	int fail_gen = 0;
2098	int fail_cor = 0;
2099
2100	BUG_ON(sblock->sector_count < 1);
2101	sector = sblock->sectors[0];
2102	kaddr = scrub_sector_get_kaddr(sector);
2103	s = (struct btrfs_super_block *)kaddr;
2104
2105	if (sblock->logical != btrfs_super_bytenr(s))
2106		++fail_cor;
2107
2108	if (sector->generation != btrfs_super_generation(s))
2109		++fail_gen;
2110
2111	if (!scrub_check_fsid(s->fsid, sector))
2112		++fail_cor;
2113
2114	shash->tfm = fs_info->csum_shash;
2115	crypto_shash_init(shash);
2116	crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
2117			BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
2118
2119	if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
2120		++fail_cor;
2121
2122	return fail_cor + fail_gen;
2123}
2124
2125static void scrub_block_put(struct scrub_block *sblock)
2126{
2127	if (refcount_dec_and_test(&sblock->refs)) {
2128		int i;
2129
2130		if (sblock->sparity)
2131			scrub_parity_put(sblock->sparity);
2132
2133		for (i = 0; i < sblock->sector_count; i++)
2134			scrub_sector_put(sblock->sectors[i]);
2135		for (i = 0; i < DIV_ROUND_UP(sblock->len, PAGE_SIZE); i++) {
2136			if (sblock->pages[i]) {
2137				detach_scrub_page_private(sblock->pages[i]);
2138				__free_page(sblock->pages[i]);
2139			}
2140		}
2141		kfree(sblock);
2142	}
2143}
2144
2145static void scrub_sector_get(struct scrub_sector *sector)
2146{
2147	atomic_inc(&sector->refs);
2148}
2149
2150static void scrub_sector_put(struct scrub_sector *sector)
2151{
2152	if (atomic_dec_and_test(&sector->refs))
2153		kfree(sector);
2154}
2155
2156/*
2157 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
2158 * second.  Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
2159 */
2160static void scrub_throttle(struct scrub_ctx *sctx)
2161{
2162	const int time_slice = 1000;
2163	struct scrub_bio *sbio;
2164	struct btrfs_device *device;
2165	s64 delta;
2166	ktime_t now;
2167	u32 div;
2168	u64 bwlimit;
2169
2170	sbio = sctx->bios[sctx->curr];
2171	device = sbio->dev;
2172	bwlimit = READ_ONCE(device->scrub_speed_max);
2173	if (bwlimit == 0)
2174		return;
2175
2176	/*
2177	 * The slice is divided into intervals in which the IO is submitted; the
2178	 * interval count is derived from bwlimit, with a maximum of 64 intervals.
2179	 */
2180	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
2181	div = min_t(u32, 64, div);
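	/*
	 * Illustrative example (numbers not from the source): with
	 * scrub_speed_max = 100MiB/s, div = min(64, max(1, 100/16)) = 6, so
	 * each deadline period below lasts 1000/6 ms and roughly bwlimit/6
	 * bytes may be submitted before we sleep for the rest of the period.
	 */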
2182
2183	/* Start new epoch, set deadline */
2184	now = ktime_get();
2185	if (sctx->throttle_deadline == 0) {
2186		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
2187		sctx->throttle_sent = 0;
2188	}
2189
2190	/* Still within the current time slice? */
2191	if (ktime_before(now, sctx->throttle_deadline)) {
2192		/* If current bio is within the limit, send it */
2193		sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
2194		if (sctx->throttle_sent <= div_u64(bwlimit, div))
2195			return;
2196
2197		/* We're over the limit, sleep until the rest of the slice */
2198		delta = ktime_ms_delta(sctx->throttle_deadline, now);
2199	} else {
2200		/* New request after deadline, start new epoch */
2201		delta = 0;
2202	}
2203
2204	if (delta) {
2205		long timeout;
2206
2207		timeout = div_u64(delta * HZ, 1000);
2208		schedule_timeout_interruptible(timeout);
2209	}
2210
2211	/* Next call will start the deadline period */
2212	sctx->throttle_deadline = 0;
2213}
2214
2215static void scrub_submit(struct scrub_ctx *sctx)
2216{
2217	struct scrub_bio *sbio;
2218
2219	if (sctx->curr == -1)
2220		return;
2221
2222	scrub_throttle(sctx);
2223
2224	sbio = sctx->bios[sctx->curr];
2225	sctx->curr = -1;
2226	scrub_pending_bio_inc(sctx);
2227	btrfsic_check_bio(sbio->bio);
2228	submit_bio(sbio->bio);
2229}
2230
2231static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
2232				      struct scrub_sector *sector)
2233{
2234	struct scrub_block *sblock = sector->sblock;
2235	struct scrub_bio *sbio;
2236	const u32 sectorsize = sctx->fs_info->sectorsize;
2237	int ret;
2238
2239again:
2240	/*
2241	 * grab a fresh bio or wait for one to become available
2242	 */
2243	while (sctx->curr == -1) {
2244		spin_lock(&sctx->list_lock);
2245		sctx->curr = sctx->first_free;
2246		if (sctx->curr != -1) {
2247			sctx->first_free = sctx->bios[sctx->curr]->next_free;
2248			sctx->bios[sctx->curr]->next_free = -1;
2249			sctx->bios[sctx->curr]->sector_count = 0;
2250			spin_unlock(&sctx->list_lock);
2251		} else {
2252			spin_unlock(&sctx->list_lock);
2253			wait_event(sctx->list_wait, sctx->first_free != -1);
2254		}
2255	}
2256	sbio = sctx->bios[sctx->curr];
2257	if (sbio->sector_count == 0) {
2258		sbio->physical = sblock->physical + sector->offset;
2259		sbio->logical = sblock->logical + sector->offset;
2260		sbio->dev = sblock->dev;
2261		if (!sbio->bio) {
2262			sbio->bio = bio_alloc(sbio->dev->bdev, sctx->sectors_per_bio,
2263					      REQ_OP_READ, GFP_NOFS);
2264		}
2265		sbio->bio->bi_private = sbio;
2266		sbio->bio->bi_end_io = scrub_bio_end_io;
2267		sbio->bio->bi_iter.bi_sector = sbio->physical >> 9;
2268		sbio->status = 0;
2269	} else if (sbio->physical + sbio->sector_count * sectorsize !=
2270		   sblock->physical + sector->offset ||
2271		   sbio->logical + sbio->sector_count * sectorsize !=
2272		   sblock->logical + sector->offset ||
2273		   sbio->dev != sblock->dev) {
2274		scrub_submit(sctx);
2275		goto again;
2276	}
2277
2278	sbio->sectors[sbio->sector_count] = sector;
2279	ret = bio_add_scrub_sector(sbio->bio, sector, sectorsize);
2280	if (ret != sectorsize) {
2281		if (sbio->sector_count < 1) {
2282			bio_put(sbio->bio);
2283			sbio->bio = NULL;
2284			return -EIO;
2285		}
2286		scrub_submit(sctx);
2287		goto again;
2288	}
2289
2290	scrub_block_get(sblock); /* one for the page added to the bio */
2291	atomic_inc(&sblock->outstanding_sectors);
2292	sbio->sector_count++;
2293	if (sbio->sector_count == sctx->sectors_per_bio)
2294		scrub_submit(sctx);
2295
2296	return 0;
2297}
2298
2299static void scrub_missing_raid56_end_io(struct bio *bio)
2300{
2301	struct scrub_block *sblock = bio->bi_private;
2302	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2303
2304	btrfs_bio_counter_dec(fs_info);
2305	if (bio->bi_status)
2306		sblock->no_io_error_seen = 0;
2307
2308	bio_put(bio);
2309
2310	queue_work(fs_info->scrub_workers, &sblock->work);
2311}
2312
2313static void scrub_missing_raid56_worker(struct work_struct *work)
2314{
2315	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2316	struct scrub_ctx *sctx = sblock->sctx;
2317	struct btrfs_fs_info *fs_info = sctx->fs_info;
2318	u64 logical;
2319	struct btrfs_device *dev;
2320
2321	logical = sblock->logical;
2322	dev = sblock->dev;
2323
2324	if (sblock->no_io_error_seen)
2325		scrub_recheck_block_checksum(sblock);
2326
2327	if (!sblock->no_io_error_seen) {
2328		spin_lock(&sctx->stat_lock);
2329		sctx->stat.read_errors++;
2330		spin_unlock(&sctx->stat_lock);
2331		btrfs_err_rl_in_rcu(fs_info,
2332			"IO error rebuilding logical %llu for dev %s",
2333			logical, btrfs_dev_name(dev));
2334	} else if (sblock->header_error || sblock->checksum_error) {
2335		spin_lock(&sctx->stat_lock);
2336		sctx->stat.uncorrectable_errors++;
2337		spin_unlock(&sctx->stat_lock);
2338		btrfs_err_rl_in_rcu(fs_info,
2339			"failed to rebuild valid logical %llu for dev %s",
2340			logical, btrfs_dev_name(dev));
2341	} else {
2342		scrub_write_block_to_dev_replace(sblock);
2343	}
2344
2345	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2346		mutex_lock(&sctx->wr_lock);
2347		scrub_wr_submit(sctx);
2348		mutex_unlock(&sctx->wr_lock);
2349	}
2350
2351	scrub_block_put(sblock);
2352	scrub_pending_bio_dec(sctx);
2353}
2354
2355static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2356{
2357	struct scrub_ctx *sctx = sblock->sctx;
2358	struct btrfs_fs_info *fs_info = sctx->fs_info;
2359	u64 length = sblock->sector_count << fs_info->sectorsize_bits;
2360	u64 logical = sblock->logical;
2361	struct btrfs_io_context *bioc = NULL;
2362	struct bio *bio;
2363	struct btrfs_raid_bio *rbio;
2364	int ret;
2365	int i;
2366
2367	btrfs_bio_counter_inc_blocked(fs_info);
2368	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2369			       &length, &bioc);
2370	if (ret || !bioc || !bioc->raid_map)
2371		goto bioc_out;
2372
2373	if (WARN_ON(!sctx->is_dev_replace ||
2374		    !(bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2375		/*
2376		 * We shouldn't be scrubbing a missing device. Even for dev
2377		 * replace, we should only get here for RAID 5/6. We either
2378		 * managed to mount something with no mirrors remaining or
2379		 * there's a bug in scrub_find_good_copy()/btrfs_map_block().
2380		 */
2381		goto bioc_out;
2382	}
2383
2384	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2385	bio->bi_iter.bi_sector = logical >> 9;
2386	bio->bi_private = sblock;
2387	bio->bi_end_io = scrub_missing_raid56_end_io;
2388
2389	rbio = raid56_alloc_missing_rbio(bio, bioc);
2390	if (!rbio)
2391		goto rbio_out;
2392
2393	for (i = 0; i < sblock->sector_count; i++) {
2394		struct scrub_sector *sector = sblock->sectors[i];
2395
2396		raid56_add_scrub_pages(rbio, scrub_sector_get_page(sector),
2397				       scrub_sector_get_page_offset(sector),
2398				       sector->offset + sector->sblock->logical);
2399	}
2400
2401	INIT_WORK(&sblock->work, scrub_missing_raid56_worker);
2402	scrub_block_get(sblock);
2403	scrub_pending_bio_inc(sctx);
2404	raid56_submit_missing_rbio(rbio);
2405	btrfs_put_bioc(bioc);
2406	return;
2407
2408rbio_out:
2409	bio_put(bio);
2410bioc_out:
2411	btrfs_bio_counter_dec(fs_info);
2412	btrfs_put_bioc(bioc);
2413	spin_lock(&sctx->stat_lock);
2414	sctx->stat.malloc_errors++;
2415	spin_unlock(&sctx->stat_lock);
2416}
2417
2418static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
2419		       u64 physical, struct btrfs_device *dev, u64 flags,
2420		       u64 gen, int mirror_num, u8 *csum,
2421		       u64 physical_for_dev_replace)
2422{
2423	struct scrub_block *sblock;
2424	const u32 sectorsize = sctx->fs_info->sectorsize;
2425	int index;
2426
2427	sblock = alloc_scrub_block(sctx, dev, logical, physical,
2428				   physical_for_dev_replace, mirror_num);
2429	if (!sblock) {
2430		spin_lock(&sctx->stat_lock);
2431		sctx->stat.malloc_errors++;
2432		spin_unlock(&sctx->stat_lock);
2433		return -ENOMEM;
2434	}
2435
2436	for (index = 0; len > 0; index++) {
2437		struct scrub_sector *sector;
2438		/*
2439		 * Here we will allocate one page for one sector to scrub.
2440		 * This is fine if PAGE_SIZE == sectorsize, but will cost
2441		 * more memory in the PAGE_SIZE > sectorsize case.
2442		 */
2443		u32 l = min(sectorsize, len);
2444
2445		sector = alloc_scrub_sector(sblock, logical);
2446		if (!sector) {
2447			spin_lock(&sctx->stat_lock);
2448			sctx->stat.malloc_errors++;
2449			spin_unlock(&sctx->stat_lock);
2450			scrub_block_put(sblock);
2451			return -ENOMEM;
2452		}
2453		sector->flags = flags;
2454		sector->generation = gen;
2455		if (csum) {
2456			sector->have_csum = 1;
2457			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2458		} else {
2459			sector->have_csum = 0;
2460		}
2461		len -= l;
2462		logical += l;
2463		physical += l;
2464		physical_for_dev_replace += l;
2465	}
2466
2467	WARN_ON(sblock->sector_count == 0);
2468	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2469		/*
2470		 * This case should only be hit for RAID 5/6 device replace. See
2471		 * the comment in scrub_missing_raid56_pages() for details.
2472		 */
2473		scrub_missing_raid56_pages(sblock);
2474	} else {
2475		for (index = 0; index < sblock->sector_count; index++) {
2476			struct scrub_sector *sector = sblock->sectors[index];
2477			int ret;
2478
2479			ret = scrub_add_sector_to_rd_bio(sctx, sector);
2480			if (ret) {
2481				scrub_block_put(sblock);
2482				return ret;
2483			}
2484		}
2485
2486		if (flags & BTRFS_EXTENT_FLAG_SUPER)
2487			scrub_submit(sctx);
2488	}
2489
2490	/* last one frees, either here or in bio completion for last page */
2491	scrub_block_put(sblock);
2492	return 0;
2493}
2494
2495static void scrub_bio_end_io(struct bio *bio)
2496{
2497	struct scrub_bio *sbio = bio->bi_private;
2498	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2499
2500	sbio->status = bio->bi_status;
2501	sbio->bio = bio;
2502
2503	queue_work(fs_info->scrub_workers, &sbio->work);
2504}
2505
2506static void scrub_bio_end_io_worker(struct work_struct *work)
2507{
2508	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2509	struct scrub_ctx *sctx = sbio->sctx;
2510	int i;
2511
2512	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
2513	if (sbio->status) {
2514		for (i = 0; i < sbio->sector_count; i++) {
2515			struct scrub_sector *sector = sbio->sectors[i];
2516
2517			sector->io_error = 1;
2518			sector->sblock->no_io_error_seen = 0;
2519		}
2520	}
2521
2522	/* Now complete the scrub_block items that have all pages completed */
2523	for (i = 0; i < sbio->sector_count; i++) {
2524		struct scrub_sector *sector = sbio->sectors[i];
2525		struct scrub_block *sblock = sector->sblock;
2526
2527		if (atomic_dec_and_test(&sblock->outstanding_sectors))
2528			scrub_block_complete(sblock);
2529		scrub_block_put(sblock);
2530	}
2531
2532	bio_put(sbio->bio);
2533	sbio->bio = NULL;
2534	spin_lock(&sctx->list_lock);
2535	sbio->next_free = sctx->first_free;
2536	sctx->first_free = sbio->index;
2537	spin_unlock(&sctx->list_lock);
2538
2539	if (sctx->is_dev_replace && sctx->flush_all_writes) {
2540		mutex_lock(&sctx->wr_lock);
2541		scrub_wr_submit(sctx);
2542		mutex_unlock(&sctx->wr_lock);
2543	}
2544
2545	scrub_pending_bio_dec(sctx);
2546}
2547
2548static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2549				       unsigned long *bitmap,
2550				       u64 start, u32 len)
2551{
2552	u64 offset;
2553	u32 nsectors;
2554	u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
2555
2556	if (len >= sparity->stripe_len) {
2557		bitmap_set(bitmap, 0, sparity->nsectors);
2558		return;
2559	}
2560
2561	start -= sparity->logic_start;
2562	start = div64_u64_rem(start, sparity->stripe_len, &offset);
2563	offset = offset >> sectorsize_bits;
2564	nsectors = len >> sectorsize_bits;
2565
2566	if (offset + nsectors <= sparity->nsectors) {
2567		bitmap_set(bitmap, offset, nsectors);
2568		return;
2569	}
2570
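	/*
	 * The range wraps past the end of the per-stripe bitmap: set the
	 * tail first, then wrap to the front.  Illustrative numbers only:
	 * with nsectors = 16, offset = 14 and a 4-sector range, bits 14-15
	 * and 0-1 get set.
	 */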
2571	bitmap_set(bitmap, offset, sparity->nsectors - offset);
2572	bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2573}
2574
2575static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2576						   u64 start, u32 len)
2577{
2578	__scrub_mark_bitmap(sparity, &sparity->ebitmap, start, len);
2579}
2580
2581static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2582						  u64 start, u32 len)
2583{
2584	__scrub_mark_bitmap(sparity, &sparity->dbitmap, start, len);
2585}
2586
2587static void scrub_block_complete(struct scrub_block *sblock)
2588{
2589	int corrupted = 0;
2590
2591	if (!sblock->no_io_error_seen) {
2592		corrupted = 1;
2593		scrub_handle_errored_block(sblock);
2594	} else {
2595		/*
2596		 * If there is a checksum error, write via the repair mechanism
2597		 * in the dev-replace case; otherwise write the block here in
2598		 * the dev-replace case.
2599		 */
2600		corrupted = scrub_checksum(sblock);
2601		if (!corrupted && sblock->sctx->is_dev_replace)
2602			scrub_write_block_to_dev_replace(sblock);
2603	}
2604
2605	if (sblock->sparity && corrupted && !sblock->data_corrected) {
2606		u64 start = sblock->logical;
2607		u64 end = sblock->logical +
2608			  sblock->sectors[sblock->sector_count - 1]->offset +
2609			  sblock->sctx->fs_info->sectorsize;
2610
2611		ASSERT(end - start <= U32_MAX);
2612		scrub_parity_mark_sectors_error(sblock->sparity,
2613						start, end - start);
2614	}
2615}
2616
2617static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
2618{
2619	sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
2620	list_del(&sum->list);
2621	kfree(sum);
2622}
2623
2624/*
2625 * Find the desired csum for range [logical, logical + sectorsize), and store
2626 * the csum into @csum.
2627 *
2628 * The search source is sctx->csum_list, which is a pre-populated list
2629 * storing bytenr-ordered csum ranges.  We're responsible for cleaning up any
2630 * range that is before @logical.
2631 *
2632 * Return 0 if there is no csum for the range.
2633 * Return 1 if there is csum for the range and copied to @csum.
2634 */
2635static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2636{
2637	bool found = false;
2638
2639	while (!list_empty(&sctx->csum_list)) {
2640		struct btrfs_ordered_sum *sum = NULL;
2641		unsigned long index;
2642		unsigned long num_sectors;
2643
2644		sum = list_first_entry(&sctx->csum_list,
2645				       struct btrfs_ordered_sum, list);
2646		/* The current csum range is beyond our range, no csum found */
2647		if (sum->bytenr > logical)
2648			break;
2649
2650		/*
2651		 * The current sum is before our bytenr.  Since scrub is always
2652		 * done in bytenr order, this csum will never be used again;
2653		 * clean it up so that later calls won't bother with the range,
2654		 * and continue searching the next range.
2655		 */
2656		if (sum->bytenr + sum->len <= logical) {
2657			drop_csum_range(sctx, sum);
2658			continue;
2659		}
2660
2661		/* Now the csum range covers our bytenr, copy the csum */
2662		found = true;
2663		index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
2664		num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
2665
2666		memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
2667		       sctx->fs_info->csum_size);
2668
2669		/* Cleanup the range if we're at the end of the csum range */
2670		if (index == num_sectors - 1)
2671			drop_csum_range(sctx, sum);
2672		break;
2673	}
2674	if (!found)
2675		return 0;
2676	return 1;
2677}
2678
2679/* scrub_extent() tries to collect up to 64KiB of data for each bio */
2680static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2681			u64 logical, u32 len,
2682			u64 physical, struct btrfs_device *dev, u64 flags,
2683			u64 gen, int mirror_num)
2684{
2685	struct btrfs_device *src_dev = dev;
2686	u64 src_physical = physical;
2687	int src_mirror = mirror_num;
2688	int ret;
2689	u8 csum[BTRFS_CSUM_SIZE];
2690	u32 blocksize;
2691
2692	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2693		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2694			blocksize = map->stripe_len;
2695		else
2696			blocksize = sctx->fs_info->sectorsize;
2697		spin_lock(&sctx->stat_lock);
2698		sctx->stat.data_extents_scrubbed++;
2699		sctx->stat.data_bytes_scrubbed += len;
2700		spin_unlock(&sctx->stat_lock);
2701	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2702		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2703			blocksize = map->stripe_len;
2704		else
2705			blocksize = sctx->fs_info->nodesize;
2706		spin_lock(&sctx->stat_lock);
2707		sctx->stat.tree_extents_scrubbed++;
2708		sctx->stat.tree_bytes_scrubbed += len;
2709		spin_unlock(&sctx->stat_lock);
2710	} else {
2711		blocksize = sctx->fs_info->sectorsize;
2712		WARN_ON(1);
2713	}
2714
2715	/*
2716	 * In the dev-replace case, @dev can be a missing device.
2717	 * Regular scrub avoids running on a missing device at all,
2718	 * as that would trigger tons of read errors.
2719	 *
2720	 * Reading from a missing device would only cause the read error
2721	 * counts to increase unnecessarily.
2722	 * So here we change the read source to a good mirror.
2723	 */
2724	if (sctx->is_dev_replace && !dev->bdev)
2725		scrub_find_good_copy(sctx->fs_info, logical, len, &src_physical,
2726				     &src_dev, &src_mirror);
2727	while (len) {
2728		u32 l = min(len, blocksize);
2729		int have_csum = 0;
2730
2731		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2732			/* push csums to sbio */
2733			have_csum = scrub_find_csum(sctx, logical, csum);
2734			if (have_csum == 0)
2735				++sctx->stat.no_csum;
2736		}
2737		ret = scrub_sectors(sctx, logical, l, src_physical, src_dev,
2738				    flags, gen, src_mirror,
2739				    have_csum ? csum : NULL, physical);
2740		if (ret)
2741			return ret;
2742		len -= l;
2743		logical += l;
2744		physical += l;
2745		src_physical += l;
2746	}
2747	return 0;
2748}
2749
2750static int scrub_sectors_for_parity(struct scrub_parity *sparity,
2751				  u64 logical, u32 len,
2752				  u64 physical, struct btrfs_device *dev,
2753				  u64 flags, u64 gen, int mirror_num, u8 *csum)
2754{
2755	struct scrub_ctx *sctx = sparity->sctx;
2756	struct scrub_block *sblock;
2757	const u32 sectorsize = sctx->fs_info->sectorsize;
2758	int index;
2759
2760	ASSERT(IS_ALIGNED(len, sectorsize));
2761
2762	sblock = alloc_scrub_block(sctx, dev, logical, physical, physical, mirror_num);
2763	if (!sblock) {
2764		spin_lock(&sctx->stat_lock);
2765		sctx->stat.malloc_errors++;
2766		spin_unlock(&sctx->stat_lock);
2767		return -ENOMEM;
2768	}
2769
2770	sblock->sparity = sparity;
2771	scrub_parity_get(sparity);
2772
2773	for (index = 0; len > 0; index++) {
2774		struct scrub_sector *sector;
2775
2776		sector = alloc_scrub_sector(sblock, logical);
2777		if (!sector) {
2778			spin_lock(&sctx->stat_lock);
2779			sctx->stat.malloc_errors++;
2780			spin_unlock(&sctx->stat_lock);
2781			scrub_block_put(sblock);
2782			return -ENOMEM;
2783		}
2784		sblock->sectors[index] = sector;
2785		/* For scrub parity */
2786		scrub_sector_get(sector);
2787		list_add_tail(&sector->list, &sparity->sectors_list);
2788		sector->flags = flags;
2789		sector->generation = gen;
2790		if (csum) {
2791			sector->have_csum = 1;
2792			memcpy(sector->csum, csum, sctx->fs_info->csum_size);
2793		} else {
2794			sector->have_csum = 0;
2795		}
2796
2797		/* Iterate over the stripe range in sectorsize steps */
2798		len -= sectorsize;
2799		logical += sectorsize;
2800		physical += sectorsize;
2801	}
2802
2803	WARN_ON(sblock->sector_count == 0);
2804	for (index = 0; index < sblock->sector_count; index++) {
2805		struct scrub_sector *sector = sblock->sectors[index];
2806		int ret;
2807
2808		ret = scrub_add_sector_to_rd_bio(sctx, sector);
2809		if (ret) {
2810			scrub_block_put(sblock);
2811			return ret;
2812		}
2813	}
2814
2815	/* Last one frees, either here or in bio completion for last sector */
2816	scrub_block_put(sblock);
2817	return 0;
2818}
2819
2820static int scrub_extent_for_parity(struct scrub_parity *sparity,
2821				   u64 logical, u32 len,
2822				   u64 physical, struct btrfs_device *dev,
2823				   u64 flags, u64 gen, int mirror_num)
2824{
2825	struct scrub_ctx *sctx = sparity->sctx;
2826	int ret;
2827	u8 csum[BTRFS_CSUM_SIZE];
2828	u32 blocksize;
2829
2830	if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2831		scrub_parity_mark_sectors_error(sparity, logical, len);
2832		return 0;
2833	}
2834
2835	if (flags & BTRFS_EXTENT_FLAG_DATA) {
2836		blocksize = sparity->stripe_len;
2837	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2838		blocksize = sparity->stripe_len;
2839	} else {
2840		blocksize = sctx->fs_info->sectorsize;
2841		WARN_ON(1);
2842	}
2843
2844	while (len) {
2845		u32 l = min(len, blocksize);
2846		int have_csum = 0;
2847
2848		if (flags & BTRFS_EXTENT_FLAG_DATA) {
2849			/* push csums to sbio */
2850			have_csum = scrub_find_csum(sctx, logical, csum);
2851			if (have_csum == 0)
2852				goto skip;
2853		}
2854		ret = scrub_sectors_for_parity(sparity, logical, l, physical, dev,
2855					     flags, gen, mirror_num,
2856					     have_csum ? csum : NULL);
2857		if (ret)
2858			return ret;
2859skip:
2860		len -= l;
2861		logical += l;
2862		physical += l;
2863	}
2864	return 0;
2865}
2866
2867/*
2868 * Given a physical address, this will calculate its
2869 * logical offset.  If this is a parity stripe, it will return
2870 * the leftmost data stripe's logical offset.
2871 *
2872 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2873 */
2874static int get_raid56_logic_offset(u64 physical, int num,
2875				   struct map_lookup *map, u64 *offset,
2876				   u64 *stripe_start)
2877{
2878	int i;
2879	int j = 0;
2880	u64 stripe_nr;
2881	u64 last_offset;
2882	u32 stripe_index;
2883	u32 rot;
2884	const int data_stripes = nr_data_stripes(map);
2885
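	/*
	 * Illustrative example (made-up numbers): a RAID5 chunk on 3 devices
	 * has 2 data stripes, so a physical offset of N full stripes into
	 * this device corresponds to N * 2 * stripe_len bytes of logical
	 * data.  The loop below then checks each candidate data stripe to
	 * see whether, after rotation, it lands on device @num; if none
	 * does, the location is a parity stripe.
	 */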
2886	last_offset = (physical - map->stripes[num].physical) * data_stripes;
2887	if (stripe_start)
2888		*stripe_start = last_offset;
2889
2890	*offset = last_offset;
2891	for (i = 0; i < data_stripes; i++) {
2892		*offset = last_offset + i * map->stripe_len;
2893
2894		stripe_nr = div64_u64(*offset, map->stripe_len);
2895		stripe_nr = div_u64(stripe_nr, data_stripes);
2896
2897		/* Work out the disk rotation on this stripe-set */
2898		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2899		/* Calculate which stripe this data is located on */
2900		rot += i;
2901		stripe_index = rot % map->num_stripes;
2902		if (stripe_index == num)
2903			return 0;
2904		if (stripe_index < num)
2905			j++;
2906	}
2907	*offset = last_offset + j * map->stripe_len;
2908	return 1;
2909}
2910
2911static void scrub_free_parity(struct scrub_parity *sparity)
2912{
2913	struct scrub_ctx *sctx = sparity->sctx;
2914	struct scrub_sector *curr, *next;
2915	int nbits;
2916
2917	nbits = bitmap_weight(&sparity->ebitmap, sparity->nsectors);
2918	if (nbits) {
2919		spin_lock(&sctx->stat_lock);
2920		sctx->stat.read_errors += nbits;
2921		sctx->stat.uncorrectable_errors += nbits;
2922		spin_unlock(&sctx->stat_lock);
2923	}
2924
2925	list_for_each_entry_safe(curr, next, &sparity->sectors_list, list) {
2926		list_del_init(&curr->list);
2927		scrub_sector_put(curr);
2928	}
2929
2930	kfree(sparity);
2931}
2932
2933static void scrub_parity_bio_endio_worker(struct work_struct *work)
2934{
2935	struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2936						    work);
2937	struct scrub_ctx *sctx = sparity->sctx;
2938
2939	btrfs_bio_counter_dec(sctx->fs_info);
2940	scrub_free_parity(sparity);
2941	scrub_pending_bio_dec(sctx);
2942}
2943
2944static void scrub_parity_bio_endio(struct bio *bio)
2945{
2946	struct scrub_parity *sparity = bio->bi_private;
2947	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2948
2949	if (bio->bi_status)
2950		bitmap_or(&sparity->ebitmap, &sparity->ebitmap,
2951			  &sparity->dbitmap, sparity->nsectors);
2952
2953	bio_put(bio);
2954
2955	INIT_WORK(&sparity->work, scrub_parity_bio_endio_worker);
2956	queue_work(fs_info->scrub_parity_workers, &sparity->work);
2957}
2958
2959static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2960{
2961	struct scrub_ctx *sctx = sparity->sctx;
2962	struct btrfs_fs_info *fs_info = sctx->fs_info;
2963	struct bio *bio;
2964	struct btrfs_raid_bio *rbio;
2965	struct btrfs_io_context *bioc = NULL;
2966	u64 length;
2967	int ret;
2968
2969	if (!bitmap_andnot(&sparity->dbitmap, &sparity->dbitmap,
2970			   &sparity->ebitmap, sparity->nsectors))
2971		goto out;
2972
2973	length = sparity->logic_end - sparity->logic_start;
2974
2975	btrfs_bio_counter_inc_blocked(fs_info);
2976	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2977			       &length, &bioc);
2978	if (ret || !bioc || !bioc->raid_map)
2979		goto bioc_out;
2980
2981	bio = bio_alloc(NULL, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
2982	bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2983	bio->bi_private = sparity;
2984	bio->bi_end_io = scrub_parity_bio_endio;
2985
2986	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc,
2987					      sparity->scrub_dev,
2988					      &sparity->dbitmap,
2989					      sparity->nsectors);
2990	btrfs_put_bioc(bioc);
2991	if (!rbio)
2992		goto rbio_out;
2993
2994	scrub_pending_bio_inc(sctx);
2995	raid56_parity_submit_scrub_rbio(rbio);
2996	return;
2997
2998rbio_out:
2999	bio_put(bio);
3000bioc_out:
3001	btrfs_bio_counter_dec(fs_info);
3002	bitmap_or(&sparity->ebitmap, &sparity->ebitmap, &sparity->dbitmap,
3003		  sparity->nsectors);
3004	spin_lock(&sctx->stat_lock);
3005	sctx->stat.malloc_errors++;
3006	spin_unlock(&sctx->stat_lock);
3007out:
3008	scrub_free_parity(sparity);
3009}
3010
3011static void scrub_parity_get(struct scrub_parity *sparity)
3012{
3013	refcount_inc(&sparity->refs);
3014}
3015
3016static void scrub_parity_put(struct scrub_parity *sparity)
3017{
3018	if (!refcount_dec_and_test(&sparity->refs))
3019		return;
3020
3021	scrub_parity_check_and_repair(sparity);
3022}
3023
3024/*
3025 * Return 0 if the extent item covers any byte of [@search_start, @search_start + @search_len).
3026 * Return <0 if the extent item is before @search_start.
3027 * Return >0 if the extent item is after @search_start + @search_len.
3028 */
3029static int compare_extent_item_range(struct btrfs_path *path,
3030				     u64 search_start, u64 search_len)
3031{
3032	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
3033	u64 len;
3034	struct btrfs_key key;
3035
3036	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3037	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
3038	       key.type == BTRFS_METADATA_ITEM_KEY);
3039	if (key.type == BTRFS_METADATA_ITEM_KEY)
3040		len = fs_info->nodesize;
3041	else
3042		len = key.offset;
3043
3044	if (key.objectid + len <= search_start)
3045		return -1;
3046	if (key.objectid >= search_start + search_len)
3047		return 1;
3048	return 0;
3049}
3050
3051/*
3052 * Locate one extent item which covers any byte in range
3053 * [@search_start, @search_start + @search_length)
3054 *
3055 * If the path is not initialized, we will initialize the search by doing
3056 * a btrfs_search_slot().
3057 * If the path is already initialized, we will use the path as the initial
3058 * slot, to avoid duplicated btrfs_search_slot() calls.
3059 *
3060 * NOTE: If an extent item starts before @search_start, we will still
3061 * return the extent item. This is for data extent crossing stripe boundary.
3062 *
3063 * Return 0 if we found such extent item, and @path will point to the extent item.
3064 * Return >0 if no such extent item can be found, and @path will be released.
3065 * Return <0 if hit fatal error, and @path will be released.
3066 */
3067static int find_first_extent_item(struct btrfs_root *extent_root,
3068				  struct btrfs_path *path,
3069				  u64 search_start, u64 search_len)
3070{
3071	struct btrfs_fs_info *fs_info = extent_root->fs_info;
3072	struct btrfs_key key;
3073	int ret;
3074
3075	/* Continue using the existing path */
3076	if (path->nodes[0])
3077		goto search_forward;
3078
3079	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3080		key.type = BTRFS_METADATA_ITEM_KEY;
3081	else
3082		key.type = BTRFS_EXTENT_ITEM_KEY;
3083	key.objectid = search_start;
3084	key.offset = (u64)-1;
3085
3086	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3087	if (ret < 0)
3088		return ret;
3089
3090	ASSERT(ret > 0);
3091	/*
3092	 * Here we intentionally pass 0 as @min_objectid, as there could be
3093	 * an extent item starting before @search_start.
3094	 */
3095	ret = btrfs_previous_extent_item(extent_root, path, 0);
3096	if (ret < 0)
3097		return ret;
3098	/*
3099	 * Whether or not we have found an extent item, the loop below will
3100	 * properly do every check on the key.
3101	 */
3102search_forward:
3103	while (true) {
3104		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3105		if (key.objectid >= search_start + search_len)
3106			break;
3107		if (key.type != BTRFS_METADATA_ITEM_KEY &&
3108		    key.type != BTRFS_EXTENT_ITEM_KEY)
3109			goto next;
3110
3111		ret = compare_extent_item_range(path, search_start, search_len);
3112		if (ret == 0)
3113			return ret;
3114		if (ret > 0)
3115			break;
3116next:
3117		path->slots[0]++;
3118		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3119			ret = btrfs_next_leaf(extent_root, path);
3120			if (ret) {
3121				/* Either no more item or fatal error */
3122				btrfs_release_path(path);
3123				return ret;
3124			}
3125		}
3126	}
3127	btrfs_release_path(path);
3128	return 1;
3129}
3130
3131static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
3132			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
3133{
3134	struct btrfs_key key;
3135	struct btrfs_extent_item *ei;
3136
3137	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3138	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
3139	       key.type == BTRFS_EXTENT_ITEM_KEY);
3140	*extent_start_ret = key.objectid;
3141	if (key.type == BTRFS_METADATA_ITEM_KEY)
3142		*size_ret = path->nodes[0]->fs_info->nodesize;
3143	else
3144		*size_ret = key.offset;
3145	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
3146	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
3147	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
3148}
3149
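/*
 * Return true when [extent_start, extent_start + extent_len) crosses either
 * edge of [boundary_start, boundary_start + boundary_len).  Illustrative
 * example only: with a boundary of [64K, 128K), an extent [60K, 80K) crosses
 * the start edge, while [64K, 80K) does not cross at all.
 */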
3150static bool does_range_cross_boundary(u64 extent_start, u64 extent_len,
3151				      u64 boundary_start, u32 boundary_len)
3152{
3153	return (extent_start < boundary_start &&
3154		extent_start + extent_len > boundary_start) ||
3155	       (extent_start < boundary_start + boundary_len &&
3156		extent_start + extent_len > boundary_start + boundary_len);
3157}
3158
3159static int scrub_raid56_data_stripe_for_parity(struct scrub_ctx *sctx,
3160					       struct scrub_parity *sparity,
3161					       struct map_lookup *map,
3162					       struct btrfs_device *sdev,
3163					       struct btrfs_path *path,
3164					       u64 logical)
3165{
3166	struct btrfs_fs_info *fs_info = sctx->fs_info;
3167	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
3168	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, logical);
3169	u64 cur_logical = logical;
3170	int ret;
3171
3172	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3173
3174	/* Path must not be populated */
3175	ASSERT(!path->nodes[0]);
3176
3177	while (cur_logical < logical + map->stripe_len) {
3178		struct btrfs_io_context *bioc = NULL;
3179		struct btrfs_device *extent_dev;
3180		u64 extent_start;
3181		u64 extent_size;
3182		u64 mapped_length;
3183		u64 extent_flags;
3184		u64 extent_gen;
3185		u64 extent_physical;
3186		u64 extent_mirror_num;
3187
3188		ret = find_first_extent_item(extent_root, path, cur_logical,
3189					     logical + map->stripe_len - cur_logical);
3190		/* No more extent item in this data stripe */
3191		if (ret > 0) {
3192			ret = 0;
3193			break;
3194		}
3195		if (ret < 0)
3196			break;
3197		get_extent_info(path, &extent_start, &extent_size, &extent_flags,
3198				&extent_gen);
3199
3200		/* Metadata should not cross stripe boundaries */
3201		if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3202		    does_range_cross_boundary(extent_start, extent_size,
3203					      logical, map->stripe_len)) {
3204			btrfs_err(fs_info,
3205	"scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3206				  extent_start, logical);
3207			spin_lock(&sctx->stat_lock);
3208			sctx->stat.uncorrectable_errors++;
3209			spin_unlock(&sctx->stat_lock);
3210			cur_logical += extent_size;
3211			continue;
3212		}
3213
3214		/* Skip hole range which doesn't have any extent */
3215		cur_logical = max(extent_start, cur_logical);
3216
3217		/* Truncate the range inside this data stripe */
3218		extent_size = min(extent_start + extent_size,
3219				  logical + map->stripe_len) - cur_logical;
3220		extent_start = cur_logical;
3221		ASSERT(extent_size <= U32_MAX);
3222
3223		scrub_parity_mark_sectors_data(sparity, extent_start, extent_size);
3224
3225		mapped_length = extent_size;
3226		ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_start,
3227				      &mapped_length, &bioc, 0);
3228		if (!ret && (!bioc || mapped_length < extent_size))
3229			ret = -EIO;
3230		if (ret) {
3231			btrfs_put_bioc(bioc);
3232			scrub_parity_mark_sectors_error(sparity, extent_start,
3233							extent_size);
3234			break;
3235		}
3236		extent_physical = bioc->stripes[0].physical;
3237		extent_mirror_num = bioc->mirror_num;
3238		extent_dev = bioc->stripes[0].dev;
3239		btrfs_put_bioc(bioc);
3240
3241		ret = btrfs_lookup_csums_list(csum_root, extent_start,
3242					      extent_start + extent_size - 1,
3243					      &sctx->csum_list, 1, false);
3244		if (ret) {
3245			scrub_parity_mark_sectors_error(sparity, extent_start,
3246							extent_size);
3247			break;
3248		}
3249
3250		ret = scrub_extent_for_parity(sparity, extent_start,
3251					      extent_size, extent_physical,
3252					      extent_dev, extent_flags,
3253					      extent_gen, extent_mirror_num);
3254		scrub_free_csums(sctx);
3255
3256		if (ret) {
3257			scrub_parity_mark_sectors_error(sparity, extent_start,
3258							extent_size);
3259			break;
3260		}
3261
3262		cond_resched();
3263		cur_logical += extent_size;
3264	}
3265	btrfs_release_path(path);
3266	return ret;
3267}
3268
3269static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
3270						  struct map_lookup *map,
3271						  struct btrfs_device *sdev,
3272						  u64 logic_start,
3273						  u64 logic_end)
3274{
3275	struct btrfs_fs_info *fs_info = sctx->fs_info;
3276	struct btrfs_path *path;
3277	u64 cur_logical;
3278	int ret;
3279	struct scrub_parity *sparity;
3280	int nsectors;
3281
3282	path = btrfs_alloc_path();
3283	if (!path) {
3284		spin_lock(&sctx->stat_lock);
3285		sctx->stat.malloc_errors++;
3286		spin_unlock(&sctx->stat_lock);
3287		return -ENOMEM;
3288	}
3289	path->search_commit_root = 1;
3290	path->skip_locking = 1;
3291
3292	ASSERT(map->stripe_len <= U32_MAX);
3293	nsectors = map->stripe_len >> fs_info->sectorsize_bits;
3294	ASSERT(nsectors <= BITS_PER_LONG);
3295	sparity = kzalloc(sizeof(struct scrub_parity), GFP_NOFS);
3296	if (!sparity) {
3297		spin_lock(&sctx->stat_lock);
3298		sctx->stat.malloc_errors++;
3299		spin_unlock(&sctx->stat_lock);
3300		btrfs_free_path(path);
3301		return -ENOMEM;
3302	}
3303
3304	ASSERT(map->stripe_len <= U32_MAX);
3305	sparity->stripe_len = map->stripe_len;
3306	sparity->nsectors = nsectors;
3307	sparity->sctx = sctx;
3308	sparity->scrub_dev = sdev;
3309	sparity->logic_start = logic_start;
3310	sparity->logic_end = logic_end;
3311	refcount_set(&sparity->refs, 1);
3312	INIT_LIST_HEAD(&sparity->sectors_list);
3313
3314	ret = 0;
3315	for (cur_logical = logic_start; cur_logical < logic_end;
3316	     cur_logical += map->stripe_len) {
3317		ret = scrub_raid56_data_stripe_for_parity(sctx, sparity, map,
3318							  sdev, path, cur_logical);
3319		if (ret < 0)
3320			break;
3321	}
3322
3323	scrub_parity_put(sparity);
3324	scrub_submit(sctx);
3325	mutex_lock(&sctx->wr_lock);
3326	scrub_wr_submit(sctx);
3327	mutex_unlock(&sctx->wr_lock);
3328
3329	btrfs_free_path(path);
3330	return ret < 0 ? ret : 0;
3331}
3332
3333static void sync_replace_for_zoned(struct scrub_ctx *sctx)
3334{
3335	if (!btrfs_is_zoned(sctx->fs_info))
3336		return;
3337
3338	sctx->flush_all_writes = true;
3339	scrub_submit(sctx);
3340	mutex_lock(&sctx->wr_lock);
3341	scrub_wr_submit(sctx);
3342	mutex_unlock(&sctx->wr_lock);
3343
3344	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3345}
3346
3347static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
3348					u64 physical, u64 physical_end)
3349{
3350	struct btrfs_fs_info *fs_info = sctx->fs_info;
3351	int ret = 0;
3352
3353	if (!btrfs_is_zoned(fs_info))
3354		return 0;
3355
3356	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3357
3358	mutex_lock(&sctx->wr_lock);
3359	if (sctx->write_pointer < physical_end) {
3360		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
3361						    physical,
3362						    sctx->write_pointer);
3363		if (ret)
3364			btrfs_err(fs_info,
3365				  "zoned: failed to recover write pointer");
3366	}
3367	mutex_unlock(&sctx->wr_lock);
3368	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
3369
3370	return ret;
3371}
3372
3373/*
3374 * Scrub one range which only has a simple mirror-based profile.
3375 * (This includes all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
3376 *  RAID0/RAID10.)
3377 *
3378 * Since we may need to handle a subset of a block group, we need the
3379 * @logical_start and @logical_length parameters.
3380 */
3381static int scrub_simple_mirror(struct scrub_ctx *sctx,
3382			       struct btrfs_root *extent_root,
3383			       struct btrfs_root *csum_root,
3384			       struct btrfs_block_group *bg,
3385			       struct map_lookup *map,
3386			       u64 logical_start, u64 logical_length,
3387			       struct btrfs_device *device,
3388			       u64 physical, int mirror_num)
3389{
3390	struct btrfs_fs_info *fs_info = sctx->fs_info;
3391	const u64 logical_end = logical_start + logical_length;
3392	/* An artificial limit, inherited from the old scrub behavior */
3393	const u32 max_length = SZ_64K;
3394	struct btrfs_path path = { 0 };
3395	u64 cur_logical = logical_start;
3396	int ret;
3397
3398	/* The range must be inside the bg */
3399	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
3400
3401	path.search_commit_root = 1;
3402	path.skip_locking = 1;
3403	/* Go through each extent item inside the logical range */
3404	while (cur_logical < logical_end) {
3405		u64 extent_start;
3406		u64 extent_len;
3407		u64 extent_flags;
3408		u64 extent_gen;
3409		u64 scrub_len;
3410
3411		/* Canceled? */
3412		if (atomic_read(&fs_info->scrub_cancel_req) ||
3413		    atomic_read(&sctx->cancel_req)) {
3414			ret = -ECANCELED;
3415			break;
3416		}
3417		/* Paused? */
3418		if (atomic_read(&fs_info->scrub_pause_req)) {
3419			/* Push queued extents */
3420			sctx->flush_all_writes = true;
3421			scrub_submit(sctx);
3422			mutex_lock(&sctx->wr_lock);
3423			scrub_wr_submit(sctx);
3424			mutex_unlock(&sctx->wr_lock);
3425			wait_event(sctx->list_wait,
3426				   atomic_read(&sctx->bios_in_flight) == 0);
3427			sctx->flush_all_writes = false;
3428			scrub_blocked_if_needed(fs_info);
3429		}
3430		/* Block group removed? */
3431		spin_lock(&bg->lock);
3432		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
3433			spin_unlock(&bg->lock);
3434			ret = 0;
3435			break;
3436		}
3437		spin_unlock(&bg->lock);
3438
3439		ret = find_first_extent_item(extent_root, &path, cur_logical,
3440					     logical_end - cur_logical);
3441		if (ret > 0) {
3442			/* No more extent, just update the accounting */
3443			sctx->stat.last_physical = physical + logical_length;
3444			ret = 0;
3445			break;
3446		}
3447		if (ret < 0)
3448			break;
3449		get_extent_info(&path, &extent_start, &extent_len,
3450				&extent_flags, &extent_gen);
3451		/* Skip hole range which doesn't have any extent */
3452		cur_logical = max(extent_start, cur_logical);
3453
3454		/*
3455		 * Scrub len has three limits:
3456		 * - Extent size limit
3457		 * - Scrub range limit
3458		 *   This is especially important for RAID0/RAID10 to reuse
3459		 *   this function
3460		 * - Max scrub size limit
3461		 */
3462		scrub_len = min(min(extent_start + extent_len,
3463				    logical_end), cur_logical + max_length) -
3464			    cur_logical;
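		/*
		 * Illustrative example only: a 1MiB extent inside the range
		 * gets scrubbed in 64KiB (max_length) chunks, while a 4KiB
		 * extent is clamped to its own size.
		 */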
3465
3466		if (extent_flags & BTRFS_EXTENT_FLAG_DATA) {
3467			ret = btrfs_lookup_csums_list(csum_root, cur_logical,
3468					cur_logical + scrub_len - 1,
3469					&sctx->csum_list, 1, false);
3470			if (ret)
3471				break;
3472		}
3473		if ((extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3474		    does_range_cross_boundary(extent_start, extent_len,
3475					      logical_start, logical_length)) {
3476			btrfs_err(fs_info,
3477"scrub: tree block %llu spanning boundaries, ignored. boundary=[%llu, %llu)",
3478				  extent_start, logical_start, logical_end);
3479			spin_lock(&sctx->stat_lock);
3480			sctx->stat.uncorrectable_errors++;
3481			spin_unlock(&sctx->stat_lock);
3482			cur_logical += scrub_len;
3483			continue;
3484		}
3485		ret = scrub_extent(sctx, map, cur_logical, scrub_len,
3486				   cur_logical - logical_start + physical,
3487				   device, extent_flags, extent_gen,
3488				   mirror_num);
3489		scrub_free_csums(sctx);
3490		if (ret)
3491			break;
3492		if (sctx->is_dev_replace)
3493			sync_replace_for_zoned(sctx);
3494		cur_logical += scrub_len;
3495		/* Don't hold the CPU for too long */
3496		cond_resched();
3497	}
3498	btrfs_release_path(&path);
3499	return ret;
3500}
3501
3502/* Calculate the full stripe length for simple stripe based profiles */
3503static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
3504{
3505	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3506			    BTRFS_BLOCK_GROUP_RAID10));
3507
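	/*
	 * E.g. (illustrative): RAID10 with num_stripes = 4 and sub_stripes = 2
	 * has two data stripes, so the full stripe covers 2 * stripe_len bytes.
	 */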
3508	return map->num_stripes / map->sub_stripes * map->stripe_len;
3509}
3510
3511/* Get the logical bytenr for the stripe */
3512static u64 simple_stripe_get_logical(struct map_lookup *map,
3513				     struct btrfs_block_group *bg,
3514				     int stripe_index)
3515{
3516	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3517			    BTRFS_BLOCK_GROUP_RAID10));
3518	ASSERT(stripe_index < map->num_stripes);
3519
3520	/*
3521	 * (stripe_index / sub_stripes) gives how many data stripes we need to
3522	 * skip.
3523	 */
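	/*
	 * E.g. (illustrative): RAID10 with num_stripes = 4 and sub_stripes = 2
	 * maps stripe_index 0/1 to bg->start and 2/3 to bg->start + stripe_len.
	 */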
3524	return (stripe_index / map->sub_stripes) * map->stripe_len + bg->start;
3525}
3526
3527/* Get the mirror number for the stripe */
3528static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
3529{
3530	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3531			    BTRFS_BLOCK_GROUP_RAID10));
3532	ASSERT(stripe_index < map->num_stripes);
3533
3534	/* For RAID0 it's fixed to 1; for RAID10 the mirror number alternates 1,2,1,2,... */
3535	return stripe_index % map->sub_stripes + 1;
3536}
3537
3538static int scrub_simple_stripe(struct scrub_ctx *sctx,
3539			       struct btrfs_root *extent_root,
3540			       struct btrfs_root *csum_root,
3541			       struct btrfs_block_group *bg,
3542			       struct map_lookup *map,
3543			       struct btrfs_device *device,
3544			       int stripe_index)
3545{
3546	const u64 logical_increment = simple_stripe_full_stripe_len(map);
3547	const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
3548	const u64 orig_physical = map->stripes[stripe_index].physical;
3549	const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
3550	u64 cur_logical = orig_logical;
3551	u64 cur_physical = orig_physical;
3552	int ret = 0;
3553
3554	while (cur_logical < bg->start + bg->length) {
3555		/*
3556		 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
3557		 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
3558		 * this stripe.
3559		 */
3560		ret = scrub_simple_mirror(sctx, extent_root, csum_root, bg, map,
3561					  cur_logical, map->stripe_len, device,
3562					  cur_physical, mirror_num);
3563		if (ret)
3564			return ret;
3565		/* Skip to next stripe which belongs to the target device */
3566		cur_logical += logical_increment;
3567		/* For physical offset, we just go to next stripe */
3568		cur_physical += map->stripe_len;
3569	}
3570	return ret;
3571}
3572
3573static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3574					   struct btrfs_block_group *bg,
3575					   struct extent_map *em,
3576					   struct btrfs_device *scrub_dev,
3577					   int stripe_index)
3578{
3579	struct btrfs_path *path;
3580	struct btrfs_fs_info *fs_info = sctx->fs_info;
3581	struct btrfs_root *root;
3582	struct btrfs_root *csum_root;
3583	struct blk_plug plug;
3584	struct map_lookup *map = em->map_lookup;
3585	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3586	const u64 chunk_logical = bg->start;
3587	int ret;
3588	u64 physical = map->stripes[stripe_index].physical;
3589	const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
3590	const u64 physical_end = physical + dev_stripe_len;
3591	u64 logical;
3592	u64 logic_end;
3593	/* The logical increment after finishing one stripe */
3594	u64 increment;
3595	/* Offset inside the chunk */
3596	u64 offset;
3597	u64 stripe_logical;
3598	u64 stripe_end;
3599	int stop_loop = 0;
3600
3601	path = btrfs_alloc_path();
3602	if (!path)
3603		return -ENOMEM;
3604
3605	/*
3606	 * Work on the commit root. The related disk blocks are static as
3607	 * long as COW is applied. This means it is safe to rewrite
3608	 * them to repair disk errors without any race conditions.
3609	 */
3610	path->search_commit_root = 1;
3611	path->skip_locking = 1;
3612	path->reada = READA_FORWARD;
3613
3614	wait_event(sctx->list_wait,
3615		   atomic_read(&sctx->bios_in_flight) == 0);
3616	scrub_blocked_if_needed(fs_info);
3617
3618	root = btrfs_extent_root(fs_info, bg->start);
3619	csum_root = btrfs_csum_root(fs_info, bg->start);
3620
3621	/*
3622	 * Collect all data csums for the stripe to avoid seeking during
3623	 * the scrub. This might currently (crc32) end up being about 1MB.
3624	 */
3625	blk_start_plug(&plug);
3626
3627	if (sctx->is_dev_replace &&
3628	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
3629		mutex_lock(&sctx->wr_lock);
3630		sctx->write_pointer = physical;
3631		mutex_unlock(&sctx->wr_lock);
3632		sctx->flush_all_writes = true;
3633	}
3634
3635	/*
3636	 * There used to be a big double loop to handle all profiles using the
3637	 * same routine, which grew larger and uglier over time.
3638	 *
3639	 * So here we handle each profile differently, so simpler profiles
3640	 * have simpler scrubbing function.
3641	 */
3642	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
3643			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
3644		/*
3645		 * The above check rules out all complex profiles; the remaining
3646		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
3647		 * mirrored duplication without striping.
3648		 *
3649		 * Only @physical and @mirror_num need to be calculated using
3650		 * @stripe_index.
3651		 */
3652		ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3653				bg->start, bg->length, scrub_dev,
3654				map->stripes[stripe_index].physical,
3655				stripe_index + 1);
3656		offset = 0;
3657		goto out;
3658	}
3659	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3660		ret = scrub_simple_stripe(sctx, root, csum_root, bg, map,
3661					  scrub_dev, stripe_index);
3662		offset = map->stripe_len * (stripe_index / map->sub_stripes);
3663		goto out;
3664	}
3665
3666	/* Only RAID56 goes through the old code */
3667	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
3668	ret = 0;
3669
3670	/* Calculate the logical end of the stripe */
3671	get_raid56_logic_offset(physical_end, stripe_index,
3672				map, &logic_end, NULL);
3673	logic_end += chunk_logical;
3674
3675	/* Initialize @offset in case we need to go to out: label */
3676	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
3677	increment = map->stripe_len * nr_data_stripes(map);
3678
3679	/*
3680	 * Due to the rotation, for RAID56 it's better to iterate each stripe
3681	 * using its physical offset.
3682	 */
3683	while (physical < physical_end) {
3684		ret = get_raid56_logic_offset(physical, stripe_index, map,
3685					      &logical, &stripe_logical);
3686		logical += chunk_logical;
3687		if (ret) {
3688			/* It is a parity stripe */
3689			stripe_logical += chunk_logical;
3690			stripe_end = stripe_logical + increment;
3691			ret = scrub_raid56_parity(sctx, map, scrub_dev,
3692						  stripe_logical,
3693						  stripe_end);
3694			if (ret)
3695				goto out;
3696			goto next;
3697		}
3698
3699		/*
3700		 * Now we're at a data stripe, scrub each extent in the range.
3701		 *
3702		 * At this stage, if we ignore the repair part, inside each data
3703		 * stripe it is no different from the SINGLE profile.
3704		 * We can reuse scrub_simple_mirror() here, as the repair part
3705		 * is still based on @mirror_num.
3706		 */
3707		ret = scrub_simple_mirror(sctx, root, csum_root, bg, map,
3708					  logical, map->stripe_len,
3709					  scrub_dev, physical, 1);
3710		if (ret < 0)
3711			goto out;
3712next:
3713		logical += increment;
3714		physical += map->stripe_len;
3715		spin_lock(&sctx->stat_lock);
3716		if (stop_loop)
3717			sctx->stat.last_physical =
3718				map->stripes[stripe_index].physical + dev_stripe_len;
3719		else
3720			sctx->stat.last_physical = physical;
3721		spin_unlock(&sctx->stat_lock);
3722		if (stop_loop)
3723			break;
3724	}
3725out:
3726	/* push queued extents */
3727	scrub_submit(sctx);
3728	mutex_lock(&sctx->wr_lock);
3729	scrub_wr_submit(sctx);
3730	mutex_unlock(&sctx->wr_lock);
3731
3732	blk_finish_plug(&plug);
3733	btrfs_free_path(path);
3734
3735	if (sctx->is_dev_replace && ret >= 0) {
3736		int ret2;
3737
3738		ret2 = sync_write_pointer_for_zoned(sctx,
3739				chunk_logical + offset,
3740				map->stripes[stripe_index].physical,
3741				physical_end);
3742		if (ret2)
3743			ret = ret2;
3744	}
3745
3746	return ret < 0 ? ret : 0;
3747}
3748
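/*
 * Scrub one chunk of @bg: look up its chunk mapping and scrub every stripe
 * of the chunk that sits on @scrub_dev at @dev_offset.
 */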
3749static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3750					  struct btrfs_block_group *bg,
3751					  struct btrfs_device *scrub_dev,
3752					  u64 dev_offset,
3753					  u64 dev_extent_len)
3754{
3755	struct btrfs_fs_info *fs_info = sctx->fs_info;
3756	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
3757	struct map_lookup *map;
3758	struct extent_map *em;
3759	int i;
3760	int ret = 0;
3761
3762	read_lock(&map_tree->lock);
3763	em = lookup_extent_mapping(map_tree, bg->start, bg->length);
3764	read_unlock(&map_tree->lock);
3765
3766	if (!em) {
3767		/*
3768		 * Might have been an unused block group deleted by the cleaner
3769		 * kthread or relocation.
3770		 */
3771		spin_lock(&bg->lock);
3772		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
3773			ret = -EINVAL;
3774		spin_unlock(&bg->lock);
3775
3776		return ret;
3777	}
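	/*
	 * The cached mapping must start exactly at the block group and cover
	 * at least the device extent we were asked to scrub.  If it doesn't,
	 * the chunk layout changed underneath us (e.g. the block group was
	 * removed and the range reused), so there is nothing to scrub here.
	 */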
3778	if (em->start != bg->start)
3779		goto out;
3780	if (em->len < dev_extent_len)
3781		goto out;
3782
3783	map = em->map_lookup;
3784	for (i = 0; i < map->num_stripes; ++i) {
3785		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3786		    map->stripes[i].physical == dev_offset) {
3787			ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
3788			if (ret)
3789				goto out;
3790		}
3791	}
3792out:
3793	free_extent_map(em);
3794
3795	return ret;
3796}
3797
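/*
 * On zoned filesystems, make sure every pending write into @cache has reached
 * disk before dev-replace copies the block group: wait for reservations,
 * nocow writers and ordered extents, then commit the running transaction.
 */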
3798static int finish_extent_writes_for_zoned(struct btrfs_root *root,
3799					  struct btrfs_block_group *cache)
3800{
3801	struct btrfs_fs_info *fs_info = cache->fs_info;
3802	struct btrfs_trans_handle *trans;
3803
3804	if (!btrfs_is_zoned(fs_info))
3805		return 0;
3806
3807	btrfs_wait_block_group_reservations(cache);
3808	btrfs_wait_nocow_writers(cache);
3809	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
3810
3811	trans = btrfs_join_transaction(root);
3812	if (IS_ERR(trans))
3813		return PTR_ERR(trans);
3814	return btrfs_commit_transaction(trans);
3815}
3816
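/*
 * Walk all device extents of @scrub_dev that intersect [start, end) using the
 * commit root of the device tree.  For each extent, grab the owning block
 * group, try to mark it read-only, scrub it via scrub_chunk(), then restore
 * the block group state and advance to the next device extent.
 */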
3817static noinline_for_stack
3818int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3819			   struct btrfs_device *scrub_dev, u64 start, u64 end)
3820{
3821	struct btrfs_dev_extent *dev_extent = NULL;
3822	struct btrfs_path *path;
3823	struct btrfs_fs_info *fs_info = sctx->fs_info;
3824	struct btrfs_root *root = fs_info->dev_root;
3825	u64 chunk_offset;
3826	int ret = 0;
3827	int ro_set;
3828	int slot;
3829	struct extent_buffer *l;
3830	struct btrfs_key key;
3831	struct btrfs_key found_key;
3832	struct btrfs_block_group *cache;
3833	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3834
3835	path = btrfs_alloc_path();
3836	if (!path)
3837		return -ENOMEM;
3838
3839	path->reada = READA_FORWARD;
3840	path->search_commit_root = 1;
3841	path->skip_locking = 1;
3842
3843	key.objectid = scrub_dev->devid;
3844	key.offset = 0ull;
3845	key.type = BTRFS_DEV_EXTENT_KEY;
3846
3847	while (1) {
3848		u64 dev_extent_len;
3849
3850		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3851		if (ret < 0)
3852			break;
3853		if (ret > 0) {
3854			if (path->slots[0] >=
3855			    btrfs_header_nritems(path->nodes[0])) {
3856				ret = btrfs_next_leaf(root, path);
3857				if (ret < 0)
3858					break;
3859				if (ret > 0) {
3860					ret = 0;
3861					break;
3862				}
3863			} else {
3864				ret = 0;
3865			}
3866		}
3867
3868		l = path->nodes[0];
3869		slot = path->slots[0];
3870
3871		btrfs_item_key_to_cpu(l, &found_key, slot);
3872
3873		if (found_key.objectid != scrub_dev->devid)
3874			break;
3875
3876		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3877			break;
3878
3879		if (found_key.offset >= end)
3880			break;
3881
3882		if (found_key.offset < key.offset)
3883			break;
3884
3885		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3886		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
3887
3888		if (found_key.offset + dev_extent_len <= start)
3889			goto skip;
3890
3891		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3892
3893		/*
3894		 * get a reference on the corresponding block group to prevent
3895		 * the chunk from going away while we scrub it
3896		 */
3897		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3898
3899		/*
3900		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
3901		if (!cache)
3902			goto skip;
3903
3904		ASSERT(cache->start <= chunk_offset);
3905		/*
3906		 * We are using the commit root to search for device extents, so
3907		 * that means we could have found a device extent item from a
3908		 * block group that was deleted in the current transaction. The
3909		 * logical start offset of the deleted block group, stored at
3910		 * @chunk_offset, might be part of the logical address range of
3911		 * a new block group (which uses different physical extents).
3912		 * In this case btrfs_lookup_block_group() has returned the new
3913		 * block group, and its start address is less than @chunk_offset.
3914		 *
3915		 * We skip such new block groups, because it's pointless to
3916		 * process them, as we won't find their extents because we search
3917		 * for them using the commit root of the extent tree. For a device
3918		 * replace it's also fine to skip it, we won't miss copying them
3919		 * to the target device because we have the write duplication
3920		 * setup through the regular write path (by btrfs_map_block()),
3921		 * and we have committed a transaction when we started the device
3922		 * replace, right after setting up the device replace state.
3923		 */
3924		if (cache->start < chunk_offset) {
3925			btrfs_put_block_group(cache);
3926			goto skip;
3927		}
3928
3929		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
3930			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
3931				btrfs_put_block_group(cache);
3932				goto skip;
3933			}
3934		}
3935
3936		/*
3937		 * Make sure that while we are scrubbing the corresponding block
3938		 * group doesn't get its logical address and its device extents
3939		 * reused for another block group, which can possibly be of a
3940		 * different type and different profile. We do this to prevent
3941		 * false error detections and crashes due to bogus attempts to
3942		 * repair extents.
3943		 */
3944		spin_lock(&cache->lock);
3945		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
3946			spin_unlock(&cache->lock);
3947			btrfs_put_block_group(cache);
3948			goto skip;
3949		}
3950		btrfs_freeze_block_group(cache);
3951		spin_unlock(&cache->lock);
3952
3953		/*
3954		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
3955		 * to avoid a deadlock caused by:
3956		 * btrfs_inc_block_group_ro()
3957		 * -> btrfs_wait_for_commit()
3958		 * -> btrfs_commit_transaction()
3959		 * -> btrfs_scrub_pause()
3960		 */
3961		scrub_pause_on(fs_info);
3962
3963		/*
3964		 * Don't do chunk preallocation for scrub.
3965		 *
3966		 * This is especially important for SYSTEM bgs, or we can hit
3967		 * -EFBIG from btrfs_finish_chunk_alloc() like:
3968		 * 1. The only SYSTEM bg is marked RO.
3969		 *    Since SYSTEM bg is small, that's pretty common.
3970		 * 2. A new SYSTEM bg will be allocated
3971		 *    because the regular version would allocate a new chunk.
3972		 * 3. New SYSTEM bg is empty and will get cleaned up
3973		 *    Before cleanup really happens, it's marked RO again.
3974		 * 4. The empty SYSTEM bg gets scrubbed
3975		 *    We go back to 2.
3976		 *
3977		 * This can easily boost the number of SYSTEM chunks if the cleaner
3978		 * thread can't be triggered fast enough, and use up all the space
3979		 * of btrfs_super_block::sys_chunk_array.
3980		 *
3981		 * While for dev replace, we need to try our best to mark block
3982		 * group RO, to prevent race between:
3983		 * - Write duplication
3984		 *   Contains latest data
3985		 * - Scrub copy
3986		 *   Contains data from commit tree
3987		 *
3988		 * If target block group is not marked RO, nocow writes can
3989		 * be overwritten by scrub copy, causing data corruption.
3990		 * So for dev-replace, it's not allowed to continue if a block
3991		 * group is not RO.
3992		 */
3993		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
3994		if (!ret && sctx->is_dev_replace) {
3995			ret = finish_extent_writes_for_zoned(root, cache);
3996			if (ret) {
3997				btrfs_dec_block_group_ro(cache);
3998				scrub_pause_off(fs_info);
3999				btrfs_put_block_group(cache);
4000				break;
4001			}
4002		}
4003
4004		if (ret == 0) {
4005			ro_set = 1;
4006		} else if (ret == -ENOSPC && !sctx->is_dev_replace) {
4007			/*
4008			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
4009			 * fails to create a new chunk for metadata.
4010			 * That is not a problem for scrub, because
4011			 * metadata is always COWed, and our scrub paused
4012			 * commit_transactions.
4013			 */
4014			ro_set = 0;
4015		} else if (ret == -ETXTBSY) {
4016			btrfs_warn(fs_info,
4017		   "skipping scrub of block group %llu due to active swapfile",
4018				   cache->start);
4019			scrub_pause_off(fs_info);
4020			ret = 0;
4021			goto skip_unfreeze;
4022		} else {
4023			btrfs_warn(fs_info,
4024				   "failed setting block group ro: %d", ret);
4025			btrfs_unfreeze_block_group(cache);
4026			btrfs_put_block_group(cache);
4027			scrub_pause_off(fs_info);
4028			break;
4029		}
4030
4031		/*
4032		 * Now the target block group is marked RO, wait for nocow writes
4033		 * to finish before dev-replace.
4034		 * COW is fine, as COW never overwrites extents in the commit tree.
4035		 */
4036		if (sctx->is_dev_replace) {
4037			btrfs_wait_nocow_writers(cache);
4038			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
4039					cache->length);
4040		}
4041
4042		scrub_pause_off(fs_info);
4043		down_write(&dev_replace->rwsem);
4044		dev_replace->cursor_right = found_key.offset + dev_extent_len;
4045		dev_replace->cursor_left = found_key.offset;
4046		dev_replace->item_needs_writeback = 1;
4047		up_write(&dev_replace->rwsem);
4048
4049		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
4050				  dev_extent_len);
4051
4052		/*
4053		 * Flush and submit all pending read and write bios, afterwards
4054		 * wait for them.
4055		 * Note that in the dev replace case, a read request causes
4056		 * write requests that are submitted in the read completion
4057		 * worker. Therefore in the current situation, it is required
4058		 * that all write requests are flushed, so that all read and
4059		 * write requests are really completed when bios_in_flight
4060		 * changes to 0.
4061		 */
4062		sctx->flush_all_writes = true;
4063		scrub_submit(sctx);
4064		mutex_lock(&sctx->wr_lock);
4065		scrub_wr_submit(sctx);
4066		mutex_unlock(&sctx->wr_lock);
4067
4068		wait_event(sctx->list_wait,
4069			   atomic_read(&sctx->bios_in_flight) == 0);
4070
4071		scrub_pause_on(fs_info);
4072
4073		/*
4074		 * Must be called before we decrease @scrub_paused.
4075		 * Make sure we don't block transaction commit while
4076		 * we are waiting for pending workers to finish.
4077		 */
4078		wait_event(sctx->list_wait,
4079			   atomic_read(&sctx->workers_pending) == 0);
4080		sctx->flush_all_writes = false;
4081
4082		scrub_pause_off(fs_info);
4083
4084		if (sctx->is_dev_replace &&
4085		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
4086						      cache, found_key.offset))
4087			ro_set = 0;
4088
4089		down_write(&dev_replace->rwsem);
4090		dev_replace->cursor_left = dev_replace->cursor_right;
4091		dev_replace->item_needs_writeback = 1;
4092		up_write(&dev_replace->rwsem);
4093
4094		if (ro_set)
4095			btrfs_dec_block_group_ro(cache);
4096
4097		/*
4098		 * We might have prevented the cleaner kthread from deleting
4099		 * this block group if it was already unused because we raced
4100		 * and set it to RO mode first. So add it back to the unused
4101		 * list, otherwise it might not ever be deleted unless a manual
4102		 * balance is triggered or it becomes used and unused again.
4103		 */
4104		spin_lock(&cache->lock);
4105		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
4106		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
4107			spin_unlock(&cache->lock);
4108			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
4109				btrfs_discard_queue_work(&fs_info->discard_ctl,
4110							 cache);
4111			else
4112				btrfs_mark_bg_unused(cache);
4113		} else {
4114			spin_unlock(&cache->lock);
4115		}
4116skip_unfreeze:
4117		btrfs_unfreeze_block_group(cache);
4118		btrfs_put_block_group(cache);
4119		if (ret)
4120			break;
4121		if (sctx->is_dev_replace &&
4122		    atomic64_read(&dev_replace->num_write_errors) > 0) {
4123			ret = -EIO;
4124			break;
4125		}
4126		if (sctx->stat.malloc_errors > 0) {
4127			ret = -ENOMEM;
4128			break;
4129		}
4130skip:
4131		key.offset = found_key.offset + dev_extent_len;
4132		btrfs_release_path(path);
4133	}
4134
4135	btrfs_free_path(path);
4136
4137	return ret;
4138}
4139
4140static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
4141					   struct btrfs_device *scrub_dev)
4142{
4143	int	i;
4144	u64	bytenr;
4145	u64	gen;
4146	int	ret;
 
4147	struct btrfs_fs_info *fs_info = sctx->fs_info;
4148
4149	if (BTRFS_FS_ERROR(fs_info))
4150		return -EROFS;
4151
4152	/* Seed devices of a new filesystem have their own generation. */
4153	if (scrub_dev->fs_devices != fs_info->fs_devices)
4154		gen = scrub_dev->generation;
4155	else
4156		gen = fs_info->last_trans_committed;
4157
4158	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
4159		bytenr = btrfs_sb_offset(i);
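		/* Stop at super block copies that don't fit within the committed device size. */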
4160		if (bytenr + BTRFS_SUPER_INFO_SIZE >
4161		    scrub_dev->commit_total_bytes)
4162			break;
4163		if (!btrfs_check_super_location(scrub_dev, bytenr))
4164			continue;
4165
4166		ret = scrub_sectors(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
4167				    scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
4168				    NULL, bytenr);
4169		if (ret)
4170			return ret;
4171	}
4172	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4173
4174	return 0;
4175}
4176
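/*
 * Drop one reference on the scrub workqueues.  The last reference detaches
 * them from @fs_info under scrub_lock and destroys them outside the lock.
 */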
4177static void scrub_workers_put(struct btrfs_fs_info *fs_info)
4178{
4179	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
4180					&fs_info->scrub_lock)) {
4181		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
4182		struct workqueue_struct *scrub_wr_comp =
4183						fs_info->scrub_wr_completion_workers;
4184		struct workqueue_struct *scrub_parity =
4185						fs_info->scrub_parity_workers;
4186
4187		fs_info->scrub_workers = NULL;
4188		fs_info->scrub_wr_completion_workers = NULL;
4189		fs_info->scrub_parity_workers = NULL;
4190		mutex_unlock(&fs_info->scrub_lock);
4191
4192		if (scrub_workers)
4193			destroy_workqueue(scrub_workers);
4194		if (scrub_wr_comp)
4195			destroy_workqueue(scrub_wr_comp);
4196		if (scrub_parity)
4197			destroy_workqueue(scrub_parity);
4198	}
4199}
4200
4201/*
4202 * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
4203 */
4204static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4205						int is_dev_replace)
4206{
4207	struct workqueue_struct *scrub_workers = NULL;
4208	struct workqueue_struct *scrub_wr_comp = NULL;
4209	struct workqueue_struct *scrub_parity = NULL;
4210	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4211	int max_active = fs_info->thread_pool_size;
4212	int ret = -ENOMEM;
4213
4214	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
4215		return 0;
4216
4217	scrub_workers = alloc_workqueue("btrfs-scrub", flags,
4218					is_dev_replace ? 1 : max_active);
4219	if (!scrub_workers)
4220		goto fail_scrub_workers;
4221
4222	scrub_wr_comp = alloc_workqueue("btrfs-scrubwrc", flags, max_active);
4223	if (!scrub_wr_comp)
4224		goto fail_scrub_wr_completion_workers;
4225
4226	scrub_parity = alloc_workqueue("btrfs-scrubparity", flags, max_active);
4227	if (!scrub_parity)
4228		goto fail_scrub_parity_workers;
4229
4230	mutex_lock(&fs_info->scrub_lock);
4231	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
4232		ASSERT(fs_info->scrub_workers == NULL &&
4233		       fs_info->scrub_wr_completion_workers == NULL &&
4234		       fs_info->scrub_parity_workers == NULL);
4235		fs_info->scrub_workers = scrub_workers;
4236		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
4237		fs_info->scrub_parity_workers = scrub_parity;
4238		refcount_set(&fs_info->scrub_workers_refcnt, 1);
4239		mutex_unlock(&fs_info->scrub_lock);
4240		return 0;
4241	}
4242	/* Other thread raced in and created the workers for us */
4243	refcount_inc(&fs_info->scrub_workers_refcnt);
4244	mutex_unlock(&fs_info->scrub_lock);
4245
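	/*
	 * Another thread created the workers first; free the ones we just
	 * allocated by falling through the error labels, keeping ret == 0.
	 */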
4246	ret = 0;
4247	destroy_workqueue(scrub_parity);
4248fail_scrub_parity_workers:
4249	destroy_workqueue(scrub_wr_comp);
4250fail_scrub_wr_completion_workers:
4251	destroy_workqueue(scrub_workers);
4252fail_scrub_workers:
4253	return ret;
4254}
4255
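/*
 * Scrub (and optionally repair) all allocated extents in the physical range
 * [start, end) of the device with number @devid, plus the super blocks for a
 * regular scrub.  With @readonly, errors are only reported, not repaired.
 * With @is_dev_replace, the scrubbed data is also written to the replace
 * target.  On return, @progress (if given) holds the accumulated statistics.
 */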
4256int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4257		    u64 end, struct btrfs_scrub_progress *progress,
4258		    int readonly, int is_dev_replace)
4259{
4260	struct btrfs_dev_lookup_args args = { .devid = devid };
4261	struct scrub_ctx *sctx;
4262	int ret;
4263	struct btrfs_device *dev;
4264	unsigned int nofs_flag;
4265	bool need_commit = false;
4266
4267	if (btrfs_fs_closing(fs_info))
4268		return -EAGAIN;
4269
4270	/* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
4271	ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
4272
4273	/*
4274	 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
4275	 * value (max nodesize / min sectorsize), thus nodesize should always
4276	 * be fine.
4277	 */
4278	ASSERT(fs_info->nodesize <=
4279	       SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
4280
4281	/* Allocate outside of device_list_mutex */
4282	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
4283	if (IS_ERR(sctx))
4284		return PTR_ERR(sctx);
4285
4286	ret = scrub_workers_get(fs_info, is_dev_replace);
4287	if (ret)
4288		goto out_free_ctx;
4289
4290	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4291	dev = btrfs_find_device(fs_info->fs_devices, &args);
4292	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
4293		     !is_dev_replace)) {
4294		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4295		ret = -ENODEV;
4296		goto out;
4297	}
4298
4299	if (!is_dev_replace && !readonly &&
4300	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
4301		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4302		btrfs_err_in_rcu(fs_info,
4303			"scrub on devid %llu: filesystem on %s is not writable",
4304				 devid, btrfs_dev_name(dev));
4305		ret = -EROFS;
4306		goto out;
4307	}
4308
4309	mutex_lock(&fs_info->scrub_lock);
4310	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4311	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
4312		mutex_unlock(&fs_info->scrub_lock);
4313		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4314		ret = -EIO;
4315		goto out;
4316	}
4317
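	/*
	 * Only one scrub can run per device at a time, and a regular scrub
	 * is not allowed while a device replace is in progress.
	 */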
4318	down_read(&fs_info->dev_replace.rwsem);
4319	if (dev->scrub_ctx ||
4320	    (!is_dev_replace &&
4321	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
4322		up_read(&fs_info->dev_replace.rwsem);
4323		mutex_unlock(&fs_info->scrub_lock);
4324		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4325		ret = -EINPROGRESS;
4326		goto out;
4327	}
4328	up_read(&fs_info->dev_replace.rwsem);
4329
4330	sctx->readonly = readonly;
4331	dev->scrub_ctx = sctx;
4332	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4333
4334	/*
4335	 * By checking @scrub_pause_req here, we can avoid a
4336	 * race between committing a transaction and scrubbing.
4337	 */
4338	__scrub_blocked_if_needed(fs_info);
4339	atomic_inc(&fs_info->scrubs_running);
4340	mutex_unlock(&fs_info->scrub_lock);
4341
4342	/*
4343	 * In order to avoid deadlock with reclaim when there is a transaction
4344	 * trying to pause scrub, make sure we use GFP_NOFS for all the
4345	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
4346	 * invoked by our callees. The pausing request is done when the
4347	 * transaction commit starts, and it blocks the transaction until scrub
4348	 * is paused (done at specific points at scrub_stripe() or right above
4349	 * before incrementing fs_info->scrubs_running).
4350	 */
4351	nofs_flag = memalloc_nofs_save();
4352	if (!is_dev_replace) {
4353		u64 old_super_errors;
4354
4355		spin_lock(&sctx->stat_lock);
4356		old_super_errors = sctx->stat.super_errors;
4357		spin_unlock(&sctx->stat_lock);
4358
4359		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
4360		/*
4361		 * by holding device list mutex, we can
4362		 * kick off writing super in log tree sync.
4363		 */
4364		mutex_lock(&fs_info->fs_devices->device_list_mutex);
4365		ret = scrub_supers(sctx, dev);
4366		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4367
4368		spin_lock(&sctx->stat_lock);
4369		/*
4370		 * Super block errors found, but we cannot commit a transaction
4371		 * in the current context, since btrfs_commit_transaction() needs
4372		 * to pause the currently running scrub (held by ourselves).
4373		 */
4374		if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
4375			need_commit = true;
4376		spin_unlock(&sctx->stat_lock);
4377	}
4378
4379	if (!ret)
4380		ret = scrub_enumerate_chunks(sctx, dev, start, end);
4381	memalloc_nofs_restore(nofs_flag);
4382
4383	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4384	atomic_dec(&fs_info->scrubs_running);
4385	wake_up(&fs_info->scrub_pause_wait);
4386
4387	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
4388
4389	if (progress)
4390		memcpy(progress, &sctx->stat, sizeof(*progress));
4391
4392	if (!is_dev_replace)
4393		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
4394			ret ? "not finished" : "finished", devid, ret);
4395
4396	mutex_lock(&fs_info->scrub_lock);
4397	dev->scrub_ctx = NULL;
4398	mutex_unlock(&fs_info->scrub_lock);
4399
4400	scrub_workers_put(fs_info);
4401	scrub_put_ctx(sctx);
4402
4403	/*
4404	 * We found some super block errors before, now try to force a
4405	 * transaction commit, as scrub has finished.
4406	 */
4407	if (need_commit) {
4408		struct btrfs_trans_handle *trans;
4409
4410		trans = btrfs_start_transaction(fs_info->tree_root, 0);
4411		if (IS_ERR(trans)) {
4412			ret = PTR_ERR(trans);
4413			btrfs_err(fs_info,
4414	"scrub: failed to start transaction to fix super block errors: %d", ret);
4415			return ret;
4416		}
4417		ret = btrfs_commit_transaction(trans);
4418		if (ret < 0)
4419			btrfs_err(fs_info,
4420	"scrub: failed to commit transaction to fix super block errors: %d", ret);
4421	}
4422	return ret;
4423out:
4424	scrub_workers_put(fs_info);
4425out_free_ctx:
4426	scrub_free_ctx(sctx);
4427
4428	return ret;
4429}
4430
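/*
 * Pause all running scrubs: raise scrub_pause_req and wait until every
 * running scrub has parked itself at one of its pause points.
 */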
4431void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
4432{
4433	mutex_lock(&fs_info->scrub_lock);
4434	atomic_inc(&fs_info->scrub_pause_req);
4435	while (atomic_read(&fs_info->scrubs_paused) !=
4436	       atomic_read(&fs_info->scrubs_running)) {
4437		mutex_unlock(&fs_info->scrub_lock);
4438		wait_event(fs_info->scrub_pause_wait,
4439			   atomic_read(&fs_info->scrubs_paused) ==
4440			   atomic_read(&fs_info->scrubs_running));
4441		mutex_lock(&fs_info->scrub_lock);
4442	}
4443	mutex_unlock(&fs_info->scrub_lock);
4444}
4445
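/* Resume scrubs previously paused by btrfs_scrub_pause(). */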
4446void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4447{
4448	atomic_dec(&fs_info->scrub_pause_req);
4449	wake_up(&fs_info->scrub_pause_wait);
4450}
4451
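/*
 * Cancel all running scrubs on the filesystem and wait for them to finish.
 * Returns -ENOTCONN if no scrub was running.
 */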
4452int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4453{
4454	mutex_lock(&fs_info->scrub_lock);
4455	if (!atomic_read(&fs_info->scrubs_running)) {
4456		mutex_unlock(&fs_info->scrub_lock);
4457		return -ENOTCONN;
4458	}
4459
4460	atomic_inc(&fs_info->scrub_cancel_req);
4461	while (atomic_read(&fs_info->scrubs_running)) {
4462		mutex_unlock(&fs_info->scrub_lock);
4463		wait_event(fs_info->scrub_pause_wait,
4464			   atomic_read(&fs_info->scrubs_running) == 0);
4465		mutex_lock(&fs_info->scrub_lock);
4466	}
4467	atomic_dec(&fs_info->scrub_cancel_req);
4468	mutex_unlock(&fs_info->scrub_lock);
4469
4470	return 0;
4471}
4472
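/*
 * Cancel the scrub running on @dev, if any, and wait for it to finish.
 * Returns -ENOTCONN if no scrub was running on the device.
 */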
4473int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
4474{
4475	struct btrfs_fs_info *fs_info = dev->fs_info;
4476	struct scrub_ctx *sctx;
4477
4478	mutex_lock(&fs_info->scrub_lock);
4479	sctx = dev->scrub_ctx;
4480	if (!sctx) {
4481		mutex_unlock(&fs_info->scrub_lock);
4482		return -ENOTCONN;
4483	}
4484	atomic_inc(&sctx->cancel_req);
4485	while (dev->scrub_ctx) {
4486		mutex_unlock(&fs_info->scrub_lock);
4487		wait_event(fs_info->scrub_pause_wait,
4488			   dev->scrub_ctx == NULL);
4489		mutex_lock(&fs_info->scrub_lock);
4490	}
4491	mutex_unlock(&fs_info->scrub_lock);
4492
4493	return 0;
4494}
4495
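/*
 * Copy the statistics of the scrub running on @devid into @progress.
 * Returns -ENODEV if the device is unknown, -ENOTCONN if it is not being
 * scrubbed.
 */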
4496int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4497			 struct btrfs_scrub_progress *progress)
4498{
4499	struct btrfs_dev_lookup_args args = { .devid = devid };
4500	struct btrfs_device *dev;
4501	struct scrub_ctx *sctx = NULL;
4502
4503	mutex_lock(&fs_info->fs_devices->device_list_mutex);
4504	dev = btrfs_find_device(fs_info->fs_devices, &args);
4505	if (dev)
4506		sctx = dev->scrub_ctx;
4507	if (sctx)
4508		memcpy(progress, &sctx->stat, sizeof(*progress));
4509	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4510
4511	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4512}
4513
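/*
 * Map @extent_logical with btrfs_map_block() and report the physical offset,
 * device and mirror number of the chosen copy, so the caller can read good
 * data from it.  On mapping failure the outputs are left untouched.
 */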
4514static void scrub_find_good_copy(struct btrfs_fs_info *fs_info,
4515				 u64 extent_logical, u32 extent_len,
4516				 u64 *extent_physical,
4517				 struct btrfs_device **extent_dev,
4518				 int *extent_mirror_num)
4519{
4520	u64 mapped_length;
4521	struct btrfs_io_context *bioc = NULL;
4522	int ret;
4523
4524	mapped_length = extent_len;
4525	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4526			      &mapped_length, &bioc, 0);
4527	if (ret || !bioc || mapped_length < extent_len ||
4528	    !bioc->stripes[0].dev->bdev) {
4529		btrfs_put_bioc(bioc);
4530		return;
4531	}
4532
4533	*extent_physical = bioc->stripes[0].physical;
4534	*extent_mirror_num = bioc->mirror_num;
4535	*extent_dev = bioc->stripes[0].dev;
4536	btrfs_put_bioc(bioc);
4537}