   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/ratelimit.h>
   8#include <linux/sched/mm.h>
   9#include <crypto/hash.h>
  10#include "ctree.h"
  11#include "discard.h"
  12#include "volumes.h"
  13#include "disk-io.h"
  14#include "ordered-data.h"
  15#include "transaction.h"
  16#include "backref.h"
  17#include "extent_io.h"
  18#include "dev-replace.h"
  19#include "raid56.h"
  20#include "block-group.h"
  21#include "zoned.h"
  22#include "fs.h"
  23#include "accessors.h"
  24#include "file-item.h"
  25#include "scrub.h"
  26#include "raid-stripe-tree.h"
  27
  28/*
   29 * This is only the first step towards a full-featured scrub. It reads all
   30 * extents and super blocks and verifies the checksums. In case a bad checksum
  31 * is found or the extent cannot be read, good data will be written back if
  32 * any can be found.
  33 *
  34 * Future enhancements:
  35 *  - In case an unrepairable extent is encountered, track which files are
  36 *    affected and report them
  37 *  - track and record media errors, throw out bad devices
  38 *  - add a mode to also read unallocated space
  39 */
  40
  41struct scrub_ctx;
  42
  43/*
  44 * The following value only influences the performance.
  45 *
  46 * This determines how many stripes would be submitted in one go,
  47 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
  48 */
  49#define SCRUB_STRIPES_PER_GROUP		8
  50
  51/*
  52 * How many groups we have for each sctx.
  53 *
  54 * This would be 8M per device, the same value as the old scrub in-flight bios
  55 * size limit.
  56 */
  57#define SCRUB_GROUPS_PER_SCTX		16
  58
  59#define SCRUB_TOTAL_STRIPES		(SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
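/*
 * Worked example (illustrative, assuming BTRFS_STRIPE_LEN is 64KiB): one
 * group covers 8 * 64KiB = 512KiB, and one sctx holds 16 * 8 = 128 stripes,
 * i.e. up to 128 * 64KiB = 8MiB of in-flight data per device.
 */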
  60
  61/*
  62 * The following value times PAGE_SIZE needs to be large enough to match the
  63 * largest node/leaf/sector size that shall be supported.
  64 */
  65#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
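/*
 * For example, with BTRFS_MAX_METADATA_BLOCKSIZE at 64KiB this evaluates to
 * 16, i.e. the largest supported tree block spans at most 16 sectors of
 * 4KiB each.
 */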
  66
   67/* Represents one sector and the info needed to verify its content. */
  68struct scrub_sector_verification {
  69	bool is_metadata;
  70
  71	union {
  72		/*
  73		 * Csum pointer for data csum verification.  Should point to a
  74		 * sector csum inside scrub_stripe::csums.
  75		 *
  76		 * NULL if this data sector has no csum.
  77		 */
  78		u8 *csum;
  79
  80		/*
  81		 * Extra info for metadata verification.  All sectors inside a
  82		 * tree block share the same generation.
  83		 */
  84		u64 generation;
  85	};
  86};
  87
  88enum scrub_stripe_flags {
  89	/* Set when @mirror_num, @dev, @physical and @logical are set. */
  90	SCRUB_STRIPE_FLAG_INITIALIZED,
  91
  92	/* Set when the read-repair is finished. */
  93	SCRUB_STRIPE_FLAG_REPAIR_DONE,
  94
  95	/*
  96	 * Set for data stripes if it's triggered from P/Q stripe.
  97	 * During such scrub, we should not report errors in data stripes, nor
  98	 * update the accounting.
  99	 */
 100	SCRUB_STRIPE_FLAG_NO_REPORT,
 101};
 102
 103#define SCRUB_STRIPE_PAGES		(BTRFS_STRIPE_LEN / PAGE_SIZE)
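/* E.g. 64KiB / 4KiB = 16 pages per stripe on a system with 4KiB pages. */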
 104
 105/*
 106 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 107 */
 108struct scrub_stripe {
 109	struct scrub_ctx *sctx;
 110	struct btrfs_block_group *bg;
 111
 112	struct page *pages[SCRUB_STRIPE_PAGES];
 113	struct scrub_sector_verification *sectors;
 114
 115	struct btrfs_device *dev;
 116	u64 logical;
 117	u64 physical;
 118
 119	u16 mirror_num;
 120
 121	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
 122	u16 nr_sectors;
 123
 124	/*
 125	 * How many data/meta extents are in this stripe.  Only for scrub status
 126	 * reporting purposes.
 127	 */
 128	u16 nr_data_extents;
 129	u16 nr_meta_extents;
 130
 131	atomic_t pending_io;
 132	wait_queue_head_t io_wait;
 133	wait_queue_head_t repair_wait;
 134
 135	/*
 136	 * Indicate the states of the stripe.  Bits are defined in
 137	 * scrub_stripe_flags enum.
 138	 */
 139	unsigned long state;
 140
 141	/* Indicate which sectors are covered by extent items. */
 142	unsigned long extent_sector_bitmap;
 143
 144	/*
 145	 * The errors hit during the initial read of the stripe.
 146	 *
  147	 * Used for error reporting and repair.
  148	 *
  149	 * The remaining init_nr_* counters record the number of errors hit,
  150	 * and are only used by error reporting.
 151	 */
 152	unsigned long init_error_bitmap;
 153	unsigned int init_nr_io_errors;
 154	unsigned int init_nr_csum_errors;
 155	unsigned int init_nr_meta_errors;
 156
 157	/*
 158	 * The following error bitmaps are all for the current status.
 159	 * Every time we submit a new read, these bitmaps may be updated.
 160	 *
 161	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
 162	 *
 163	 * IO and csum errors can happen for both metadata and data.
 164	 */
 165	unsigned long error_bitmap;
 166	unsigned long io_error_bitmap;
 167	unsigned long csum_error_bitmap;
 168	unsigned long meta_error_bitmap;
 169
 170	/* For writeback (repair or replace) error reporting. */
 171	unsigned long write_error_bitmap;
 172
 173	/* Writeback can be concurrent, thus we need to protect the bitmap. */
 174	spinlock_t write_error_lock;
 175
 176	/*
 177	 * Checksum for the whole stripe if this stripe is inside a data block
 178	 * group.
 179	 */
 180	u8 *csums;
 181
 182	struct work_struct work;
 183};
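/*
 * Rough per-stripe memory picture (assuming 4KiB pages and sectors): 16 data
 * pages, 16 scrub_sector_verification entries and, for data block groups,
 * 16 csums of csum_size bytes each.
 */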
 184
 185struct scrub_ctx {
 186	struct scrub_stripe	stripes[SCRUB_TOTAL_STRIPES];
 187	struct scrub_stripe	*raid56_data_stripes;
 188	struct btrfs_fs_info	*fs_info;
 189	struct btrfs_path	extent_path;
 190	struct btrfs_path	csum_path;
 191	int			first_free;
 192	int			cur_stripe;
 193	atomic_t		cancel_req;
 194	int			readonly;
 195
 196	/* State of IO submission throttling affecting the associated device */
 197	ktime_t			throttle_deadline;
 198	u64			throttle_sent;
 199
 200	int			is_dev_replace;
 201	u64			write_pointer;
 202
 203	struct mutex            wr_lock;
 204	struct btrfs_device     *wr_tgtdev;
 205
 206	/*
 207	 * statistics
 208	 */
 209	struct btrfs_scrub_progress stat;
 210	spinlock_t		stat_lock;
 211
 212	/*
 213	 * Use a ref counter to avoid use-after-free issues. Scrub workers
 214	 * decrement bios_in_flight and workers_pending and then do a wakeup
 215	 * on the list_wait wait queue. We must ensure the main scrub task
 216	 * doesn't free the scrub context before or while the workers are
 217	 * doing the wakeup() call.
 218	 */
 219	refcount_t              refs;
 220};
 221
 222struct scrub_warning {
 223	struct btrfs_path	*path;
 224	u64			extent_item_size;
 225	const char		*errstr;
 226	u64			physical;
 227	u64			logical;
 228	struct btrfs_device	*dev;
 229};
 230
 231static void release_scrub_stripe(struct scrub_stripe *stripe)
 232{
 233	if (!stripe)
 234		return;
 235
 236	for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
 237		if (stripe->pages[i])
 238			__free_page(stripe->pages[i]);
 239		stripe->pages[i] = NULL;
 240	}
 241	kfree(stripe->sectors);
 242	kfree(stripe->csums);
 243	stripe->sectors = NULL;
 244	stripe->csums = NULL;
 245	stripe->sctx = NULL;
 246	stripe->state = 0;
 247}
 248
 249static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
 250			     struct scrub_stripe *stripe)
 251{
 252	int ret;
 253
 254	memset(stripe, 0, sizeof(*stripe));
 255
 256	stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
 257	stripe->state = 0;
 258
 259	init_waitqueue_head(&stripe->io_wait);
 260	init_waitqueue_head(&stripe->repair_wait);
 261	atomic_set(&stripe->pending_io, 0);
 262	spin_lock_init(&stripe->write_error_lock);
 263
 264	ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, false);
 265	if (ret < 0)
 266		goto error;
 267
 268	stripe->sectors = kcalloc(stripe->nr_sectors,
 269				  sizeof(struct scrub_sector_verification),
 270				  GFP_KERNEL);
 271	if (!stripe->sectors)
 272		goto error;
 273
 274	stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
 275				fs_info->csum_size, GFP_KERNEL);
 276	if (!stripe->csums)
 277		goto error;
 278	return 0;
 279error:
 280	release_scrub_stripe(stripe);
 281	return -ENOMEM;
 282}
 283
 284static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
 285{
 286	wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
 287}
 288
 289static void scrub_put_ctx(struct scrub_ctx *sctx);
 290
 291static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 292{
 293	while (atomic_read(&fs_info->scrub_pause_req)) {
 294		mutex_unlock(&fs_info->scrub_lock);
 295		wait_event(fs_info->scrub_pause_wait,
 296		   atomic_read(&fs_info->scrub_pause_req) == 0);
 297		mutex_lock(&fs_info->scrub_lock);
 298	}
 299}
 300
 301static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 302{
 303	atomic_inc(&fs_info->scrubs_paused);
 304	wake_up(&fs_info->scrub_pause_wait);
 305}
 306
 307static void scrub_pause_off(struct btrfs_fs_info *fs_info)
 308{
 309	mutex_lock(&fs_info->scrub_lock);
 310	__scrub_blocked_if_needed(fs_info);
 311	atomic_dec(&fs_info->scrubs_paused);
 312	mutex_unlock(&fs_info->scrub_lock);
 313
 314	wake_up(&fs_info->scrub_pause_wait);
 315}
 316
 317static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 318{
 319	scrub_pause_on(fs_info);
 320	scrub_pause_off(fs_info);
 321}
 322
 323static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 324{
 325	int i;
 326
 327	if (!sctx)
 328		return;
 329
 330	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
 331		release_scrub_stripe(&sctx->stripes[i]);
 332
 333	kvfree(sctx);
 334}
 335
 336static void scrub_put_ctx(struct scrub_ctx *sctx)
 337{
 338	if (refcount_dec_and_test(&sctx->refs))
 339		scrub_free_ctx(sctx);
 340}
 341
 342static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 343		struct btrfs_fs_info *fs_info, int is_dev_replace)
 344{
 345	struct scrub_ctx *sctx;
 346	int		i;
 347
  348	/* Since sctx has 128 inline stripes, it can easily go beyond 64K.  Use
 349	 * kvzalloc().
 350	 */
 351	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
 352	if (!sctx)
 353		goto nomem;
 354	refcount_set(&sctx->refs, 1);
 355	sctx->is_dev_replace = is_dev_replace;
 356	sctx->fs_info = fs_info;
 357	sctx->extent_path.search_commit_root = 1;
 358	sctx->extent_path.skip_locking = 1;
 359	sctx->csum_path.search_commit_root = 1;
 360	sctx->csum_path.skip_locking = 1;
 361	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
 362		int ret;
 363
 364		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
 365		if (ret < 0)
 366			goto nomem;
 367		sctx->stripes[i].sctx = sctx;
 368	}
 369	sctx->first_free = 0;
 370	atomic_set(&sctx->cancel_req, 0);
 371
 372	spin_lock_init(&sctx->stat_lock);
 373	sctx->throttle_deadline = 0;
 374
 375	mutex_init(&sctx->wr_lock);
 376	if (is_dev_replace) {
 377		WARN_ON(!fs_info->dev_replace.tgtdev);
 378		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
 379	}
 380
 381	return sctx;
 382
 383nomem:
 384	scrub_free_ctx(sctx);
 385	return ERR_PTR(-ENOMEM);
 386}
 387
 388static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
 389				     u64 root, void *warn_ctx)
 390{
 391	u32 nlink;
 392	int ret;
 393	int i;
 394	unsigned nofs_flag;
 395	struct extent_buffer *eb;
 396	struct btrfs_inode_item *inode_item;
 397	struct scrub_warning *swarn = warn_ctx;
 398	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
 399	struct inode_fs_paths *ipath = NULL;
 400	struct btrfs_root *local_root;
 401	struct btrfs_key key;
 402
 403	local_root = btrfs_get_fs_root(fs_info, root, true);
 404	if (IS_ERR(local_root)) {
 405		ret = PTR_ERR(local_root);
 406		goto err;
 407	}
 408
 409	/*
 410	 * this makes the path point to (inum INODE_ITEM ioff)
 411	 */
 412	key.objectid = inum;
 413	key.type = BTRFS_INODE_ITEM_KEY;
 414	key.offset = 0;
 415
 416	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
 417	if (ret) {
 418		btrfs_put_root(local_root);
 419		btrfs_release_path(swarn->path);
 420		goto err;
 421	}
 422
 423	eb = swarn->path->nodes[0];
 424	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 425					struct btrfs_inode_item);
 426	nlink = btrfs_inode_nlink(eb, inode_item);
 427	btrfs_release_path(swarn->path);
 428
 429	/*
 430	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
 431	 * uses GFP_NOFS in this context, so we keep it consistent but it does
 432	 * not seem to be strictly necessary.
 433	 */
 434	nofs_flag = memalloc_nofs_save();
 435	ipath = init_ipath(4096, local_root, swarn->path);
 436	memalloc_nofs_restore(nofs_flag);
 437	if (IS_ERR(ipath)) {
 438		btrfs_put_root(local_root);
 439		ret = PTR_ERR(ipath);
 440		ipath = NULL;
 441		goto err;
 442	}
 443	ret = paths_from_inode(inum, ipath);
 444
 445	if (ret < 0)
 446		goto err;
 447
 448	/*
  449	 * We deliberately ignore the fact that ipath might have been too small
  450	 * to hold all of the paths here.
 451	 */
 452	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 453		btrfs_warn_in_rcu(fs_info,
 454"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
 455				  swarn->errstr, swarn->logical,
 456				  btrfs_dev_name(swarn->dev),
 457				  swarn->physical,
 458				  root, inum, offset,
 459				  fs_info->sectorsize, nlink,
 460				  (char *)(unsigned long)ipath->fspath->val[i]);
 461
 462	btrfs_put_root(local_root);
 463	free_ipath(ipath);
 464	return 0;
 465
 466err:
 467	btrfs_warn_in_rcu(fs_info,
 468			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
 469			  swarn->errstr, swarn->logical,
 470			  btrfs_dev_name(swarn->dev),
 471			  swarn->physical,
 472			  root, inum, offset, ret);
 473
 474	free_ipath(ipath);
 475	return 0;
 476}
 477
 478static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
 479				       bool is_super, u64 logical, u64 physical)
 480{
 481	struct btrfs_fs_info *fs_info = dev->fs_info;
 482	struct btrfs_path *path;
 483	struct btrfs_key found_key;
 484	struct extent_buffer *eb;
 485	struct btrfs_extent_item *ei;
 486	struct scrub_warning swarn;
 487	u64 flags = 0;
 488	u32 item_size;
 489	int ret;
 490
 491	/* Super block error, no need to search extent tree. */
 492	if (is_super) {
 493		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
 494				  errstr, btrfs_dev_name(dev), physical);
 495		return;
 496	}
 497	path = btrfs_alloc_path();
 498	if (!path)
 499		return;
 500
 501	swarn.physical = physical;
 502	swarn.logical = logical;
 503	swarn.errstr = errstr;
 504	swarn.dev = NULL;
 505
 506	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
 507				  &flags);
 508	if (ret < 0)
 509		goto out;
 510
 511	swarn.extent_item_size = found_key.offset;
 512
 513	eb = path->nodes[0];
 514	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 515	item_size = btrfs_item_size(eb, path->slots[0]);
 516
 517	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 518		unsigned long ptr = 0;
 519		u8 ref_level;
 520		u64 ref_root;
 521
 522		while (true) {
 523			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 524						      item_size, &ref_root,
 525						      &ref_level);
 526			if (ret < 0) {
 527				btrfs_warn(fs_info,
 528				"failed to resolve tree backref for logical %llu: %d",
 529						  swarn.logical, ret);
 530				break;
 531			}
 532			if (ret > 0)
 533				break;
 534			btrfs_warn_in_rcu(fs_info,
 535"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
 536				errstr, swarn.logical, btrfs_dev_name(dev),
 537				swarn.physical, (ref_level ? "node" : "leaf"),
 538				ref_level, ref_root);
 539		}
 540		btrfs_release_path(path);
 541	} else {
 542		struct btrfs_backref_walk_ctx ctx = { 0 };
 543
 544		btrfs_release_path(path);
 545
 546		ctx.bytenr = found_key.objectid;
 547		ctx.extent_item_pos = swarn.logical - found_key.objectid;
 548		ctx.fs_info = fs_info;
 549
 550		swarn.path = path;
 551		swarn.dev = dev;
 552
 553		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
 554	}
 555
 556out:
 557	btrfs_free_path(path);
 558}
 559
 560static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
 561{
 562	int ret = 0;
 563	u64 length;
 564
 565	if (!btrfs_is_zoned(sctx->fs_info))
 566		return 0;
 567
 568	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
 569		return 0;
 570
 571	if (sctx->write_pointer < physical) {
 572		length = physical - sctx->write_pointer;
 573
 574		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
 575						sctx->write_pointer, length);
 576		if (!ret)
 577			sctx->write_pointer = physical;
 578	}
 579	return ret;
 580}
 581
 582static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
 583{
 584	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 585	int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
 586
 587	return stripe->pages[page_index];
 588}
 589
 590static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
 591						 int sector_nr)
 592{
 593	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 594
 595	return offset_in_page(sector_nr << fs_info->sectorsize_bits);
 596}
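/*
 * Example for the two helpers above (assuming 4KiB sectors): sector_nr 5 is
 * byte offset 20KiB into the stripe, i.e. pages[5] at in-page offset 0 with
 * 4KiB pages, or pages[1] at offset 4KiB if PAGE_SIZE were 16KiB.
 */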
 597
 598static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
 599{
 600	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 601	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 602	const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
 603	const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
 604	const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
 605	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 606	u8 on_disk_csum[BTRFS_CSUM_SIZE];
 607	u8 calculated_csum[BTRFS_CSUM_SIZE];
 608	struct btrfs_header *header;
 609
 610	/*
 611	 * Here we don't have a good way to attach the pages (and subpages)
 612	 * to a dummy extent buffer, thus we have to directly grab the members
 613	 * from pages.
 614	 */
 615	header = (struct btrfs_header *)(page_address(first_page) + first_off);
 616	memcpy(on_disk_csum, header->csum, fs_info->csum_size);
 617
 618	if (logical != btrfs_stack_header_bytenr(header)) {
 619		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 620		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 621		btrfs_warn_rl(fs_info,
 622		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
 623			      logical, stripe->mirror_num,
 624			      btrfs_stack_header_bytenr(header), logical);
 625		return;
 626	}
 627	if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
 628		   BTRFS_FSID_SIZE) != 0) {
 629		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 630		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 631		btrfs_warn_rl(fs_info,
 632		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
 633			      logical, stripe->mirror_num,
 634			      header->fsid, fs_info->fs_devices->fsid);
 635		return;
 636	}
 637	if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
 638		   BTRFS_UUID_SIZE) != 0) {
 639		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 640		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 641		btrfs_warn_rl(fs_info,
 642		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
 643			      logical, stripe->mirror_num,
 644			      header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
 645		return;
 646	}
 647
 648	/* Now check tree block csum. */
 649	shash->tfm = fs_info->csum_shash;
 650	crypto_shash_init(shash);
 651	crypto_shash_update(shash, page_address(first_page) + first_off +
 652			    BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
 653
 654	for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
 655		struct page *page = scrub_stripe_get_page(stripe, i);
 656		unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
 657
 658		crypto_shash_update(shash, page_address(page) + page_off,
 659				    fs_info->sectorsize);
 660	}
 661
 662	crypto_shash_final(shash, calculated_csum);
 663	if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
 664		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 665		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 666		btrfs_warn_rl(fs_info,
 667		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
 668			      logical, stripe->mirror_num,
 669			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
 670			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
 671		return;
 672	}
 673	if (stripe->sectors[sector_nr].generation !=
 674	    btrfs_stack_header_generation(header)) {
 675		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 676		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 677		btrfs_warn_rl(fs_info,
 678		"tree block %llu mirror %u has bad generation, has %llu want %llu",
 679			      logical, stripe->mirror_num,
 680			      btrfs_stack_header_generation(header),
 681			      stripe->sectors[sector_nr].generation);
 682		return;
 683	}
 684	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 685	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 686	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 687}
 688
 689static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
 690{
 691	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 692	struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
 693	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 694	struct page *page = scrub_stripe_get_page(stripe, sector_nr);
 695	unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
 696	u8 csum_buf[BTRFS_CSUM_SIZE];
 697	int ret;
 698
 699	ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
 700
 701	/* Sector not utilized, skip it. */
 702	if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
 703		return;
 704
 705	/* IO error, no need to check. */
 706	if (test_bit(sector_nr, &stripe->io_error_bitmap))
 707		return;
 708
 709	/* Metadata, verify the full tree block. */
 710	if (sector->is_metadata) {
 711		/*
  712		 * Check if the tree block crosses the stripe boundary.  If it
  713		 * crosses the boundary, we cannot verify it and only give a
 714		 * warning.
 715		 *
 716		 * This can only happen on a very old filesystem where chunks
 717		 * are not ensured to be stripe aligned.
 718		 */
 719		if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
 720			btrfs_warn_rl(fs_info,
 721			"tree block at %llu crosses stripe boundary %llu",
 722				      stripe->logical +
 723				      (sector_nr << fs_info->sectorsize_bits),
 724				      stripe->logical);
 725			return;
 726		}
 727		scrub_verify_one_metadata(stripe, sector_nr);
 728		return;
 729	}
 730
 731	/*
 732	 * Data is easier, we just verify the data csum (if we have it).  For
 733	 * cases without csum, we have no other choice but to trust it.
 734	 */
 735	if (!sector->csum) {
 736		clear_bit(sector_nr, &stripe->error_bitmap);
 737		return;
 738	}
 739
 740	ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
 741	if (ret < 0) {
 742		set_bit(sector_nr, &stripe->csum_error_bitmap);
 743		set_bit(sector_nr, &stripe->error_bitmap);
 744	} else {
 745		clear_bit(sector_nr, &stripe->csum_error_bitmap);
 746		clear_bit(sector_nr, &stripe->error_bitmap);
 747	}
 748}
 749
 750/* Verify specified sectors of a stripe. */
 751static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
 752{
 753	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 754	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 755	int sector_nr;
 756
 757	for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
 758		scrub_verify_one_sector(stripe, sector_nr);
 759		if (stripe->sectors[sector_nr].is_metadata)
 760			sector_nr += sectors_per_tree - 1;
 761	}
 762}
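/*
 * Example for scrub_verify_one_stripe() above: with a 16KiB nodesize and
 * 4KiB sectorsize, sectors_per_tree is 4, so after verifying a metadata
 * sector the loop skips the remaining 3 sectors of that tree block instead
 * of checking them individually.
 */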
 763
 764static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
 765{
 766	int i;
 767
 768	for (i = 0; i < stripe->nr_sectors; i++) {
 769		if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
 770		    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
 771			break;
 772	}
 773	ASSERT(i < stripe->nr_sectors);
 774	return i;
 775}
 776
 777/*
 778 * Repair read is different to the regular read:
 779 *
 780 * - Only reads the failed sectors
 781 * - May have extra blocksize limits
 782 */
 783static void scrub_repair_read_endio(struct btrfs_bio *bbio)
 784{
 785	struct scrub_stripe *stripe = bbio->private;
 786	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 787	struct bio_vec *bvec;
 788	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
 789	u32 bio_size = 0;
 790	int i;
 791
 792	ASSERT(sector_nr < stripe->nr_sectors);
 793
 794	bio_for_each_bvec_all(bvec, &bbio->bio, i)
 795		bio_size += bvec->bv_len;
 796
 797	if (bbio->bio.bi_status) {
 798		bitmap_set(&stripe->io_error_bitmap, sector_nr,
 799			   bio_size >> fs_info->sectorsize_bits);
 800		bitmap_set(&stripe->error_bitmap, sector_nr,
 801			   bio_size >> fs_info->sectorsize_bits);
 802	} else {
 803		bitmap_clear(&stripe->io_error_bitmap, sector_nr,
 804			     bio_size >> fs_info->sectorsize_bits);
 805	}
 806	bio_put(&bbio->bio);
 807	if (atomic_dec_and_test(&stripe->pending_io))
 808		wake_up(&stripe->io_wait);
 809}
 810
 811static int calc_next_mirror(int mirror, int num_copies)
 812{
 813	ASSERT(mirror <= num_copies);
 814	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
 815}
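/*
 * Example: with num_copies == 3, a stripe that was initially read from
 * mirror 1 is retried on mirrors 2 and 3 via calc_next_mirror(), which then
 * wraps back to 1.
 */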
 816
 817static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
 818					    int mirror, int blocksize, bool wait)
 819{
 820	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 821	struct btrfs_bio *bbio = NULL;
 822	const unsigned long old_error_bitmap = stripe->error_bitmap;
 823	int i;
 824
 825	ASSERT(stripe->mirror_num >= 1);
 826	ASSERT(atomic_read(&stripe->pending_io) == 0);
 827
 828	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
 829		struct page *page;
 830		int pgoff;
 831		int ret;
 832
 833		page = scrub_stripe_get_page(stripe, i);
 834		pgoff = scrub_stripe_get_page_offset(stripe, i);
 835
 836		/* The current sector cannot be merged, submit the bio. */
 837		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
 838			     bbio->bio.bi_iter.bi_size >= blocksize)) {
 839			ASSERT(bbio->bio.bi_iter.bi_size);
 840			atomic_inc(&stripe->pending_io);
 841			btrfs_submit_bbio(bbio, mirror);
 842			if (wait)
 843				wait_scrub_stripe_io(stripe);
 844			bbio = NULL;
 845		}
 846
 847		if (!bbio) {
 848			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
 849				fs_info, scrub_repair_read_endio, stripe);
 850			bbio->bio.bi_iter.bi_sector = (stripe->logical +
 851				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
 852		}
 853
 854		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
 855		ASSERT(ret == fs_info->sectorsize);
 856	}
 857	if (bbio) {
 858		ASSERT(bbio->bio.bi_iter.bi_size);
 859		atomic_inc(&stripe->pending_io);
 860		btrfs_submit_bbio(bbio, mirror);
 861		if (wait)
 862			wait_scrub_stripe_io(stripe);
 863	}
 864}
 865
 866static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
 867				       struct scrub_stripe *stripe)
 868{
 869	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
 870				      DEFAULT_RATELIMIT_BURST);
 871	struct btrfs_fs_info *fs_info = sctx->fs_info;
 872	struct btrfs_device *dev = NULL;
 873	u64 physical = 0;
 874	int nr_data_sectors = 0;
 875	int nr_meta_sectors = 0;
 876	int nr_nodatacsum_sectors = 0;
 877	int nr_repaired_sectors = 0;
 878	int sector_nr;
 879
 880	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
 881		return;
 882
 883	/*
  884	 * Initialize the info needed for error reporting.
  885	 *
  886	 * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio(),
  887	 * which does not need dev/physical, error reporting still needs them.
 888	 */
 889	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
 890		u64 mapped_len = fs_info->sectorsize;
 891		struct btrfs_io_context *bioc = NULL;
 892		int stripe_index = stripe->mirror_num - 1;
 893		int ret;
 894
 895		/* For scrub, our mirror_num should always start at 1. */
 896		ASSERT(stripe->mirror_num >= 1);
 897		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
 898				      stripe->logical, &mapped_len, &bioc,
 899				      NULL, NULL);
 900		/*
 901		 * If we failed, dev will be NULL, and later detailed reports
 902		 * will just be skipped.
 903		 */
 904		if (ret < 0)
 905			goto skip;
 906		physical = bioc->stripes[stripe_index].physical;
 907		dev = bioc->stripes[stripe_index].dev;
 908		btrfs_put_bioc(bioc);
 909	}
 910
 911skip:
 912	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
 913		bool repaired = false;
 914
 915		if (stripe->sectors[sector_nr].is_metadata) {
 916			nr_meta_sectors++;
 917		} else {
 918			nr_data_sectors++;
 919			if (!stripe->sectors[sector_nr].csum)
 920				nr_nodatacsum_sectors++;
 921		}
 922
 923		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
 924		    !test_bit(sector_nr, &stripe->error_bitmap)) {
 925			nr_repaired_sectors++;
 926			repaired = true;
 927		}
 928
  929		/* Good sector from the beginning, nothing needs to be done. */
 930		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
 931			continue;
 932
 933		/*
  934		 * Report errors for the corrupted sectors.  If repaired, just
  935		 * output a message saying the sector has been fixed up.
 936		 */
 937		if (repaired) {
 938			if (dev) {
 939				btrfs_err_rl_in_rcu(fs_info,
 940			"fixed up error at logical %llu on dev %s physical %llu",
 941					    stripe->logical, btrfs_dev_name(dev),
 942					    physical);
 943			} else {
 944				btrfs_err_rl_in_rcu(fs_info,
 945			"fixed up error at logical %llu on mirror %u",
 946					    stripe->logical, stripe->mirror_num);
 947			}
 948			continue;
 949		}
 950
  951		/* The remaining cases are all unrepaired sectors. */
 952		if (dev) {
 953			btrfs_err_rl_in_rcu(fs_info,
 954	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
 955					    stripe->logical, btrfs_dev_name(dev),
 956					    physical);
 957		} else {
 958			btrfs_err_rl_in_rcu(fs_info,
 959	"unable to fixup (regular) error at logical %llu on mirror %u",
 960					    stripe->logical, stripe->mirror_num);
 961		}
 962
 963		if (test_bit(sector_nr, &stripe->io_error_bitmap))
 964			if (__ratelimit(&rs) && dev)
 965				scrub_print_common_warning("i/o error", dev, false,
 966						     stripe->logical, physical);
 967		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
 968			if (__ratelimit(&rs) && dev)
 969				scrub_print_common_warning("checksum error", dev, false,
 970						     stripe->logical, physical);
 971		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
 972			if (__ratelimit(&rs) && dev)
 973				scrub_print_common_warning("header error", dev, false,
 974						     stripe->logical, physical);
 975	}
 976
 977	spin_lock(&sctx->stat_lock);
 978	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
 979	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
 980	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
 981	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
 982	sctx->stat.no_csum += nr_nodatacsum_sectors;
 983	sctx->stat.read_errors += stripe->init_nr_io_errors;
 984	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
 985	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
 986	sctx->stat.uncorrectable_errors +=
 987		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
 988	sctx->stat.corrected_errors += nr_repaired_sectors;
 989	spin_unlock(&sctx->stat_lock);
 990}
 991
 992static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
 993				unsigned long write_bitmap, bool dev_replace);
 994
 995/*
  996 * The main entry point for all read related scrub work, including:
 997 *
 998 * - Wait for the initial read to finish
 999 * - Verify and locate any bad sectors
1000 * - Go through the remaining mirrors and try to read as large blocksize as
1001 *   possible
1002 * - Go through all mirrors (including the failed mirror) sector-by-sector
1003 * - Submit writeback for repaired sectors
1004 *
1005 * Writeback for dev-replace does not happen here, it needs extra
1006 * synchronization for zoned devices.
1007 */
1008static void scrub_stripe_read_repair_worker(struct work_struct *work)
1009{
1010	struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
1011	struct scrub_ctx *sctx = stripe->sctx;
1012	struct btrfs_fs_info *fs_info = sctx->fs_info;
1013	int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1014					  stripe->bg->length);
1015	unsigned long repaired;
1016	int mirror;
1017	int i;
1018
1019	ASSERT(stripe->mirror_num > 0);
1020
1021	wait_scrub_stripe_io(stripe);
1022	scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
1023	/* Save the initial failed bitmap for later repair and report usage. */
1024	stripe->init_error_bitmap = stripe->error_bitmap;
1025	stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
1026						  stripe->nr_sectors);
1027	stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
1028						    stripe->nr_sectors);
1029	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
1030						    stripe->nr_sectors);
1031
1032	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1033		goto out;
1034
1035	/*
1036	 * Try all remaining mirrors.
1037	 *
1038	 * Here we still try to read as large block as possible, as this is
1039	 * faster and we have extra safety nets to rely on.
1040	 */
1041	for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
1042	     mirror != stripe->mirror_num;
1043	     mirror = calc_next_mirror(mirror, num_copies)) {
1044		const unsigned long old_error_bitmap = stripe->error_bitmap;
1045
1046		scrub_stripe_submit_repair_read(stripe, mirror,
1047						BTRFS_STRIPE_LEN, false);
1048		wait_scrub_stripe_io(stripe);
1049		scrub_verify_one_stripe(stripe, old_error_bitmap);
1050		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1051			goto out;
1052	}
1053
1054	/*
1055	 * Last safety net, try re-checking all mirrors, including the failed
1056	 * one, sector-by-sector.
1057	 *
1058	 * As if one sector failed the drive's internal csum, the whole read
1059	 * containing the offending sector would be marked as error.
1060	 * Thus here we do sector-by-sector read.
1061	 *
1062	 * This can be slow, thus we only try it as the last resort.
1063	 */
1064
1065	for (i = 0, mirror = stripe->mirror_num;
1066	     i < num_copies;
1067	     i++, mirror = calc_next_mirror(mirror, num_copies)) {
1068		const unsigned long old_error_bitmap = stripe->error_bitmap;
1069
1070		scrub_stripe_submit_repair_read(stripe, mirror,
1071						fs_info->sectorsize, true);
1072		wait_scrub_stripe_io(stripe);
1073		scrub_verify_one_stripe(stripe, old_error_bitmap);
1074		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1075			goto out;
1076	}
1077out:
1078	/*
1079	 * Submit the repaired sectors.  For zoned case, we cannot do repair
1080	 * in-place, but queue the bg to be relocated.
1081	 */
1082	bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
1083		      stripe->nr_sectors);
1084	if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
1085		if (btrfs_is_zoned(fs_info)) {
1086			btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1087		} else {
1088			scrub_write_sectors(sctx, stripe, repaired, false);
1089			wait_scrub_stripe_io(stripe);
1090		}
1091	}
1092
1093	scrub_stripe_report_errors(sctx, stripe);
1094	set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1095	wake_up(&stripe->repair_wait);
1096}
1097
1098static void scrub_read_endio(struct btrfs_bio *bbio)
1099{
1100	struct scrub_stripe *stripe = bbio->private;
1101	struct bio_vec *bvec;
1102	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1103	int num_sectors;
1104	u32 bio_size = 0;
1105	int i;
1106
1107	ASSERT(sector_nr < stripe->nr_sectors);
1108	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1109		bio_size += bvec->bv_len;
1110	num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
1111
1112	if (bbio->bio.bi_status) {
1113		bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
1114		bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
1115	} else {
1116		bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
1117	}
1118	bio_put(&bbio->bio);
1119	if (atomic_dec_and_test(&stripe->pending_io)) {
1120		wake_up(&stripe->io_wait);
1121		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1122		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1123	}
1124}
1125
1126static void scrub_write_endio(struct btrfs_bio *bbio)
1127{
1128	struct scrub_stripe *stripe = bbio->private;
1129	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1130	struct bio_vec *bvec;
1131	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1132	u32 bio_size = 0;
1133	int i;
1134
1135	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1136		bio_size += bvec->bv_len;
1137
1138	if (bbio->bio.bi_status) {
1139		unsigned long flags;
1140
1141		spin_lock_irqsave(&stripe->write_error_lock, flags);
1142		bitmap_set(&stripe->write_error_bitmap, sector_nr,
1143			   bio_size >> fs_info->sectorsize_bits);
1144		spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1145	}
1146	bio_put(&bbio->bio);
1147
1148	if (atomic_dec_and_test(&stripe->pending_io))
1149		wake_up(&stripe->io_wait);
1150}
1151
1152static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1153				   struct scrub_stripe *stripe,
1154				   struct btrfs_bio *bbio, bool dev_replace)
1155{
1156	struct btrfs_fs_info *fs_info = sctx->fs_info;
1157	u32 bio_len = bbio->bio.bi_iter.bi_size;
1158	u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
1159		      stripe->logical;
1160
1161	fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1162	atomic_inc(&stripe->pending_io);
1163	btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1164	if (!btrfs_is_zoned(fs_info))
1165		return;
1166	/*
1167	 * For zoned writeback, queue depth must be 1, thus we must wait for
1168	 * the write to finish before the next write.
1169	 */
1170	wait_scrub_stripe_io(stripe);
1171
1172	/*
 1173	 * Also update the write pointer if the write finished
 1174	 * successfully.
1175	 */
1176	if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1177		      &stripe->write_error_bitmap))
1178		sctx->write_pointer += bio_len;
1179}
1180
1181/*
1182 * Submit the write bio(s) for the sectors specified by @write_bitmap.
1183 *
1184 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1185 *
1186 * - Only needs logical bytenr and mirror_num
1187 *   Just like the scrub read path
1188 *
1189 * - Would only result in writes to the specified mirror
1190 *   Unlike the regular writeback path, which would write back to all stripes
1191 *
1192 * - Handle dev-replace and read-repair writeback differently
1193 */
1194static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1195				unsigned long write_bitmap, bool dev_replace)
1196{
1197	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1198	struct btrfs_bio *bbio = NULL;
1199	int sector_nr;
1200
1201	for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1202		struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1203		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1204		int ret;
1205
1206		/* We should only writeback sectors covered by an extent. */
1207		ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1208
1209		/* Cannot merge with previous sector, submit the current one. */
1210		if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
1211			scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1212			bbio = NULL;
1213		}
1214		if (!bbio) {
1215			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1216					       fs_info, scrub_write_endio, stripe);
1217			bbio->bio.bi_iter.bi_sector = (stripe->logical +
1218				(sector_nr << fs_info->sectorsize_bits)) >>
1219				SECTOR_SHIFT;
1220		}
1221		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1222		ASSERT(ret == fs_info->sectorsize);
1223	}
1224	if (bbio)
1225		scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1226}
1227
1228/*
1229 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 1230 * second.  Limit can be set via /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
1231 */
1232static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1233				  unsigned int bio_size)
1234{
1235	const int time_slice = 1000;
1236	s64 delta;
1237	ktime_t now;
1238	u32 div;
1239	u64 bwlimit;
1240
1241	bwlimit = READ_ONCE(device->scrub_speed_max);
1242	if (bwlimit == 0)
1243		return;
1244
1245	/*
1246	 * Slice is divided into intervals when the IO is submitted, adjust by
1247	 * bwlimit and maximum of 64 intervals.
1248	 */
1249	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
1250	div = min_t(u32, 64, div);
1251
1252	/* Start new epoch, set deadline */
1253	now = ktime_get();
1254	if (sctx->throttle_deadline == 0) {
1255		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1256		sctx->throttle_sent = 0;
1257	}
1258
1259	/* Still in the time to send? */
1260	if (ktime_before(now, sctx->throttle_deadline)) {
1261		/* If current bio is within the limit, send it */
1262		sctx->throttle_sent += bio_size;
1263		if (sctx->throttle_sent <= div_u64(bwlimit, div))
1264			return;
1265
1266		/* We're over the limit, sleep until the rest of the slice */
1267		delta = ktime_ms_delta(sctx->throttle_deadline, now);
1268	} else {
1269		/* New request after deadline, start new epoch */
1270		delta = 0;
1271	}
1272
1273	if (delta) {
1274		long timeout;
1275
1276		timeout = div_u64(delta * HZ, 1000);
1277		schedule_timeout_interruptible(timeout);
1278	}
1279
1280	/* Next call will start the deadline period */
1281	sctx->throttle_deadline = 0;
1282}
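/*
 * Worked example for the throttling above (assuming scrub_speed_max is set
 * to 100MiB/s): div = min(64, 100MiB / 16MiB) = 6, so each ~166ms interval
 * admits roughly 100MiB / 6 ~= 16.6MiB of submitted IO before the scrub
 * task sleeps until the end of the interval.
 */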
1283
1284/*
 1285 * Given a physical address, this will calculate its
 1286 * logical offset. If this is a parity stripe, it will return
 1287 * the leftmost data stripe's logical offset.
 1288 *
 1289 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
1290 */
1291static int get_raid56_logic_offset(u64 physical, int num,
1292				   struct btrfs_chunk_map *map, u64 *offset,
1293				   u64 *stripe_start)
1294{
1295	int i;
1296	int j = 0;
1297	u64 last_offset;
1298	const int data_stripes = nr_data_stripes(map);
1299
1300	last_offset = (physical - map->stripes[num].physical) * data_stripes;
1301	if (stripe_start)
1302		*stripe_start = last_offset;
1303
1304	*offset = last_offset;
1305	for (i = 0; i < data_stripes; i++) {
1306		u32 stripe_nr;
1307		u32 stripe_index;
1308		u32 rot;
1309
1310		*offset = last_offset + btrfs_stripe_nr_to_offset(i);
1311
1312		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
1313
1314		/* Work out the disk rotation on this stripe-set */
1315		rot = stripe_nr % map->num_stripes;
1316		/* calculate which stripe this data locates */
1317		rot += i;
1318		stripe_index = rot % map->num_stripes;
1319		if (stripe_index == num)
1320			return 0;
1321		if (stripe_index < num)
1322			j++;
1323	}
1324	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
1325	return 1;
1326}
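/*
 * Example (hypothetical 3-disk RAID5 chunk, i.e. two data stripes): for
 * device num == 0 at the very start of the chunk, the first iteration
 * already yields stripe_index == num, so the function returns 0 (data
 * stripe) with *offset == 0.  For a device that holds P/Q at that rotation
 * no iteration matches, so 1 is returned and the caller can skip the
 * parity range.
 */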
1327
1328/*
1329 * Return 0 if the extent item range covers any byte of the range.
1330 * Return <0 if the extent item is before @search_start.
 1331 * Return >0 if the extent item is after @search_start + @search_len.
1332 */
1333static int compare_extent_item_range(struct btrfs_path *path,
1334				     u64 search_start, u64 search_len)
1335{
1336	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1337	u64 len;
1338	struct btrfs_key key;
1339
1340	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1341	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
1342	       key.type == BTRFS_METADATA_ITEM_KEY);
1343	if (key.type == BTRFS_METADATA_ITEM_KEY)
1344		len = fs_info->nodesize;
1345	else
1346		len = key.offset;
1347
1348	if (key.objectid + len <= search_start)
1349		return -1;
1350	if (key.objectid >= search_start + search_len)
1351		return 1;
1352	return 0;
1353}
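/*
 * Example: an EXTENT_ITEM key (1M EXTENT_ITEM 128K) covers [1M, 1M + 128K).
 * A 64K search starting at 1M + 64K overlaps it (return 0), a search
 * starting at 1M + 128K returns -1 (the item ends before the range), and a
 * search range ending at or before 1M returns 1.
 */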
1354
1355/*
1356 * Locate one extent item which covers any byte in range
1357 * [@search_start, @search_start + @search_length)
1358 *
1359 * If the path is not initialized, we will initialize the search by doing
1360 * a btrfs_search_slot().
1361 * If the path is already initialized, we will use the path as the initial
1362 * slot, to avoid duplicated btrfs_search_slot() calls.
1363 *
1364 * NOTE: If an extent item starts before @search_start, we will still
1365 * return the extent item. This is for data extent crossing stripe boundary.
1366 *
1367 * Return 0 if we found such extent item, and @path will point to the extent item.
1368 * Return >0 if no such extent item can be found, and @path will be released.
1369 * Return <0 if hit fatal error, and @path will be released.
1370 */
1371static int find_first_extent_item(struct btrfs_root *extent_root,
1372				  struct btrfs_path *path,
1373				  u64 search_start, u64 search_len)
1374{
1375	struct btrfs_fs_info *fs_info = extent_root->fs_info;
1376	struct btrfs_key key;
1377	int ret;
1378
1379	/* Continue using the existing path */
1380	if (path->nodes[0])
1381		goto search_forward;
1382
1383	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1384		key.type = BTRFS_METADATA_ITEM_KEY;
1385	else
1386		key.type = BTRFS_EXTENT_ITEM_KEY;
1387	key.objectid = search_start;
1388	key.offset = (u64)-1;
1389
1390	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1391	if (ret < 0)
1392		return ret;
1393	if (ret == 0) {
1394		/*
1395		 * Key with offset -1 found, there would have to exist an extent
1396		 * item with such offset, but this is out of the valid range.
1397		 */
1398		btrfs_release_path(path);
1399		return -EUCLEAN;
1400	}
1401
1402	/*
1403	 * Here we intentionally pass 0 as @min_objectid, as there could be
1404	 * an extent item starting before @search_start.
1405	 */
1406	ret = btrfs_previous_extent_item(extent_root, path, 0);
1407	if (ret < 0)
1408		return ret;
1409	/*
1410	 * No matter whether we have found an extent item, the next loop will
1411	 * properly do every check on the key.
1412	 */
1413search_forward:
1414	while (true) {
1415		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1416		if (key.objectid >= search_start + search_len)
1417			break;
1418		if (key.type != BTRFS_METADATA_ITEM_KEY &&
1419		    key.type != BTRFS_EXTENT_ITEM_KEY)
1420			goto next;
1421
1422		ret = compare_extent_item_range(path, search_start, search_len);
1423		if (ret == 0)
1424			return ret;
1425		if (ret > 0)
1426			break;
1427next:
1428		ret = btrfs_next_item(extent_root, path);
1429		if (ret) {
1430			/* Either no more items or a fatal error. */
1431			btrfs_release_path(path);
1432			return ret;
1433		}
1434	}
1435	btrfs_release_path(path);
1436	return 1;
1437}
1438
1439static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1440			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1441{
1442	struct btrfs_key key;
1443	struct btrfs_extent_item *ei;
1444
1445	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1446	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1447	       key.type == BTRFS_EXTENT_ITEM_KEY);
1448	*extent_start_ret = key.objectid;
1449	if (key.type == BTRFS_METADATA_ITEM_KEY)
1450		*size_ret = path->nodes[0]->fs_info->nodesize;
1451	else
1452		*size_ret = key.offset;
1453	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1454	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1455	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1456}
1457
1458static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1459					u64 physical, u64 physical_end)
1460{
1461	struct btrfs_fs_info *fs_info = sctx->fs_info;
1462	int ret = 0;
1463
1464	if (!btrfs_is_zoned(fs_info))
1465		return 0;
1466
1467	mutex_lock(&sctx->wr_lock);
1468	if (sctx->write_pointer < physical_end) {
1469		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1470						    physical,
1471						    sctx->write_pointer);
1472		if (ret)
1473			btrfs_err(fs_info,
1474				  "zoned: failed to recover write pointer");
1475	}
1476	mutex_unlock(&sctx->wr_lock);
1477	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1478
1479	return ret;
1480}
1481
1482static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1483				 struct scrub_stripe *stripe,
1484				 u64 extent_start, u64 extent_len,
1485				 u64 extent_flags, u64 extent_gen)
1486{
1487	for (u64 cur_logical = max(stripe->logical, extent_start);
1488	     cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1489			       extent_start + extent_len);
1490	     cur_logical += fs_info->sectorsize) {
1491		const int nr_sector = (cur_logical - stripe->logical) >>
1492				      fs_info->sectorsize_bits;
1493		struct scrub_sector_verification *sector =
1494						&stripe->sectors[nr_sector];
1495
1496		set_bit(nr_sector, &stripe->extent_sector_bitmap);
1497		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1498			sector->is_metadata = true;
1499			sector->generation = extent_gen;
1500		}
1501	}
1502}
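/*
 * Example: on a 4KiB sectorsize filesystem, a 16KiB data extent starting
 * 4KiB into the stripe sets bits 1-4 of extent_sector_bitmap; a metadata
 * extent additionally marks each covered sector as metadata and records the
 * extent generation for later verification.
 */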
1503
1504static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1505{
1506	stripe->extent_sector_bitmap = 0;
1507	stripe->init_error_bitmap = 0;
1508	stripe->init_nr_io_errors = 0;
1509	stripe->init_nr_csum_errors = 0;
1510	stripe->init_nr_meta_errors = 0;
1511	stripe->error_bitmap = 0;
1512	stripe->io_error_bitmap = 0;
1513	stripe->csum_error_bitmap = 0;
1514	stripe->meta_error_bitmap = 0;
1515}
1516
1517/*
1518 * Locate one stripe which has at least one extent in its range.
1519 *
1520 * Return 0 if found such stripe, and store its info into @stripe.
1521 * Return >0 if there is no such stripe in the specified range.
1522 * Return <0 for error.
1523 */
1524static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1525					struct btrfs_path *extent_path,
1526					struct btrfs_path *csum_path,
1527					struct btrfs_device *dev, u64 physical,
1528					int mirror_num, u64 logical_start,
1529					u32 logical_len,
1530					struct scrub_stripe *stripe)
1531{
1532	struct btrfs_fs_info *fs_info = bg->fs_info;
1533	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1534	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1535	const u64 logical_end = logical_start + logical_len;
1536	u64 cur_logical = logical_start;
1537	u64 stripe_end;
1538	u64 extent_start;
1539	u64 extent_len;
1540	u64 extent_flags;
1541	u64 extent_gen;
1542	int ret;
1543
1544	if (unlikely(!extent_root)) {
1545		btrfs_err(fs_info, "no valid extent root for scrub");
1546		return -EUCLEAN;
1547	}
1548	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1549				   stripe->nr_sectors);
1550	scrub_stripe_reset_bitmaps(stripe);
1551
1552	/* The range must be inside the bg. */
1553	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1554
1555	ret = find_first_extent_item(extent_root, extent_path, logical_start,
1556				     logical_len);
1557	/* Either error or not found. */
1558	if (ret)
1559		goto out;
1560	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
1561			&extent_gen);
1562	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1563		stripe->nr_meta_extents++;
1564	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1565		stripe->nr_data_extents++;
1566	cur_logical = max(extent_start, cur_logical);
1567
1568	/*
1569	 * Round down to stripe boundary.
1570	 *
1571	 * The extra calculation against bg->start is to handle block groups
1572	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1573	 */
1574	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1575			  bg->start;
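	/*
	 * Example (hypothetical, non 64K-aligned bg): with bg->start at 96K
	 * and cur_logical at 200K, the offset into the bg is 104K, which
	 * rounds down to 64K, so stripe->logical becomes 160K, i.e. aligned
	 * relative to the block group rather than absolutely.
	 */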
1576	stripe->physical = physical + stripe->logical - logical_start;
1577	stripe->dev = dev;
1578	stripe->bg = bg;
1579	stripe->mirror_num = mirror_num;
1580	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1581
1582	/* Fill the first extent info into stripe->sectors[] array. */
1583	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1584			     extent_flags, extent_gen);
1585	cur_logical = extent_start + extent_len;
1586
1587	/* Fill the extent info for the remaining sectors. */
1588	while (cur_logical <= stripe_end) {
1589		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
1590					     stripe_end - cur_logical + 1);
1591		if (ret < 0)
1592			goto out;
1593		if (ret > 0) {
1594			ret = 0;
1595			break;
1596		}
1597		get_extent_info(extent_path, &extent_start, &extent_len,
1598				&extent_flags, &extent_gen);
1599		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1600			stripe->nr_meta_extents++;
1601		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1602			stripe->nr_data_extents++;
1603		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1604				     extent_flags, extent_gen);
1605		cur_logical = extent_start + extent_len;
1606	}
1607
1608	/* Now fill the data csum. */
1609	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1610		int sector_nr;
1611		unsigned long csum_bitmap = 0;
1612
1613		/* Csum space should have already been allocated. */
1614		ASSERT(stripe->csums);
1615
1616		/*
1617		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1618		 * should contain at most 16 sectors.
1619		 */
1620		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1621
1622		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
1623						stripe->logical, stripe_end,
1624						stripe->csums, &csum_bitmap);
1625		if (ret < 0)
1626			goto out;
1627		if (ret > 0)
1628			ret = 0;
1629
1630		for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1631			stripe->sectors[sector_nr].csum = stripe->csums +
1632				sector_nr * fs_info->csum_size;
1633		}
1634	}
1635	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1636out:
1637	return ret;
1638}
1639
1640static void scrub_reset_stripe(struct scrub_stripe *stripe)
1641{
1642	scrub_stripe_reset_bitmaps(stripe);
1643
1644	stripe->nr_meta_extents = 0;
1645	stripe->nr_data_extents = 0;
1646	stripe->state = 0;
1647
1648	for (int i = 0; i < stripe->nr_sectors; i++) {
1649		stripe->sectors[i].is_metadata = false;
1650		stripe->sectors[i].csum = NULL;
1651		stripe->sectors[i].generation = 0;
1652	}
1653}
1654
1655static u32 stripe_length(const struct scrub_stripe *stripe)
1656{
1657	ASSERT(stripe->bg);
1658
1659	return min(BTRFS_STRIPE_LEN,
1660		   stripe->bg->start + stripe->bg->length - stripe->logical);
1661}
1662
1663static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
1664{
1665	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1666	struct btrfs_bio *bbio = NULL;
1667	unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
1668	u64 stripe_len = BTRFS_STRIPE_LEN;
1669	int mirror = stripe->mirror_num;
1670	int i;
1671
1672	atomic_inc(&stripe->pending_io);
1673
1674	for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
1675		struct page *page = scrub_stripe_get_page(stripe, i);
1676		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
1677
1678		/* We're beyond the chunk boundary, no need to read anymore. */
1679		if (i >= nr_sectors)
1680			break;
1681
1682		/* The current sector cannot be merged, submit the bio. */
1683		if (bbio &&
1684		    ((i > 0 &&
1685		      !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
1686		     bbio->bio.bi_iter.bi_size >= stripe_len)) {
1687			ASSERT(bbio->bio.bi_iter.bi_size);
1688			atomic_inc(&stripe->pending_io);
1689			btrfs_submit_bbio(bbio, mirror);
1690			bbio = NULL;
1691		}
1692
1693		if (!bbio) {
1694			struct btrfs_io_stripe io_stripe = {};
1695			struct btrfs_io_context *bioc = NULL;
1696			const u64 logical = stripe->logical +
1697					    (i << fs_info->sectorsize_bits);
1698			int err;
1699
1700			io_stripe.rst_search_commit_root = true;
1701			stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
1702			/*
1703			 * For RST cases, we need to manually split the bbio to
1704			 * follow the RST boundary.
1705			 */
1706			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
1707					      &stripe_len, &bioc, &io_stripe, &mirror);
1708			btrfs_put_bioc(bioc);
1709			if (err < 0) {
1710				if (err != -ENODATA) {
1711					/*
1712					 * Earlier btrfs_get_raid_extent_offset()
1713					 * returned -ENODATA, which means there's
1714					 * no entry for the corresponding range
1715					 * in the stripe tree.  But if it's in
1716					 * the extent tree, then it's a preallocated
1717					 * extent and not an error.
1718					 */
1719					set_bit(i, &stripe->io_error_bitmap);
1720					set_bit(i, &stripe->error_bitmap);
1721				}
1722				continue;
1723			}
1724
1725			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
1726					       fs_info, scrub_read_endio, stripe);
1727			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
1728		}
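		/*
		 * The sector's page is added to the bbio below; the bbio is
		 * submitted either when a gap in extent_sector_bitmap is hit or
		 * when it reaches the length returned by btrfs_map_block()
		 * (the RST boundary for RAID stripe tree chunks).
		 */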
1729
1730		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1731	}
1732
1733	if (bbio) {
1734		ASSERT(bbio->bio.bi_iter.bi_size);
1735		atomic_inc(&stripe->pending_io);
1736		btrfs_submit_bbio(bbio, mirror);
1737	}
1738
1739	if (atomic_dec_and_test(&stripe->pending_io)) {
1740		wake_up(&stripe->io_wait);
1741		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1742		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1743	}
1744}
1745
1746static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1747				      struct scrub_stripe *stripe)
1748{
1749	struct btrfs_fs_info *fs_info = sctx->fs_info;
1750	struct btrfs_bio *bbio;
1751	unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
1752	int mirror = stripe->mirror_num;
1753
1754	ASSERT(stripe->bg);
1755	ASSERT(stripe->mirror_num > 0);
1756	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
 
 
1757
1758	if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
1759		scrub_submit_extent_sector_read(stripe);
1760		return;
1761	}
1762
1763	bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1764			       scrub_read_endio, stripe);
1765
1766	bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1767	/* Read the whole range inside the chunk boundary. */
1768	for (unsigned int cur = 0; cur < nr_sectors; cur++) {
1769		struct page *page = scrub_stripe_get_page(stripe, cur);
1770		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
1771		int ret;
1772
1773		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1774		/* We should have allocated enough bio vectors. */
1775		ASSERT(ret == fs_info->sectorsize);
1776	}
1777	atomic_inc(&stripe->pending_io);
1778
1779	/*
1780	 * For dev-replace, if either the user asks to avoid the source dev
1781	 * or the device is missing, we try the next mirror instead.
1782	 */
1783	if (sctx->is_dev_replace &&
1784	    (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1785	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1786	     !stripe->dev->bdev)) {
1787		int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1788						  stripe->bg->length);
1789
1790		mirror = calc_next_mirror(mirror, num_copies);
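		/*
		 * E.g. on a profile with two copies, a read that would go to the
		 * avoided or missing source device as mirror 1 is redirected to
		 * mirror 2 instead; calc_next_mirror() simply advances to the
		 * next copy, wrapping around.
		 */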
1791	}
1792	btrfs_submit_bbio(bbio, mirror);
 
1793}
1794
1795static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1796{
1797	int i;
 
1798
1799	for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1800		if (stripe->sectors[i].is_metadata) {
1801			struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 
1802
1803			btrfs_err(fs_info,
1804			"stripe %llu has unrepaired metadata sector at %llu",
1805				  stripe->logical,
1806				  stripe->logical + (i << fs_info->sectorsize_bits));
1807			return true;
1808		}
1809	}
1810	return false;
1811}
1812
1813static void submit_initial_group_read(struct scrub_ctx *sctx,
1814				      unsigned int first_slot,
1815				      unsigned int nr_stripes)
1816{
1817	struct blk_plug plug;
1818
1819	ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1820	ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1821
1822	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1823			      btrfs_stripe_nr_to_offset(nr_stripes));
1824	blk_start_plug(&plug);
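	/*
	 * Plugging lets the block layer merge and dispatch the per-stripe read
	 * bios issued by the loop below as one batch.
	 */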
1825	for (int i = 0; i < nr_stripes; i++) {
1826		struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1827
1828		/* Those stripes should be initialized. */
1829		ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1830		scrub_submit_initial_read(sctx, stripe);
1831	}
1832	blk_finish_plug(&plug);
1833}
1834
1835static int flush_scrub_stripes(struct scrub_ctx *sctx)
 
1836{
1837	struct btrfs_fs_info *fs_info = sctx->fs_info;
1838	struct scrub_stripe *stripe;
1839	const int nr_stripes = sctx->cur_stripe;
1840	int ret = 0;
1841
1842	if (!nr_stripes)
1843		return 0;
1844
1845	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1846
1847	/* Submit the stripes which are populated but not submitted. */
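	/*
	 * Full groups were already submitted as they filled up (see
	 * queue_scrub_stripe()), so only a trailing partial group can be left
	 * unsubmitted at this point.
	 */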
1848	if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1849		const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1850
1851		submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1852	}
1853
1854	for (int i = 0; i < nr_stripes; i++) {
1855		stripe = &sctx->stripes[i];
1856
1857		wait_event(stripe->repair_wait,
1858			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1859	}
1860
1861	/* Submit for dev-replace. */
1862	if (sctx->is_dev_replace) {
1863		/*
1864		 * For dev-replace, if we know there is something wrong with
1865		 * metadata, we should immediately abort.
1866		 */
1867		for (int i = 0; i < nr_stripes; i++) {
1868			if (stripe_has_metadata_error(&sctx->stripes[i])) {
1869				ret = -EIO;
1870				goto out;
1871			}
 
 
1872		}
1873		for (int i = 0; i < nr_stripes; i++) {
1874			unsigned long good;
1875
1876			stripe = &sctx->stripes[i];
1877
1878			ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1879
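			/*
			 * Write out only the sectors that are covered by an extent
			 * and not marked in the error bitmap, i.e. data verified to
			 * be good on the source device.
			 */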
1880			bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1881				      &stripe->error_bitmap, stripe->nr_sectors);
1882			scrub_write_sectors(sctx, stripe, good, true);
 
 
1883		}
 
 
 
 
1884	}
1885
1886	/* Wait for the above writebacks to finish. */
1887	for (int i = 0; i < nr_stripes; i++) {
1888		stripe = &sctx->stripes[i];
 
1889
1890		wait_scrub_stripe_io(stripe);
1891		spin_lock(&sctx->stat_lock);
1892		sctx->stat.last_physical = stripe->physical + stripe_length(stripe);
1893		spin_unlock(&sctx->stat_lock);
1894		scrub_reset_stripe(stripe);
1895	}
1896out:
1897	sctx->cur_stripe = 0;
1898	return ret;
1899}
1900
1901static void raid56_scrub_wait_endio(struct bio *bio)
1902{
1903	complete(bio->bi_private);
 
 
 
1904}
1905
1906static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1907			      struct btrfs_device *dev, int mirror_num,
1908			      u64 logical, u32 length, u64 physical,
1909			      u64 *found_logical_ret)
1910{
1911	struct scrub_stripe *stripe;
1912	int ret;
 
1913
1914	/*
1915	 * There should always be one slot left, as the caller filling the
1916	 * last slot should flush them all.
1917	 */
1918	ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1919
1920	/* @found_logical_ret must be specified. */
1921	ASSERT(found_logical_ret);
1922
1923	stripe = &sctx->stripes[sctx->cur_stripe];
1924	scrub_reset_stripe(stripe);
1925	ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1926					   &sctx->csum_path, dev, physical,
1927					   mirror_num, logical, length, stripe);
1928	/* Either >0 as no more extents or <0 for error. */
1929	if (ret)
1930		return ret;
1931	*found_logical_ret = stripe->logical;
1932	sctx->cur_stripe++;
1933
1934	/* We filled one group, submit it. */
1935	if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1936		const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1937
1938		submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1939	}
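	/*
	 * Submitting each group as soon as it fills up (above) lets those
	 * reads run while the caller keeps locating extents for the next
	 * group.
	 */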
1940
1941	/* Last slot used, flush them all. */
1942	if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1943		return flush_scrub_stripes(sctx);
1944	return 0;
1945}
1946
1947static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1948				      struct btrfs_device *scrub_dev,
1949				      struct btrfs_block_group *bg,
1950				      struct btrfs_chunk_map *map,
1951				      u64 full_stripe_start)
1952{
1953	DECLARE_COMPLETION_ONSTACK(io_done);
1954	struct btrfs_fs_info *fs_info = sctx->fs_info;
1955	struct btrfs_raid_bio *rbio;
1956	struct btrfs_io_context *bioc = NULL;
1957	struct btrfs_path extent_path = { 0 };
1958	struct btrfs_path csum_path = { 0 };
1959	struct bio *bio;
1960	struct scrub_stripe *stripe;
1961	bool all_empty = true;
1962	const int data_stripes = nr_data_stripes(map);
1963	unsigned long extent_bitmap = 0;
1964	u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1965	int ret;
1966
1967	ASSERT(sctx->raid56_data_stripes);
1968
1969	/*
1970	 * For data stripe search, we cannot reuse the same extent/csum paths,
1971	 * as the data stripe bytenr may be smaller than the previous extent.  Thus
1972	 * we have to use our own extent/csum paths.
1973	 */
1974	extent_path.search_commit_root = 1;
1975	extent_path.skip_locking = 1;
1976	csum_path.search_commit_root = 1;
1977	csum_path.skip_locking = 1;
1978
1979	for (int i = 0; i < data_stripes; i++) {
1980		int stripe_index;
1981		int rot;
1982		u64 physical;
1983
1984		stripe = &sctx->raid56_data_stripes[i];
1985		rot = div_u64(full_stripe_start - bg->start,
1986			      data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1987		stripe_index = (i + rot) % map->num_stripes;
1988		physical = map->stripes[stripe_index].physical +
1989			   btrfs_stripe_nr_to_offset(rot);
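		/*
		 * Illustrative example: on a 3-device RAID5 chunk (2 data + 1
		 * parity), the second full stripe has rot == 1, so data stripe 0
		 * is read from map->stripes[1] and data stripe 1 from
		 * map->stripes[2], each one BTRFS_STRIPE_LEN further into its
		 * device extent.
		 */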
1990
1991		scrub_reset_stripe(stripe);
1992		set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1993		ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1994				map->stripes[stripe_index].dev, physical, 1,
1995				full_stripe_start + btrfs_stripe_nr_to_offset(i),
1996				BTRFS_STRIPE_LEN, stripe);
1997		if (ret < 0)
1998			goto out;
1999		/*
2000		 * No extent in this data stripe, so manually mark it initialized
2001		 * to make the later read submission happy.
2002		 */
2003		if (ret > 0) {
2004			stripe->logical = full_stripe_start +
2005					  btrfs_stripe_nr_to_offset(i);
2006			stripe->dev = map->stripes[stripe_index].dev;
2007			stripe->mirror_num = 1;
2008			set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
2009		}
2010	}
2011
2012	/* Check if all data stripes are empty. */
2013	for (int i = 0; i < data_stripes; i++) {
2014		stripe = &sctx->raid56_data_stripes[i];
2015		if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
2016			all_empty = false;
2017			break;
2018		}
2019	}
2020	if (all_empty) {
2021		ret = 0;
2022		goto out;
2023	}
2024
2025	for (int i = 0; i < data_stripes; i++) {
2026		stripe = &sctx->raid56_data_stripes[i];
2027		scrub_submit_initial_read(sctx, stripe);
2028	}
2029	for (int i = 0; i < data_stripes; i++) {
2030		stripe = &sctx->raid56_data_stripes[i];
2031
2032		wait_event(stripe->repair_wait,
2033			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
 
2034	}
2035	/* For now, no zoned support for RAID56. */
2036	ASSERT(!btrfs_is_zoned(sctx->fs_info));
2037
2038	/*
2039	 * Now all data stripes are properly verified. Check if any sectors are
2040	 * still unrepaired; if so, abort immediately or we could further
2041	 * corrupt the P/Q stripes.
2042	 *
2043	 * During the loop, also populate extent_bitmap.
2044	 */
2045	for (int i = 0; i < data_stripes; i++) {
2046		unsigned long error;
2047
2048		stripe = &sctx->raid56_data_stripes[i];
 
 
 
 
 
2049
2050		/*
2051		 * We should only check the errors where there is an extent, as
2052		 * we may hit an empty data stripe whose device is missing.
2053		 */
2054		bitmap_and(&error, &stripe->error_bitmap,
2055			   &stripe->extent_sector_bitmap, stripe->nr_sectors);
2056		if (!bitmap_empty(&error, stripe->nr_sectors)) {
2057			btrfs_err(fs_info,
2058"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
2059				  full_stripe_start, i, stripe->nr_sectors,
2060				  &error);
2061			ret = -EIO;
2062			goto out;
2063		}
2064		bitmap_or(&extent_bitmap, &extent_bitmap,
2065			  &stripe->extent_sector_bitmap, stripe->nr_sectors);
2066	}
2067
2068	/* Now we can check and regenerate the P/Q stripe. */
2069	bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
2070	bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
2071	bio->bi_private = &io_done;
2072	bio->bi_end_io = raid56_scrub_wait_endio;
2073
2074	btrfs_bio_counter_inc_blocked(fs_info);
2075	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
2076			      &length, &bioc, NULL, NULL);
2077	if (ret < 0) {
2078		btrfs_put_bioc(bioc);
2079		btrfs_bio_counter_dec(fs_info);
2080		goto out;
2081	}
2082	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
2083				BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
2084	btrfs_put_bioc(bioc);
2085	if (!rbio) {
2086		ret = -ENOMEM;
2087		btrfs_bio_counter_dec(fs_info);
2088		goto out;
2089	}
2090	/* Use the recovered stripes as cache to avoid reading them from disk again. */
2091	for (int i = 0; i < data_stripes; i++) {
2092		stripe = &sctx->raid56_data_stripes[i];
2093
2094		raid56_parity_cache_data_pages(rbio, stripe->pages,
2095				full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
2096	}
2097	raid56_parity_submit_scrub_rbio(rbio);
2098	wait_for_completion_io(&io_done);
2099	ret = blk_status_to_errno(bio->bi_status);
2100	bio_put(bio);
2101	btrfs_bio_counter_dec(fs_info);
2102
2103	btrfs_release_path(&extent_path);
2104	btrfs_release_path(&csum_path);
2105out:
2106	return ret;
 
 
2107}
2108
2109/*
2110 * Scrub one range which can only have a simple mirror based profile.
2111 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2112 *  RAID0/RAID10).
2113 *
2114 * Since we may need to handle a subset of a block group, we need the
2115 * @logical_start and @logical_length parameters.
2116 */
2117static int scrub_simple_mirror(struct scrub_ctx *sctx,
2118			       struct btrfs_block_group *bg,
2119			       u64 logical_start, u64 logical_length,
2120			       struct btrfs_device *device,
2121			       u64 physical, int mirror_num)
2122{
2123	struct btrfs_fs_info *fs_info = sctx->fs_info;
2124	const u64 logical_end = logical_start + logical_length;
2125	u64 cur_logical = logical_start;
2126	int ret = 0;
 
 
2127
2128	/* The range must be inside the bg */
2129	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2130
2131	/* Go through each extent item inside the logical range */
2132	while (cur_logical < logical_end) {
2133		u64 found_logical = U64_MAX;
2134		u64 cur_physical = physical + cur_logical - logical_start;
2135
2136		/* Canceled? */
2137		if (atomic_read(&fs_info->scrub_cancel_req) ||
2138		    atomic_read(&sctx->cancel_req)) {
2139			ret = -ECANCELED;
2140			break;
2141		}
2142		/* Paused? */
2143		if (atomic_read(&fs_info->scrub_pause_req)) {
2144			/* Push queued extents */
2145			scrub_blocked_if_needed(fs_info);
2146		}
2147		/* Block group removed? */
2148		spin_lock(&bg->lock);
2149		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2150			spin_unlock(&bg->lock);
2151			ret = 0;
2152			break;
2153		}
2154		spin_unlock(&bg->lock);
2155
2156		ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2157					 cur_logical, logical_end - cur_logical,
2158					 cur_physical, &found_logical);
2159		if (ret > 0) {
2160			/* No more extents, just update the accounting */
2161			spin_lock(&sctx->stat_lock);
2162			sctx->stat.last_physical = physical + logical_length;
2163			spin_unlock(&sctx->stat_lock);
2164			ret = 0;
 
 
 
 
2165			break;
2166		}
2167		if (ret < 0)
2168			break;
2169
2170		/* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2171		ASSERT(found_logical != U64_MAX);
2172		cur_logical = found_logical + BTRFS_STRIPE_LEN;
2173
2174		/* Don't hold the CPU for too long */
2175		cond_resched();
2176	}
2177	return ret;
2178}
2179
2180/* Calculate the full stripe length for simple stripe based profiles */
2181static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
2182{
2183	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2184			    BTRFS_BLOCK_GROUP_RAID10));
2185
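	/*
	 * E.g. a 4-device RAID0 chunk (sub_stripes == 1) has a full stripe of
	 * 4 * BTRFS_STRIPE_LEN, while a 4-device RAID10 chunk (sub_stripes == 2)
	 * spans only 2 * BTRFS_STRIPE_LEN, as each stripe is mirrored on a pair
	 * of devices.
	 */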
2186	return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2187}
2188
2189/* Get the logical bytenr for the stripe */
2190static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
2191				     struct btrfs_block_group *bg,
2192				     int stripe_index)
2193{
2194	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2195			    BTRFS_BLOCK_GROUP_RAID10));
2196	ASSERT(stripe_index < map->num_stripes);
2197
2198	/*
2199	 * (stripe_index / sub_stripes) gives how many data stripes we need to
2200	 * skip.
2201	 */
2202	return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2203	       bg->start;
2204}
2205
2206/* Get the mirror number for the stripe */
2207static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
2208{
2209	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2210			    BTRFS_BLOCK_GROUP_RAID10));
2211	ASSERT(stripe_index < map->num_stripes);
2212
2213	/* For RAID0 it's fixed to 1, for RAID10 it alternates 1,2,1,2... */
2214	return stripe_index % map->sub_stripes + 1;
2215}
2216
2217static int scrub_simple_stripe(struct scrub_ctx *sctx,
2218			       struct btrfs_block_group *bg,
2219			       struct btrfs_chunk_map *map,
2220			       struct btrfs_device *device,
2221			       int stripe_index)
2222{
2223	const u64 logical_increment = simple_stripe_full_stripe_len(map);
2224	const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2225	const u64 orig_physical = map->stripes[stripe_index].physical;
2226	const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2227	u64 cur_logical = orig_logical;
2228	u64 cur_physical = orig_physical;
2229	int ret = 0;
2230
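	/*
	 * E.g. for a 4-device RAID0 chunk this device owns every 4th
	 * BTRFS_STRIPE_LEN of the logical space: the logical cursor advances
	 * by the full stripe length per iteration while the physical cursor
	 * only advances by one BTRFS_STRIPE_LEN.
	 */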
2231	while (cur_logical < bg->start + bg->length) {
2232		/*
2233		 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2234		 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2235		 * this stripe.
2236		 */
2237		ret = scrub_simple_mirror(sctx, bg, cur_logical,
2238					  BTRFS_STRIPE_LEN, device, cur_physical,
2239					  mirror_num);
 
 
 
2240		if (ret)
2241			return ret;
2242		/* Skip to next stripe which belongs to the target device */
2243		cur_logical += logical_increment;
2244		/* For physical offset, we just go to next stripe */
2245		cur_physical += BTRFS_STRIPE_LEN;
2246	}
2247	return ret;
2248}
2249
2250static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2251					   struct btrfs_block_group *bg,
2252					   struct btrfs_chunk_map *map,
2253					   struct btrfs_device *scrub_dev,
2254					   int stripe_index)
2255{
2256	struct btrfs_fs_info *fs_info = sctx->fs_info;
2257	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2258	const u64 chunk_logical = bg->start;
 
2259	int ret;
2260	int ret2;
2261	u64 physical = map->stripes[stripe_index].physical;
2262	const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
2263	const u64 physical_end = physical + dev_stripe_len;
 
 
2264	u64 logical;
2265	u64 logic_end;
2266	/* The logical increment after finishing one stripe */
2267	u64 increment;
2268	/* Offset inside the chunk */
 
 
 
 
2269	u64 offset;
2270	u64 stripe_logical;
2271
2272	/* Extent_path should be released by now. */
2273	ASSERT(sctx->extent_path.nodes[0] == NULL);
2274
2275	scrub_blocked_if_needed(fs_info);
2276
2277	if (sctx->is_dev_replace &&
2278	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2279		mutex_lock(&sctx->wr_lock);
2280		sctx->write_pointer = physical;
2281		mutex_unlock(&sctx->wr_lock);
 
2282	}
 
 
 
2283
2284	/* Prepare the extra data stripes used by RAID56. */
2285	if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2286		ASSERT(sctx->raid56_data_stripes == NULL);
 
 
2287
2288		sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2289						    sizeof(struct scrub_stripe),
2290						    GFP_KERNEL);
2291		if (!sctx->raid56_data_stripes) {
2292			ret = -ENOMEM;
2293			goto out;
2294		}
2295		for (int i = 0; i < nr_data_stripes(map); i++) {
2296			ret = init_scrub_stripe(fs_info,
2297						&sctx->raid56_data_stripes[i]);
2298			if (ret < 0)
2299				goto out;
2300			sctx->raid56_data_stripes[i].bg = bg;
2301			sctx->raid56_data_stripes[i].sctx = sctx;
2302		}
2303	}
2304	/*
2305	 * There used to be a big double loop to handle all profiles using the
2306	 * same routine, which grew larger and more convoluted over time.
2307	 *
2308	 * So here we handle each profile differently, so that simpler profiles
2309	 * have simpler scrubbing functions.
2310	 */
2311	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2312			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2313		/*
2314		 * The above check rules out all complex profiles; the remaining
2315		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2316		 * mirrored duplication without striping.
2317		 *
2318		 * Only @physical and @mirror_num need to be calculated using
2319		 * @stripe_index.
2320		 */
2321		ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length,
2322				scrub_dev, map->stripes[stripe_index].physical,
2323				stripe_index + 1);
2324		offset = 0;
2325		goto out;
2326	}
2327	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2328		ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2329		offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2330		goto out;
2331	}
2332
2333	/* Only RAID56 goes through the old code */
2334	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2335	ret = 0;
2336
2337	/* Calculate the logical end of the stripe */
2338	get_raid56_logic_offset(physical_end, stripe_index,
2339				map, &logic_end, NULL);
2340	logic_end += chunk_logical;
2341
2342	/* Initialize @offset in case we need to go to out: label */
2343	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2344	increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
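	/*
	 * One BTRFS_STRIPE_LEN of physical space on this device corresponds to
	 * a whole full stripe, i.e. nr_data_stripes(map) * BTRFS_STRIPE_LEN of
	 * logical space, hence @increment is the full data-stripe width.
	 */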
2345
2346	/*
2347	 * Due to the rotation, for RAID56 it's better to iterate each stripe
2348	 * using its physical offset.
2349	 */
2350	while (physical < physical_end) {
2351		ret = get_raid56_logic_offset(physical, stripe_index, map,
2352					      &logical, &stripe_logical);
2353		logical += chunk_logical;
2354		if (ret) {
2355			/* It is a parity stripe. */
2356			stripe_logical += chunk_logical;
2357			ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2358							 map, stripe_logical);
2359			spin_lock(&sctx->stat_lock);
2360			sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN,
2361						       physical_end);
2362			spin_unlock(&sctx->stat_lock);
2363			if (ret)
2364				goto out;
2365			goto next;
2366		}
2367
2368		/*
2369		 * Now we're at a data stripe, scrub each extent in the range.
2370		 *
2371		 * At this stage, if we ignore the repair part, inside each data
2372		 * stripe it is no different from the SINGLE profile.
2373		 * We can reuse scrub_simple_mirror() here, as the repair part
2374		 * is still based on @mirror_num.
2375		 */
2376		ret = scrub_simple_mirror(sctx, bg, logical, BTRFS_STRIPE_LEN,
2377					  scrub_dev, physical, 1);
2378		if (ret < 0)
2379			goto out;
2380next:
 
 
 
2381		logical += increment;
2382		physical += BTRFS_STRIPE_LEN;
2383		spin_lock(&sctx->stat_lock);
2384		sctx->stat.last_physical = physical;
2385		spin_unlock(&sctx->stat_lock);
2386	}
2387out:
2388	ret2 = flush_scrub_stripes(sctx);
2389	if (!ret)
2390		ret = ret2;
2391	btrfs_release_path(&sctx->extent_path);
2392	btrfs_release_path(&sctx->csum_path);
2393
2394	if (sctx->raid56_data_stripes) {
2395		for (int i = 0; i < nr_data_stripes(map); i++)
2396			release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2397		kfree(sctx->raid56_data_stripes);
2398		sctx->raid56_data_stripes = NULL;
2399	}
2400
2401	if (sctx->is_dev_replace && ret >= 0) {
2402		int ret2;
2403
2404		ret2 = sync_write_pointer_for_zoned(sctx,
2405				chunk_logical + offset,
2406				map->stripes[stripe_index].physical,
2407				physical_end);
2408		if (ret2)
2409			ret = ret2;
2410	}
 
 
2411
 
 
 
2412	return ret < 0 ? ret : 0;
2413}
2414
2415static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2416					  struct btrfs_block_group *bg,
2417					  struct btrfs_device *scrub_dev,
2418					  u64 dev_offset,
2419					  u64 dev_extent_len)
2420{
2421	struct btrfs_fs_info *fs_info = sctx->fs_info;
2422	struct btrfs_chunk_map *map;
2423	int i;
2424	int ret = 0;
2425
2426	map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
2427	if (!map) {
2428		/*
2429		 * Might have been an unused block group deleted by the cleaner
2430		 * kthread or relocation.
2431		 */
2432		spin_lock(&bg->lock);
2433		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2434			ret = -EINVAL;
2435		spin_unlock(&bg->lock);
2436
2437		return ret;
2438	}
2439	if (map->start != bg->start)
 
 
2440		goto out;
2441	if (map->chunk_len < dev_extent_len)
 
2442		goto out;
2443
2444	for (i = 0; i < map->num_stripes; ++i) {
2445		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2446		    map->stripes[i].physical == dev_offset) {
2447			ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
2448			if (ret)
2449				goto out;
2450		}
2451	}
2452out:
2453	btrfs_free_chunk_map(map);
2454
2455	return ret;
2456}
2457
2458static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2459					  struct btrfs_block_group *cache)
2460{
2461	struct btrfs_fs_info *fs_info = cache->fs_info;
2462
2463	if (!btrfs_is_zoned(fs_info))
2464		return 0;
2465
2466	btrfs_wait_block_group_reservations(cache);
2467	btrfs_wait_nocow_writers(cache);
2468	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
2469
2470	return btrfs_commit_current_transaction(root);
2471}
2472
2473static noinline_for_stack
2474int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2475			   struct btrfs_device *scrub_dev, u64 start, u64 end)
2476{
2477	struct btrfs_dev_extent *dev_extent = NULL;
2478	struct btrfs_path *path;
2479	struct btrfs_fs_info *fs_info = sctx->fs_info;
2480	struct btrfs_root *root = fs_info->dev_root;
 
 
 
2481	u64 chunk_offset;
2482	int ret = 0;
2483	int ro_set;
2484	int slot;
2485	struct extent_buffer *l;
2486	struct btrfs_key key;
2487	struct btrfs_key found_key;
2488	struct btrfs_block_group *cache;
2489	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2490
2491	path = btrfs_alloc_path();
2492	if (!path)
2493		return -ENOMEM;
2494
2495	path->reada = READA_FORWARD;
2496	path->search_commit_root = 1;
2497	path->skip_locking = 1;
2498
2499	key.objectid = scrub_dev->devid;
2500	key.offset = 0ull;
2501	key.type = BTRFS_DEV_EXTENT_KEY;
2502
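	/*
	 * Device extent items are keyed by (devid, BTRFS_DEV_EXTENT_KEY,
	 * physical offset), so this loop walks the target device in physical
	 * order, resuming each search right after the previous device extent.
	 */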
2503	while (1) {
2504		u64 dev_extent_len;
2505
 
2506		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2507		if (ret < 0)
2508			break;
2509		if (ret > 0) {
2510			if (path->slots[0] >=
2511			    btrfs_header_nritems(path->nodes[0])) {
2512				ret = btrfs_next_leaf(root, path);
2513				if (ret < 0)
2514					break;
2515				if (ret > 0) {
2516					ret = 0;
2517					break;
2518				}
2519			} else {
2520				ret = 0;
2521			}
2522		}
2523
2524		l = path->nodes[0];
2525		slot = path->slots[0];
2526
2527		btrfs_item_key_to_cpu(l, &found_key, slot);
2528
2529		if (found_key.objectid != scrub_dev->devid)
2530			break;
2531
2532		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2533			break;
2534
2535		if (found_key.offset >= end)
2536			break;
2537
2538		if (found_key.offset < key.offset)
2539			break;
2540
2541		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2542		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2543
2544		if (found_key.offset + dev_extent_len <= start)
2545			goto skip;
 
 
 
2546
 
 
2547		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2548
2549		/*
2550		 * get a reference on the corresponding block group to prevent
2551		 * the chunk from going away while we scrub it
2552		 */
2553		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2554
2555		/*
2556		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
2557		if (!cache)
2558			goto skip;
2559
2560		ASSERT(cache->start <= chunk_offset);
2561		/*
2562		 * We are using the commit root to search for device extents, so
2563		 * that means we could have found a device extent item from a
2564		 * block group that was deleted in the current transaction. The
2565		 * logical start offset of the deleted block group, stored at
2566		 * @chunk_offset, might be part of the logical address range of
2567		 * a new block group (which uses different physical extents).
2568		 * In this case btrfs_lookup_block_group() has returned the new
2569		 * block group, and its start address is less than @chunk_offset.
2570		 *
2571		 * We skip such new block groups, because it's pointless to
2572		 * process them, as we won't find their extents because we search
2573		 * for them using the commit root of the extent tree. For a device
2574		 * replace it's also fine to skip it, we won't miss copying them
2575		 * to the target device because we have the write duplication
2576		 * setup through the regular write path (by btrfs_map_block()),
2577		 * and we have committed a transaction when we started the device
2578		 * replace, right after setting up the device replace state.
2579		 */
2580		if (cache->start < chunk_offset) {
2581			btrfs_put_block_group(cache);
2582			goto skip;
2583		}
2584
2585		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2586			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2587				btrfs_put_block_group(cache);
2588				goto skip;
2589			}
2590		}
2591
2592		/*
2593		 * Make sure that while we are scrubbing the corresponding block
2594		 * group doesn't get its logical address and its device extents
2595		 * reused for another block group, which can possibly be of a
2596		 * different type and different profile. We do this to prevent
2597		 * false error detections and crashes due to bogus attempts to
2598		 * repair extents.
2599		 */
2600		spin_lock(&cache->lock);
2601		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2602			spin_unlock(&cache->lock);
2603			btrfs_put_block_group(cache);
2604			goto skip;
2605		}
2606		btrfs_freeze_block_group(cache);
2607		spin_unlock(&cache->lock);
2608
2609		/*
2610		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
2611		 * to avoid deadlock caused by:
2612		 * btrfs_inc_block_group_ro()
2613		 * -> btrfs_wait_for_commit()
2614		 * -> btrfs_commit_transaction()
2615		 * -> btrfs_scrub_pause()
2616		 */
2617		scrub_pause_on(fs_info);
2618
2619		/*
2620		 * Don't do chunk preallocation for scrub.
2621		 *
2622		 * This is especially important for SYSTEM bgs, or we can hit
2623		 * -EFBIG from btrfs_finish_chunk_alloc() like:
2624		 * 1. The only SYSTEM bg is marked RO.
2625		 *    Since SYSTEM bg is small, that's pretty common.
2626		 * 2. A new SYSTEM bg will be allocated
2627		 *    Because the regular version would allocate a new chunk.
2628		 * 3. The new SYSTEM bg is empty and will get cleaned up
2629		 *    Before the cleanup really happens, it's marked RO again.
2630		 * 4. The empty SYSTEM bg gets scrubbed
2631		 *    We go back to 2.
2632		 *
2633		 * This can easily boost the number of SYSTEM chunks if the cleaner
2634		 * thread can't be triggered fast enough, using up all the space
2635		 * of btrfs_super_block::sys_chunk_array.
2636		 *
2637		 * While for dev replace, we need to try our best to mark block
2638		 * group RO, to prevent race between:
2639		 * - Write duplication
2640		 *   Contains latest data
2641		 * - Scrub copy
2642		 *   Contains data from commit tree
2643		 *
2644		 * If target block group is not marked RO, nocow writes can
2645		 * be overwritten by scrub copy, causing data corruption.
2646		 * So for dev-replace, it's not allowed to continue if a block
2647		 * group is not RO.
2648		 */
2649		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2650		if (!ret && sctx->is_dev_replace) {
2651			ret = finish_extent_writes_for_zoned(root, cache);
2652			if (ret) {
2653				btrfs_dec_block_group_ro(cache);
2654				scrub_pause_off(fs_info);
2655				btrfs_put_block_group(cache);
2656				break;
2657			}
2658		}
2659
2660		if (ret == 0) {
2661			ro_set = 1;
2662		} else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2663			   !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2664			/*
2665			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
2666			 * fails to create a new chunk for metadata.
2667			 * It is not a problem for scrub, because
2668			 * metadata is always cowed, and our scrub paused
2669			 * commit_transactions.
2670			 *
2671			 * For RAID56 chunks, we have to mark them read-only
2672			 * for scrub, as later we would use our own cache
2673			 * out of the RAID56 realm.
2674			 * Thus we want the RAID56 bg to be marked RO to
2675			 * prevent RMW from screwing up our cache.
2676			 */
2677			ro_set = 0;
2678		} else if (ret == -ETXTBSY) {
2679			btrfs_warn(fs_info,
2680		   "skipping scrub of block group %llu due to active swapfile",
2681				   cache->start);
2682			scrub_pause_off(fs_info);
2683			ret = 0;
2684			goto skip_unfreeze;
2685		} else {
2686			btrfs_warn(fs_info,
2687				   "failed setting block group ro: %d", ret);
2688			btrfs_unfreeze_block_group(cache);
2689			btrfs_put_block_group(cache);
2690			scrub_pause_off(fs_info);
2691			break;
2692		}
2693
2694		/*
2695		 * Now the target block group is marked RO, wait for nocow writes
2696		 * to finish before dev-replace.
2697		 * COW is fine, as COW never overwrites extents in the commit tree.
2698		 */
2699		if (sctx->is_dev_replace) {
2700			btrfs_wait_nocow_writers(cache);
2701			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
2702		}
2703
2704		scrub_pause_off(fs_info);
2705		down_write(&dev_replace->rwsem);
2706		dev_replace->cursor_right = found_key.offset + dev_extent_len;
2707		dev_replace->cursor_left = found_key.offset;
2708		dev_replace->item_needs_writeback = 1;
2709		up_write(&dev_replace->rwsem);
2710
2711		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2712				  dev_extent_len);
2713		if (sctx->is_dev_replace &&
2714		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2715						      cache, found_key.offset))
2716			ro_set = 0;
2717
2718		down_write(&dev_replace->rwsem);
2719		dev_replace->cursor_left = dev_replace->cursor_right;
2720		dev_replace->item_needs_writeback = 1;
2721		up_write(&dev_replace->rwsem);
2722
2723		if (ro_set)
2724			btrfs_dec_block_group_ro(cache);
2725
2726		/*
2727		 * We might have prevented the cleaner kthread from deleting
2728		 * this block group if it was already unused because we raced
2729		 * and set it to RO mode first. So add it back to the unused
2730		 * list, otherwise it might not ever be deleted unless a manual
2731		 * balance is triggered or it becomes used and unused again.
2732		 */
2733		spin_lock(&cache->lock);
2734		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2735		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
2736			spin_unlock(&cache->lock);
2737			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2738				btrfs_discard_queue_work(&fs_info->discard_ctl,
2739							 cache);
2740			else
2741				btrfs_mark_bg_unused(cache);
2742		} else {
2743			spin_unlock(&cache->lock);
2744		}
2745skip_unfreeze:
2746		btrfs_unfreeze_block_group(cache);
2747		btrfs_put_block_group(cache);
2748		if (ret)
2749			break;
2750		if (sctx->is_dev_replace &&
2751		    atomic64_read(&dev_replace->num_write_errors) > 0) {
2752			ret = -EIO;
2753			break;
2754		}
2755		if (sctx->stat.malloc_errors > 0) {
2756			ret = -ENOMEM;
2757			break;
2758		}
2759skip:
2760		key.offset = found_key.offset + dev_extent_len;
2761		btrfs_release_path(path);
2762	}
2763
2764	btrfs_free_path(path);
2765
2766	return ret;
2767}
2768
2769static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2770			   struct page *page, u64 physical, u64 generation)
2771{
2772	struct btrfs_fs_info *fs_info = sctx->fs_info;
2773	struct bio_vec bvec;
2774	struct bio bio;
2775	struct btrfs_super_block *sb = page_address(page);
2776	int ret;
2777
2778	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2779	bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2780	__bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2781	ret = submit_bio_wait(&bio);
2782	bio_uninit(&bio);
2783
2784	if (ret < 0)
2785		return ret;
2786	ret = btrfs_check_super_csum(fs_info, sb);
2787	if (ret != 0) {
2788		btrfs_err_rl(fs_info,
2789			"super block at physical %llu devid %llu has bad csum",
2790			physical, dev->devid);
2791		return -EIO;
2792	}
2793	if (btrfs_super_generation(sb) != generation) {
2794		btrfs_err_rl(fs_info,
2795"super block at physical %llu devid %llu has bad generation %llu expect %llu",
2796			     physical, dev->devid,
2797			     btrfs_super_generation(sb), generation);
2798		return -EUCLEAN;
2799	}
2800
2801	return btrfs_validate_super(fs_info, sb, -1);
2802}
2803
2804static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2805					   struct btrfs_device *scrub_dev)
2806{
2807	int	i;
2808	u64	bytenr;
2809	u64	gen;
2810	int ret = 0;
2811	struct page *page;
2812	struct btrfs_fs_info *fs_info = sctx->fs_info;
2813
2814	if (BTRFS_FS_ERROR(fs_info))
2815		return -EROFS;
2816
2817	page = alloc_page(GFP_KERNEL);
2818	if (!page) {
2819		spin_lock(&sctx->stat_lock);
2820		sctx->stat.malloc_errors++;
2821		spin_unlock(&sctx->stat_lock);
2822		return -ENOMEM;
2823	}
2824
2825	/* Seed devices of a new filesystem have their own generation. */
2826	if (scrub_dev->fs_devices != fs_info->fs_devices)
2827		gen = scrub_dev->generation;
2828	else
2829		gen = btrfs_get_last_trans_committed(fs_info);
2830
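	/*
	 * Check every super block copy the device can hold.  On regular
	 * (non-zoned) devices these sit at fixed offsets (64KiB, 64MiB, 256GiB);
	 * btrfs_sb_log_location() also resolves the zoned placement, and copies
	 * beyond the committed device size are not checked.
	 */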
2831	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2832		ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
2833		if (ret == -ENOENT)
2834			break;
2835
2836		if (ret) {
2837			spin_lock(&sctx->stat_lock);
2838			sctx->stat.super_errors++;
2839			spin_unlock(&sctx->stat_lock);
2840			continue;
2841		}
2842
2843		if (bytenr + BTRFS_SUPER_INFO_SIZE >
2844		    scrub_dev->commit_total_bytes)
2845			break;
2846		if (!btrfs_check_super_location(scrub_dev, bytenr))
2847			continue;
2848
2849		ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2850		if (ret) {
2851			spin_lock(&sctx->stat_lock);
2852			sctx->stat.super_errors++;
2853			spin_unlock(&sctx->stat_lock);
2854		}
2855	}
2856	__free_page(page);
2857	return 0;
2858}
2859
2860static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2861{
2862	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2863					&fs_info->scrub_lock)) {
2864		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2865
2866		fs_info->scrub_workers = NULL;
2867		mutex_unlock(&fs_info->scrub_lock);
2868
2869		if (scrub_workers)
2870			destroy_workqueue(scrub_workers);
2871	}
2872}
2873
2874/*
2875 * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2876 */
2877static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2878{
2879	struct workqueue_struct *scrub_workers = NULL;
2880	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2881	int max_active = fs_info->thread_pool_size;
2882	int ret = -ENOMEM;
2883
2884	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2885		return 0;
2886
2887	scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2888	if (!scrub_workers)
2889		return -ENOMEM;
2890
2891	mutex_lock(&fs_info->scrub_lock);
2892	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2893		ASSERT(fs_info->scrub_workers == NULL);
2894		fs_info->scrub_workers = scrub_workers;
2895		refcount_set(&fs_info->scrub_workers_refcnt, 1);
2896		mutex_unlock(&fs_info->scrub_lock);
2897		return 0;
 
2898	}
2899	/* Other thread raced in and created the workers for us */
2900	refcount_inc(&fs_info->scrub_workers_refcnt);
2901	mutex_unlock(&fs_info->scrub_lock);
2902
2903	ret = 0;
2904
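	/* Drop the workqueue we allocated, the racing thread's is already installed. */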
2905	destroy_workqueue(scrub_workers);
2906	return ret;
2907}
2908
2909int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2910		    u64 end, struct btrfs_scrub_progress *progress,
2911		    int readonly, int is_dev_replace)
2912{
2913	struct btrfs_dev_lookup_args args = { .devid = devid };
2914	struct scrub_ctx *sctx;
2915	int ret;
2916	struct btrfs_device *dev;
2917	unsigned int nofs_flag;
2918	bool need_commit = false;
2919
2920	if (btrfs_fs_closing(fs_info))
2921		return -EAGAIN;
2922
2923	/* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2924	ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2925
2926	/*
2927	 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2928	 * value (max nodesize / min sectorsize), thus nodesize should always
2929	 * be fine.
2930	 */
2931	ASSERT(fs_info->nodesize <=
2932	       SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2933
2934	/* Allocate outside of device_list_mutex */
2935	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2936	if (IS_ERR(sctx))
2937		return PTR_ERR(sctx);
 
 
 
2938
2939	ret = scrub_workers_get(fs_info);
2940	if (ret)
2941		goto out_free_ctx;
2942
2943	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2944	dev = btrfs_find_device(fs_info->fs_devices, &args);
2945	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2946		     !is_dev_replace)) {
2947		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2948		ret = -ENODEV;
2949		goto out;
2950	}
 
2951
2952	if (!is_dev_replace && !readonly &&
2953	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2954		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2955		btrfs_err_in_rcu(fs_info,
2956			"scrub on devid %llu: filesystem on %s is not writable",
2957				 devid, btrfs_dev_name(dev));
2958		ret = -EROFS;
2959		goto out;
2960	}
2961
2962	mutex_lock(&fs_info->scrub_lock);
2963	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2964	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2965		mutex_unlock(&fs_info->scrub_lock);
2966		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2967		ret = -EIO;
2968		goto out;
2969	}
2970
2971	down_read(&fs_info->dev_replace.rwsem);
2972	if (dev->scrub_ctx ||
2973	    (!is_dev_replace &&
2974	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2975		up_read(&fs_info->dev_replace.rwsem);
2976		mutex_unlock(&fs_info->scrub_lock);
2977		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2978		ret = -EINPROGRESS;
2979		goto out;
2980	}
2981	up_read(&fs_info->dev_replace.rwsem);
2982
2983	sctx->readonly = readonly;
2984	dev->scrub_ctx = sctx;
2985	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2986
2987	/*
2988	 * By checking @scrub_pause_req here, we can avoid a race between
2989	 * committing a transaction and scrubbing.
2990	 */
2991	__scrub_blocked_if_needed(fs_info);
2992	atomic_inc(&fs_info->scrubs_running);
2993	mutex_unlock(&fs_info->scrub_lock);
 
2994
2995	/*
2996	 * In order to avoid deadlock with reclaim when there is a transaction
2997	 * trying to pause scrub, make sure we use GFP_NOFS for all the
2998	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2999	 * invoked by our callees. The pausing request is done when the
3000	 * transaction commit starts, and it blocks the transaction until scrub
3001	 * is paused (done at specific points at scrub_stripe() or right above
3002	 * before incrementing fs_info->scrubs_running).
3003	 */
3004	nofs_flag = memalloc_nofs_save();
3005	if (!is_dev_replace) {
3006		u64 old_super_errors;
3007
3008		spin_lock(&sctx->stat_lock);
3009		old_super_errors = sctx->stat.super_errors;
3010		spin_unlock(&sctx->stat_lock);
3011
3012		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
3013		/*
3014		 * By holding the device list mutex, we avoid racing with super
3015		 * block writes kicked off by a log tree sync.
3016		 */
3017		mutex_lock(&fs_info->fs_devices->device_list_mutex);
3018		ret = scrub_supers(sctx, dev);
3019		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3020
3021		spin_lock(&sctx->stat_lock);
3022		/*
3023		 * Super block errors found, but we cannot commit a transaction
3024		 * in the current context, since btrfs_commit_transaction() needs
3025		 * to pause the currently running scrub (held by ourselves).
3026		 */
3027		if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
3028			need_commit = true;
3029		spin_unlock(&sctx->stat_lock);
3030	}
3031
3032	if (!ret)
3033		ret = scrub_enumerate_chunks(sctx, dev, start, end);
3034	memalloc_nofs_restore(nofs_flag);
3035
 
3036	atomic_dec(&fs_info->scrubs_running);
3037	wake_up(&fs_info->scrub_pause_wait);
3038
3039	if (progress)
3040		memcpy(progress, &sctx->stat, sizeof(*progress));
3041
3042	if (!is_dev_replace)
3043		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3044			ret ? "not finished" : "finished", devid, ret);
3045
3046	mutex_lock(&fs_info->scrub_lock);
3047	dev->scrub_ctx = NULL;
3048	mutex_unlock(&fs_info->scrub_lock);
3049
3050	scrub_workers_put(fs_info);
3051	scrub_put_ctx(sctx);
3052
3053	/*
3054	 * We found some super block errors before, now try to force a
3055	 * transaction commit, as scrub has finished.
3056	 */
3057	if (need_commit) {
3058		struct btrfs_trans_handle *trans;
3059
3060		trans = btrfs_start_transaction(fs_info->tree_root, 0);
3061		if (IS_ERR(trans)) {
3062			ret = PTR_ERR(trans);
3063			btrfs_err(fs_info,
3064	"scrub: failed to start transaction to fix super block errors: %d", ret);
3065			return ret;
3066		}
3067		ret = btrfs_commit_transaction(trans);
3068		if (ret < 0)
3069			btrfs_err(fs_info,
3070	"scrub: failed to commit transaction to fix super block errors: %d", ret);
3071	}
3072	return ret;
3073out:
3074	scrub_workers_put(fs_info);
3075out_free_ctx:
3076	scrub_free_ctx(sctx);
3077
3078	return ret;
3079}
3080
3081void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3082{
 
 
3083	mutex_lock(&fs_info->scrub_lock);
3084	atomic_inc(&fs_info->scrub_pause_req);
3085	while (atomic_read(&fs_info->scrubs_paused) !=
3086	       atomic_read(&fs_info->scrubs_running)) {
3087		mutex_unlock(&fs_info->scrub_lock);
3088		wait_event(fs_info->scrub_pause_wait,
3089			   atomic_read(&fs_info->scrubs_paused) ==
3090			   atomic_read(&fs_info->scrubs_running));
3091		mutex_lock(&fs_info->scrub_lock);
3092	}
3093	mutex_unlock(&fs_info->scrub_lock);
3094}
3095
3096void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3097{
 
 
3098	atomic_dec(&fs_info->scrub_pause_req);
3099	wake_up(&fs_info->scrub_pause_wait);
3100}
3101
3102int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
 
 
 
 
 
3103{
 
 
 
 
 
 
3104	mutex_lock(&fs_info->scrub_lock);
3105	if (!atomic_read(&fs_info->scrubs_running)) {
3106		mutex_unlock(&fs_info->scrub_lock);
3107		return -ENOTCONN;
3108	}
3109
3110	atomic_inc(&fs_info->scrub_cancel_req);
3111	while (atomic_read(&fs_info->scrubs_running)) {
3112		mutex_unlock(&fs_info->scrub_lock);
3113		wait_event(fs_info->scrub_pause_wait,
3114			   atomic_read(&fs_info->scrubs_running) == 0);
3115		mutex_lock(&fs_info->scrub_lock);
3116	}
3117	atomic_dec(&fs_info->scrub_cancel_req);
3118	mutex_unlock(&fs_info->scrub_lock);
3119
3120	return 0;
3121}
3122
3123int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3124{
3125	struct btrfs_fs_info *fs_info = dev->fs_info;
3126	struct scrub_ctx *sctx;
 
 
 
 
 
3127
3128	mutex_lock(&fs_info->scrub_lock);
3129	sctx = dev->scrub_ctx;
3130	if (!sctx) {
3131		mutex_unlock(&fs_info->scrub_lock);
3132		return -ENOTCONN;
3133	}
3134	atomic_inc(&sctx->cancel_req);
3135	while (dev->scrub_ctx) {
3136		mutex_unlock(&fs_info->scrub_lock);
3137		wait_event(fs_info->scrub_pause_wait,
3138			   dev->scrub_ctx == NULL);
3139		mutex_lock(&fs_info->scrub_lock);
3140	}
3141	mutex_unlock(&fs_info->scrub_lock);
3142
3143	return 0;
3144}
3145
3146int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3147			 struct btrfs_scrub_progress *progress)
3148{
3149	struct btrfs_dev_lookup_args args = { .devid = devid };
3150	struct btrfs_device *dev;
3151	struct scrub_ctx *sctx = NULL;
3152
 
 
 
 
3153	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3154	dev = btrfs_find_device(fs_info->fs_devices, &args);
3155	if (dev)
3156		sctx = dev->scrub_ctx;
3157	if (sctx)
3158		memcpy(progress, &sctx->stat, sizeof(*progress));
 
3159	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3160
3161	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3162}
v3.5.6
 
   1/*
   2 * Copyright (C) 2011 STRATO.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/blkdev.h>
  20#include <linux/ratelimit.h>
 
 
  21#include "ctree.h"
 
  22#include "volumes.h"
  23#include "disk-io.h"
  24#include "ordered-data.h"
  25#include "transaction.h"
  26#include "backref.h"
  27#include "extent_io.h"
  28#include "check-integrity.h"
  29#include "rcu-string.h"
 
 
 
 
 
 
 
  30
  31/*
  32 * This is only the first step towards a full-features scrub. It reads all
  33 * extent and super block and verifies the checksums. In case a bad checksum
  34 * is found or the extent cannot be read, good data will be written back if
  35 * any can be found.
  36 *
  37 * Future enhancements:
  38 *  - In case an unrepairable extent is encountered, track which files are
  39 *    affected and report them
  40 *  - track and record media errors, throw out bad devices
  41 *  - add a mode to also read unallocated space
  42 */
  43
  44struct scrub_block;
  45struct scrub_dev;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46
  47#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
  48#define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */
  49#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
  50
  51struct scrub_page {
  52	struct scrub_block	*sblock;
  53	struct page		*page;
  54	struct btrfs_device	*dev;
  55	u64			flags;  /* extent flags */
  56	u64			generation;
  57	u64			logical;
  58	u64			physical;
  59	struct {
  60		unsigned int	mirror_num:8;
  61		unsigned int	have_csum:1;
  62		unsigned int	io_error:1;
  63	};
  64	u8			csum[BTRFS_CSUM_SIZE];
  65};
  66
  67struct scrub_bio {
  68	int			index;
  69	struct scrub_dev	*sdev;
  70	struct bio		*bio;
  71	int			err;
  72	u64			logical;
  73	u64			physical;
  74	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
  75	int			page_count;
  76	int			next_free;
  77	struct btrfs_work	work;
 
 
  78};
  79
  80struct scrub_block {
  81	struct scrub_page	pagev[SCRUB_MAX_PAGES_PER_BLOCK];
  82	int			page_count;
  83	atomic_t		outstanding_pages;
  84	atomic_t		ref_count; /* free mem on transition to zero */
  85	struct scrub_dev	*sdev;
  86	struct {
  87		unsigned int	header_error:1;
  88		unsigned int	checksum_error:1;
  89		unsigned int	no_io_error_seen:1;
  90		unsigned int	generation_error:1; /* also sets header_error */
  91	};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  92};
  93
  94struct scrub_dev {
  95	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
  96	struct btrfs_device	*dev;
 
 
 
  97	int			first_free;
  98	int			curr;
  99	atomic_t		in_flight;
 100	atomic_t		fixup_cnt;
 101	spinlock_t		list_lock;
 102	wait_queue_head_t	list_wait;
 103	u16			csum_size;
 104	struct list_head	csum_list;
 105	atomic_t		cancel_req;
 106	int			readonly;
 107	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
 108	u32			sectorsize;
 109	u32			nodesize;
 110	u32			leafsize;
 
 
 
 
 
 
 
 111	/*
 112	 * statistics
 113	 */
 114	struct btrfs_scrub_progress stat;
 115	spinlock_t		stat_lock;
 116};
 117
 118struct scrub_fixup_nodatasum {
 119	struct scrub_dev	*sdev;
 120	u64			logical;
 121	struct btrfs_root	*root;
 122	struct btrfs_work	work;
 123	int			mirror_num;
 
 
 124};
 125
 126struct scrub_warning {
 127	struct btrfs_path	*path;
 128	u64			extent_item_size;
 129	char			*scratch_buf;
 130	char			*msg_buf;
 131	const char		*errstr;
 132	sector_t		sector;
 133	u64			logical;
 134	struct btrfs_device	*dev;
 135	int			msg_bufsize;
 136	int			scratch_bufsize;
 137};
 138
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 139
 140static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
 141static int scrub_setup_recheck_block(struct scrub_dev *sdev,
 142				     struct btrfs_mapping_tree *map_tree,
 143				     u64 length, u64 logical,
 144				     struct scrub_block *sblock);
 145static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
 146			       struct scrub_block *sblock, int is_metadata,
 147			       int have_csum, u8 *csum, u64 generation,
 148			       u16 csum_size);
 149static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 150					 struct scrub_block *sblock,
 151					 int is_metadata, int have_csum,
 152					 const u8 *csum, u64 generation,
 153					 u16 csum_size);
 154static void scrub_complete_bio_end_io(struct bio *bio, int err);
 155static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 156					     struct scrub_block *sblock_good,
 157					     int force_write);
 158static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 159					    struct scrub_block *sblock_good,
 160					    int page_num, int force_write);
 161static int scrub_checksum_data(struct scrub_block *sblock);
 162static int scrub_checksum_tree_block(struct scrub_block *sblock);
 163static int scrub_checksum_super(struct scrub_block *sblock);
 164static void scrub_block_get(struct scrub_block *sblock);
 165static void scrub_block_put(struct scrub_block *sblock);
 166static int scrub_add_page_to_bio(struct scrub_dev *sdev,
 167				 struct scrub_page *spage);
 168static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
 169		       u64 physical, u64 flags, u64 gen, int mirror_num,
 170		       u8 *csum, int force);
 171static void scrub_bio_end_io(struct bio *bio, int err);
 172static void scrub_bio_end_io_worker(struct btrfs_work *work);
 173static void scrub_block_complete(struct scrub_block *sblock);
 174
 175
 176static void scrub_free_csums(struct scrub_dev *sdev)
 177{
 178	while (!list_empty(&sdev->csum_list)) {
 179		struct btrfs_ordered_sum *sum;
 180		sum = list_first_entry(&sdev->csum_list,
 181				       struct btrfs_ordered_sum, list);
 182		list_del(&sum->list);
 183		kfree(sum);
 184	}
 185}
 186
 187static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 188{
 189	int i;
 190
 191	if (!sdev)
 192		return;
 193
 194	/* this can happen when scrub is cancelled */
 195	if (sdev->curr != -1) {
 196		struct scrub_bio *sbio = sdev->bios[sdev->curr];
 197
 198		for (i = 0; i < sbio->page_count; i++) {
 199			BUG_ON(!sbio->pagev[i]);
 200			BUG_ON(!sbio->pagev[i]->page);
 201			scrub_block_put(sbio->pagev[i]->sblock);
 202		}
 203		bio_put(sbio->bio);
 204	}
 205
 206	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
 207		struct scrub_bio *sbio = sdev->bios[i];
 208
 209		if (!sbio)
 210			break;
 211		kfree(sbio);
 212	}
 213
 214	scrub_free_csums(sdev);
 215	kfree(sdev);
 216}
 217
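/*
 * Allocate and initialize the per-device scrub context: the fixed pool of
 * scrub_bios is linked together through next_free/first_free, and the
 * node/leaf/sector sizes and checksum size are copied from the device's
 * root and the super block.
 */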
 218static noinline_for_stack
 219struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 220{
 221	struct scrub_dev *sdev;
 222	int		i;
 223	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 224	int pages_per_bio;
 225
 226	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
 227			      bio_get_nr_vecs(dev->bdev));
 228	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
 229	if (!sdev)
 230		goto nomem;
 231	sdev->dev = dev;
 232	sdev->pages_per_bio = pages_per_bio;
 233	sdev->curr = -1;
 234	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
 235		struct scrub_bio *sbio;
 236
 237		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
 238		if (!sbio)
 239			goto nomem;
 240		sdev->bios[i] = sbio;
 241
 242		sbio->index = i;
 243		sbio->sdev = sdev;
 244		sbio->page_count = 0;
 245		sbio->work.func = scrub_bio_end_io_worker;
 246
 247		if (i != SCRUB_BIOS_PER_DEV-1)
 248			sdev->bios[i]->next_free = i + 1;
 249		else
 250			sdev->bios[i]->next_free = -1;
 251	}
 252	sdev->first_free = 0;
 253	sdev->nodesize = dev->dev_root->nodesize;
 254	sdev->leafsize = dev->dev_root->leafsize;
 255	sdev->sectorsize = dev->dev_root->sectorsize;
 256	atomic_set(&sdev->in_flight, 0);
 257	atomic_set(&sdev->fixup_cnt, 0);
 258	atomic_set(&sdev->cancel_req, 0);
 259	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
 260	INIT_LIST_HEAD(&sdev->csum_list);
 261
 262	spin_lock_init(&sdev->list_lock);
 263	spin_lock_init(&sdev->stat_lock);
 264	init_waitqueue_head(&sdev->list_wait);
 265	return sdev;
 266
 267nomem:
 268	scrub_free_dev(sdev);
 269	return ERR_PTR(-ENOMEM);
 270}
 271
 272static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
 273{
 274	u64 isize;
 275	u32 nlink;
 276	int ret;
 277	int i;
 278	struct extent_buffer *eb;
 279	struct btrfs_inode_item *inode_item;
 280	struct scrub_warning *swarn = ctx;
 281	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
 282	struct inode_fs_paths *ipath = NULL;
 283	struct btrfs_root *local_root;
 284	struct btrfs_key root_key;
 285
 286	root_key.objectid = root;
 287	root_key.type = BTRFS_ROOT_ITEM_KEY;
 288	root_key.offset = (u64)-1;
 289	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
 290	if (IS_ERR(local_root)) {
 291		ret = PTR_ERR(local_root);
 292		goto err;
 293	}
 294
 295	ret = inode_item_info(inum, 0, local_root, swarn->path);
 296	if (ret) {
 297		btrfs_release_path(swarn->path);
 298		goto err;
 299	}
 300
 301	eb = swarn->path->nodes[0];
 302	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 303					struct btrfs_inode_item);
 304	isize = btrfs_inode_size(eb, inode_item);
 305	nlink = btrfs_inode_nlink(eb, inode_item);
 306	btrfs_release_path(swarn->path);
 307
 308	ipath = init_ipath(4096, local_root, swarn->path);
 309	if (IS_ERR(ipath)) {
 310		ret = PTR_ERR(ipath);
 311		ipath = NULL;
 312		goto err;
 313	}
 314	ret = paths_from_inode(inum, ipath);
 315
 316	if (ret < 0)
 317		goto err;
 318
 319	/*
 320	 * we deliberately ignore the fact that ipath might have been too small to
 321	 * hold all of the paths here
 322	 */
 323	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 324		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
 325			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
 326			"length %llu, links %u (path: %s)\n", swarn->errstr,
 327			swarn->logical, rcu_str_deref(swarn->dev->name),
 328			(unsigned long long)swarn->sector, root, inum, offset,
 329			min(isize - offset, (u64)PAGE_SIZE), nlink,
 330			(char *)(unsigned long)ipath->fspath->val[i]);
 331
 332	free_ipath(ipath);
 333	return 0;
 334
 335err:
 336	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
 337		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
 338		"resolving failed with ret=%d\n", swarn->errstr,
 339		swarn->logical, rcu_str_deref(swarn->dev->name),
 340		(unsigned long long)swarn->sector, root, inum, offset, ret);
 341
 342	free_ipath(ipath);
 343	return 0;
 344}
 345
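/*
 * Report a corrupted block to the kernel log.  For metadata the tree backrefs
 * are walked; for data every referencing inode is resolved to a file path via
 * scrub_print_warning_inode().
 */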
 346static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 347{
 348	struct btrfs_device *dev = sblock->sdev->dev;
 349	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 350	struct btrfs_path *path;
 351	struct btrfs_key found_key;
 352	struct extent_buffer *eb;
 353	struct btrfs_extent_item *ei;
 354	struct scrub_warning swarn;
 355	u32 item_size;
 356	int ret;
 357	u64 ref_root;
 358	u8 ref_level;
 359	unsigned long ptr = 0;
 360	const int bufsize = 4096;
 361	u64 extent_item_pos;
 362
 363	path = btrfs_alloc_path();
 364
 365	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
 366	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
 367	BUG_ON(sblock->page_count < 1);
 368	swarn.sector = (sblock->pagev[0].physical) >> 9;
 369	swarn.logical = sblock->pagev[0].logical;
 370	swarn.errstr = errstr;
 371	swarn.dev = dev;
 372	swarn.msg_bufsize = bufsize;
 373	swarn.scratch_bufsize = bufsize;
 374
 375	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
 376		goto out;
 377
 378	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
 379	if (ret < 0)
 380		goto out;
 381
 382	extent_item_pos = swarn.logical - found_key.objectid;
 383	swarn.extent_item_size = found_key.offset;
 384
 385	eb = path->nodes[0];
 386	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 387	item_size = btrfs_item_size_nr(eb, path->slots[0]);
 388	btrfs_release_path(path);
 389
 390	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 391		do {
 392			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 393							&ref_root, &ref_level);
 394			printk_in_rcu(KERN_WARNING
 395				"btrfs: %s at logical %llu on dev %s, "
 396				"sector %llu: metadata %s (level %d) in tree "
 397				"%llu\n", errstr, swarn.logical,
 398				rcu_str_deref(dev->name),
 399				(unsigned long long)swarn.sector,
 400				ref_level ? "node" : "leaf",
 401				ret < 0 ? -1 : ref_level,
 402				ret < 0 ? -1 : ref_root);
 403		} while (ret != 1);
 404	} else {
 405		swarn.path = path;
 406		iterate_extent_inodes(fs_info, found_key.objectid,
 407					extent_item_pos, 1,
 408					scrub_print_warning_inode, &swarn);
 409	}
 410
 411out:
 412	btrfs_free_path(path);
 413	kfree(swarn.scratch_buf);
 414	kfree(swarn.msg_buf);
 415}
 416
 417static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
 418{
 419	struct page *page = NULL;
 420	unsigned long index;
 421	struct scrub_fixup_nodatasum *fixup = ctx;
 422	int ret;
 423	int corrected = 0;
 424	struct btrfs_key key;
 425	struct inode *inode = NULL;
 426	u64 end = offset + PAGE_SIZE - 1;
 427	struct btrfs_root *local_root;
 428
 429	key.objectid = root;
 430	key.type = BTRFS_ROOT_ITEM_KEY;
 431	key.offset = (u64)-1;
 432	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
 433	if (IS_ERR(local_root))
 434		return PTR_ERR(local_root);
 435
 436	key.type = BTRFS_INODE_ITEM_KEY;
 437	key.objectid = inum;
 438	key.offset = 0;
 439	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
 440	if (IS_ERR(inode))
 441		return PTR_ERR(inode);
 442
 443	index = offset >> PAGE_CACHE_SHIFT;
 444
 445	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 446	if (!page) {
 447		ret = -ENOMEM;
 448		goto out;
 449	}
 450
 451	if (PageUptodate(page)) {
 452		struct btrfs_mapping_tree *map_tree;
 453		if (PageDirty(page)) {
 454			/*
 455			 * we need to write the data to the defective sector. the
 456			 * data that was in that sector is not in memory,
 457			 * because the page was modified. we must not write the
 458			 * modified page to that sector.
 459			 *
 460			 * TODO: what could be done here: wait for the delalloc
 461			 *       runner to write out that page (might involve
 462			 *       COW) and see whether the sector is still
 463			 *       referenced afterwards.
 464			 *
 465			 * For the time being, we'll treat this error as
 466			 * uncorrectable, although there is a chance that a
 467			 * later scrub will find the bad sector again and that
 468			 * there's no dirty page in memory, then.
 469			 */
 470			ret = -EIO;
 471			goto out;
 472		}
 473		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
 474		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
 475					fixup->logical, page,
 476					fixup->mirror_num);
 477		unlock_page(page);
 478		corrected = !ret;
 479	} else {
 480		/*
 481		 * we need to get good data first. the general readpage path
 482		 * will call repair_io_failure for us, we just have to make
 483		 * sure we read the bad mirror.
 484		 */
 485		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
 486					EXTENT_DAMAGED, GFP_NOFS);
 487		if (ret) {
 488			/* set_extent_bits should give proper error */
 489			WARN_ON(ret > 0);
 490			if (ret > 0)
 491				ret = -EFAULT;
 492			goto out;
 493		}
 494
 495		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
 496						btrfs_get_extent,
 497						fixup->mirror_num);
 498		wait_on_page_locked(page);
 499
 500		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
 501						end, EXTENT_DAMAGED, 0, NULL);
 502		if (!corrected)
 503			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
 504						EXTENT_DAMAGED, GFP_NOFS);
 505	}
 506
 507out:
 508	if (page)
 509		put_page(page);
 510	if (inode)
 511		iput(inode);
 512
 513	if (ret < 0)
 514		return ret;
 515
 516	if (ret == 0 && corrected) {
 517		/*
 518		 * we only need to call readpage for one of the inodes belonging
 519		 * to this extent. so make iterate_extent_inodes stop
 520		 */
 521		return 1;
 522	}
 523
 524	return -EIO;
 525}
 526
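/*
 * Worker that tries to repair a data block which has no checksum: every inode
 * referencing the logical address is made to re-read the bad mirror through
 * the regular readpage path, which triggers the generic read-repair code.
 */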
 527static void scrub_fixup_nodatasum(struct btrfs_work *work)
 528{
 529	int ret;
 530	struct scrub_fixup_nodatasum *fixup;
 531	struct scrub_dev *sdev;
 532	struct btrfs_trans_handle *trans = NULL;
 533	struct btrfs_fs_info *fs_info;
 534	struct btrfs_path *path;
 535	int uncorrectable = 0;
 536
 537	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
 538	sdev = fixup->sdev;
 539	fs_info = fixup->root->fs_info;
 540
 541	path = btrfs_alloc_path();
 542	if (!path) {
 543		spin_lock(&sdev->stat_lock);
 544		++sdev->stat.malloc_errors;
 545		spin_unlock(&sdev->stat_lock);
 546		uncorrectable = 1;
 547		goto out;
 548	}
 549
 550	trans = btrfs_join_transaction(fixup->root);
 551	if (IS_ERR(trans)) {
 552		uncorrectable = 1;
 553		goto out;
 554	}
 555
 556	/*
 557	 * the idea is to trigger a regular read through the standard path. we
 558	 * read a page from the (failed) logical address by specifying the
 559	 * corresponding copynum of the failed sector. thus, that readpage is
 560	 * expected to fail.
 561	 * that is the point where on-the-fly error correction will kick in
 562	 * (once it's finished) and rewrite the failed sector if a good copy
 563	 * can be found.
 564	 */
 565	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
 566						path, scrub_fixup_readpage,
 567						fixup);
 568	if (ret < 0) {
 569		uncorrectable = 1;
 570		goto out;
 571	}
 572	WARN_ON(ret != 1);
 573
 574	spin_lock(&sdev->stat_lock);
 575	++sdev->stat.corrected_errors;
 576	spin_unlock(&sdev->stat_lock);
 577
 578out:
 579	if (trans && !IS_ERR(trans))
 580		btrfs_end_transaction(trans, fixup->root);
 581	if (uncorrectable) {
 582		spin_lock(&sdev->stat_lock);
 583		++sdev->stat.uncorrectable_errors;
 584		spin_unlock(&sdev->stat_lock);
 585
 586		printk_ratelimited_in_rcu(KERN_ERR
 587			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
 588			(unsigned long long)fixup->logical,
 589			rcu_str_deref(sdev->dev->name));
 590	}
 591
 592	btrfs_free_path(path);
 593	kfree(fixup);
 594
 595	/* see the caller for why we're pretending to be paused in the scrub counters */
 596	mutex_lock(&fs_info->scrub_lock);
 597	atomic_dec(&fs_info->scrubs_running);
 598	atomic_dec(&fs_info->scrubs_paused);
 599	mutex_unlock(&fs_info->scrub_lock);
 600	atomic_dec(&sdev->fixup_cnt);
 601	wake_up(&fs_info->scrub_pause_wait);
 602	wake_up(&sdev->list_wait);
 603}
 604
 605/*
 606 * scrub_handle_errored_block gets called when either verification of the
 607 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 608 * case, this function handles all pages in the bio, even though only one
 609 * may be bad.
 610 * The goal of this function is to repair the errored block by using the
 611 * contents of one of the mirrors.
 612 */
 613static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 614{
 615	struct scrub_dev *sdev = sblock_to_check->sdev;
 616	struct btrfs_fs_info *fs_info;
 617	u64 length;
 618	u64 logical;
 619	u64 generation;
 620	unsigned int failed_mirror_index;
 621	unsigned int is_metadata;
 622	unsigned int have_csum;
 623	u8 *csum;
 624	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
 625	struct scrub_block *sblock_bad;
 626	int ret;
 627	int mirror_index;
 628	int page_num;
 629	int success;
 630	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
 631				      DEFAULT_RATELIMIT_BURST);
 632
 633	BUG_ON(sblock_to_check->page_count < 1);
 634	fs_info = sdev->dev->dev_root->fs_info;
 635	length = sblock_to_check->page_count * PAGE_SIZE;
 636	logical = sblock_to_check->pagev[0].logical;
 637	generation = sblock_to_check->pagev[0].generation;
 638	BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
 639	failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
 640	is_metadata = !(sblock_to_check->pagev[0].flags &
 641			BTRFS_EXTENT_FLAG_DATA);
 642	have_csum = sblock_to_check->pagev[0].have_csum;
 643	csum = sblock_to_check->pagev[0].csum;
 644
 645	/*
 646	 * read all mirrors one after the other. This includes
 647	 * re-reading the extent or metadata block that failed (that was
 648	 * the cause that this fixup code is called) another time,
 649	 * page by page this time in order to know which pages
 650	 * caused I/O errors and which ones are good (for all mirrors).
 651	 * It is the goal to handle the situation when more than one
 652	 * mirror contains I/O errors, but the errors do not
 653	 * overlap, i.e. the data can be repaired by selecting the
 654	 * pages from those mirrors without I/O error on the
 655	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
 656	 * would be that mirror #1 has an I/O error on the first page,
 657	 * the second page is good, and mirror #2 has an I/O error on
 658	 * the second page, but the first page is good.
 659	 * Then the first page of the first mirror can be repaired by
 660	 * taking the first page of the second mirror, and the
 661	 * second page of the second mirror can be repaired by
 662	 * copying the contents of the 2nd page of the 1st mirror.
 663	 * One more note: if the pages of one mirror contain I/O
 664	 * errors, the checksum cannot be verified. In order to get
 665	 * the best data for repairing, the first attempt is to find
 666	 * a mirror without I/O errors and with a validated checksum.
 667	 * Only if this is not possible, the pages are picked from
 668	 * mirrors with I/O errors without considering the checksum.
 669	 * If the latter is the case, at the end, the checksum of the
 670	 * repaired area is verified in order to correctly maintain
 671	 * the statistics.
 672	 */
 673
 674	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
 675				     sizeof(*sblocks_for_recheck),
 676				     GFP_NOFS);
 677	if (!sblocks_for_recheck) {
 678		spin_lock(&sdev->stat_lock);
 679		sdev->stat.malloc_errors++;
 680		sdev->stat.read_errors++;
 681		sdev->stat.uncorrectable_errors++;
 682		spin_unlock(&sdev->stat_lock);
 683		btrfs_dev_stat_inc_and_print(sdev->dev,
 684					     BTRFS_DEV_STAT_READ_ERRS);
 685		goto out;
 686	}
 687
 688	/* setup the context, map the logical blocks and alloc the pages */
 689	ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
 690					logical, sblocks_for_recheck);
 691	if (ret) {
 692		spin_lock(&sdev->stat_lock);
 693		sdev->stat.read_errors++;
 694		sdev->stat.uncorrectable_errors++;
 695		spin_unlock(&sdev->stat_lock);
 696		btrfs_dev_stat_inc_and_print(sdev->dev,
 697					     BTRFS_DEV_STAT_READ_ERRS);
 698		goto out;
 699	}
 700	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
 701	sblock_bad = sblocks_for_recheck + failed_mirror_index;
 702
 703	/* build and submit the bios for the failed mirror, check checksums */
 704	ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
 705				  csum, generation, sdev->csum_size);
 706	if (ret) {
 707		spin_lock(&sdev->stat_lock);
 708		sdev->stat.read_errors++;
 709		sdev->stat.uncorrectable_errors++;
 710		spin_unlock(&sdev->stat_lock);
 711		btrfs_dev_stat_inc_and_print(sdev->dev,
 712					     BTRFS_DEV_STAT_READ_ERRS);
 713		goto out;
 714	}
 715
 716	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
 717	    sblock_bad->no_io_error_seen) {
 718		/*
 719		 * the error disappeared after reading page by page, or
 720		 * the area was part of a huge bio and other parts of the
 721		 * bio caused I/O errors, or the block layer merged several
 722		 * read requests into one and the error is caused by a
 723		 * different bio (usually one of the two latter cases is
 724		 * the cause)
 725		 */
 726		spin_lock(&sdev->stat_lock);
 727		sdev->stat.unverified_errors++;
 728		spin_unlock(&sdev->stat_lock);
 729
 730		goto out;
 731	}
 732
 733	if (!sblock_bad->no_io_error_seen) {
 734		spin_lock(&sdev->stat_lock);
 735		sdev->stat.read_errors++;
 736		spin_unlock(&sdev->stat_lock);
 737		if (__ratelimit(&_rs))
 738			scrub_print_warning("i/o error", sblock_to_check);
 739		btrfs_dev_stat_inc_and_print(sdev->dev,
 740					     BTRFS_DEV_STAT_READ_ERRS);
 741	} else if (sblock_bad->checksum_error) {
 742		spin_lock(&sdev->stat_lock);
 743		sdev->stat.csum_errors++;
 744		spin_unlock(&sdev->stat_lock);
 745		if (__ratelimit(&_rs))
 746			scrub_print_warning("checksum error", sblock_to_check);
 747		btrfs_dev_stat_inc_and_print(sdev->dev,
 748					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
 749	} else if (sblock_bad->header_error) {
 750		spin_lock(&sdev->stat_lock);
 751		sdev->stat.verify_errors++;
 752		spin_unlock(&sdev->stat_lock);
 753		if (__ratelimit(&_rs))
 754			scrub_print_warning("checksum/header error",
 755					    sblock_to_check);
 756		if (sblock_bad->generation_error)
 757			btrfs_dev_stat_inc_and_print(sdev->dev,
 758				BTRFS_DEV_STAT_GENERATION_ERRS);
 759		else
 760			btrfs_dev_stat_inc_and_print(sdev->dev,
 761				BTRFS_DEV_STAT_CORRUPTION_ERRS);
 762	}
 763
 764	if (sdev->readonly)
 765		goto did_not_correct_error;
 766
 767	if (!is_metadata && !have_csum) {
 768		struct scrub_fixup_nodatasum *fixup_nodatasum;
 769
 770		/*
 771		 * !is_metadata and !have_csum, this means that the data
 772		 * might not be COW'ed, that it might be modified
 773		 * concurrently. The general strategy to work on the
 774		 * commit root does not help in the case when COW is not
 775		 * used.
 776		 */
 777		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
 778		if (!fixup_nodatasum)
 779			goto did_not_correct_error;
 780		fixup_nodatasum->sdev = sdev;
 781		fixup_nodatasum->logical = logical;
 782		fixup_nodatasum->root = fs_info->extent_root;
 783		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
 784		/*
 785		 * increment scrubs_running to prevent cancel requests from
 786		 * completing as long as a fixup worker is running. we must also
 787		 * increment scrubs_paused to prevent deadlocking on pause
 788		 * requests used for transaction commits (as the worker uses a
 789		 * transaction context). it is safe to regard the fixup worker
 790		 * as paused for all practical matters. effectively, we only
 791		 * avoid cancellation requests from completing.
 792		 */
 793		mutex_lock(&fs_info->scrub_lock);
 794		atomic_inc(&fs_info->scrubs_running);
 795		atomic_inc(&fs_info->scrubs_paused);
 796		mutex_unlock(&fs_info->scrub_lock);
 797		atomic_inc(&sdev->fixup_cnt);
 798		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
 799		btrfs_queue_worker(&fs_info->scrub_workers,
 800				   &fixup_nodatasum->work);
 801		goto out;
 802	}
 803
 804	/*
 805	 * now build and submit the bios for the other mirrors, check
 806	 * checksums
 807	 */
 808	for (mirror_index = 0;
 809	     mirror_index < BTRFS_MAX_MIRRORS &&
 810	     sblocks_for_recheck[mirror_index].page_count > 0;
 811	     mirror_index++) {
 812		if (mirror_index == failed_mirror_index)
 813			continue;
 814
 815		/* build and submit the bios, check checksums */
 816		ret = scrub_recheck_block(fs_info,
 817					  sblocks_for_recheck + mirror_index,
 818					  is_metadata, have_csum, csum,
 819					  generation, sdev->csum_size);
 820		if (ret)
 821			goto did_not_correct_error;
 822	}
 823
 824	/*
 825	 * first try to pick the mirror which is completely without I/O
 826	 * errors and also does not have a checksum error.
 827	 * If one is found, and if a checksum is present, the full block
 828	 * that is known to contain an error is rewritten. Afterwards
 829	 * the block is known to be corrected.
 830	 * If a mirror is found which is completely correct, and no
 831	 * checksum is present, only those pages are rewritten that had
 832	 * an I/O error in the block to be repaired, since it cannot be
 833	 * determined, which copy of the other pages is better (and it
 834	 * could happen otherwise that a correct page would be
 835	 * overwritten by a bad one).
 836	 */
 837	for (mirror_index = 0;
 838	     mirror_index < BTRFS_MAX_MIRRORS &&
 839	     sblocks_for_recheck[mirror_index].page_count > 0;
 840	     mirror_index++) {
 841		struct scrub_block *sblock_other = sblocks_for_recheck +
 842						   mirror_index;
 843
 844		if (!sblock_other->header_error &&
 845		    !sblock_other->checksum_error &&
 846		    sblock_other->no_io_error_seen) {
 847			int force_write = is_metadata || have_csum;
 848
 849			ret = scrub_repair_block_from_good_copy(sblock_bad,
 850								sblock_other,
 851								force_write);
 852			if (0 == ret)
 853				goto corrected_error;
 854		}
 855	}
 856
 857	/*
 858	 * in case of I/O errors in the area that is supposed to be
 859	 * repaired, continue by picking good copies of those pages.
 860	 * Select the good pages from mirrors to rewrite bad pages from
 861	 * the area to fix. Afterwards verify the checksum of the block
 862	 * that is supposed to be repaired. This verification step is
 863	 * only done for the purpose of statistics counting and for the
 864	 * final scrub report on whether errors remain.
 865	 * A perfect algorithm could make use of the checksum and try
 866	 * all possible combinations of pages from the different mirrors
 867	 * until the checksum verification succeeds. For example, when
 868	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
 869	 * of mirror #2 is readable but the final checksum test fails,
 870	 * then the 2nd page of mirror #3 could be tried, to see whether
 871	 * the final checksum now succeeds. But this would be a rare
 872	 * exception and is therefore not implemented. At least it is
 873	 * avoided that the good copy is overwritten.
 874	 * A more useful improvement would be to pick the sectors
 875	 * without I/O error based on sector sizes (512 bytes on legacy
 876	 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
 877	 * mirror could be repaired by taking 512 byte of a different
 878	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
 879	 * area are unreadable.
 880	 */
 881
 882	/* can only fix I/O errors from here on */
 883	if (sblock_bad->no_io_error_seen)
 884		goto did_not_correct_error;
 885
 886	success = 1;
 887	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
 888		struct scrub_page *page_bad = sblock_bad->pagev + page_num;
 889
 890		if (!page_bad->io_error)
 891			continue;
 892
 893		for (mirror_index = 0;
 894		     mirror_index < BTRFS_MAX_MIRRORS &&
 895		     sblocks_for_recheck[mirror_index].page_count > 0;
 896		     mirror_index++) {
 897			struct scrub_block *sblock_other = sblocks_for_recheck +
 898							   mirror_index;
 899			struct scrub_page *page_other = sblock_other->pagev +
 900							page_num;
 901
 902			if (!page_other->io_error) {
 903				ret = scrub_repair_page_from_good_copy(
 904					sblock_bad, sblock_other, page_num, 0);
 905				if (0 == ret) {
 906					page_bad->io_error = 0;
 907					break; /* succeeded for this page */
 908				}
 909			}
 910		}
 911
 912		if (page_bad->io_error) {
 913			/* did not find a mirror to copy the page from */
 914			success = 0;
 915		}
 916	}
 917
 918	if (success) {
 919		if (is_metadata || have_csum) {
 920			/*
 921			 * need to verify the checksum now that all
 922			 * sectors on disk are repaired (the write
 923			 * request for data to be repaired is on its way).
 924			 * Just be lazy and use scrub_recheck_block()
 925			 * which re-reads the data before the checksum
 926			 * is verified, but most likely the data comes out
 927			 * of the page cache.
 928			 */
 929			ret = scrub_recheck_block(fs_info, sblock_bad,
 930						  is_metadata, have_csum, csum,
 931						  generation, sdev->csum_size);
 932			if (!ret && !sblock_bad->header_error &&
 933			    !sblock_bad->checksum_error &&
 934			    sblock_bad->no_io_error_seen)
 935				goto corrected_error;
 936			else
 937				goto did_not_correct_error;
 938		} else {
 939corrected_error:
 940			spin_lock(&sdev->stat_lock);
 941			sdev->stat.corrected_errors++;
 942			spin_unlock(&sdev->stat_lock);
 943			printk_ratelimited_in_rcu(KERN_ERR
 944				"btrfs: fixed up error at logical %llu on dev %s\n",
 945				(unsigned long long)logical,
 946				rcu_str_deref(sdev->dev->name));
 947		}
 948	} else {
 949did_not_correct_error:
 950		spin_lock(&sdev->stat_lock);
 951		sdev->stat.uncorrectable_errors++;
 952		spin_unlock(&sdev->stat_lock);
 953		printk_ratelimited_in_rcu(KERN_ERR
 954			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
 955			(unsigned long long)logical,
 956			rcu_str_deref(sdev->dev->name));
 957	}
 958
 959out:
 960	if (sblocks_for_recheck) {
 961		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
 962		     mirror_index++) {
 963			struct scrub_block *sblock = sblocks_for_recheck +
 964						     mirror_index;
 965			int page_index;
 966
 967			for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
 968			     page_index++)
 969				if (sblock->pagev[page_index].page)
 970					__free_page(
 971						sblock->pagev[page_index].page);
 972		}
 973		kfree(sblocks_for_recheck);
 974	}
 975
 976	return 0;
 977}
 978
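/*
 * For each PAGE_SIZE chunk of the errored range, map the logical address once
 * and allocate one page per mirror, so that every mirror can later be re-read
 * and compared independently.
 */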
 979static int scrub_setup_recheck_block(struct scrub_dev *sdev,
 980				     struct btrfs_mapping_tree *map_tree,
 981				     u64 length, u64 logical,
 982				     struct scrub_block *sblocks_for_recheck)
 983{
 984	int page_index;
 985	int mirror_index;
 986	int ret;
 987
 988	/*
 989	 * note: the three members sdev, ref_count and outstanding_pages
 990	 * are not used (and not set) in the blocks that are used for
 991	 * the recheck procedure
 992	 */
 993
 994	page_index = 0;
 995	while (length > 0) {
 996		u64 sublen = min_t(u64, length, PAGE_SIZE);
 997		u64 mapped_length = sublen;
 998		struct btrfs_bio *bbio = NULL;
 999
1000		/*
1001		 * with a length of PAGE_SIZE, each returned stripe
1002		 * represents one mirror
1003		 */
1004		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
1005				      &bbio, 0);
1006		if (ret || !bbio || mapped_length < sublen) {
1007			kfree(bbio);
1008			return -EIO;
1009		}
1010
1011		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
1012		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
1013		     mirror_index++) {
1014			struct scrub_block *sblock;
1015			struct scrub_page *page;
1016
1017			if (mirror_index >= BTRFS_MAX_MIRRORS)
1018				continue;
1019
1020			sblock = sblocks_for_recheck + mirror_index;
1021			page = sblock->pagev + page_index;
1022			page->logical = logical;
1023			page->physical = bbio->stripes[mirror_index].physical;
1024			/* for missing devices, dev->bdev is NULL */
1025			page->dev = bbio->stripes[mirror_index].dev;
1026			page->mirror_num = mirror_index + 1;
1027			page->page = alloc_page(GFP_NOFS);
1028			if (!page->page) {
1029				spin_lock(&sdev->stat_lock);
1030				sdev->stat.malloc_errors++;
1031				spin_unlock(&sdev->stat_lock);
1032				return -ENOMEM;
1033			}
1034			sblock->page_count++;
1035		}
1036		kfree(bbio);
1037		length -= sublen;
1038		logical += sublen;
1039		page_index++;
1040	}
1041
1042	return 0;
1043}
1044
1045/*
1046 * this function will check the on disk data for checksum errors, header
1047 * errors and read I/O errors. If any I/O errors happen, the exact pages
1048 * which are errored are marked as being bad. The goal is to enable scrub
1049 * to take those pages that are not errored from all the mirrors so that
1050 * the pages that are errored in the just handled mirror can be repaired.
1051 */
1052static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1053			       struct scrub_block *sblock, int is_metadata,
1054			       int have_csum, u8 *csum, u64 generation,
1055			       u16 csum_size)
1056{
1057	int page_num;
1058
1059	sblock->no_io_error_seen = 1;
1060	sblock->header_error = 0;
1061	sblock->checksum_error = 0;
1062
1063	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1064		struct bio *bio;
1065		int ret;
1066		struct scrub_page *page = sblock->pagev + page_num;
1067		DECLARE_COMPLETION_ONSTACK(complete);
1068
1069		if (page->dev->bdev == NULL) {
1070			page->io_error = 1;
1071			sblock->no_io_error_seen = 0;
1072			continue;
1073		}
1074
1075		BUG_ON(!page->page);
1076		bio = bio_alloc(GFP_NOFS, 1);
1077		if (!bio)
1078			return -EIO;
1079		bio->bi_bdev = page->dev->bdev;
1080		bio->bi_sector = page->physical >> 9;
1081		bio->bi_end_io = scrub_complete_bio_end_io;
1082		bio->bi_private = &complete;
1083
1084		ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
1085		if (PAGE_SIZE != ret) {
1086			bio_put(bio);
1087			return -EIO;
1088		}
1089		btrfsic_submit_bio(READ, bio);
1090
1091		/* this will also unplug the queue */
1092		wait_for_completion(&complete);
1093
1094		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
1095		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1096			sblock->no_io_error_seen = 0;
1097		bio_put(bio);
1098	}
1099
1100	if (sblock->no_io_error_seen)
1101		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1102					     have_csum, csum, generation,
1103					     csum_size);
1104
1105	return 0;
1106}
1107
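/*
 * Recompute the checksum over the (already read) pages of one mirror and, for
 * metadata, additionally verify the bytenr, fsid, chunk tree uuid and
 * generation stored in the block header.
 */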
1108static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1109					 struct scrub_block *sblock,
1110					 int is_metadata, int have_csum,
1111					 const u8 *csum, u64 generation,
1112					 u16 csum_size)
1113{
1114	int page_num;
1115	u8 calculated_csum[BTRFS_CSUM_SIZE];
1116	u32 crc = ~(u32)0;
1117	struct btrfs_root *root = fs_info->extent_root;
1118	void *mapped_buffer;
1119
1120	BUG_ON(!sblock->pagev[0].page);
1121	if (is_metadata) {
1122		struct btrfs_header *h;
1123
1124		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
1125		h = (struct btrfs_header *)mapped_buffer;
1126
1127		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
1128		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1129		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1130			   BTRFS_UUID_SIZE)) {
1131			sblock->header_error = 1;
1132		} else if (generation != le64_to_cpu(h->generation)) {
1133			sblock->header_error = 1;
1134			sblock->generation_error = 1;
1135		}
1136		csum = h->csum;
1137	} else {
1138		if (!have_csum)
1139			return;
1140
1141		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
1142	}
1143
1144	for (page_num = 0;;) {
1145		if (page_num == 0 && is_metadata)
1146			crc = btrfs_csum_data(root,
1147				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1148				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1149		else
1150			crc = btrfs_csum_data(root, mapped_buffer, crc,
1151					      PAGE_SIZE);
1152
1153		kunmap_atomic(mapped_buffer);
1154		page_num++;
1155		if (page_num >= sblock->page_count)
1156			break;
1157		BUG_ON(!sblock->pagev[page_num].page);
1158
1159		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
1160	}
1161
1162	btrfs_csum_final(crc, calculated_csum);
1163	if (memcmp(calculated_csum, csum, csum_size))
1164		sblock->checksum_error = 1;
1165}
1166
1167static void scrub_complete_bio_end_io(struct bio *bio, int err)
1168{
1169	complete((struct completion *)bio->bi_private);
1170}
1171
1172static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1173					     struct scrub_block *sblock_good,
1174					     int force_write)
1175{
1176	int page_num;
1177	int ret = 0;
1178
1179	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1180		int ret_sub;
1181
1182		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1183							   sblock_good,
1184							   page_num,
1185							   force_write);
1186		if (ret_sub)
1187			ret = ret_sub;
1188	}
1189
1190	return ret;
1191}
1192
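/*
 * If forced, or if the bad copy shows a header/checksum/read error,
 * synchronously write the page of the good mirror over the corresponding
 * sector of the bad mirror; write failures are accounted in the device stats.
 */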
1193static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1194					    struct scrub_block *sblock_good,
1195					    int page_num, int force_write)
1196{
1197	struct scrub_page *page_bad = sblock_bad->pagev + page_num;
1198	struct scrub_page *page_good = sblock_good->pagev + page_num;
1199
1200	BUG_ON(sblock_bad->pagev[page_num].page == NULL);
1201	BUG_ON(sblock_good->pagev[page_num].page == NULL);
1202	if (force_write || sblock_bad->header_error ||
1203	    sblock_bad->checksum_error || page_bad->io_error) {
1204		struct bio *bio;
1205		int ret;
1206		DECLARE_COMPLETION_ONSTACK(complete);
1207
1208		bio = bio_alloc(GFP_NOFS, 1);
1209		if (!bio)
1210			return -EIO;
1211		bio->bi_bdev = page_bad->dev->bdev;
1212		bio->bi_sector = page_bad->physical >> 9;
1213		bio->bi_end_io = scrub_complete_bio_end_io;
1214		bio->bi_private = &complete;
1215
1216		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1217		if (PAGE_SIZE != ret) {
1218			bio_put(bio);
1219			return -EIO;
1220		}
1221		btrfsic_submit_bio(WRITE, bio);
1222
1223		/* this will also unplug the queue */
1224		wait_for_completion(&complete);
1225		if (!bio_flagged(bio, BIO_UPTODATE)) {
1226			btrfs_dev_stat_inc_and_print(page_bad->dev,
1227				BTRFS_DEV_STAT_WRITE_ERRS);
1228			bio_put(bio);
1229			return -EIO;
1230		}
1231		bio_put(bio);
1232	}
1233
1234	return 0;
1235}
1236
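/*
 * Dispatch verification based on the extent flags recorded for the block.
 * Super block errors are only counted; data and metadata failures are handed
 * to scrub_handle_errored_block() for repair.
 */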
1237static void scrub_checksum(struct scrub_block *sblock)
1238{
1239	u64 flags;
1240	int ret;
1241
1242	BUG_ON(sblock->page_count < 1);
1243	flags = sblock->pagev[0].flags;
1244	ret = 0;
1245	if (flags & BTRFS_EXTENT_FLAG_DATA)
1246		ret = scrub_checksum_data(sblock);
1247	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1248		ret = scrub_checksum_tree_block(sblock);
1249	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1250		(void)scrub_checksum_super(sblock);
1251	else
1252		WARN_ON(1);
1253	if (ret)
1254		scrub_handle_errored_block(sblock);
1255}
1256
1257static int scrub_checksum_data(struct scrub_block *sblock)
1258{
1259	struct scrub_dev *sdev = sblock->sdev;
1260	u8 csum[BTRFS_CSUM_SIZE];
1261	u8 *on_disk_csum;
1262	struct page *page;
1263	void *buffer;
1264	u32 crc = ~(u32)0;
1265	int fail = 0;
1266	struct btrfs_root *root = sdev->dev->dev_root;
1267	u64 len;
1268	int index;
1269
1270	BUG_ON(sblock->page_count < 1);
1271	if (!sblock->pagev[0].have_csum)
1272		return 0;
1273
1274	on_disk_csum = sblock->pagev[0].csum;
1275	page = sblock->pagev[0].page;
1276	buffer = kmap_atomic(page);
1277
1278	len = sdev->sectorsize;
1279	index = 0;
1280	for (;;) {
1281		u64 l = min_t(u64, len, PAGE_SIZE);
1282
1283		crc = btrfs_csum_data(root, buffer, crc, l);
1284		kunmap_atomic(buffer);
1285		len -= l;
1286		if (len == 0)
1287			break;
1288		index++;
1289		BUG_ON(index >= sblock->page_count);
1290		BUG_ON(!sblock->pagev[index].page);
1291		page = sblock->pagev[index].page;
1292		buffer = kmap_atomic(page);
1293	}
1294
1295	btrfs_csum_final(crc, csum);
1296	if (memcmp(csum, on_disk_csum, sdev->csum_size))
1297		fail = 1;
1298
1299	return fail;
1300}
1301
1302static int scrub_checksum_tree_block(struct scrub_block *sblock)
1303{
1304	struct scrub_dev *sdev = sblock->sdev;
1305	struct btrfs_header *h;
1306	struct btrfs_root *root = sdev->dev->dev_root;
1307	struct btrfs_fs_info *fs_info = root->fs_info;
1308	u8 calculated_csum[BTRFS_CSUM_SIZE];
1309	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1310	struct page *page;
1311	void *mapped_buffer;
1312	u64 mapped_size;
1313	void *p;
1314	u32 crc = ~(u32)0;
1315	int fail = 0;
1316	int crc_fail = 0;
1317	u64 len;
1318	int index;
1319
1320	BUG_ON(sblock->page_count < 1);
1321	page = sblock->pagev[0].page;
1322	mapped_buffer = kmap_atomic(page);
1323	h = (struct btrfs_header *)mapped_buffer;
1324	memcpy(on_disk_csum, h->csum, sdev->csum_size);
1325
1326	/*
1327	 * we don't use the getter functions here, as we
1328	 * a) don't have an extent buffer and
1329	 * b) the page is already kmapped
1330	 */
1331
1332	if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
1333		++fail;
1334
1335	if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
1336		++fail;
1337
1338	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1339		++fail;
1340
1341	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1342		   BTRFS_UUID_SIZE))
1343		++fail;
1344
1345	BUG_ON(sdev->nodesize != sdev->leafsize);
1346	len = sdev->nodesize - BTRFS_CSUM_SIZE;
1347	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1348	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1349	index = 0;
1350	for (;;) {
1351		u64 l = min_t(u64, len, mapped_size);
1352
1353		crc = btrfs_csum_data(root, p, crc, l);
1354		kunmap_atomic(mapped_buffer);
1355		len -= l;
1356		if (len == 0)
1357			break;
1358		index++;
1359		BUG_ON(index >= sblock->page_count);
1360		BUG_ON(!sblock->pagev[index].page);
1361		page = sblock->pagev[index].page;
1362		mapped_buffer = kmap_atomic(page);
1363		mapped_size = PAGE_SIZE;
1364		p = mapped_buffer;
1365	}
1366
1367	btrfs_csum_final(crc, calculated_csum);
1368	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1369		++crc_fail;
1370
1371	return fail || crc_fail;
1372}
1373
1374static int scrub_checksum_super(struct scrub_block *sblock)
1375{
1376	struct btrfs_super_block *s;
1377	struct scrub_dev *sdev = sblock->sdev;
1378	struct btrfs_root *root = sdev->dev->dev_root;
1379	struct btrfs_fs_info *fs_info = root->fs_info;
1380	u8 calculated_csum[BTRFS_CSUM_SIZE];
1381	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1382	struct page *page;
1383	void *mapped_buffer;
1384	u64 mapped_size;
1385	void *p;
1386	u32 crc = ~(u32)0;
1387	int fail_gen = 0;
1388	int fail_cor = 0;
1389	u64 len;
1390	int index;
1391
1392	BUG_ON(sblock->page_count < 1);
1393	page = sblock->pagev[0].page;
1394	mapped_buffer = kmap_atomic(page);
1395	s = (struct btrfs_super_block *)mapped_buffer;
1396	memcpy(on_disk_csum, s->csum, sdev->csum_size);
1397
1398	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
1399		++fail_cor;
1400
1401	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
1402		++fail_gen;
1403
1404	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1405		++fail_cor;
1406
1407	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1408	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1409	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1410	index = 0;
1411	for (;;) {
1412		u64 l = min_t(u64, len, mapped_size);
1413
1414		crc = btrfs_csum_data(root, p, crc, l);
1415		kunmap_atomic(mapped_buffer);
1416		len -= l;
1417		if (len == 0)
1418			break;
1419		index++;
1420		BUG_ON(index >= sblock->page_count);
1421		BUG_ON(!sblock->pagev[index].page);
1422		page = sblock->pagev[index].page;
1423		mapped_buffer = kmap_atomic(page);
1424		mapped_size = PAGE_SIZE;
1425		p = mapped_buffer;
1426	}
1427
1428	btrfs_csum_final(crc, calculated_csum);
1429	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1430		++fail_cor;
1431
1432	if (fail_cor + fail_gen) {
1433		/*
1434		 * if we find an error in a super block, we just report it.
1435		 * They will get written with the next transaction commit
1436		 * anyway
1437		 */
1438		spin_lock(&sdev->stat_lock);
1439		++sdev->stat.super_errors;
1440		spin_unlock(&sdev->stat_lock);
1441		if (fail_cor)
1442			btrfs_dev_stat_inc_and_print(sdev->dev,
1443				BTRFS_DEV_STAT_CORRUPTION_ERRS);
1444		else
1445			btrfs_dev_stat_inc_and_print(sdev->dev,
1446				BTRFS_DEV_STAT_GENERATION_ERRS);
1447	}
1448
1449	return fail_cor + fail_gen;
1450}
1451
1452static void scrub_block_get(struct scrub_block *sblock)
1453{
1454	atomic_inc(&sblock->ref_count);
1455}
1456
1457static void scrub_block_put(struct scrub_block *sblock)
1458{
1459	if (atomic_dec_and_test(&sblock->ref_count)) {
1460		int i;
1461
1462		for (i = 0; i < sblock->page_count; i++)
1463			if (sblock->pagev[i].page)
1464				__free_page(sblock->pagev[i].page);
1465		kfree(sblock);
1466	}
1467}
1468
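/*
 * Send the bio that is currently being assembled and count it as in flight;
 * sdev->curr is reset so the next queued page starts a fresh bio.
 */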
1469static void scrub_submit(struct scrub_dev *sdev)
1470{
1471	struct scrub_bio *sbio;
1472
1473	if (sdev->curr == -1)
1474		return;
1475
1476	sbio = sdev->bios[sdev->curr];
1477	sdev->curr = -1;
1478	atomic_inc(&sdev->in_flight);
1479
1480	btrfsic_submit_bio(READ, sbio->bio);
1481}
1482
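/*
 * Append one page to the current per-device bio.  A new bio is started (and
 * the old one submitted) whenever none is active, the physical or logical
 * position is no longer contiguous, or the bio has reached pages_per_bio.
 */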
1483static int scrub_add_page_to_bio(struct scrub_dev *sdev,
1484				 struct scrub_page *spage)
1485{
1486	struct scrub_block *sblock = spage->sblock;
1487	struct scrub_bio *sbio;
1488	int ret;
1489
1490again:
1491	/*
1492	 * grab a fresh bio or wait for one to become available
1493	 */
1494	while (sdev->curr == -1) {
1495		spin_lock(&sdev->list_lock);
1496		sdev->curr = sdev->first_free;
1497		if (sdev->curr != -1) {
1498			sdev->first_free = sdev->bios[sdev->curr]->next_free;
1499			sdev->bios[sdev->curr]->next_free = -1;
1500			sdev->bios[sdev->curr]->page_count = 0;
1501			spin_unlock(&sdev->list_lock);
1502		} else {
1503			spin_unlock(&sdev->list_lock);
1504			wait_event(sdev->list_wait, sdev->first_free != -1);
1505		}
1506	}
1507	sbio = sdev->bios[sdev->curr];
1508	if (sbio->page_count == 0) {
1509		struct bio *bio;
1510
1511		sbio->physical = spage->physical;
1512		sbio->logical = spage->logical;
1513		bio = sbio->bio;
1514		if (!bio) {
1515			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
1516			if (!bio)
1517				return -ENOMEM;
1518			sbio->bio = bio;
1519		}
1520
1521		bio->bi_private = sbio;
1522		bio->bi_end_io = scrub_bio_end_io;
1523		bio->bi_bdev = sdev->dev->bdev;
1524		bio->bi_sector = spage->physical >> 9;
1525		sbio->err = 0;
1526	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1527		   spage->physical ||
1528		   sbio->logical + sbio->page_count * PAGE_SIZE !=
1529		   spage->logical) {
1530		scrub_submit(sdev);
1531		goto again;
1532	}
1533
1534	sbio->pagev[sbio->page_count] = spage;
1535	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1536	if (ret != PAGE_SIZE) {
1537		if (sbio->page_count < 1) {
1538			bio_put(sbio->bio);
1539			sbio->bio = NULL;
1540			return -EIO;
1541		}
1542		scrub_submit(sdev);
1543		goto again;
1544	}
1545
1546	scrub_block_get(sblock); /* one for the added page */
1547	atomic_inc(&sblock->outstanding_pages);
1548	sbio->page_count++;
1549	if (sbio->page_count == sdev->pages_per_bio)
1550		scrub_submit(sdev);
1551
1552	return 0;
1553}
1554
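/*
 * Build a scrub_block for a contiguous piece of an extent: allocate one page
 * per PAGE_SIZE of the range, record csum/generation/mirror info, and queue
 * every page onto a scrub bio.
 */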
1555static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
1556		       u64 physical, u64 flags, u64 gen, int mirror_num,
1557		       u8 *csum, int force)
1558{
1559	struct scrub_block *sblock;
1560	int index;
1561
1562	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
1563	if (!sblock) {
1564		spin_lock(&sdev->stat_lock);
1565		sdev->stat.malloc_errors++;
1566		spin_unlock(&sdev->stat_lock);
1567		return -ENOMEM;
1568	}
1569
1570	/* one ref inside this function, plus one for each page later on */
1571	atomic_set(&sblock->ref_count, 1);
1572	sblock->sdev = sdev;
1573	sblock->no_io_error_seen = 1;
1574
1575	for (index = 0; len > 0; index++) {
1576		struct scrub_page *spage = sblock->pagev + index;
1577		u64 l = min_t(u64, len, PAGE_SIZE);
1578
1579		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
1580		spage->page = alloc_page(GFP_NOFS);
1581		if (!spage->page) {
1582			spin_lock(&sdev->stat_lock);
1583			sdev->stat.malloc_errors++;
1584			spin_unlock(&sdev->stat_lock);
1585			while (index > 0) {
1586				index--;
1587				__free_page(sblock->pagev[index].page);
1588			}
1589			kfree(sblock);
1590			return -ENOMEM;
1591		}
1592		spage->sblock = sblock;
1593		spage->dev = sdev->dev;
1594		spage->flags = flags;
1595		spage->generation = gen;
1596		spage->logical = logical;
1597		spage->physical = physical;
1598		spage->mirror_num = mirror_num;
1599		if (csum) {
1600			spage->have_csum = 1;
1601			memcpy(spage->csum, csum, sdev->csum_size);
1602		} else {
1603			spage->have_csum = 0;
1604		}
1605		sblock->page_count++;
1606		len -= l;
1607		logical += l;
1608		physical += l;
1609	}
1610
1611	BUG_ON(sblock->page_count == 0);
1612	for (index = 0; index < sblock->page_count; index++) {
1613		struct scrub_page *spage = sblock->pagev + index;
1614		int ret;
1615
1616		ret = scrub_add_page_to_bio(sdev, spage);
1617		if (ret) {
1618			scrub_block_put(sblock);
1619			return ret;
1620		}
1621	}
1622
1623	if (force)
1624		scrub_submit(sdev);
1625
1626	/* last one frees, either here or in bio completion for last page */
1627	scrub_block_put(sblock);
1628	return 0;
1629}
1630
1631static void scrub_bio_end_io(struct bio *bio, int err)
1632{
1633	struct scrub_bio *sbio = bio->bi_private;
1634	struct scrub_dev *sdev = sbio->sdev;
1635	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1636
1637	sbio->err = err;
1638	sbio->bio = bio;
1639
1640	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
1641}
1642
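/*
 * Completion work for a scrub read bio: on error, every page of the bio is
 * marked bad; blocks whose last outstanding page just finished are verified
 * (or repaired), and the bio slot is put back on the free list.
 */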
1643static void scrub_bio_end_io_worker(struct btrfs_work *work)
1644{
1645	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1646	struct scrub_dev *sdev = sbio->sdev;
1647	int i;
1648
1649	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
1650	if (sbio->err) {
1651		for (i = 0; i < sbio->page_count; i++) {
1652			struct scrub_page *spage = sbio->pagev[i];
1653
1654			spage->io_error = 1;
1655			spage->sblock->no_io_error_seen = 0;
1656		}
1657	}
1658
1659	/* now complete the scrub_block items that have all pages completed */
1660	for (i = 0; i < sbio->page_count; i++) {
1661		struct scrub_page *spage = sbio->pagev[i];
1662		struct scrub_block *sblock = spage->sblock;
1663
1664		if (atomic_dec_and_test(&sblock->outstanding_pages))
1665			scrub_block_complete(sblock);
1666		scrub_block_put(sblock);
1667	}
1668
1669	if (sbio->err) {
1670		/* what is this good for??? */
1671		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1672		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
1673		sbio->bio->bi_phys_segments = 0;
1674		sbio->bio->bi_idx = 0;
1675
1676		for (i = 0; i < sbio->page_count; i++) {
1677			struct bio_vec *bi;
1678			bi = &sbio->bio->bi_io_vec[i];
1679			bi->bv_offset = 0;
1680			bi->bv_len = PAGE_SIZE;
1681		}
1682	}
1683
1684	bio_put(sbio->bio);
1685	sbio->bio = NULL;
1686	spin_lock(&sdev->list_lock);
1687	sbio->next_free = sdev->first_free;
1688	sdev->first_free = sbio->index;
1689	spin_unlock(&sdev->list_lock);
1690	atomic_dec(&sdev->in_flight);
1691	wake_up(&sdev->list_wait);
1692}
1693
1694static void scrub_block_complete(struct scrub_block *sblock)
1695{
1696	if (!sblock->no_io_error_seen)
1697		scrub_handle_errored_block(sblock);
1698	else
1699		scrub_checksum(sblock);
1700}
1701
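/*
 * Look up the checksum for the data sector at @logical in the pre-collected
 * csum_list.  Entries lying entirely before @logical are discarded; returns 1
 * and copies the csum when a matching sector is found, 0 otherwise.
 */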
1702static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
1703			   u8 *csum)
1704{
1705	struct btrfs_ordered_sum *sum = NULL;
1706	int ret = 0;
1707	unsigned long i;
1708	unsigned long num_sectors;
1709
1710	while (!list_empty(&sdev->csum_list)) {
1711		sum = list_first_entry(&sdev->csum_list,
1712				       struct btrfs_ordered_sum, list);
1713		if (sum->bytenr > logical)
1714			return 0;
1715		if (sum->bytenr + sum->len > logical)
1716			break;
1717
1718		++sdev->stat.csum_discards;
1719		list_del(&sum->list);
1720		kfree(sum);
1721		sum = NULL;
1722	}
1723	if (!sum)
1724		return 0;
1725
1726	num_sectors = sum->len / sdev->sectorsize;
1727	for (i = 0; i < num_sectors; ++i) {
1728		if (sum->sums[i].bytenr == logical) {
1729			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
1730			ret = 1;
1731			break;
1732		}
1733	}
1734	if (ret && i == num_sectors - 1) {
1735		list_del(&sum->list);
1736		kfree(sum);
1737	}
1738	return ret;
1739}
1740
1741/* scrub extent tries to collect up to 64 kB for each bio */
1742static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
1743			u64 physical, u64 flags, u64 gen, int mirror_num)
1744{
1745	int ret;
1746	u8 csum[BTRFS_CSUM_SIZE];
1747	u32 blocksize;
1748
1749	if (flags & BTRFS_EXTENT_FLAG_DATA) {
1750		blocksize = sdev->sectorsize;
1751		spin_lock(&sdev->stat_lock);
1752		sdev->stat.data_extents_scrubbed++;
1753		sdev->stat.data_bytes_scrubbed += len;
1754		spin_unlock(&sdev->stat_lock);
1755	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1756		BUG_ON(sdev->nodesize != sdev->leafsize);
1757		blocksize = sdev->nodesize;
1758		spin_lock(&sdev->stat_lock);
1759		sdev->stat.tree_extents_scrubbed++;
1760		sdev->stat.tree_bytes_scrubbed += len;
1761		spin_unlock(&sdev->stat_lock);
1762	} else {
1763		blocksize = sdev->sectorsize;
1764		BUG_ON(1);
1765	}
1766
1767	while (len) {
1768		u64 l = min_t(u64, len, blocksize);
1769		int have_csum = 0;
1770
1771		if (flags & BTRFS_EXTENT_FLAG_DATA) {
1772			/* push csums to sbio */
1773			have_csum = scrub_find_csum(sdev, logical, l, csum);
1774			if (have_csum == 0)
1775				++sdev->stat.no_csum;
1776		}
1777		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
1778				  mirror_num, have_csum ? csum : NULL, 0);
1779		if (ret)
1780			return ret;
1781		len -= l;
1782		logical += l;
1783		physical += l;
1784	}
1785	return 0;
1786}
1787
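/*
 * Scrub one device stripe of a chunk: compute the per-RAID-profile offset,
 * increment and mirror number, read ahead the extent and csum trees, then
 * walk the extent items that fall into each stripe and scrub them.
 */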
1788static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
1789	struct map_lookup *map, int num, u64 base, u64 length)
1790{
1791	struct btrfs_path *path;
1792	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1793	struct btrfs_root *root = fs_info->extent_root;
1794	struct btrfs_root *csum_root = fs_info->csum_root;
1795	struct btrfs_extent_item *extent;
1796	struct blk_plug plug;
1797	u64 flags;
1798	int ret;
1799	int slot;
1800	int i;
1801	u64 nstripes;
1802	struct extent_buffer *l;
1803	struct btrfs_key key;
1804	u64 physical;
1805	u64 logical;
1806	u64 generation;
1807	int mirror_num;
1808	struct reada_control *reada1;
1809	struct reada_control *reada2;
1810	struct btrfs_key key_start;
1811	struct btrfs_key key_end;
1812
1813	u64 increment = map->stripe_len;
1814	u64 offset;
1815
1816	nstripes = length;
1817	offset = 0;
1818	do_div(nstripes, map->stripe_len);
1819	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1820		offset = map->stripe_len * num;
1821		increment = map->stripe_len * map->num_stripes;
1822		mirror_num = 1;
1823	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1824		int factor = map->num_stripes / map->sub_stripes;
1825		offset = map->stripe_len * (num / map->sub_stripes);
1826		increment = map->stripe_len * factor;
1827		mirror_num = num % map->sub_stripes + 1;
1828	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1829		increment = map->stripe_len;
1830		mirror_num = num % map->num_stripes + 1;
1831	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1832		increment = map->stripe_len;
1833		mirror_num = num % map->num_stripes + 1;
1834	} else {
1835		increment = map->stripe_len;
1836		mirror_num = 1;
1837	}
1838
1839	path = btrfs_alloc_path();
1840	if (!path)
1841		return -ENOMEM;
1842
1843	/*
1844	 * work on commit root. The related disk blocks are static as
1845	 * long as COW is applied. This means it is safe to rewrite
1846	 * them to repair disk errors without any race conditions
1847	 */
1848	path->search_commit_root = 1;
1849	path->skip_locking = 1;
1850
1851	/*
1852	 * trigger the readahead for the extent tree and csum tree and wait for
1853	 * completion. During readahead, the scrub is officially paused
1854	 * to not hold off transaction commits
1855	 */
1856	logical = base + offset;
1857
1858	wait_event(sdev->list_wait,
1859		   atomic_read(&sdev->in_flight) == 0);
1860	atomic_inc(&fs_info->scrubs_paused);
1861	wake_up(&fs_info->scrub_pause_wait);
1862
1863	/* FIXME it might be better to start readahead at commit root */
1864	key_start.objectid = logical;
1865	key_start.type = BTRFS_EXTENT_ITEM_KEY;
1866	key_start.offset = (u64)0;
1867	key_end.objectid = base + offset + nstripes * increment;
1868	key_end.type = BTRFS_EXTENT_ITEM_KEY;
1869	key_end.offset = (u64)0;
1870	reada1 = btrfs_reada_add(root, &key_start, &key_end);
1871
1872	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1873	key_start.type = BTRFS_EXTENT_CSUM_KEY;
1874	key_start.offset = logical;
1875	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1876	key_end.type = BTRFS_EXTENT_CSUM_KEY;
1877	key_end.offset = base + offset + nstripes * increment;
1878	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
1879
1880	if (!IS_ERR(reada1))
1881		btrfs_reada_wait(reada1);
1882	if (!IS_ERR(reada2))
1883		btrfs_reada_wait(reada2);
1884
1885	mutex_lock(&fs_info->scrub_lock);
1886	while (atomic_read(&fs_info->scrub_pause_req)) {
1887		mutex_unlock(&fs_info->scrub_lock);
1888		wait_event(fs_info->scrub_pause_wait,
1889		   atomic_read(&fs_info->scrub_pause_req) == 0);
1890		mutex_lock(&fs_info->scrub_lock);
1891	}
1892	atomic_dec(&fs_info->scrubs_paused);
1893	mutex_unlock(&fs_info->scrub_lock);
1894	wake_up(&fs_info->scrub_pause_wait);
1895
1896	/*
1897	 * collect all data csums for the stripe to avoid seeking during
1898	 * the scrub. This might currently (crc32) end up being about 1MB
1899	 */
1900	blk_start_plug(&plug);
1901
1902	/*
1903	 * now find all extents for each stripe and scrub them
1904	 */
1905	logical = base + offset;
1906	physical = map->stripes[num].physical;
1907	ret = 0;
1908	for (i = 0; i < nstripes; ++i) {
1909		/*
1910		 * canceled?
1911		 */
1912		if (atomic_read(&fs_info->scrub_cancel_req) ||
1913		    atomic_read(&sdev->cancel_req)) {
1914			ret = -ECANCELED;
1915			goto out;
1916		}
1917		/*
1918		 * check to see if we have to pause
1919		 */
1920		if (atomic_read(&fs_info->scrub_pause_req)) {
1921			/* push queued extents */
1922			scrub_submit(sdev);
1923			wait_event(sdev->list_wait,
1924				   atomic_read(&sdev->in_flight) == 0);
1925			atomic_inc(&fs_info->scrubs_paused);
1926			wake_up(&fs_info->scrub_pause_wait);
1927			mutex_lock(&fs_info->scrub_lock);
1928			while (atomic_read(&fs_info->scrub_pause_req)) {
1929				mutex_unlock(&fs_info->scrub_lock);
1930				wait_event(fs_info->scrub_pause_wait,
1931				   atomic_read(&fs_info->scrub_pause_req) == 0);
1932				mutex_lock(&fs_info->scrub_lock);
1933			}
1934			atomic_dec(&fs_info->scrubs_paused);
1935			mutex_unlock(&fs_info->scrub_lock);
1936			wake_up(&fs_info->scrub_pause_wait);
1937		}
1938
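		/*
		 * Collect the data checksums covering this stripe into
		 * sdev->csum_list so they are at hand when the extents found
		 * below are scrubbed.
		 */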
1939		ret = btrfs_lookup_csums_range(csum_root, logical,
1940					       logical + map->stripe_len - 1,
1941					       &sdev->csum_list, 1);
1942		if (ret)
1943			goto out;
1944
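		/*
		 * Find the first extent item that may overlap this stripe.
		 * The search can land one item past it, in which case we step
		 * back to the previous EXTENT_ITEM below.
		 */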
1945		key.objectid = logical;
1946		key.type = BTRFS_EXTENT_ITEM_KEY;
1947		key.offset = (u64)0;
1948
1949		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1950		if (ret < 0)
1951			goto out;
1952		if (ret > 0) {
1953			ret = btrfs_previous_item(root, path, 0,
1954						  BTRFS_EXTENT_ITEM_KEY);
1955			if (ret < 0)
1956				goto out;
1957			if (ret > 0) {
1958				/* there's no smaller item, so stick with the
1959				 * larger one */
1960				btrfs_release_path(path);
1961				ret = btrfs_search_slot(NULL, root, &key,
1962							path, 0, 0);
1963				if (ret < 0)
1964					goto out;
1965			}
1966		}
1967
1968		while (1) {
1969			l = path->nodes[0];
1970			slot = path->slots[0];
1971			if (slot >= btrfs_header_nritems(l)) {
1972				ret = btrfs_next_leaf(root, path);
1973				if (ret == 0)
1974					continue;
1975				if (ret < 0)
1976					goto out;
1977
1978				break;
1979			}
1980			btrfs_item_key_to_cpu(l, &key, slot);
1981
1982			if (key.objectid + key.offset <= logical)
1983				goto next;
1984
1985			if (key.objectid >= logical + map->stripe_len)
1986				break;
1987
1988			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1989				goto next;
1990
1991			extent = btrfs_item_ptr(l, slot,
1992						struct btrfs_extent_item);
1993			flags = btrfs_extent_flags(l, extent);
1994			generation = btrfs_extent_generation(l, extent);
1995
1996			if (key.objectid < logical &&
1997			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
1998				printk(KERN_ERR
1999				       "btrfs scrub: tree block %llu spanning "
2000				       "stripes, ignored. logical=%llu\n",
2001				       (unsigned long long)key.objectid,
2002				       (unsigned long long)logical);
2003				goto next;
2004			}
2005
2006			/*
2007			 * trim extent to this stripe
2008			 */
2009			if (key.objectid < logical) {
2010				key.offset -= logical - key.objectid;
2011				key.objectid = logical;
2012			}
2013			if (key.objectid + key.offset >
2014			    logical + map->stripe_len) {
2015				key.offset = logical + map->stripe_len -
2016					     key.objectid;
2017			}
2018
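			/*
			 * Scrub the (possibly trimmed) extent. Its physical
			 * location is the extent's offset into the stripe
			 * added to the stripe's physical start.
			 */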
2019			ret = scrub_extent(sdev, key.objectid, key.offset,
2020					   key.objectid - logical + physical,
2021					   flags, generation, mirror_num);
2022			if (ret)
2023				goto out;
2024
2025next:
2026			path->slots[0]++;
2027		}
2028		btrfs_release_path(path);
2029		logical += increment;
2030		physical += map->stripe_len;
2031		spin_lock(&sdev->stat_lock);
2032		sdev->stat.last_physical = physical;
2033		spin_unlock(&sdev->stat_lock);
2034	}
2035	/* push queued extents */
2036	scrub_submit(sdev);
2037
2038out:
2039	blk_finish_plug(&plug);
2040	btrfs_free_path(path);
2041	return ret < 0 ? ret : 0;
2042}
2043
2044static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
2045	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
2046	u64 dev_offset)
2047{
2048	struct btrfs_mapping_tree *map_tree =
2049		&sdev->dev->dev_root->fs_info->mapping_tree;
2050	struct map_lookup *map;
2051	struct extent_map *em;
2052	int i;
2053	int ret = -EINVAL;
2054
2055	read_lock(&map_tree->map_tree.lock);
2056	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2057	read_unlock(&map_tree->map_tree.lock);
2058
2059	if (!em)
2060		return -EINVAL;
2061
2062	map = (struct map_lookup *)em->bdev;
2063	if (em->start != chunk_offset)
2064		goto out;
2065
2066	if (em->len < length)
2067		goto out;
2068
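	/*
	 * A device may hold more than one stripe of the same chunk (e.g. for
	 * DUP), so match on both the device and the physical offset of the
	 * dev extent that was asked to be scrubbed.
	 */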
2069	for (i = 0; i < map->num_stripes; ++i) {
2070		if (map->stripes[i].dev == sdev->dev &&
2071		    map->stripes[i].physical == dev_offset) {
2072			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
2073			if (ret)
2074				goto out;
2075		}
2076	}
2077out:
2078	free_extent_map(em);
2079
2080	return ret;
2081}
2082
2083static noinline_for_stack
2084int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
2085{
2086	struct btrfs_dev_extent *dev_extent = NULL;
2087	struct btrfs_path *path;
2088	struct btrfs_root *root = sdev->dev->dev_root;
2089	struct btrfs_fs_info *fs_info = root->fs_info;
2090	u64 length;
2091	u64 chunk_tree;
2092	u64 chunk_objectid;
2093	u64 chunk_offset;
2094	int ret;
2095	int slot;
2096	struct extent_buffer *l;
2097	struct btrfs_key key;
2098	struct btrfs_key found_key;
2099	struct btrfs_block_group_cache *cache;
2100
2101	path = btrfs_alloc_path();
2102	if (!path)
2103		return -ENOMEM;
2104
2105	path->reada = 2;
2106	path->search_commit_root = 1;
2107	path->skip_locking = 1;
2108
2109	key.objectid = sdev->dev->devid;
2110	key.offset = 0ull;
2111	key.type = BTRFS_DEV_EXTENT_KEY;
2112
2113
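	/*
	 * Walk all DEV_EXTENT items of this device within [start, end) and
	 * scrub the chunk each dev extent belongs to.
	 */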
2114	while (1) {
2115		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2116		if (ret < 0)
2117			break;
2118		if (ret > 0) {
2119			if (path->slots[0] >=
2120			    btrfs_header_nritems(path->nodes[0])) {
2121				ret = btrfs_next_leaf(root, path);
2122				if (ret)
2123					break;
2124			}
2125		}
2126
2127		l = path->nodes[0];
2128		slot = path->slots[0];
2129
2130		btrfs_item_key_to_cpu(l, &found_key, slot);
2131
2132		if (found_key.objectid != sdev->dev->devid)
2133			break;
2134
2135		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2136			break;
2137
2138		if (found_key.offset >= end)
2139			break;
2140
2141		if (found_key.offset < key.offset)
2142			break;
2143
2144		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2145		length = btrfs_dev_extent_length(l, dev_extent);
2146
2147		if (found_key.offset + length <= start) {
2148			key.offset = found_key.offset + length;
2149			btrfs_release_path(path);
2150			continue;
2151		}
2152
2153		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2154		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2155		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2156
2157		/*
2158		 * get a reference on the corresponding block group to prevent
2159		 * the chunk from going away while we scrub it
2160		 */
2161		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2162		if (!cache) {
2163			ret = -ENOENT;
2164			break;
2165		}
2166		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
2167				  chunk_offset, length, found_key.offset);
2168		btrfs_put_block_group(cache);
2169		if (ret)
2170			break;
2171
2172		key.offset = found_key.offset + length;
2173		btrfs_release_path(path);
2174	}
2175
2176	btrfs_free_path(path);
2177
2178	/*
2179	 * ret can still be 1 from search_slot or next_leaf;
2180	 * that's not an error.
2181	 */
2182	return ret < 0 ? ret : 0;
2183}
2184
2185static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
2186{
2187	int	i;
2188	u64	bytenr;
2189	u64	gen;
2190	int	ret;
2191	struct btrfs_device *device = sdev->dev;
2192	struct btrfs_root *root = device->dev_root;
2193
2194	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2195		return -EIO;
2196
2197	gen = root->fs_info->last_trans_committed;
2198
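	/*
	 * Check all super block mirrors that fit on the device. The last
	 * committed generation is used as the expected generation.
	 */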
2199	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2200		bytenr = btrfs_sb_offset(i);
2201		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
2202			break;
2203
2204		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2205				     BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
2206		if (ret)
2207			return ret;
2208	}
2209	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2210
2211	return 0;
2212}
2213
2214/*
2215 * Get a reference count on fs_info->scrub_workers; start the workers if necessary.
2216 */
2217static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
2218{
2219	struct btrfs_fs_info *fs_info = root->fs_info;
2220	int ret = 0;
2221
2222	mutex_lock(&fs_info->scrub_lock);
2223	if (fs_info->scrub_workers_refcnt == 0) {
2224		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2225			   fs_info->thread_pool_size, &fs_info->generic_worker);
2226		fs_info->scrub_workers.idle_thresh = 4;
2227		ret = btrfs_start_workers(&fs_info->scrub_workers);
2228		if (ret)
2229			goto out;
2230	}
2231	++fs_info->scrub_workers_refcnt;
2232out:
2233	mutex_unlock(&fs_info->scrub_lock);
2234
2235	return ret;
2236}
2237
2238static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
2239{
2240	struct btrfs_fs_info *fs_info = root->fs_info;
2241
2242	mutex_lock(&fs_info->scrub_lock);
2243	if (--fs_info->scrub_workers_refcnt == 0)
2244		btrfs_stop_workers(&fs_info->scrub_workers);
2245	WARN_ON(fs_info->scrub_workers_refcnt < 0);
2246	mutex_unlock(&fs_info->scrub_lock);
2247}
2248
2249
2250int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
2251		    struct btrfs_scrub_progress *progress, int readonly)
2252{
2253	struct scrub_dev *sdev;
2254	struct btrfs_fs_info *fs_info = root->fs_info;
2255	int ret;
2256	struct btrfs_device *dev;
2257
2258	if (btrfs_fs_closing(root->fs_info))
2259		return -EINVAL;
2260
2261	/*
2262	 * check some assumptions
2263	 */
2264	if (root->nodesize != root->leafsize) {
2265		printk(KERN_ERR
2266		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
2267		       root->nodesize, root->leafsize);
2268		return -EINVAL;
2269	}
2270
2271	if (root->nodesize > BTRFS_STRIPE_LEN) {
2272		/*
2273		 * The way scrub is implemented, it is unable to calculate
2274		 * the checksum in this case. Do not handle this situation
2275		 * at all because it won't ever happen.
2276		 */
2277		printk(KERN_ERR
2278		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2279		       root->nodesize, BTRFS_STRIPE_LEN);
2280		return -EINVAL;
2281	}
2282
2283	if (root->sectorsize != PAGE_SIZE) {
2284		/* not supported for data w/o checksums */
2285		printk(KERN_ERR
2286		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %llu) fails\n",
2287		       root->sectorsize, (unsigned long long)PAGE_SIZE);
2288		return -EINVAL;
2289	}
2290
2291	ret = scrub_workers_get(root);
2292	if (ret)
2293		return ret;
2294
2295	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2296	dev = btrfs_find_device(root, devid, NULL, NULL);
2297	if (!dev || dev->missing) {
2298		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2299		scrub_workers_put(root);
2300		return -ENODEV;
2301	}
2302	mutex_lock(&fs_info->scrub_lock);
2303
2304	if (!dev->in_fs_metadata) {
2305		mutex_unlock(&fs_info->scrub_lock);
2306		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2307		scrub_workers_put(root);
2308		return -ENODEV;
2309	}
2310
2311	if (dev->scrub_device) {
2312		mutex_unlock(&fs_info->scrub_lock);
2313		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2314		scrub_workers_put(root);
2315		return -EINPROGRESS;
2316	}
2317	sdev = scrub_setup_dev(dev);
2318	if (IS_ERR(sdev)) {
2319		mutex_unlock(&fs_info->scrub_lock);
2320		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2321		scrub_workers_put(root);
2322		return PTR_ERR(sdev);
2323	}
2324	sdev->readonly = readonly;
2325	dev->scrub_device = sdev;
2326
2327	atomic_inc(&fs_info->scrubs_running);
2328	mutex_unlock(&fs_info->scrub_lock);
2329	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2330
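	/*
	 * Take scrub_super_lock for reading so the super blocks cannot be
	 * rewritten while they are checked; the super block writers take it
	 * for writing via btrfs_scrub_pause_super()/btrfs_scrub_continue_super().
	 */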
2331	down_read(&fs_info->scrub_super_lock);
2332	ret = scrub_supers(sdev);
2333	up_read(&fs_info->scrub_super_lock);
2334
2335	if (!ret)
2336		ret = scrub_enumerate_chunks(sdev, start, end);
2337
2338	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2339	atomic_dec(&fs_info->scrubs_running);
2340	wake_up(&fs_info->scrub_pause_wait);
2341
2342	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
2343
2344	if (progress)
2345		memcpy(progress, &sdev->stat, sizeof(*progress));
2346
2347	mutex_lock(&fs_info->scrub_lock);
2348	dev->scrub_device = NULL;
2349	mutex_unlock(&fs_info->scrub_lock);
2350
2351	scrub_free_dev(sdev);
2352	scrub_workers_put(root);
2353
2354	return ret;
2355}
2356
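/*
 * Request all running scrubs to pause and wait until every one of them has
 * reached the paused state. btrfs_scrub_continue() lets them resume.
 */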
2357void btrfs_scrub_pause(struct btrfs_root *root)
2358{
2359	struct btrfs_fs_info *fs_info = root->fs_info;
2360
2361	mutex_lock(&fs_info->scrub_lock);
2362	atomic_inc(&fs_info->scrub_pause_req);
2363	while (atomic_read(&fs_info->scrubs_paused) !=
2364	       atomic_read(&fs_info->scrubs_running)) {
2365		mutex_unlock(&fs_info->scrub_lock);
2366		wait_event(fs_info->scrub_pause_wait,
2367			   atomic_read(&fs_info->scrubs_paused) ==
2368			   atomic_read(&fs_info->scrubs_running));
2369		mutex_lock(&fs_info->scrub_lock);
2370	}
2371	mutex_unlock(&fs_info->scrub_lock);
2372}
2373
2374void btrfs_scrub_continue(struct btrfs_root *root)
2375{
2376	struct btrfs_fs_info *fs_info = root->fs_info;
2377
2378	atomic_dec(&fs_info->scrub_pause_req);
2379	wake_up(&fs_info->scrub_pause_wait);
2380}
2381
2382void btrfs_scrub_pause_super(struct btrfs_root *root)
2383{
2384	down_write(&root->fs_info->scrub_super_lock);
2385}
2386
2387void btrfs_scrub_continue_super(struct btrfs_root *root)
2388{
2389	up_write(&root->fs_info->scrub_super_lock);
2390}
2391
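/*
 * Cancel all running scrubs and wait for them to finish. Returns -ENOTCONN
 * if no scrub was running.
 */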
2392int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2393{
2394
2395	mutex_lock(&fs_info->scrub_lock);
2396	if (!atomic_read(&fs_info->scrubs_running)) {
2397		mutex_unlock(&fs_info->scrub_lock);
2398		return -ENOTCONN;
2399	}
2400
2401	atomic_inc(&fs_info->scrub_cancel_req);
2402	while (atomic_read(&fs_info->scrubs_running)) {
2403		mutex_unlock(&fs_info->scrub_lock);
2404		wait_event(fs_info->scrub_pause_wait,
2405			   atomic_read(&fs_info->scrubs_running) == 0);
2406		mutex_lock(&fs_info->scrub_lock);
2407	}
2408	atomic_dec(&fs_info->scrub_cancel_req);
2409	mutex_unlock(&fs_info->scrub_lock);
2410
2411	return 0;
2412}
2413
2414int btrfs_scrub_cancel(struct btrfs_root *root)
2415{
2416	return __btrfs_scrub_cancel(root->fs_info);
2417}
2418
2419int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
2420{
2421	struct btrfs_fs_info *fs_info = root->fs_info;
2422	struct scrub_dev *sdev;
2423
2424	mutex_lock(&fs_info->scrub_lock);
2425	sdev = dev->scrub_device;
2426	if (!sdev) {
2427		mutex_unlock(&fs_info->scrub_lock);
2428		return -ENOTCONN;
2429	}
2430	atomic_inc(&sdev->cancel_req);
2431	while (dev->scrub_device) {
2432		mutex_unlock(&fs_info->scrub_lock);
2433		wait_event(fs_info->scrub_pause_wait,
2434			   dev->scrub_device == NULL);
2435		mutex_lock(&fs_info->scrub_lock);
2436	}
2437	mutex_unlock(&fs_info->scrub_lock);
2438
2439	return 0;
2440}
2441
2442int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
2443{
2444	struct btrfs_fs_info *fs_info = root->fs_info;
2445	struct btrfs_device *dev;
2446	int ret;
2447
2448	/*
2449	 * we have to hold the device_list_mutex here so the device
2450	 * does not go away in cancel_dev. FIXME: find a better solution
2451	 */
2452	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2453	dev = btrfs_find_device(root, devid, NULL, NULL);
2454	if (!dev) {
2455		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2456		return -ENODEV;
2457	}
2458	ret = btrfs_scrub_cancel_dev(root, dev);
2459	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2460
2461	return ret;
2462}
2463
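/*
 * Report scrub progress for the given devid: returns -ENODEV if the device
 * is unknown, -ENOTCONN if no scrub is running on it, otherwise 0 with
 * *progress filled in.
 */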
2464int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
2465			 struct btrfs_scrub_progress *progress)
2466{
2467	struct btrfs_device *dev;
2468	struct scrub_dev *sdev = NULL;
2469
2470	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2471	dev = btrfs_find_device(root, devid, NULL, NULL);
2472	if (dev)
2473		sdev = dev->scrub_device;
2474	if (sdev)
2475		memcpy(progress, &sdev->stat, sizeof(*progress));
2476	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2477
2478	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
2479}