   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/blkdev.h>
   7#include <linux/ratelimit.h>
   8#include <linux/sched/mm.h>
   9#include <crypto/hash.h>
  10#include "ctree.h"
  11#include "discard.h"
  12#include "volumes.h"
  13#include "disk-io.h"
  14#include "ordered-data.h"
  15#include "transaction.h"
  16#include "backref.h"
  17#include "extent_io.h"
  18#include "dev-replace.h"
  19#include "raid56.h"
  20#include "block-group.h"
  21#include "zoned.h"
  22#include "fs.h"
  23#include "accessors.h"
  24#include "file-item.h"
  25#include "scrub.h"
  26#include "raid-stripe-tree.h"
  27
  28/*
   29 * This is only the first step towards a full-featured scrub. It reads all
   30 * extents and super blocks and verifies the checksums. In case a bad checksum
  31 * is found or the extent cannot be read, good data will be written back if
  32 * any can be found.
  33 *
  34 * Future enhancements:
  35 *  - In case an unrepairable extent is encountered, track which files are
  36 *    affected and report them
  37 *  - track and record media errors, throw out bad devices
  38 *  - add a mode to also read unallocated space
  39 */
  40
  41struct scrub_ctx;
  42
  43/*
  44 * The following value only influences the performance.
  45 *
  46 * This determines how many stripes would be submitted in one go,
  47 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
  48 */
  49#define SCRUB_STRIPES_PER_GROUP		8
  50
  51/*
  52 * How many groups we have for each sctx.
  53 *
  54 * This would be 8M per device, the same value as the old scrub in-flight bios
  55 * size limit.
  56 */
  57#define SCRUB_GROUPS_PER_SCTX		16
  58
  59#define SCRUB_TOTAL_STRIPES		(SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
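/*
 * A quick size check (a sketch, assuming the current BTRFS_STRIPE_LEN of
 * 64KiB):
 *
 *   SCRUB_STRIPES_PER_GROUP * BTRFS_STRIPE_LEN =  8 * 64KiB  = 512KiB
 *   SCRUB_GROUPS_PER_SCTX * 512KiB             = 16 * 512KiB = 8MiB
 *   SCRUB_TOTAL_STRIPES                        = 16 * 8      = 128 stripes
 */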
  60
  61/*
  62 * The following value times PAGE_SIZE needs to be large enough to match the
  63 * largest node/leaf/sector size that shall be supported.
  64 */
  65#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
  66
  67/* Represent one sector and its needed info to verify the content. */
  68struct scrub_sector_verification {
  69	bool is_metadata;
  70
  71	union {
  72		/*
  73		 * Csum pointer for data csum verification.  Should point to a
  74		 * sector csum inside scrub_stripe::csums.
  75		 *
  76		 * NULL if this data sector has no csum.
  77		 */
  78		u8 *csum;
  79
  80		/*
  81		 * Extra info for metadata verification.  All sectors inside a
  82		 * tree block share the same generation.
  83		 */
  84		u64 generation;
  85	};
  86};
  87
  88enum scrub_stripe_flags {
  89	/* Set when @mirror_num, @dev, @physical and @logical are set. */
  90	SCRUB_STRIPE_FLAG_INITIALIZED,
  91
  92	/* Set when the read-repair is finished. */
  93	SCRUB_STRIPE_FLAG_REPAIR_DONE,
  94
  95	/*
   96	 * Set for data stripes if the scrub is triggered from a P/Q stripe.
  97	 * During such scrub, we should not report errors in data stripes, nor
  98	 * update the accounting.
  99	 */
 100	SCRUB_STRIPE_FLAG_NO_REPORT,
 101};
 102
 103#define SCRUB_STRIPE_PAGES		(BTRFS_STRIPE_LEN / PAGE_SIZE)
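/*
 * E.g. with 4KiB pages this is 64KiB / 4KiB = 16 pages per stripe; with
 * 64KiB pages a single page backs the whole stripe.
 */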
 104
 105/*
 106 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 107 */
 108struct scrub_stripe {
 109	struct scrub_ctx *sctx;
 110	struct btrfs_block_group *bg;
 111
 112	struct page *pages[SCRUB_STRIPE_PAGES];
 113	struct scrub_sector_verification *sectors;
 114
 115	struct btrfs_device *dev;
 116	u64 logical;
 117	u64 physical;
 118
 119	u16 mirror_num;
 120
 121	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
 122	u16 nr_sectors;
 123
 124	/*
 125	 * How many data/meta extents are in this stripe.  Only for scrub status
 126	 * reporting purposes.
 127	 */
 128	u16 nr_data_extents;
 129	u16 nr_meta_extents;
 130
 131	atomic_t pending_io;
 132	wait_queue_head_t io_wait;
 133	wait_queue_head_t repair_wait;
 134
 135	/*
 136	 * Indicate the states of the stripe.  Bits are defined in
 137	 * scrub_stripe_flags enum.
 138	 */
 139	unsigned long state;
 140
 141	/* Indicate which sectors are covered by extent items. */
 142	unsigned long extent_sector_bitmap;
 143
 144	/*
 145	 * The errors hit during the initial read of the stripe.
 146	 *
 147	 * Would be utilized for error reporting and repair.
 148	 *
 149	 * The remaining init_nr_* records the number of errors hit, only used
 150	 * by error reporting.
 151	 */
 152	unsigned long init_error_bitmap;
 153	unsigned int init_nr_io_errors;
 154	unsigned int init_nr_csum_errors;
 155	unsigned int init_nr_meta_errors;
 156
 157	/*
 158	 * The following error bitmaps are all for the current status.
 159	 * Every time we submit a new read, these bitmaps may be updated.
 160	 *
 161	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
 162	 *
 163	 * IO and csum errors can happen for both metadata and data.
 164	 */
 165	unsigned long error_bitmap;
 166	unsigned long io_error_bitmap;
 167	unsigned long csum_error_bitmap;
 168	unsigned long meta_error_bitmap;
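	/*
	 * E.g. (a sketch, one bit per sector): a csum error on sector 3 and
	 * an IO error on sector 7 give csum_error_bitmap == 0x08,
	 * io_error_bitmap == 0x80, and thus error_bitmap == 0x88.
	 */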
 169
 170	/* For writeback (repair or replace) error reporting. */
 171	unsigned long write_error_bitmap;
 172
 173	/* Writeback can be concurrent, thus we need to protect the bitmap. */
 174	spinlock_t write_error_lock;
 175
 176	/*
 177	 * Checksum for the whole stripe if this stripe is inside a data block
 178	 * group.
 179	 */
 180	u8 *csums;
 181
 182	struct work_struct work;
 183};
 184
 185struct scrub_ctx {
 186	struct scrub_stripe	stripes[SCRUB_TOTAL_STRIPES];
 187	struct scrub_stripe	*raid56_data_stripes;
 188	struct btrfs_fs_info	*fs_info;
 189	struct btrfs_path	extent_path;
 190	struct btrfs_path	csum_path;
 191	int			first_free;
 192	int			cur_stripe;
 193	atomic_t		cancel_req;
 194	int			readonly;
 195
 196	/* State of IO submission throttling affecting the associated device */
 197	ktime_t			throttle_deadline;
 198	u64			throttle_sent;
 199
 200	int			is_dev_replace;
 201	u64			write_pointer;
 202
 203	struct mutex            wr_lock;
 204	struct btrfs_device     *wr_tgtdev;
 205
 206	/*
 207	 * statistics
 208	 */
 209	struct btrfs_scrub_progress stat;
 210	spinlock_t		stat_lock;
 211
 212	/*
 213	 * Use a ref counter to avoid use-after-free issues. Scrub workers
 214	 * decrement bios_in_flight and workers_pending and then do a wakeup
 215	 * on the list_wait wait queue. We must ensure the main scrub task
 216	 * doesn't free the scrub context before or while the workers are
 217	 * doing the wakeup() call.
 218	 */
 219	refcount_t              refs;
 220};
 221
 222struct scrub_warning {
 223	struct btrfs_path	*path;
 224	u64			extent_item_size;
 225	const char		*errstr;
 226	u64			physical;
 227	u64			logical;
 228	struct btrfs_device	*dev;
 229};
 230
 231static void release_scrub_stripe(struct scrub_stripe *stripe)
 232{
 233	if (!stripe)
 234		return;
 235
 236	for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
 237		if (stripe->pages[i])
 238			__free_page(stripe->pages[i]);
 239		stripe->pages[i] = NULL;
 240	}
 241	kfree(stripe->sectors);
 242	kfree(stripe->csums);
 243	stripe->sectors = NULL;
 244	stripe->csums = NULL;
 245	stripe->sctx = NULL;
 246	stripe->state = 0;
 247}
 248
 249static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
 250			     struct scrub_stripe *stripe)
 251{
 252	int ret;
 253
 254	memset(stripe, 0, sizeof(*stripe));
 255
 256	stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
 257	stripe->state = 0;
 258
 259	init_waitqueue_head(&stripe->io_wait);
 260	init_waitqueue_head(&stripe->repair_wait);
 261	atomic_set(&stripe->pending_io, 0);
 262	spin_lock_init(&stripe->write_error_lock);
 263
 264	ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, 0);
 265	if (ret < 0)
 266		goto error;
 267
 268	stripe->sectors = kcalloc(stripe->nr_sectors,
 269				  sizeof(struct scrub_sector_verification),
 270				  GFP_KERNEL);
 271	if (!stripe->sectors)
 272		goto error;
 273
 274	stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
 275				fs_info->csum_size, GFP_KERNEL);
 276	if (!stripe->csums)
 277		goto error;
 278	return 0;
 279error:
 280	release_scrub_stripe(stripe);
 281	return -ENOMEM;
 282}
 283
 284static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
 285{
 286	wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
 287}
 288
 289static void scrub_put_ctx(struct scrub_ctx *sctx);
 290
 291static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 292{
 293	while (atomic_read(&fs_info->scrub_pause_req)) {
 294		mutex_unlock(&fs_info->scrub_lock);
 295		wait_event(fs_info->scrub_pause_wait,
 296		   atomic_read(&fs_info->scrub_pause_req) == 0);
 297		mutex_lock(&fs_info->scrub_lock);
 298	}
 299}
 300
 301static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 302{
 303	atomic_inc(&fs_info->scrubs_paused);
 304	wake_up(&fs_info->scrub_pause_wait);
 305}
 306
 307static void scrub_pause_off(struct btrfs_fs_info *fs_info)
 308{
 309	mutex_lock(&fs_info->scrub_lock);
 310	__scrub_blocked_if_needed(fs_info);
 311	atomic_dec(&fs_info->scrubs_paused);
 312	mutex_unlock(&fs_info->scrub_lock);
 313
 314	wake_up(&fs_info->scrub_pause_wait);
 315}
 316
 317static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
 318{
 319	scrub_pause_on(fs_info);
 320	scrub_pause_off(fs_info);
 321}
 322
 323static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 324{
 325	int i;
 326
 327	if (!sctx)
 328		return;
 329
 330	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
 331		release_scrub_stripe(&sctx->stripes[i]);
 332
 333	kvfree(sctx);
 334}
 335
 336static void scrub_put_ctx(struct scrub_ctx *sctx)
 337{
 338	if (refcount_dec_and_test(&sctx->refs))
 339		scrub_free_ctx(sctx);
 340}
 341
 342static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 343		struct btrfs_fs_info *fs_info, int is_dev_replace)
 344{
 345	struct scrub_ctx *sctx;
 346	int		i;
 347
 348	/* Since sctx has inline 128 stripes, it can go beyond 64K easily.  Use
 349	 * kvzalloc().
 350	 */
 351	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
 352	if (!sctx)
 353		goto nomem;
 354	refcount_set(&sctx->refs, 1);
 355	sctx->is_dev_replace = is_dev_replace;
 356	sctx->fs_info = fs_info;
 357	sctx->extent_path.search_commit_root = 1;
 358	sctx->extent_path.skip_locking = 1;
 359	sctx->csum_path.search_commit_root = 1;
 360	sctx->csum_path.skip_locking = 1;
 361	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
 362		int ret;
 363
 364		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
 365		if (ret < 0)
 366			goto nomem;
 367		sctx->stripes[i].sctx = sctx;
 368	}
 369	sctx->first_free = 0;
 370	atomic_set(&sctx->cancel_req, 0);
 371
 372	spin_lock_init(&sctx->stat_lock);
 373	sctx->throttle_deadline = 0;
 374
 375	mutex_init(&sctx->wr_lock);
 376	if (is_dev_replace) {
 377		WARN_ON(!fs_info->dev_replace.tgtdev);
 378		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
 379	}
 380
 381	return sctx;
 382
 383nomem:
 384	scrub_free_ctx(sctx);
 385	return ERR_PTR(-ENOMEM);
 386}
 387
 388static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
 389				     u64 root, void *warn_ctx)
 390{
 391	u32 nlink;
 392	int ret;
 393	int i;
 394	unsigned nofs_flag;
 395	struct extent_buffer *eb;
 396	struct btrfs_inode_item *inode_item;
 397	struct scrub_warning *swarn = warn_ctx;
 398	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
 399	struct inode_fs_paths *ipath = NULL;
 400	struct btrfs_root *local_root;
 401	struct btrfs_key key;
 402
 403	local_root = btrfs_get_fs_root(fs_info, root, true);
 404	if (IS_ERR(local_root)) {
 405		ret = PTR_ERR(local_root);
 406		goto err;
 407	}
 408
 409	/*
  410	 * This makes the path point to (inum INODE_ITEM ioff).
 411	 */
 412	key.objectid = inum;
 413	key.type = BTRFS_INODE_ITEM_KEY;
 414	key.offset = 0;
 415
 416	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
 417	if (ret) {
 418		btrfs_put_root(local_root);
 419		btrfs_release_path(swarn->path);
 420		goto err;
 421	}
 422
 423	eb = swarn->path->nodes[0];
 424	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 425					struct btrfs_inode_item);
 426	nlink = btrfs_inode_nlink(eb, inode_item);
 427	btrfs_release_path(swarn->path);
 428
 429	/*
 430	 * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
 431	 * uses GFP_NOFS in this context, so we keep it consistent but it does
 432	 * not seem to be strictly necessary.
 433	 */
 434	nofs_flag = memalloc_nofs_save();
 435	ipath = init_ipath(4096, local_root, swarn->path);
 436	memalloc_nofs_restore(nofs_flag);
 437	if (IS_ERR(ipath)) {
 438		btrfs_put_root(local_root);
 439		ret = PTR_ERR(ipath);
 440		ipath = NULL;
 441		goto err;
 442	}
 443	ret = paths_from_inode(inum, ipath);
 444
 445	if (ret < 0)
 446		goto err;
 447
 448	/*
  449	 * We deliberately ignore the fact that ipath might have been too small
  450	 * to hold all of the paths here.
 451	 */
 452	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 453		btrfs_warn_in_rcu(fs_info,
 454"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
 455				  swarn->errstr, swarn->logical,
 456				  btrfs_dev_name(swarn->dev),
 457				  swarn->physical,
 458				  root, inum, offset,
 459				  fs_info->sectorsize, nlink,
 460				  (char *)(unsigned long)ipath->fspath->val[i]);
 461
 462	btrfs_put_root(local_root);
 463	free_ipath(ipath);
 464	return 0;
 465
 466err:
 467	btrfs_warn_in_rcu(fs_info,
 468			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
 469			  swarn->errstr, swarn->logical,
 470			  btrfs_dev_name(swarn->dev),
 471			  swarn->physical,
 472			  root, inum, offset, ret);
 473
 474	free_ipath(ipath);
 475	return 0;
 476}
 477
 478static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
 479				       bool is_super, u64 logical, u64 physical)
 480{
 481	struct btrfs_fs_info *fs_info = dev->fs_info;
 482	struct btrfs_path *path;
 483	struct btrfs_key found_key;
 484	struct extent_buffer *eb;
 485	struct btrfs_extent_item *ei;
 486	struct scrub_warning swarn;
 487	u64 flags = 0;
 488	u32 item_size;
 489	int ret;
 490
 491	/* Super block error, no need to search extent tree. */
 492	if (is_super) {
 493		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
 494				  errstr, btrfs_dev_name(dev), physical);
 495		return;
 496	}
 497	path = btrfs_alloc_path();
 498	if (!path)
 499		return;
 500
 501	swarn.physical = physical;
 502	swarn.logical = logical;
 503	swarn.errstr = errstr;
 504	swarn.dev = NULL;
 505
 506	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
 507				  &flags);
 508	if (ret < 0)
 509		goto out;
 510
 511	swarn.extent_item_size = found_key.offset;
 512
 513	eb = path->nodes[0];
 514	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 515	item_size = btrfs_item_size(eb, path->slots[0]);
 516
 517	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 518		unsigned long ptr = 0;
 519		u8 ref_level;
 520		u64 ref_root;
 521
 522		while (true) {
 523			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 524						      item_size, &ref_root,
 525						      &ref_level);
 526			if (ret < 0) {
 527				btrfs_warn(fs_info,
 528				"failed to resolve tree backref for logical %llu: %d",
 529						  swarn.logical, ret);
 530				break;
 531			}
 532			if (ret > 0)
 533				break;
 534			btrfs_warn_in_rcu(fs_info,
 535"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
 536				errstr, swarn.logical, btrfs_dev_name(dev),
 537				swarn.physical, (ref_level ? "node" : "leaf"),
 538				ref_level, ref_root);
 539		}
 540		btrfs_release_path(path);
 541	} else {
 542		struct btrfs_backref_walk_ctx ctx = { 0 };
 543
 544		btrfs_release_path(path);
 545
 546		ctx.bytenr = found_key.objectid;
 547		ctx.extent_item_pos = swarn.logical - found_key.objectid;
 548		ctx.fs_info = fs_info;
 549
 550		swarn.path = path;
 551		swarn.dev = dev;
 552
 553		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
 554	}
 555
 556out:
 557	btrfs_free_path(path);
 558}
 559
 560static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
 561{
 562	int ret = 0;
 563	u64 length;
 564
 565	if (!btrfs_is_zoned(sctx->fs_info))
 566		return 0;
 567
 568	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
 569		return 0;
 570
 571	if (sctx->write_pointer < physical) {
 572		length = physical - sctx->write_pointer;
 573
 574		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
 575						sctx->write_pointer, length);
 576		if (!ret)
 577			sctx->write_pointer = physical;
 578	}
 579	return ret;
 580}
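/*
 * Worked example (a sketch): if the target zone's write pointer sits at
 * 1MiB but the data must land at physical 1.25MiB, the 256KiB gap is
 * zeroed out first so the zone stays sequential, and the cached write
 * pointer advances to 1.25MiB before the real write is issued.
 */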
 581
 582static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
 583{
 584	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 585	int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
 586
 587	return stripe->pages[page_index];
 588}
 589
 590static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
 591						 int sector_nr)
 592{
 593	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 594
 595	return offset_in_page(sector_nr << fs_info->sectorsize_bits);
 596}
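/*
 * Example of the mapping done by the two helpers above, assuming a 4KiB
 * sectorsize: with 4KiB pages, sector 5 is byte offset 20480, i.e.
 * pages[5] at in-page offset 0; in a subpage setup with 64KiB pages the
 * same sector lives in pages[0] at in-page offset 20480.  This is why
 * the page and the in-page offset must be looked up separately.
 */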
 597
 598static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
 599{
 600	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 601	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 602	const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
 603	const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
 604	const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
 605	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 606	u8 on_disk_csum[BTRFS_CSUM_SIZE];
 607	u8 calculated_csum[BTRFS_CSUM_SIZE];
 608	struct btrfs_header *header;
 609
 610	/*
 611	 * Here we don't have a good way to attach the pages (and subpages)
 612	 * to a dummy extent buffer, thus we have to directly grab the members
 613	 * from pages.
 614	 */
 615	header = (struct btrfs_header *)(page_address(first_page) + first_off);
 616	memcpy(on_disk_csum, header->csum, fs_info->csum_size);
 617
 618	if (logical != btrfs_stack_header_bytenr(header)) {
 619		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 620		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 621		btrfs_warn_rl(fs_info,
 622		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
 623			      logical, stripe->mirror_num,
 624			      btrfs_stack_header_bytenr(header), logical);
 625		return;
 626	}
 627	if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
 628		   BTRFS_FSID_SIZE) != 0) {
 629		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 630		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 631		btrfs_warn_rl(fs_info,
 632		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
 633			      logical, stripe->mirror_num,
 634			      header->fsid, fs_info->fs_devices->fsid);
 635		return;
 636	}
 637	if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
 638		   BTRFS_UUID_SIZE) != 0) {
 639		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 640		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 641		btrfs_warn_rl(fs_info,
 642		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
 643			      logical, stripe->mirror_num,
 644			      header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
 645		return;
 646	}
 647
 648	/* Now check tree block csum. */
 649	shash->tfm = fs_info->csum_shash;
 650	crypto_shash_init(shash);
 651	crypto_shash_update(shash, page_address(first_page) + first_off +
 652			    BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
 653
 654	for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
 655		struct page *page = scrub_stripe_get_page(stripe, i);
 656		unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
 657
 658		crypto_shash_update(shash, page_address(page) + page_off,
 659				    fs_info->sectorsize);
 660	}
 661
 662	crypto_shash_final(shash, calculated_csum);
 663	if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
 664		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 665		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 666		btrfs_warn_rl(fs_info,
 667		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
 668			      logical, stripe->mirror_num,
 669			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
 670			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
 671		return;
 672	}
 673	if (stripe->sectors[sector_nr].generation !=
 674	    btrfs_stack_header_generation(header)) {
 675		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 676		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 677		btrfs_warn_rl(fs_info,
 678		"tree block %llu mirror %u has bad generation, has %llu want %llu",
 679			      logical, stripe->mirror_num,
 680			      btrfs_stack_header_generation(header),
 681			      stripe->sectors[sector_nr].generation);
 682		return;
 683	}
 684	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
 685	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
 686	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
 687}
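/*
 * E.g. with a 16KiB nodesize and 4KiB sectorsize, sectors_per_tree is 4:
 * the first sector contributes everything past the inline csum
 * (BTRFS_CSUM_SIZE, i.e. 32 bytes) and the other three sectors are
 * hashed in full, so the csum covers bytes [32, 16384) of the tree block.
 */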
 688
 689static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
 690{
 691	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 692	struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
 693	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 694	struct page *page = scrub_stripe_get_page(stripe, sector_nr);
 695	unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
 696	u8 csum_buf[BTRFS_CSUM_SIZE];
 697	int ret;
 698
 699	ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
 700
 701	/* Sector not utilized, skip it. */
 702	if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
 703		return;
 704
 705	/* IO error, no need to check. */
 706	if (test_bit(sector_nr, &stripe->io_error_bitmap))
 707		return;
 708
 709	/* Metadata, verify the full tree block. */
 710	if (sector->is_metadata) {
 711		/*
  712		 * Check if the tree block crosses the stripe boundary.  If it
  713		 * crosses the boundary, we cannot verify it and only give a
  714		 * warning.
 715		 *
 716		 * This can only happen on a very old filesystem where chunks
 717		 * are not ensured to be stripe aligned.
 718		 */
 719		if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
 720			btrfs_warn_rl(fs_info,
 721			"tree block at %llu crosses stripe boundary %llu",
 722				      stripe->logical +
 723				      (sector_nr << fs_info->sectorsize_bits),
 724				      stripe->logical);
 725			return;
 726		}
 727		scrub_verify_one_metadata(stripe, sector_nr);
 728		return;
 729	}
 730
 731	/*
 732	 * Data is easier, we just verify the data csum (if we have it).  For
 733	 * cases without csum, we have no other choice but to trust it.
 734	 */
 735	if (!sector->csum) {
 736		clear_bit(sector_nr, &stripe->error_bitmap);
 737		return;
 738	}
 739
 740	ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
 741	if (ret < 0) {
 742		set_bit(sector_nr, &stripe->csum_error_bitmap);
 743		set_bit(sector_nr, &stripe->error_bitmap);
 744	} else {
 745		clear_bit(sector_nr, &stripe->csum_error_bitmap);
 746		clear_bit(sector_nr, &stripe->error_bitmap);
 747	}
 748}
 749
 750/* Verify specified sectors of a stripe. */
 751static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
 752{
 753	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 754	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
 755	int sector_nr;
 756
 757	for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
 758		scrub_verify_one_sector(stripe, sector_nr);
 759		if (stripe->sectors[sector_nr].is_metadata)
 760			sector_nr += sectors_per_tree - 1;
 761	}
 762}
 763
 764static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
 765{
 766	int i;
 767
 768	for (i = 0; i < stripe->nr_sectors; i++) {
 769		if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
 770		    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
 771			break;
 772	}
 773	ASSERT(i < stripe->nr_sectors);
 774	return i;
 775}
 776
 777/*
  778 * Repair read is different from the regular read:
 779 *
 780 * - Only reads the failed sectors
 781 * - May have extra blocksize limits
 782 */
 783static void scrub_repair_read_endio(struct btrfs_bio *bbio)
 784{
 785	struct scrub_stripe *stripe = bbio->private;
 786	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 787	struct bio_vec *bvec;
 788	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
 789	u32 bio_size = 0;
 790	int i;
 791
 792	ASSERT(sector_nr < stripe->nr_sectors);
 793
 794	bio_for_each_bvec_all(bvec, &bbio->bio, i)
 795		bio_size += bvec->bv_len;
 796
 797	if (bbio->bio.bi_status) {
 798		bitmap_set(&stripe->io_error_bitmap, sector_nr,
 799			   bio_size >> fs_info->sectorsize_bits);
 800		bitmap_set(&stripe->error_bitmap, sector_nr,
 801			   bio_size >> fs_info->sectorsize_bits);
 802	} else {
 803		bitmap_clear(&stripe->io_error_bitmap, sector_nr,
 804			     bio_size >> fs_info->sectorsize_bits);
 805	}
 806	bio_put(&bbio->bio);
 807	if (atomic_dec_and_test(&stripe->pending_io))
 808		wake_up(&stripe->io_wait);
 809}
 810
 811static int calc_next_mirror(int mirror, int num_copies)
 812{
 813	ASSERT(mirror <= num_copies);
 814	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
 815}
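/*
 * E.g. with num_copies == 3 the mirrors cycle 1 -> 2 -> 3 -> 1, so
 * starting from any mirror we can visit every copy exactly once before
 * returning to the initial one.
 */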
 816
 817static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
 818					    int mirror, int blocksize, bool wait)
 819{
 820	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
 821	struct btrfs_bio *bbio = NULL;
 822	const unsigned long old_error_bitmap = stripe->error_bitmap;
 823	int i;
 824
 825	ASSERT(stripe->mirror_num >= 1);
 826	ASSERT(atomic_read(&stripe->pending_io) == 0);
 827
 828	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
 829		struct page *page;
 830		int pgoff;
 831		int ret;
 832
 833		page = scrub_stripe_get_page(stripe, i);
 834		pgoff = scrub_stripe_get_page_offset(stripe, i);
 835
 836		/* The current sector cannot be merged, submit the bio. */
 837		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
 838			     bbio->bio.bi_iter.bi_size >= blocksize)) {
 839			ASSERT(bbio->bio.bi_iter.bi_size);
 840			atomic_inc(&stripe->pending_io);
 841			btrfs_submit_bio(bbio, mirror);
 842			if (wait)
 843				wait_scrub_stripe_io(stripe);
 844			bbio = NULL;
 845		}
 846
 847		if (!bbio) {
 848			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
 849				fs_info, scrub_repair_read_endio, stripe);
 850			bbio->bio.bi_iter.bi_sector = (stripe->logical +
 851				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
 852		}
 853
 854		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
 855		ASSERT(ret == fs_info->sectorsize);
 856	}
 857	if (bbio) {
 858		ASSERT(bbio->bio.bi_iter.bi_size);
 859		atomic_inc(&stripe->pending_io);
 860		btrfs_submit_bio(bbio, mirror);
 861		if (wait)
 862			wait_scrub_stripe_io(stripe);
 863	}
 864}
 865
 866static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
 867				       struct scrub_stripe *stripe)
 868{
 869	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
 870				      DEFAULT_RATELIMIT_BURST);
 871	struct btrfs_fs_info *fs_info = sctx->fs_info;
 872	struct btrfs_device *dev = NULL;
 873	u64 physical = 0;
 874	int nr_data_sectors = 0;
 875	int nr_meta_sectors = 0;
 876	int nr_nodatacsum_sectors = 0;
 877	int nr_repaired_sectors = 0;
 878	int sector_nr;
 879
 880	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
 881		return;
 882
  883	/*
  884	 * Initialize the info needed for error reporting.
  885	 *
  886	 * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio(),
  887	 * which needs no dev/physical, error reporting still needs dev and physical.
  888	 */
 889	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
 890		u64 mapped_len = fs_info->sectorsize;
 891		struct btrfs_io_context *bioc = NULL;
 892		int stripe_index = stripe->mirror_num - 1;
 893		int ret;
 894
 895		/* For scrub, our mirror_num should always start at 1. */
 896		ASSERT(stripe->mirror_num >= 1);
 897		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
 898				      stripe->logical, &mapped_len, &bioc,
 899				      NULL, NULL);
 900		/*
 901		 * If we failed, dev will be NULL, and later detailed reports
 902		 * will just be skipped.
 903		 */
 904		if (ret < 0)
 905			goto skip;
 906		physical = bioc->stripes[stripe_index].physical;
 907		dev = bioc->stripes[stripe_index].dev;
 908		btrfs_put_bioc(bioc);
 909	}
 910
 911skip:
 912	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
 913		bool repaired = false;
 914
 915		if (stripe->sectors[sector_nr].is_metadata) {
 916			nr_meta_sectors++;
 917		} else {
 918			nr_data_sectors++;
 919			if (!stripe->sectors[sector_nr].csum)
 920				nr_nodatacsum_sectors++;
 921		}
 922
 923		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
 924		    !test_bit(sector_nr, &stripe->error_bitmap)) {
 925			nr_repaired_sectors++;
 926			repaired = true;
 927		}
 928
  929		/* Good sector from the beginning, nothing needs to be done. */
 930		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
 931			continue;
 932
 933		/*
  934		 * Report errors for the corrupted sectors.  If repaired, just
  935		 * output a message saying they have been repaired.
 936		 */
 937		if (repaired) {
 938			if (dev) {
 939				btrfs_err_rl_in_rcu(fs_info,
 940			"fixed up error at logical %llu on dev %s physical %llu",
 941					    stripe->logical, btrfs_dev_name(dev),
 942					    physical);
 943			} else {
 944				btrfs_err_rl_in_rcu(fs_info,
 945			"fixed up error at logical %llu on mirror %u",
 946					    stripe->logical, stripe->mirror_num);
 947			}
 948			continue;
 949		}
 950
  951		/* The remaining cases are all unrepaired sectors. */
 952		if (dev) {
 953			btrfs_err_rl_in_rcu(fs_info,
 954	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
 955					    stripe->logical, btrfs_dev_name(dev),
 956					    physical);
 957		} else {
 958			btrfs_err_rl_in_rcu(fs_info,
 959	"unable to fixup (regular) error at logical %llu on mirror %u",
 960					    stripe->logical, stripe->mirror_num);
 961		}
 962
 963		if (test_bit(sector_nr, &stripe->io_error_bitmap))
 964			if (__ratelimit(&rs) && dev)
 965				scrub_print_common_warning("i/o error", dev, false,
 966						     stripe->logical, physical);
 967		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
 968			if (__ratelimit(&rs) && dev)
 969				scrub_print_common_warning("checksum error", dev, false,
 970						     stripe->logical, physical);
 971		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
 972			if (__ratelimit(&rs) && dev)
 973				scrub_print_common_warning("header error", dev, false,
 974						     stripe->logical, physical);
 975	}
 976
 977	spin_lock(&sctx->stat_lock);
 978	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
 979	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
 980	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
 981	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
 982	sctx->stat.no_csum += nr_nodatacsum_sectors;
 983	sctx->stat.read_errors += stripe->init_nr_io_errors;
 984	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
 985	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
 986	sctx->stat.uncorrectable_errors +=
 987		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
 988	sctx->stat.corrected_errors += nr_repaired_sectors;
 989	spin_unlock(&sctx->stat_lock);
 990}
 991
 992static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
 993				unsigned long write_bitmap, bool dev_replace);
 994
 995/*
 996 * The main entrance for all read related scrub work, including:
 997 *
 998 * - Wait for the initial read to finish
 999 * - Verify and locate any bad sectors
 1000 * - Go through the remaining mirrors and try to read blocks as large as
 1001 *   possible
1002 * - Go through all mirrors (including the failed mirror) sector-by-sector
1003 * - Submit writeback for repaired sectors
1004 *
1005 * Writeback for dev-replace does not happen here, it needs extra
1006 * synchronization for zoned devices.
1007 */
1008static void scrub_stripe_read_repair_worker(struct work_struct *work)
1009{
1010	struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
1011	struct scrub_ctx *sctx = stripe->sctx;
1012	struct btrfs_fs_info *fs_info = sctx->fs_info;
1013	int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1014					  stripe->bg->length);
1015	int mirror;
1016	int i;
1017
1018	ASSERT(stripe->mirror_num > 0);
1019
1020	wait_scrub_stripe_io(stripe);
1021	scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
1022	/* Save the initial failed bitmap for later repair and report usage. */
1023	stripe->init_error_bitmap = stripe->error_bitmap;
1024	stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
1025						  stripe->nr_sectors);
1026	stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
1027						    stripe->nr_sectors);
1028	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
1029						    stripe->nr_sectors);
1030
1031	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1032		goto out;
1033
1034	/*
1035	 * Try all remaining mirrors.
1036	 *
 1037	 * Here we still try to read blocks as large as possible, as this is
1038	 * faster and we have extra safety nets to rely on.
1039	 */
1040	for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
1041	     mirror != stripe->mirror_num;
1042	     mirror = calc_next_mirror(mirror, num_copies)) {
1043		const unsigned long old_error_bitmap = stripe->error_bitmap;
1044
1045		scrub_stripe_submit_repair_read(stripe, mirror,
1046						BTRFS_STRIPE_LEN, false);
1047		wait_scrub_stripe_io(stripe);
1048		scrub_verify_one_stripe(stripe, old_error_bitmap);
1049		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1050			goto out;
1051	}
1052
1053	/*
1054	 * Last safety net, try re-checking all mirrors, including the failed
1055	 * one, sector-by-sector.
1056	 *
 1057	 * Because if one sector fails the drive's internal csum, the whole read
 1058	 * containing the offending sector would be marked as an error.
 1059	 * Thus here we do sector-by-sector reads.
1060	 *
1061	 * This can be slow, thus we only try it as the last resort.
1062	 */
1063
1064	for (i = 0, mirror = stripe->mirror_num;
1065	     i < num_copies;
1066	     i++, mirror = calc_next_mirror(mirror, num_copies)) {
1067		const unsigned long old_error_bitmap = stripe->error_bitmap;
1068
1069		scrub_stripe_submit_repair_read(stripe, mirror,
1070						fs_info->sectorsize, true);
1071		wait_scrub_stripe_io(stripe);
1072		scrub_verify_one_stripe(stripe, old_error_bitmap);
1073		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1074			goto out;
1075	}
1076out:
1077	/*
1078	 * Submit the repaired sectors.  For zoned case, we cannot do repair
1079	 * in-place, but queue the bg to be relocated.
1080	 */
1081	if (btrfs_is_zoned(fs_info)) {
1082		if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1083			btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1084	} else if (!sctx->readonly) {
1085		unsigned long repaired;
1086
1087		bitmap_andnot(&repaired, &stripe->init_error_bitmap,
1088			      &stripe->error_bitmap, stripe->nr_sectors);
1089		scrub_write_sectors(sctx, stripe, repaired, false);
1090		wait_scrub_stripe_io(stripe);
1091	}
1092
1093	scrub_stripe_report_errors(sctx, stripe);
1094	set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1095	wake_up(&stripe->repair_wait);
1096}
1097
1098static void scrub_read_endio(struct btrfs_bio *bbio)
1099{
1100	struct scrub_stripe *stripe = bbio->private;
1101	struct bio_vec *bvec;
1102	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1103	int num_sectors;
1104	u32 bio_size = 0;
1105	int i;
1106
1107	ASSERT(sector_nr < stripe->nr_sectors);
1108	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1109		bio_size += bvec->bv_len;
1110	num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
1111
1112	if (bbio->bio.bi_status) {
1113		bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
1114		bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
1115	} else {
1116		bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
1117	}
1118	bio_put(&bbio->bio);
1119	if (atomic_dec_and_test(&stripe->pending_io)) {
1120		wake_up(&stripe->io_wait);
1121		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1122		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1123	}
1124}
1125
1126static void scrub_write_endio(struct btrfs_bio *bbio)
1127{
1128	struct scrub_stripe *stripe = bbio->private;
1129	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1130	struct bio_vec *bvec;
1131	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1132	u32 bio_size = 0;
1133	int i;
1134
1135	bio_for_each_bvec_all(bvec, &bbio->bio, i)
1136		bio_size += bvec->bv_len;
1137
1138	if (bbio->bio.bi_status) {
1139		unsigned long flags;
1140
1141		spin_lock_irqsave(&stripe->write_error_lock, flags);
1142		bitmap_set(&stripe->write_error_bitmap, sector_nr,
1143			   bio_size >> fs_info->sectorsize_bits);
1144		spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1145	}
1146	bio_put(&bbio->bio);
1147
1148	if (atomic_dec_and_test(&stripe->pending_io))
1149		wake_up(&stripe->io_wait);
1150}
1151
1152static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1153				   struct scrub_stripe *stripe,
1154				   struct btrfs_bio *bbio, bool dev_replace)
1155{
1156	struct btrfs_fs_info *fs_info = sctx->fs_info;
1157	u32 bio_len = bbio->bio.bi_iter.bi_size;
1158	u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
1159		      stripe->logical;
1160
1161	fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1162	atomic_inc(&stripe->pending_io);
1163	btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1164	if (!btrfs_is_zoned(fs_info))
1165		return;
1166	/*
1167	 * For zoned writeback, queue depth must be 1, thus we must wait for
1168	 * the write to finish before the next write.
1169	 */
1170	wait_scrub_stripe_io(stripe);
1171
1172	/*
1173	 * And also need to update the write pointer if write finished
1174	 * successfully.
1175	 */
1176	if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1177		      &stripe->write_error_bitmap))
1178		sctx->write_pointer += bio_len;
1179}
1180
1181/*
1182 * Submit the write bio(s) for the sectors specified by @write_bitmap.
1183 *
1184 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1185 *
1186 * - Only needs logical bytenr and mirror_num
1187 *   Just like the scrub read path
1188 *
1189 * - Would only result in writes to the specified mirror
1190 *   Unlike the regular writeback path, which would write back to all stripes
1191 *
1192 * - Handle dev-replace and read-repair writeback differently
1193 */
1194static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1195				unsigned long write_bitmap, bool dev_replace)
1196{
1197	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1198	struct btrfs_bio *bbio = NULL;
1199	int sector_nr;
1200
1201	for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1202		struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1203		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1204		int ret;
1205
1206		/* We should only writeback sectors covered by an extent. */
1207		ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1208
1209		/* Cannot merge with previous sector, submit the current one. */
1210		if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
1211			scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1212			bbio = NULL;
1213		}
1214		if (!bbio) {
1215			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1216					       fs_info, scrub_write_endio, stripe);
1217			bbio->bio.bi_iter.bi_sector = (stripe->logical +
1218				(sector_nr << fs_info->sectorsize_bits)) >>
1219				SECTOR_SHIFT;
1220		}
1221		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1222		ASSERT(ret == fs_info->sectorsize);
1223	}
1224	if (bbio)
1225		scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1226}
1227
1228/*
1229 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 1230 * second.  Limit can be set via /sys/fs/btrfs/UUID/devinfo/<devid>/scrub_speed_max.
1231 */
1232static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1233				  unsigned int bio_size)
1234{
1235	const int time_slice = 1000;
1236	s64 delta;
1237	ktime_t now;
1238	u32 div;
1239	u64 bwlimit;
1240
1241	bwlimit = READ_ONCE(device->scrub_speed_max);
1242	if (bwlimit == 0)
1243		return;
1244
1245	/*
1246	 * Slice is divided into intervals when the IO is submitted, adjust by
1247	 * bwlimit and maximum of 64 intervals.
1248	 */
1249	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
1250	div = min_t(u32, 64, div);
1251
1252	/* Start new epoch, set deadline */
1253	now = ktime_get();
1254	if (sctx->throttle_deadline == 0) {
1255		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1256		sctx->throttle_sent = 0;
1257	}
1258
1259	/* Still in the time to send? */
1260	if (ktime_before(now, sctx->throttle_deadline)) {
1261		/* If current bio is within the limit, send it */
1262		sctx->throttle_sent += bio_size;
1263		if (sctx->throttle_sent <= div_u64(bwlimit, div))
1264			return;
1265
1266		/* We're over the limit, sleep until the rest of the slice */
1267		delta = ktime_ms_delta(sctx->throttle_deadline, now);
1268	} else {
1269		/* New request after deadline, start new epoch */
1270		delta = 0;
1271	}
1272
1273	if (delta) {
1274		long timeout;
1275
1276		timeout = div_u64(delta * HZ, 1000);
1277		schedule_timeout_interruptible(timeout);
1278	}
1279
1280	/* Next call will start the deadline period */
1281	sctx->throttle_deadline = 0;
1282}
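/*
 * Worked example (a sketch): with scrub_speed_max set to 100MiB/s,
 * div = clamp(100MiB / 16MiB, 1, 64) = 6, so each epoch lasts
 * 1000ms / 6 = 166ms and permits about 100MiB / 6 ~= 16.6MiB of IO;
 * once the quota is exceeded, the task sleeps until the deadline.
 */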
1283
1284/*
 1285 * Given a physical address, this will calculate its
 1286 * logical offset. If this is a parity stripe, it will return
 1287 * the leftmost data stripe's logical offset.
 1288 *
 1289 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
1290 */
1291static int get_raid56_logic_offset(u64 physical, int num,
1292				   struct btrfs_chunk_map *map, u64 *offset,
1293				   u64 *stripe_start)
1294{
1295	int i;
1296	int j = 0;
1297	u64 last_offset;
1298	const int data_stripes = nr_data_stripes(map);
1299
1300	last_offset = (physical - map->stripes[num].physical) * data_stripes;
1301	if (stripe_start)
1302		*stripe_start = last_offset;
1303
1304	*offset = last_offset;
1305	for (i = 0; i < data_stripes; i++) {
1306		u32 stripe_nr;
1307		u32 stripe_index;
1308		u32 rot;
1309
1310		*offset = last_offset + btrfs_stripe_nr_to_offset(i);
1311
1312		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
1313
1314		/* Work out the disk rotation on this stripe-set */
1315		rot = stripe_nr % map->num_stripes;
 1316		/* Calculate which stripe this data is located on */
1317		rot += i;
1318		stripe_index = rot % map->num_stripes;
1319		if (stripe_index == num)
1320			return 0;
1321		if (stripe_index < num)
1322			j++;
1323	}
1324	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
1325	return 1;
1326}
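/*
 * Trace (a sketch) for RAID5 over 3 devices (2 data stripes + P),
 * querying num == 1 at the start of the chunk: i == 0 hits stripe_index
 * 0 (not ours, j becomes 1); i == 1 gives rot == 1 and stripe_index == 1,
 * a match, so the function returns 0 with *offset == 64KiB: the first
 * 64KiB of device 1 holds the data stripe at chunk-relative logical 64KiB.
 */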
1327
1328/*
1329 * Return 0 if the extent item range covers any byte of the range.
1330 * Return <0 if the extent item is before @search_start.
 1331 * Return >0 if the extent item is after @search_start + @search_len.
1332 */
1333static int compare_extent_item_range(struct btrfs_path *path,
1334				     u64 search_start, u64 search_len)
1335{
1336	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1337	u64 len;
1338	struct btrfs_key key;
1339
1340	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1341	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
1342	       key.type == BTRFS_METADATA_ITEM_KEY);
1343	if (key.type == BTRFS_METADATA_ITEM_KEY)
1344		len = fs_info->nodesize;
1345	else
1346		len = key.offset;
1347
1348	if (key.objectid + len <= search_start)
1349		return -1;
1350	if (key.objectid >= search_start + search_len)
1351		return 1;
1352	return 0;
1353}
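/*
 * E.g. (a sketch, 16KiB nodesize): a METADATA_ITEM keyed at
 * (search_start - 16KiB) ends exactly at @search_start, so
 * key.objectid + len <= search_start holds and -1 is returned; an item
 * must overlap at least one byte of the range to count as a hit.
 */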
1354
1355/*
1356 * Locate one extent item which covers any byte in range
1357 * [@search_start, @search_start + @search_length)
1358 *
1359 * If the path is not initialized, we will initialize the search by doing
1360 * a btrfs_search_slot().
1361 * If the path is already initialized, we will use the path as the initial
1362 * slot, to avoid duplicated btrfs_search_slot() calls.
1363 *
1364 * NOTE: If an extent item starts before @search_start, we will still
1365 * return the extent item. This is for data extent crossing stripe boundary.
1366 *
1367 * Return 0 if we found such extent item, and @path will point to the extent item.
1368 * Return >0 if no such extent item can be found, and @path will be released.
1369 * Return <0 if hit fatal error, and @path will be released.
1370 */
1371static int find_first_extent_item(struct btrfs_root *extent_root,
1372				  struct btrfs_path *path,
1373				  u64 search_start, u64 search_len)
1374{
1375	struct btrfs_fs_info *fs_info = extent_root->fs_info;
1376	struct btrfs_key key;
1377	int ret;
1378
1379	/* Continue using the existing path */
1380	if (path->nodes[0])
1381		goto search_forward;
1382
1383	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1384		key.type = BTRFS_METADATA_ITEM_KEY;
1385	else
1386		key.type = BTRFS_EXTENT_ITEM_KEY;
1387	key.objectid = search_start;
1388	key.offset = (u64)-1;
1389
1390	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1391	if (ret < 0)
1392		return ret;
1393
1394	ASSERT(ret > 0);
1395	/*
1396	 * Here we intentionally pass 0 as @min_objectid, as there could be
1397	 * an extent item starting before @search_start.
1398	 */
1399	ret = btrfs_previous_extent_item(extent_root, path, 0);
1400	if (ret < 0)
1401		return ret;
1402	/*
1403	 * No matter whether we have found an extent item, the next loop will
1404	 * properly do every check on the key.
1405	 */
1406search_forward:
1407	while (true) {
1408		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1409		if (key.objectid >= search_start + search_len)
1410			break;
1411		if (key.type != BTRFS_METADATA_ITEM_KEY &&
1412		    key.type != BTRFS_EXTENT_ITEM_KEY)
1413			goto next;
1414
1415		ret = compare_extent_item_range(path, search_start, search_len);
1416		if (ret == 0)
1417			return ret;
1418		if (ret > 0)
1419			break;
1420next:
1421		ret = btrfs_next_item(extent_root, path);
1422		if (ret) {
1423			/* Either no more items or a fatal error. */
1424			btrfs_release_path(path);
1425			return ret;
1426		}
1427	}
1428	btrfs_release_path(path);
1429	return 1;
1430}
1431
1432static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1433			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1434{
1435	struct btrfs_key key;
1436	struct btrfs_extent_item *ei;
1437
1438	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1439	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1440	       key.type == BTRFS_EXTENT_ITEM_KEY);
1441	*extent_start_ret = key.objectid;
1442	if (key.type == BTRFS_METADATA_ITEM_KEY)
1443		*size_ret = path->nodes[0]->fs_info->nodesize;
1444	else
1445		*size_ret = key.offset;
1446	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1447	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1448	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1449}
1450
1451static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1452					u64 physical, u64 physical_end)
1453{
1454	struct btrfs_fs_info *fs_info = sctx->fs_info;
1455	int ret = 0;
1456
1457	if (!btrfs_is_zoned(fs_info))
1458		return 0;
1459
1460	mutex_lock(&sctx->wr_lock);
1461	if (sctx->write_pointer < physical_end) {
1462		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1463						    physical,
1464						    sctx->write_pointer);
1465		if (ret)
1466			btrfs_err(fs_info,
1467				  "zoned: failed to recover write pointer");
1468	}
1469	mutex_unlock(&sctx->wr_lock);
1470	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1471
1472	return ret;
1473}
1474
1475static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1476				 struct scrub_stripe *stripe,
1477				 u64 extent_start, u64 extent_len,
1478				 u64 extent_flags, u64 extent_gen)
1479{
1480	for (u64 cur_logical = max(stripe->logical, extent_start);
1481	     cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1482			       extent_start + extent_len);
1483	     cur_logical += fs_info->sectorsize) {
1484		const int nr_sector = (cur_logical - stripe->logical) >>
1485				      fs_info->sectorsize_bits;
1486		struct scrub_sector_verification *sector =
1487						&stripe->sectors[nr_sector];
1488
1489		set_bit(nr_sector, &stripe->extent_sector_bitmap);
1490		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1491			sector->is_metadata = true;
1492			sector->generation = extent_gen;
1493		}
1494	}
1495}
1496
1497static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1498{
1499	stripe->extent_sector_bitmap = 0;
1500	stripe->init_error_bitmap = 0;
1501	stripe->init_nr_io_errors = 0;
1502	stripe->init_nr_csum_errors = 0;
1503	stripe->init_nr_meta_errors = 0;
1504	stripe->error_bitmap = 0;
1505	stripe->io_error_bitmap = 0;
1506	stripe->csum_error_bitmap = 0;
1507	stripe->meta_error_bitmap = 0;
1508}
1509
1510/*
1511 * Locate one stripe which has at least one extent in its range.
1512 *
1513 * Return 0 if found such stripe, and store its info into @stripe.
1514 * Return >0 if there is no such stripe in the specified range.
1515 * Return <0 for error.
1516 */
1517static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1518					struct btrfs_path *extent_path,
1519					struct btrfs_path *csum_path,
1520					struct btrfs_device *dev, u64 physical,
1521					int mirror_num, u64 logical_start,
1522					u32 logical_len,
1523					struct scrub_stripe *stripe)
1524{
1525	struct btrfs_fs_info *fs_info = bg->fs_info;
1526	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1527	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1528	const u64 logical_end = logical_start + logical_len;
1529	u64 cur_logical = logical_start;
1530	u64 stripe_end;
1531	u64 extent_start;
1532	u64 extent_len;
1533	u64 extent_flags;
1534	u64 extent_gen;
1535	int ret;
1536
1537	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1538				   stripe->nr_sectors);
1539	scrub_stripe_reset_bitmaps(stripe);
1540
1541	/* The range must be inside the bg. */
1542	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1543
1544	ret = find_first_extent_item(extent_root, extent_path, logical_start,
1545				     logical_len);
1546	/* Either error or not found. */
1547	if (ret)
1548		goto out;
1549	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
1550			&extent_gen);
1551	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1552		stripe->nr_meta_extents++;
1553	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1554		stripe->nr_data_extents++;
1555	cur_logical = max(extent_start, cur_logical);
1556
1557	/*
1558	 * Round down to stripe boundary.
1559	 *
1560	 * The extra calculation against bg->start is to handle block groups
1561	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1562	 */
1563	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1564			  bg->start;
1565	stripe->physical = physical + stripe->logical - logical_start;
1566	stripe->dev = dev;
1567	stripe->bg = bg;
1568	stripe->mirror_num = mirror_num;
1569	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1570
1571	/* Fill the first extent info into stripe->sectors[] array. */
1572	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1573			     extent_flags, extent_gen);
1574	cur_logical = extent_start + extent_len;
1575
1576	/* Fill the extent info for the remaining sectors. */
1577	while (cur_logical <= stripe_end) {
1578		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
1579					     stripe_end - cur_logical + 1);
1580		if (ret < 0)
1581			goto out;
1582		if (ret > 0) {
1583			ret = 0;
1584			break;
1585		}
1586		get_extent_info(extent_path, &extent_start, &extent_len,
1587				&extent_flags, &extent_gen);
1588		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1589			stripe->nr_meta_extents++;
1590		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1591			stripe->nr_data_extents++;
1592		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1593				     extent_flags, extent_gen);
1594		cur_logical = extent_start + extent_len;
1595	}
1596
1597	/* Now fill the data csum. */
1598	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1599		int sector_nr;
1600		unsigned long csum_bitmap = 0;
1601
1602		/* Csum space should have already been allocated. */
1603		ASSERT(stripe->csums);
1604
1605		/*
1606		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1607		 * should contain at most 16 sectors.
1608		 */
1609		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1610
1611		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
1612						stripe->logical, stripe_end,
1613						stripe->csums, &csum_bitmap);
1614		if (ret < 0)
1615			goto out;
1616		if (ret > 0)
1617			ret = 0;
1618
1619		for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1620			stripe->sectors[sector_nr].csum = stripe->csums +
1621				sector_nr * fs_info->csum_size;
1622		}
1623	}
1624	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1625out:
1626	return ret;
1627}
1628
1629static void scrub_reset_stripe(struct scrub_stripe *stripe)
1630{
1631	scrub_stripe_reset_bitmaps(stripe);
1632
1633	stripe->nr_meta_extents = 0;
1634	stripe->nr_data_extents = 0;
1635	stripe->state = 0;
1636
1637	for (int i = 0; i < stripe->nr_sectors; i++) {
1638		stripe->sectors[i].is_metadata = false;
1639		stripe->sectors[i].csum = NULL;
1640		stripe->sectors[i].generation = 0;
1641	}
1642}
1643
1644static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
1645					    struct scrub_stripe *stripe)
1646{
1647	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1648	struct btrfs_bio *bbio = NULL;
1649	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
1650				      stripe->bg->length - stripe->logical) >>
1651				  fs_info->sectorsize_bits;
1652	u64 stripe_len = BTRFS_STRIPE_LEN;
1653	int mirror = stripe->mirror_num;
1654	int i;
1655
1656	atomic_inc(&stripe->pending_io);
1657
1658	for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
1659		struct page *page = scrub_stripe_get_page(stripe, i);
1660		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
1661
1662		/* We're beyond the chunk boundary, no need to read anymore. */
1663		if (i >= nr_sectors)
1664			break;
1665
1666		/* The current sector cannot be merged, submit the bio. */
1667		if (bbio &&
1668		    ((i > 0 &&
1669		      !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
1670		     bbio->bio.bi_iter.bi_size >= stripe_len)) {
1671			ASSERT(bbio->bio.bi_iter.bi_size);
1672			atomic_inc(&stripe->pending_io);
1673			btrfs_submit_bio(bbio, mirror);
1674			bbio = NULL;
1675		}
1676
1677		if (!bbio) {
1678			struct btrfs_io_stripe io_stripe = {};
1679			struct btrfs_io_context *bioc = NULL;
1680			const u64 logical = stripe->logical +
1681					    (i << fs_info->sectorsize_bits);
1682			int err;
1683
1684			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
1685					       fs_info, scrub_read_endio, stripe);
1686			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
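			/*
			 * btrfs bios are keyed by the logical bytenr here;
			 * the logical -> physical translation is done by the
			 * btrfs_map_block() call below.
			 */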
1687
1688			io_stripe.is_scrub = true;
1689			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
1690					      &stripe_len, &bioc, &io_stripe,
1691					      &mirror);
1692			btrfs_put_bioc(bioc);
1693			if (err) {
1694				btrfs_bio_end_io(bbio,
1695						 errno_to_blk_status(err));
1696				return;
1697			}
1698		}
1699
1700		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1701	}
1702
1703	if (bbio) {
1704		ASSERT(bbio->bio.bi_iter.bi_size);
1705		atomic_inc(&stripe->pending_io);
1706		btrfs_submit_bio(bbio, mirror);
1707	}
1708
1709	if (atomic_dec_and_test(&stripe->pending_io)) {
1710		wake_up(&stripe->io_wait);
1711		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1712		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1713	}
1714}
1715
1716static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1717				      struct scrub_stripe *stripe)
1718{
1719	struct btrfs_fs_info *fs_info = sctx->fs_info;
1720	struct btrfs_bio *bbio;
1721	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
1722				      stripe->bg->length - stripe->logical) >>
1723				  fs_info->sectorsize_bits;
1724	int mirror = stripe->mirror_num;
1725
1726	ASSERT(stripe->bg);
1727	ASSERT(stripe->mirror_num > 0);
1728	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1729
1730	if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
1731		scrub_submit_extent_sector_read(sctx, stripe);
1732		return;
1733	}
1734
1735	bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1736			       scrub_read_endio, stripe);
1737
1738	bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1739	/* Read the whole range inside the chunk boundary. */
1740	for (unsigned int cur = 0; cur < nr_sectors; cur++) {
1741		struct page *page = scrub_stripe_get_page(stripe, cur);
1742		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
1743		int ret;
1744
1745		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1746		/* We should have allocated enough bio vectors. */
1747		ASSERT(ret == fs_info->sectorsize);
1748	}
1749	atomic_inc(&stripe->pending_io);
1750
1751	/*
1752	 * For dev-replace, if the user asks to avoid the source dev or the
1753	 * device is missing, we try the next mirror instead.
1754	 */
1755	if (sctx->is_dev_replace &&
1756	    (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1757	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1758	     !stripe->dev->bdev)) {
1759		int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1760						  stripe->bg->length);
1761
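		/* calc_next_mirror() picks mirror + 1, wrapping back to the first copy. */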
1762		mirror = calc_next_mirror(mirror, num_copies);
1763	}
1764	btrfs_submit_bio(bbio, mirror);
1765}
1766
1767static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1768{
1769	int i;
1770
1771	for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1772		if (stripe->sectors[i].is_metadata) {
1773			struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1774
1775			btrfs_err(fs_info,
1776			"stripe %llu has unrepaired metadata sector at %llu",
1777				  stripe->logical,
1778				  stripe->logical + (i << fs_info->sectorsize_bits));
1779			return true;
1780		}
1781	}
1782	return false;
1783}
1784
1785static void submit_initial_group_read(struct scrub_ctx *sctx,
1786				      unsigned int first_slot,
1787				      unsigned int nr_stripes)
1788{
1789	struct blk_plug plug;
1790
1791	ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1792	ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1793
1794	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1795			      btrfs_stripe_nr_to_offset(nr_stripes));
1796	blk_start_plug(&plug);
1797	for (int i = 0; i < nr_stripes; i++) {
1798		struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1799
1800		/* Those stripes should be initialized. */
1801		ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1802		scrub_submit_initial_read(sctx, stripe);
1803	}
1804	blk_finish_plug(&plug);
1805}
1806
1807static int flush_scrub_stripes(struct scrub_ctx *sctx)
1808{
1809	struct btrfs_fs_info *fs_info = sctx->fs_info;
1810	struct scrub_stripe *stripe;
1811	const int nr_stripes = sctx->cur_stripe;
1812	int ret = 0;
1813
1814	if (!nr_stripes)
1815		return 0;
1816
1817	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1818
1819	/* Submit the stripes which are populated but not submitted. */
1820	if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1821		const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1822
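		/*
		 * E.g. with 8 stripes per group and nr_stripes == 20, slots
		 * 0-15 already went out as two full groups, so first_slot is
		 * 16 and only the trailing 4 stripes are submitted here.
		 */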
1823		submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1824	}
1825
1826	for (int i = 0; i < nr_stripes; i++) {
1827		stripe = &sctx->stripes[i];
1828
1829		wait_event(stripe->repair_wait,
1830			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1831	}
1832
1833	/* Submit for dev-replace. */
1834	if (sctx->is_dev_replace) {
1835		/*
1836		 * For dev-replace, if we know there is something wrong with
1837		 * metadata, we should immediately abort.
1838		 */
1839		for (int i = 0; i < nr_stripes; i++) {
1840			if (stripe_has_metadata_error(&sctx->stripes[i])) {
1841				ret = -EIO;
1842				goto out;
1843			}
1844		}
1845		for (int i = 0; i < nr_stripes; i++) {
1846			unsigned long good;
1847
1848			stripe = &sctx->stripes[i];
1849
1850			ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1851
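			/*
			 * Only sectors covered by an extent and not in the
			 * error bitmap are good to copy to the dev-replace
			 * target.
			 */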
1852			bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1853				      &stripe->error_bitmap, stripe->nr_sectors);
1854			scrub_write_sectors(sctx, stripe, good, true);
1855		}
1856	}
1857
1858	/* Wait for the above writebacks to finish. */
1859	for (int i = 0; i < nr_stripes; i++) {
1860		stripe = &sctx->stripes[i];
1861
1862		wait_scrub_stripe_io(stripe);
1863		scrub_reset_stripe(stripe);
1864	}
1865out:
1866	sctx->cur_stripe = 0;
1867	return ret;
1868}
1869
1870static void raid56_scrub_wait_endio(struct bio *bio)
1871{
1872	complete(bio->bi_private);
1873}
1874
1875static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1876			      struct btrfs_device *dev, int mirror_num,
1877			      u64 logical, u32 length, u64 physical,
1878			      u64 *found_logical_ret)
1879{
1880	struct scrub_stripe *stripe;
1881	int ret;
1882
1883	/*
1884	 * There should always be one slot left, as the caller filling the
1885	 * last slot should have flushed them all.
1886	 */
1887	ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1888
1889	/* @found_logical_ret must be specified. */
1890	ASSERT(found_logical_ret);
1891
1892	stripe = &sctx->stripes[sctx->cur_stripe];
1893	scrub_reset_stripe(stripe);
1894	ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1895					   &sctx->csum_path, dev, physical,
1896					   mirror_num, logical, length, stripe);
1897	/* Either >0 as no more extents or <0 for error. */
1898	if (ret)
1899		return ret;
1900	*found_logical_ret = stripe->logical;
1901	sctx->cur_stripe++;
1902
1903	/* We filled one group, submit it. */
1904	if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1905		const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1906
1907		submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1908	}
1909
1910	/* Last slot used, flush them all. */
1911	if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1912		return flush_scrub_stripes(sctx);
1913	return 0;
1914}
1915
1916static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1917				      struct btrfs_device *scrub_dev,
1918				      struct btrfs_block_group *bg,
1919				      struct btrfs_chunk_map *map,
1920				      u64 full_stripe_start)
1921{
1922	DECLARE_COMPLETION_ONSTACK(io_done);
1923	struct btrfs_fs_info *fs_info = sctx->fs_info;
1924	struct btrfs_raid_bio *rbio;
1925	struct btrfs_io_context *bioc = NULL;
1926	struct btrfs_path extent_path = { 0 };
1927	struct btrfs_path csum_path = { 0 };
1928	struct bio *bio;
1929	struct scrub_stripe *stripe;
1930	bool all_empty = true;
1931	const int data_stripes = nr_data_stripes(map);
1932	unsigned long extent_bitmap = 0;
1933	u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1934	int ret;
1935
1936	ASSERT(sctx->raid56_data_stripes);
1937
1938	/*
1939	 * For data stripe search, we cannot re-use the same extent/csum paths,
1940	 * as the data stripe bytenr may be smaller than the previous extent's.
1941	 * Thus we have to use our own extent/csum paths.
1942	 */
1943	extent_path.search_commit_root = 1;
1944	extent_path.skip_locking = 1;
1945	csum_path.search_commit_root = 1;
1946	csum_path.skip_locking = 1;
1947
1948	for (int i = 0; i < data_stripes; i++) {
1949		int stripe_index;
1950		int rot;
1951		u64 physical;
1952
1953		stripe = &sctx->raid56_data_stripes[i];
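		/*
		 * Rotation sketch: rot is the index of this full stripe
		 * inside the chunk; each full stripe shifts the layout by
		 * one device, so data stripe i sits on device
		 * (i + rot) % num_stripes, rot stripe-lengths into its
		 * device extent.
		 */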
1954		rot = div_u64(full_stripe_start - bg->start,
1955			      data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1956		stripe_index = (i + rot) % map->num_stripes;
1957		physical = map->stripes[stripe_index].physical +
1958			   btrfs_stripe_nr_to_offset(rot);
1959
1960		scrub_reset_stripe(stripe);
1961		set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1962		ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1963				map->stripes[stripe_index].dev, physical, 1,
1964				full_stripe_start + btrfs_stripe_nr_to_offset(i),
1965				BTRFS_STRIPE_LEN, stripe);
1966		if (ret < 0)
1967			goto out;
1968		/*
1969		 * No extent in this data stripe; manually mark it initialized
1970		 * so that the later read submission is happy.
1971		 */
1972		if (ret > 0) {
1973			stripe->logical = full_stripe_start +
1974					  btrfs_stripe_nr_to_offset(i);
1975			stripe->dev = map->stripes[stripe_index].dev;
1976			stripe->mirror_num = 1;
1977			set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1978		}
1979	}
1980
1981	/* Check if all data stripes are empty. */
1982	for (int i = 0; i < data_stripes; i++) {
1983		stripe = &sctx->raid56_data_stripes[i];
1984		if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1985			all_empty = false;
1986			break;
1987		}
1988	}
1989	if (all_empty) {
1990		ret = 0;
1991		goto out;
1992	}
1993
1994	for (int i = 0; i < data_stripes; i++) {
1995		stripe = &sctx->raid56_data_stripes[i];
1996		scrub_submit_initial_read(sctx, stripe);
1997	}
1998	for (int i = 0; i < data_stripes; i++) {
1999		stripe = &sctx->raid56_data_stripes[i];
2000
2001		wait_event(stripe->repair_wait,
2002			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
2003	}
2004	/* For now, no zoned support for RAID56. */
2005	ASSERT(!btrfs_is_zoned(sctx->fs_info));
2006
2007	/*
2008	 * Now all data stripes are properly verified. Check if any sectors
2009	 * are still unrepaired; if so, abort immediately or we could further
2010	 * corrupt the P/Q stripes.
2011	 *
2012	 * During the loop, also populate extent_bitmap.
2013	 */
2014	for (int i = 0; i < data_stripes; i++) {
2015		unsigned long error;
2016
2017		stripe = &sctx->raid56_data_stripes[i];
2018
2019		/*
2020		 * Only check the errors where there is an extent, as we may
2021		 * hit an empty data stripe whose device is missing.
2022		 */
2023		bitmap_and(&error, &stripe->error_bitmap,
2024			   &stripe->extent_sector_bitmap, stripe->nr_sectors);
2025		if (!bitmap_empty(&error, stripe->nr_sectors)) {
2026			btrfs_err(fs_info,
2027"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
2028				  full_stripe_start, i, stripe->nr_sectors,
2029				  &error);
2030			ret = -EIO;
2031			goto out;
2032		}
2033		bitmap_or(&extent_bitmap, &extent_bitmap,
2034			  &stripe->extent_sector_bitmap, stripe->nr_sectors);
2035	}
2036
2037	/* Now we can check and regenerate the P/Q stripe. */
2038	bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
2039	bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
2040	bio->bi_private = &io_done;
2041	bio->bi_end_io = raid56_scrub_wait_endio;
2042
2043	btrfs_bio_counter_inc_blocked(fs_info);
2044	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
2045			      &length, &bioc, NULL, NULL);
2046	if (ret < 0) {
2047		btrfs_put_bioc(bioc);
2048		btrfs_bio_counter_dec(fs_info);
2049		goto out;
2050	}
2051	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
2052				BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
2053	btrfs_put_bioc(bioc);
2054	if (!rbio) {
2055		ret = -ENOMEM;
2056		btrfs_bio_counter_dec(fs_info);
2057		goto out;
2058	}
2059	/* Use the recovered stripes as a cache to avoid reading them from disk again. */
2060	for (int i = 0; i < data_stripes; i++) {
2061		stripe = &sctx->raid56_data_stripes[i];
2062
2063		raid56_parity_cache_data_pages(rbio, stripe->pages,
2064				full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
2065	}
2066	raid56_parity_submit_scrub_rbio(rbio);
2067	wait_for_completion_io(&io_done);
2068	ret = blk_status_to_errno(bio->bi_status);
2069	bio_put(bio);
2070	btrfs_bio_counter_dec(fs_info);
2071
2072	btrfs_release_path(&extent_path);
2073	btrfs_release_path(&csum_path);
2074out:
2075	return ret;
2076}
2077
2078/*
2079 * Scrub one range which can only have a simple mirror based profile.
2080 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2081 *  RAID0/RAID10.)
2082 *
2083 * Since we may need to handle a subset of a block group, we need the
2084 * @logical_start and @logical_length parameters.
2085 */
2086static int scrub_simple_mirror(struct scrub_ctx *sctx,
2087			       struct btrfs_block_group *bg,
2088			       struct btrfs_chunk_map *map,
2089			       u64 logical_start, u64 logical_length,
2090			       struct btrfs_device *device,
2091			       u64 physical, int mirror_num)
2092{
2093	struct btrfs_fs_info *fs_info = sctx->fs_info;
2094	const u64 logical_end = logical_start + logical_length;
2095	u64 cur_logical = logical_start;
2096	int ret;
2097
2098	/* The range must be inside the bg */
2099	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2100
2101	/* Go through each extent item inside the logical range */
2102	while (cur_logical < logical_end) {
2103		u64 found_logical = U64_MAX;
2104		u64 cur_physical = physical + cur_logical - logical_start;
2105
2106		/* Canceled? */
2107		if (atomic_read(&fs_info->scrub_cancel_req) ||
2108		    atomic_read(&sctx->cancel_req)) {
2109			ret = -ECANCELED;
2110			break;
2111		}
2112		/* Paused? */
2113		if (atomic_read(&fs_info->scrub_pause_req)) {
2114			/* Push queued extents */
2115			scrub_blocked_if_needed(fs_info);
2116		}
2117		/* Block group removed? */
2118		spin_lock(&bg->lock);
2119		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2120			spin_unlock(&bg->lock);
2121			ret = 0;
2122			break;
2123		}
2124		spin_unlock(&bg->lock);
2125
2126		ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2127					 cur_logical, logical_end - cur_logical,
2128					 cur_physical, &found_logical);
2129		if (ret > 0) {
2130			/* No more extents, just update the accounting */
2131			sctx->stat.last_physical = physical + logical_length;
2132			ret = 0;
2133			break;
2134		}
2135		if (ret < 0)
2136			break;
2137
2138		/* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2139		ASSERT(found_logical != U64_MAX);
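		/*
		 * queue_scrub_stripe() covers the whole 64KiB stripe
		 * containing @found_logical, so resume from the next stripe
		 * boundary.
		 */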
2140		cur_logical = found_logical + BTRFS_STRIPE_LEN;
2141
2142		/* Don't hold the CPU for too long */
2143		cond_resched();
2144	}
2145	return ret;
2146}
2147
2148/* Calculate the full stripe length for simple stripe based profiles */
2149static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
2150{
2151	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2152			    BTRFS_BLOCK_GROUP_RAID10));
2153
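	/*
	 * E.g. a 4-device RAID10 chunk (sub_stripes == 2) has two data
	 * stripes, i.e. a 128KiB full stripe (2 * BTRFS_STRIPE_LEN).
	 */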
2154	return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2155}
2156
2157/* Get the logical bytenr for the stripe */
2158static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
2159				     struct btrfs_block_group *bg,
2160				     int stripe_index)
2161{
2162	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2163			    BTRFS_BLOCK_GROUP_RAID10));
2164	ASSERT(stripe_index < map->num_stripes);
2165
2166	/*
2167	 * (stripe_index / sub_stripes) gives how many data stripes we need to
2168	 * skip.
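	 *
	 * E.g. for RAID10 (sub_stripes == 2), stripe_index 0 and 1 both map
	 * to the first data stripe, whose logical start is bg->start.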
2169	 */
2170	return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2171	       bg->start;
2172}
2173
2174/* Get the mirror number for the stripe */
2175static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
2176{
2177	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2178			    BTRFS_BLOCK_GROUP_RAID10));
2179	ASSERT(stripe_index < map->num_stripes);
2180
2181	/* For RAID0 it's fixed to 1; for RAID10, stripe_index % sub_stripes is 0,1,0,1..., so mirror_num is 1,2,1,2... */
2182	return stripe_index % map->sub_stripes + 1;
2183}
2184
2185static int scrub_simple_stripe(struct scrub_ctx *sctx,
2186			       struct btrfs_block_group *bg,
2187			       struct btrfs_chunk_map *map,
2188			       struct btrfs_device *device,
2189			       int stripe_index)
2190{
2191	const u64 logical_increment = simple_stripe_full_stripe_len(map);
2192	const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2193	const u64 orig_physical = map->stripes[stripe_index].physical;
2194	const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2195	u64 cur_logical = orig_logical;
2196	u64 cur_physical = orig_physical;
2197	int ret = 0;
2198
2199	while (cur_logical < bg->start + bg->length) {
2200		/*
2201		 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2202		 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2203		 * this stripe.
2204		 */
2205		ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2206					  BTRFS_STRIPE_LEN, device, cur_physical,
2207					  mirror_num);
2208		if (ret)
2209			return ret;
2210		/* Skip to next stripe which belongs to the target device */
2211		cur_logical += logical_increment;
2212		/* For physical offset, we just go to next stripe */
2213		cur_physical += BTRFS_STRIPE_LEN;
2214	}
2215	return ret;
2216}
2217
2218static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2219					   struct btrfs_block_group *bg,
2220					   struct btrfs_chunk_map *map,
2221					   struct btrfs_device *scrub_dev,
2222					   int stripe_index)
2223{
2224	struct btrfs_fs_info *fs_info = sctx->fs_info;
2225	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2226	const u64 chunk_logical = bg->start;
2227	int ret;
2228	int ret2;
2229	u64 physical = map->stripes[stripe_index].physical;
2230	const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
2231	const u64 physical_end = physical + dev_stripe_len;
2232	u64 logical;
2233	u64 logic_end;
2234	/* The logical increment after finishing one stripe */
2235	u64 increment;
2236	/* Offset inside the chunk */
2237	u64 offset;
2238	u64 stripe_logical;
2239	int stop_loop = 0;
2240
2241	/* The extent path should have been released by now. */
2242	ASSERT(sctx->extent_path.nodes[0] == NULL);
2243
2244	scrub_blocked_if_needed(fs_info);
2245
2246	if (sctx->is_dev_replace &&
2247	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2248		mutex_lock(&sctx->wr_lock);
2249		sctx->write_pointer = physical;
2250		mutex_unlock(&sctx->wr_lock);
2251	}
2252
2253	/* Prepare the extra data stripes used by RAID56. */
2254	if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2255		ASSERT(sctx->raid56_data_stripes == NULL);
2256
2257		sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2258						    sizeof(struct scrub_stripe),
2259						    GFP_KERNEL);
2260		if (!sctx->raid56_data_stripes) {
2261			ret = -ENOMEM;
2262			goto out;
2263		}
2264		for (int i = 0; i < nr_data_stripes(map); i++) {
2265			ret = init_scrub_stripe(fs_info,
2266						&sctx->raid56_data_stripes[i]);
2267			if (ret < 0)
2268				goto out;
2269			sctx->raid56_data_stripes[i].bg = bg;
2270			sctx->raid56_data_stripes[i].sctx = sctx;
2271		}
2272	}
2273	/*
2274	 * There used to be a big double loop to handle all profiles using the
2275	 * same routine, which grew larger and more gross over time.
2276	 *
2277	 * So here we handle each profile differently, so that simpler
2278	 * profiles have simpler scrubbing functions.
2279	 */
2280	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2281			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2282		/*
2283		 * The above check rules out all complex profiles; the remaining
2284		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2285		 * mirrored duplication without striping.
2286		 *
2287		 * Only @physical and @mirror_num need to be calculated using
2288		 * @stripe_index.
2289		 */
2290		ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2291				scrub_dev, map->stripes[stripe_index].physical,
2292				stripe_index + 1);
2293		offset = 0;
2294		goto out;
2295	}
2296	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2297		ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
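		/*
		 * @offset is where this device's first stripe starts inside
		 * the chunk; it is only consumed by the zoned write-pointer
		 * sync at the "out" label.
		 */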
2298		offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2299		goto out;
2300	}
2301
2302	/* Only RAID56 goes through the old code */
2303	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2304	ret = 0;
2305
2306	/* Calculate the logical end of the stripe */
2307	get_raid56_logic_offset(physical_end, stripe_index,
2308				map, &logic_end, NULL);
2309	logic_end += chunk_logical;
2310
2311	/* Initialize @offset in case we need to go to the "out" label */
2312	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2313	increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
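	/*
	 * E.g. a 3-device RAID5 chunk has two data stripes, so every 64KiB
	 * of physical advance on this device covers 128KiB of logical space.
	 */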
2314
2315	/*
2316	 * Due to the rotation, for RAID56 it's better to iterate each stripe
2317	 * using its physical offset.
2318	 */
2319	while (physical < physical_end) {
2320		ret = get_raid56_logic_offset(physical, stripe_index, map,
2321					      &logical, &stripe_logical);
2322		logical += chunk_logical;
2323		if (ret) {
2324			/* It is a parity stripe */
2325			stripe_logical += chunk_logical;
2326			ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2327							 map, stripe_logical);
2328			if (ret)
2329				goto out;
2330			goto next;
2331		}
2332
2333		/*
2334		 * Now we're at a data stripe, scrub each extent in the range.
2335		 *
2336		 * At this stage, if we ignore the repair part, inside each data
2337		 * stripe it is no different from the SINGLE profile.
2338		 * We can reuse scrub_simple_mirror() here, as the repair part
2339		 * is still based on @mirror_num.
2340		 */
2341		ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2342					  scrub_dev, physical, 1);
2343		if (ret < 0)
2344			goto out;
2345next:
2346		logical += increment;
2347		physical += BTRFS_STRIPE_LEN;
2348		spin_lock(&sctx->stat_lock);
2349		if (stop_loop)
2350			sctx->stat.last_physical =
2351				map->stripes[stripe_index].physical + dev_stripe_len;
2352		else
2353			sctx->stat.last_physical = physical;
2354		spin_unlock(&sctx->stat_lock);
2355		if (stop_loop)
2356			break;
2357	}
2358out:
2359	ret2 = flush_scrub_stripes(sctx);
2360	if (!ret)
2361		ret = ret2;
2362	btrfs_release_path(&sctx->extent_path);
2363	btrfs_release_path(&sctx->csum_path);
2364
2365	if (sctx->raid56_data_stripes) {
2366		for (int i = 0; i < nr_data_stripes(map); i++)
2367			release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2368		kfree(sctx->raid56_data_stripes);
2369		sctx->raid56_data_stripes = NULL;
2370	}
2371
2372	if (sctx->is_dev_replace && ret >= 0) {
2373		int ret2;
2374
2375		ret2 = sync_write_pointer_for_zoned(sctx,
2376				chunk_logical + offset,
2377				map->stripes[stripe_index].physical,
2378				physical_end);
2379		if (ret2)
2380			ret = ret2;
2381	}
2382
2383	return ret < 0 ? ret : 0;
2384}
2385
2386static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2387					  struct btrfs_block_group *bg,
2388					  struct btrfs_device *scrub_dev,
2389					  u64 dev_offset,
2390					  u64 dev_extent_len)
2391{
2392	struct btrfs_fs_info *fs_info = sctx->fs_info;
2393	struct btrfs_chunk_map *map;
2394	int i;
2395	int ret = 0;
2396
2397	map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
2398	if (!map) {
2399		/*
2400		 * Might have been an unused block group deleted by the cleaner
2401		 * kthread or relocation.
2402		 */
2403		spin_lock(&bg->lock);
2404		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2405			ret = -EINVAL;
2406		spin_unlock(&bg->lock);
2407
2408		return ret;
2409	}
2410	if (map->start != bg->start)
2411		goto out;
2412	if (map->chunk_len < dev_extent_len)
2413		goto out;
2414
2415	for (i = 0; i < map->num_stripes; ++i) {
2416		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2417		    map->stripes[i].physical == dev_offset) {
2418			ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
2419			if (ret)
2420				goto out;
2421		}
2422	}
2423out:
2424	btrfs_free_chunk_map(map);
2425
2426	return ret;
2427}
2428
2429static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2430					  struct btrfs_block_group *cache)
2431{
2432	struct btrfs_fs_info *fs_info = cache->fs_info;
2433	struct btrfs_trans_handle *trans;
2434
2435	if (!btrfs_is_zoned(fs_info))
2436		return 0;
2437
2438	btrfs_wait_block_group_reservations(cache);
2439	btrfs_wait_nocow_writers(cache);
2440	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2441
2442	trans = btrfs_join_transaction(root);
2443	if (IS_ERR(trans))
2444		return PTR_ERR(trans);
2445	return btrfs_commit_transaction(trans);
2446}
2447
2448static noinline_for_stack
2449int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2450			   struct btrfs_device *scrub_dev, u64 start, u64 end)
2451{
2452	struct btrfs_dev_extent *dev_extent = NULL;
2453	struct btrfs_path *path;
2454	struct btrfs_fs_info *fs_info = sctx->fs_info;
2455	struct btrfs_root *root = fs_info->dev_root;
2456	u64 chunk_offset;
2457	int ret = 0;
2458	int ro_set;
2459	int slot;
2460	struct extent_buffer *l;
2461	struct btrfs_key key;
2462	struct btrfs_key found_key;
2463	struct btrfs_block_group *cache;
2464	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2465
2466	path = btrfs_alloc_path();
2467	if (!path)
2468		return -ENOMEM;
2469
2470	path->reada = READA_FORWARD;
2471	path->search_commit_root = 1;
2472	path->skip_locking = 1;
2473
2474	key.objectid = scrub_dev->devid;
2475	key.offset = 0ull;
2476	key.type = BTRFS_DEV_EXTENT_KEY;
2477
2478	while (1) {
2479		u64 dev_extent_len;
2480
2481		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2482		if (ret < 0)
2483			break;
2484		if (ret > 0) {
2485			if (path->slots[0] >=
2486			    btrfs_header_nritems(path->nodes[0])) {
2487				ret = btrfs_next_leaf(root, path);
2488				if (ret < 0)
2489					break;
2490				if (ret > 0) {
2491					ret = 0;
2492					break;
2493				}
2494			} else {
2495				ret = 0;
2496			}
2497		}
2498
2499		l = path->nodes[0];
2500		slot = path->slots[0];
2501
2502		btrfs_item_key_to_cpu(l, &found_key, slot);
2503
2504		if (found_key.objectid != scrub_dev->devid)
2505			break;
2506
2507		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2508			break;
2509
2510		if (found_key.offset >= end)
2511			break;
2512
2513		if (found_key.offset < key.offset)
2514			break;
2515
2516		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2517		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2518
2519		if (found_key.offset + dev_extent_len <= start)
2520			goto skip;
2521
2522		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2523
2524		/*
2525		 * get a reference on the corresponding block group to prevent
2526		 * the chunk from going away while we scrub it
2527		 */
2528		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2529
2530		/* Some chunks are removed but not committed to disk yet,
2531		 * continue scrubbing. */
2532		if (!cache)
2533			goto skip;
2534
2535		ASSERT(cache->start <= chunk_offset);
2536		/*
2537		 * We are using the commit root to search for device extents, so
2538		 * that means we could have found a device extent item from a
2539		 * block group that was deleted in the current transaction. The
2540		 * logical start offset of the deleted block group, stored at
2541		 * @chunk_offset, might be part of the logical address range of
2542		 * a new block group (which uses different physical extents).
2543		 * In this case btrfs_lookup_block_group() has returned the new
2544		 * block group, and its start address is less than @chunk_offset.
2545		 *
2546		 * We skip such new block groups, because it's pointless to
2547		 * process them, as we won't find their extents because we search
2548		 * for them using the commit root of the extent tree. For a device
2549		 * replace it's also fine to skip it, we won't miss copying them
2550		 * to the target device because we have the write duplication
2551		 * setup through the regular write path (by btrfs_map_block()),
2552		 * and we have committed a transaction when we started the device
2553		 * replace, right after setting up the device replace state.
2554		 */
2555		if (cache->start < chunk_offset) {
2556			btrfs_put_block_group(cache);
2557			goto skip;
2558		}
2559
2560		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2561			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2562				btrfs_put_block_group(cache);
2563				goto skip;
2564			}
2565		}
2566
2567		/*
2568		 * Make sure that while we are scrubbing the corresponding block
2569		 * group doesn't get its logical address and its device extents
2570		 * reused for another block group, which can possibly be of a
2571		 * different type and different profile. We do this to prevent
2572		 * false error detections and crashes due to bogus attempts to
2573		 * repair extents.
2574		 */
2575		spin_lock(&cache->lock);
2576		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2577			spin_unlock(&cache->lock);
2578			btrfs_put_block_group(cache);
2579			goto skip;
2580		}
2581		btrfs_freeze_block_group(cache);
2582		spin_unlock(&cache->lock);
2583
2584		/*
2585		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
2586		 * to avoid deadlock caused by:
2587		 * btrfs_inc_block_group_ro()
2588		 * -> btrfs_wait_for_commit()
2589		 * -> btrfs_commit_transaction()
2590		 * -> btrfs_scrub_pause()
2591		 */
2592		scrub_pause_on(fs_info);
2593
2594		/*
2595		 * Don't do chunk preallocation for scrub.
2596		 *
2597		 * This is especially important for SYSTEM bgs, or we can hit
2598		 * -EFBIG from btrfs_finish_chunk_alloc() like:
2599		 * 1. The only SYSTEM bg is marked RO.
2600		 *    Since SYSTEM bg is small, that's pretty common.
2601		 * 2. A new SYSTEM bg will be allocated, as the regular
2602		 *    (chunk-allocating) version would allocate a new chunk.
2603		 * 3. The new SYSTEM bg is empty and will get cleaned up.
2604		 *    Before the cleanup really happens, it's marked RO again.
2605		 * 4. The empty SYSTEM bg gets scrubbed.
2606		 *    We go back to 2.
2607		 *
2608		 * This can easily boost the amount of SYSTEM chunks if the
2609		 * cleaner thread can't be triggered fast enough, using up all
2610		 * the space of btrfs_super_block::sys_chunk_array.
2611		 *
2612		 * While for dev replace, we need to try our best to mark the
2613		 * block group RO, to prevent a race between:
2614		 * - Write duplication
2615		 *   Contains the latest data
2616		 * - Scrub copy
2617		 *   Contains data from the commit tree
2618		 *
2619		 * If the target block group is not marked RO, nocow writes can
2620		 * be overwritten by the scrub copy, causing data corruption.
2621		 * So for dev-replace, it's not allowed to continue if a block
2622		 * group is not RO.
2623		 */
2624		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2625		if (!ret && sctx->is_dev_replace) {
2626			ret = finish_extent_writes_for_zoned(root, cache);
2627			if (ret) {
2628				btrfs_dec_block_group_ro(cache);
2629				scrub_pause_off(fs_info);
2630				btrfs_put_block_group(cache);
2631				break;
2632			}
2633		}
2634
2635		if (ret == 0) {
2636			ro_set = 1;
2637		} else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2638			   !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2639			/*
2640			 * btrfs_inc_block_group_ro return -ENOSPC when it
2641			 * failed in creating new chunk for metadata.
2642			 * It is not a problem for scrub, because
2643			 * metadata are always cowed, and our scrub paused
2644			 * commit_transactions.
2645			 *
2646			 * For RAID56 chunks, we have to mark them read-only
2647			 * for scrub, as later we would use our own cache
2648			 * out of RAID56 realm.
2649			 * Thus we want the RAID56 bg to be marked RO to
2650			 * prevent RMW from screwing up out cache.
2651			 */
2652			ro_set = 0;
2653		} else if (ret == -ETXTBSY) {
2654			btrfs_warn(fs_info,
2655		   "skipping scrub of block group %llu due to active swapfile",
2656				   cache->start);
2657			scrub_pause_off(fs_info);
2658			ret = 0;
2659			goto skip_unfreeze;
2660		} else {
2661			btrfs_warn(fs_info,
2662				   "failed setting block group ro: %d", ret);
2663			btrfs_unfreeze_block_group(cache);
2664			btrfs_put_block_group(cache);
2665			scrub_pause_off(fs_info);
2666			break;
2667		}
2668
2669		/*
2670		 * Now the target block group is marked RO, wait for nocow
2671		 * writes to finish before dev-replace.
2672		 * COW is fine, as COW never overwrites extents in the commit tree.
2673		 */
2674		if (sctx->is_dev_replace) {
2675			btrfs_wait_nocow_writers(cache);
2676			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2677					cache->length);
2678		}
2679
2680		scrub_pause_off(fs_info);
2681		down_write(&dev_replace->rwsem);
2682		dev_replace->cursor_right = found_key.offset + dev_extent_len;
2683		dev_replace->cursor_left = found_key.offset;
2684		dev_replace->item_needs_writeback = 1;
2685		up_write(&dev_replace->rwsem);
2686
2687		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2688				  dev_extent_len);
2689		if (sctx->is_dev_replace &&
2690		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2691						      cache, found_key.offset))
2692			ro_set = 0;
2693
2694		down_write(&dev_replace->rwsem);
2695		dev_replace->cursor_left = dev_replace->cursor_right;
2696		dev_replace->item_needs_writeback = 1;
2697		up_write(&dev_replace->rwsem);
2698
2699		if (ro_set)
2700			btrfs_dec_block_group_ro(cache);
2701
2702		/*
2703		 * We might have prevented the cleaner kthread from deleting
2704		 * this block group if it was already unused because we raced
2705		 * and set it to RO mode first. So add it back to the unused
2706		 * list, otherwise it might not ever be deleted unless a manual
2707		 * balance is triggered or it becomes used and unused again.
2708		 */
2709		spin_lock(&cache->lock);
2710		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2711		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
2712			spin_unlock(&cache->lock);
2713			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2714				btrfs_discard_queue_work(&fs_info->discard_ctl,
2715							 cache);
2716			else
2717				btrfs_mark_bg_unused(cache);
2718		} else {
2719			spin_unlock(&cache->lock);
2720		}
2721skip_unfreeze:
2722		btrfs_unfreeze_block_group(cache);
2723		btrfs_put_block_group(cache);
2724		if (ret)
2725			break;
2726		if (sctx->is_dev_replace &&
2727		    atomic64_read(&dev_replace->num_write_errors) > 0) {
2728			ret = -EIO;
2729			break;
2730		}
2731		if (sctx->stat.malloc_errors > 0) {
2732			ret = -ENOMEM;
2733			break;
2734		}
2735skip:
2736		key.offset = found_key.offset + dev_extent_len;
2737		btrfs_release_path(path);
2738	}
2739
2740	btrfs_free_path(path);
2741
2742	return ret;
2743}
2744
2745static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2746			   struct page *page, u64 physical, u64 generation)
2747{
2748	struct btrfs_fs_info *fs_info = sctx->fs_info;
2749	struct bio_vec bvec;
2750	struct bio bio;
2751	struct btrfs_super_block *sb = page_address(page);
2752	int ret;
2753
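	/*
	 * A single on-stack bio with one bvec is enough: a super block is
	 * BTRFS_SUPER_INFO_SIZE (4KiB) and is read synchronously.
	 */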
2754	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2755	bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2756	__bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2757	ret = submit_bio_wait(&bio);
2758	bio_uninit(&bio);
2759
2760	if (ret < 0)
2761		return ret;
2762	ret = btrfs_check_super_csum(fs_info, sb);
2763	if (ret != 0) {
2764		btrfs_err_rl(fs_info,
2765			"super block at physical %llu devid %llu has bad csum",
2766			physical, dev->devid);
2767		return -EIO;
2768	}
2769	if (btrfs_super_generation(sb) != generation) {
2770		btrfs_err_rl(fs_info,
2771"super block at physical %llu devid %llu has bad generation %llu expect %llu",
2772			     physical, dev->devid,
2773			     btrfs_super_generation(sb), generation);
2774		return -EUCLEAN;
2775	}
2776
2777	return btrfs_validate_super(fs_info, sb, -1);
2778}
2779
2780static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2781					   struct btrfs_device *scrub_dev)
2782{
2783	int	i;
2784	u64	bytenr;
2785	u64	gen;
2786	int ret = 0;
2787	struct page *page;
2788	struct btrfs_fs_info *fs_info = sctx->fs_info;
2789
2790	if (BTRFS_FS_ERROR(fs_info))
2791		return -EROFS;
2792
2793	page = alloc_page(GFP_KERNEL);
2794	if (!page) {
2795		spin_lock(&sctx->stat_lock);
2796		sctx->stat.malloc_errors++;
2797		spin_unlock(&sctx->stat_lock);
2798		return -ENOMEM;
2799	}
2800
2801	/* Seed devices of a new filesystem have their own generation. */
2802	if (scrub_dev->fs_devices != fs_info->fs_devices)
2803		gen = scrub_dev->generation;
2804	else
2805		gen = btrfs_get_last_trans_committed(fs_info);
2806
2807	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2808		bytenr = btrfs_sb_offset(i);
2809		if (bytenr + BTRFS_SUPER_INFO_SIZE >
2810		    scrub_dev->commit_total_bytes)
2811			break;
2812		if (!btrfs_check_super_location(scrub_dev, bytenr))
2813			continue;
2814
2815		ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2816		if (ret) {
2817			spin_lock(&sctx->stat_lock);
2818			sctx->stat.super_errors++;
2819			spin_unlock(&sctx->stat_lock);
2820		}
2821	}
2822	__free_page(page);
2823	return 0;
2824}
2825
2826static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2827{
2828	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2829					&fs_info->scrub_lock)) {
2830		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2831
2832		fs_info->scrub_workers = NULL;
2833		mutex_unlock(&fs_info->scrub_lock);
2834
2835		if (scrub_workers)
2836			destroy_workqueue(scrub_workers);
2837	}
2838}
2839
2840/*
 2841 * Get a reference count on fs_info->scrub_workers; start workers if necessary.
2842 */
2843static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2844{
2845	struct workqueue_struct *scrub_workers = NULL;
2846	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2847	int max_active = fs_info->thread_pool_size;
2848	int ret = -ENOMEM;
2849
2850	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2851		return 0;
2852
2853	scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2854	if (!scrub_workers)
2855		return -ENOMEM;
2856
2857	mutex_lock(&fs_info->scrub_lock);
2858	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2859		ASSERT(fs_info->scrub_workers == NULL);
2860		fs_info->scrub_workers = scrub_workers;
2861		refcount_set(&fs_info->scrub_workers_refcnt, 1);
2862		mutex_unlock(&fs_info->scrub_lock);
2863		return 0;
2864	}
2865	/* Another thread raced in and created the workers for us */
2866	refcount_inc(&fs_info->scrub_workers_refcnt);
2867	mutex_unlock(&fs_info->scrub_lock);
2868
2869	ret = 0;
2870
2871	destroy_workqueue(scrub_workers);
2872	return ret;
2873}
2874
2875int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2876		    u64 end, struct btrfs_scrub_progress *progress,
2877		    int readonly, int is_dev_replace)
2878{
2879	struct btrfs_dev_lookup_args args = { .devid = devid };
2880	struct scrub_ctx *sctx;
2881	int ret;
2882	struct btrfs_device *dev;
2883	unsigned int nofs_flag;
2884	bool need_commit = false;
2885
2886	if (btrfs_fs_closing(fs_info))
2887		return -EAGAIN;
2888
2889	/* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2890	ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2891
2892	/*
2893	 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2894	 * value (max nodesize / min sectorsize), thus nodesize should always
2895	 * be fine.
2896	 */
2897	ASSERT(fs_info->nodesize <=
2898	       SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2899
2900	/* Allocate outside of device_list_mutex */
2901	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2902	if (IS_ERR(sctx))
2903		return PTR_ERR(sctx);
2904
2905	ret = scrub_workers_get(fs_info);
2906	if (ret)
2907		goto out_free_ctx;
2908
2909	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2910	dev = btrfs_find_device(fs_info->fs_devices, &args);
2911	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2912		     !is_dev_replace)) {
2913		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2914		ret = -ENODEV;
2915		goto out;
2916	}
2917
2918	if (!is_dev_replace && !readonly &&
2919	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2920		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2921		btrfs_err_in_rcu(fs_info,
2922			"scrub on devid %llu: filesystem on %s is not writable",
2923				 devid, btrfs_dev_name(dev));
2924		ret = -EROFS;
2925		goto out;
2926	}
2927
2928	mutex_lock(&fs_info->scrub_lock);
2929	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2930	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2931		mutex_unlock(&fs_info->scrub_lock);
2932		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2933		ret = -EIO;
2934		goto out;
2935	}
2936
2937	down_read(&fs_info->dev_replace.rwsem);
2938	if (dev->scrub_ctx ||
2939	    (!is_dev_replace &&
2940	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2941		up_read(&fs_info->dev_replace.rwsem);
2942		mutex_unlock(&fs_info->scrub_lock);
2943		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2944		ret = -EINPROGRESS;
2945		goto out;
2946	}
2947	up_read(&fs_info->dev_replace.rwsem);
2948
2949	sctx->readonly = readonly;
2950	dev->scrub_ctx = sctx;
2951	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2952
2953	/*
2954	 * By checking @scrub_pause_req here, we can avoid a race between
2955	 * committing a transaction and scrubbing.
2956	 */
2957	__scrub_blocked_if_needed(fs_info);
2958	atomic_inc(&fs_info->scrubs_running);
2959	mutex_unlock(&fs_info->scrub_lock);
2960
2961	/*
2962	 * In order to avoid deadlock with reclaim when there is a transaction
2963	 * trying to pause scrub, make sure we use GFP_NOFS for all the
2964	 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2965	 * invoked by our callees. The pausing request is done when the
2966	 * transaction commit starts, and it blocks the transaction until scrub
2967	 * is paused (done at specific points at scrub_stripe() or right above
2968	 * before incrementing fs_info->scrubs_running).
2969	 */
2970	nofs_flag = memalloc_nofs_save();
2971	if (!is_dev_replace) {
2972		u64 old_super_errors;
2973
2974		spin_lock(&sctx->stat_lock);
2975		old_super_errors = sctx->stat.super_errors;
2976		spin_unlock(&sctx->stat_lock);
2977
2978		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2979		/*
2980		 * By holding the device list mutex, we can kick off writing
2981		 * the super blocks in log tree sync.
2982		 */
2983		mutex_lock(&fs_info->fs_devices->device_list_mutex);
2984		ret = scrub_supers(sctx, dev);
2985		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2986
2987		spin_lock(&sctx->stat_lock);
2988		/*
2989		 * Super block errors found, but we cannot commit a transaction
2990		 * in the current context, since btrfs_commit_transaction() needs
2991		 * to pause the currently running scrub (held by ourselves).
2992		 */
2993		if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2994			need_commit = true;
2995		spin_unlock(&sctx->stat_lock);
2996	}
2997
2998	if (!ret)
2999		ret = scrub_enumerate_chunks(sctx, dev, start, end);
3000	memalloc_nofs_restore(nofs_flag);
3001
3002	atomic_dec(&fs_info->scrubs_running);
3003	wake_up(&fs_info->scrub_pause_wait);
3004
3005	if (progress)
3006		memcpy(progress, &sctx->stat, sizeof(*progress));
3007
3008	if (!is_dev_replace)
3009		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3010			ret ? "not finished" : "finished", devid, ret);
3011
3012	mutex_lock(&fs_info->scrub_lock);
3013	dev->scrub_ctx = NULL;
3014	mutex_unlock(&fs_info->scrub_lock);
3015
3016	scrub_workers_put(fs_info);
3017	scrub_put_ctx(sctx);
3018
3019	/*
3020	 * We found some super block errors before, now try to force a
3021	 * transaction commit, as scrub has finished.
3022	 */
3023	if (need_commit) {
3024		struct btrfs_trans_handle *trans;
3025
3026		trans = btrfs_start_transaction(fs_info->tree_root, 0);
3027		if (IS_ERR(trans)) {
3028			ret = PTR_ERR(trans);
3029			btrfs_err(fs_info,
3030	"scrub: failed to start transaction to fix super block errors: %d", ret);
3031			return ret;
3032		}
3033		ret = btrfs_commit_transaction(trans);
3034		if (ret < 0)
3035			btrfs_err(fs_info,
3036	"scrub: failed to commit transaction to fix super block errors: %d", ret);
3037	}
3038	return ret;
3039out:
3040	scrub_workers_put(fs_info);
3041out_free_ctx:
3042	scrub_free_ctx(sctx);
3043
3044	return ret;
3045}
3046
3047void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3048{
3049	mutex_lock(&fs_info->scrub_lock);
3050	atomic_inc(&fs_info->scrub_pause_req);
3051	while (atomic_read(&fs_info->scrubs_paused) !=
3052	       atomic_read(&fs_info->scrubs_running)) {
3053		mutex_unlock(&fs_info->scrub_lock);
3054		wait_event(fs_info->scrub_pause_wait,
3055			   atomic_read(&fs_info->scrubs_paused) ==
3056			   atomic_read(&fs_info->scrubs_running));
3057		mutex_lock(&fs_info->scrub_lock);
3058	}
3059	mutex_unlock(&fs_info->scrub_lock);
3060}
3061
3062void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3063{
3064	atomic_dec(&fs_info->scrub_pause_req);
3065	wake_up(&fs_info->scrub_pause_wait);
3066}
3067
3068int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3069{
3070	mutex_lock(&fs_info->scrub_lock);
3071	if (!atomic_read(&fs_info->scrubs_running)) {
3072		mutex_unlock(&fs_info->scrub_lock);
3073		return -ENOTCONN;
3074	}
3075
3076	atomic_inc(&fs_info->scrub_cancel_req);
3077	while (atomic_read(&fs_info->scrubs_running)) {
3078		mutex_unlock(&fs_info->scrub_lock);
3079		wait_event(fs_info->scrub_pause_wait,
3080			   atomic_read(&fs_info->scrubs_running) == 0);
3081		mutex_lock(&fs_info->scrub_lock);
3082	}
3083	atomic_dec(&fs_info->scrub_cancel_req);
3084	mutex_unlock(&fs_info->scrub_lock);
3085
3086	return 0;
3087}
3088
3089int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3090{
3091	struct btrfs_fs_info *fs_info = dev->fs_info;
3092	struct scrub_ctx *sctx;
3093
3094	mutex_lock(&fs_info->scrub_lock);
3095	sctx = dev->scrub_ctx;
3096	if (!sctx) {
3097		mutex_unlock(&fs_info->scrub_lock);
3098		return -ENOTCONN;
3099	}
3100	atomic_inc(&sctx->cancel_req);
3101	while (dev->scrub_ctx) {
3102		mutex_unlock(&fs_info->scrub_lock);
3103		wait_event(fs_info->scrub_pause_wait,
3104			   dev->scrub_ctx == NULL);
3105		mutex_lock(&fs_info->scrub_lock);
3106	}
3107	mutex_unlock(&fs_info->scrub_lock);
3108
3109	return 0;
3110}
3111
3112int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3113			 struct btrfs_scrub_progress *progress)
3114{
3115	struct btrfs_dev_lookup_args args = { .devid = devid };
3116	struct btrfs_device *dev;
3117	struct scrub_ctx *sctx = NULL;
3118
3119	mutex_lock(&fs_info->fs_devices->device_list_mutex);
3120	dev = btrfs_find_device(fs_info->fs_devices, &args);
3121	if (dev)
3122		sctx = dev->scrub_ctx;
3123	if (sctx)
3124		memcpy(progress, &sctx->stat, sizeof(*progress));
3125	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3126
3127	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3128}
v3.5.6
 
   1/*
   2 * Copyright (C) 2011 STRATO.  All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or
   5 * modify it under the terms of the GNU General Public
   6 * License v2 as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  11 * General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public
  14 * License along with this program; if not, write to the
  15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  16 * Boston, MA 021110-1307, USA.
  17 */
  18
  19#include <linux/blkdev.h>
  20#include <linux/ratelimit.h>
 
 
  21#include "ctree.h"
 
  22#include "volumes.h"
  23#include "disk-io.h"
  24#include "ordered-data.h"
  25#include "transaction.h"
  26#include "backref.h"
  27#include "extent_io.h"
  28#include "check-integrity.h"
  29#include "rcu-string.h"
 
 
 
 
 
 
 
  30
  31/*
  32 * This is only the first step towards a full-features scrub. It reads all
  33 * extent and super block and verifies the checksums. In case a bad checksum
  34 * is found or the extent cannot be read, good data will be written back if
  35 * any can be found.
  36 *
  37 * Future enhancements:
  38 *  - In case an unrepairable extent is encountered, track which files are
  39 *    affected and report them
  40 *  - track and record media errors, throw out bad devices
  41 *  - add a mode to also read unallocated space
  42 */
  43
  44struct scrub_block;
  45struct scrub_dev;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46
  47#define SCRUB_PAGES_PER_BIO	16	/* 64k per bio */
  48#define SCRUB_BIOS_PER_DEV	16	/* 1 MB per device in flight */
  49#define SCRUB_MAX_PAGES_PER_BLOCK	16	/* 64k per node/leaf/sector */
  50
  51struct scrub_page {
  52	struct scrub_block	*sblock;
  53	struct page		*page;
  54	struct btrfs_device	*dev;
  55	u64			flags;  /* extent flags */
  56	u64			generation;
  57	u64			logical;
  58	u64			physical;
  59	struct {
  60		unsigned int	mirror_num:8;
  61		unsigned int	have_csum:1;
  62		unsigned int	io_error:1;
  63	};
  64	u8			csum[BTRFS_CSUM_SIZE];
  65};
  66
  67struct scrub_bio {
  68	int			index;
  69	struct scrub_dev	*sdev;
  70	struct bio		*bio;
  71	int			err;
  72	u64			logical;
  73	u64			physical;
  74	struct scrub_page	*pagev[SCRUB_PAGES_PER_BIO];
  75	int			page_count;
  76	int			next_free;
  77	struct btrfs_work	work;
 
 
  78};
  79
  80struct scrub_block {
  81	struct scrub_page	pagev[SCRUB_MAX_PAGES_PER_BLOCK];
  82	int			page_count;
  83	atomic_t		outstanding_pages;
  84	atomic_t		ref_count; /* free mem on transition to zero */
  85	struct scrub_dev	*sdev;
  86	struct {
  87		unsigned int	header_error:1;
  88		unsigned int	checksum_error:1;
  89		unsigned int	no_io_error_seen:1;
  90		unsigned int	generation_error:1; /* also sets header_error */
  91	};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  92};
  93
  94struct scrub_dev {
  95	struct scrub_bio	*bios[SCRUB_BIOS_PER_DEV];
  96	struct btrfs_device	*dev;
 
 
 
  97	int			first_free;
  98	int			curr;
  99	atomic_t		in_flight;
 100	atomic_t		fixup_cnt;
 101	spinlock_t		list_lock;
 102	wait_queue_head_t	list_wait;
 103	u16			csum_size;
 104	struct list_head	csum_list;
 105	atomic_t		cancel_req;
 106	int			readonly;
 107	int			pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
 108	u32			sectorsize;
 109	u32			nodesize;
 110	u32			leafsize;
 
 
 
 
 
 
 
 111	/*
 112	 * statistics
 113	 */
 114	struct btrfs_scrub_progress stat;
 115	spinlock_t		stat_lock;
 116};
 117
 118struct scrub_fixup_nodatasum {
 119	struct scrub_dev	*sdev;
 120	u64			logical;
 121	struct btrfs_root	*root;
 122	struct btrfs_work	work;
 123	int			mirror_num;
 
 
 124};
 125
 126struct scrub_warning {
 127	struct btrfs_path	*path;
 128	u64			extent_item_size;
 129	char			*scratch_buf;
 130	char			*msg_buf;
 131	const char		*errstr;
 132	sector_t		sector;
 133	u64			logical;
 134	struct btrfs_device	*dev;
 135	int			msg_bufsize;
 136	int			scratch_bufsize;
 137};
 138
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 139
 140static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
 141static int scrub_setup_recheck_block(struct scrub_dev *sdev,
 142				     struct btrfs_mapping_tree *map_tree,
 143				     u64 length, u64 logical,
 144				     struct scrub_block *sblock);
 145static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
 146			       struct scrub_block *sblock, int is_metadata,
 147			       int have_csum, u8 *csum, u64 generation,
 148			       u16 csum_size);
 149static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 150					 struct scrub_block *sblock,
 151					 int is_metadata, int have_csum,
 152					 const u8 *csum, u64 generation,
 153					 u16 csum_size);
 154static void scrub_complete_bio_end_io(struct bio *bio, int err);
 155static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 156					     struct scrub_block *sblock_good,
 157					     int force_write);
 158static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 159					    struct scrub_block *sblock_good,
 160					    int page_num, int force_write);
 161static int scrub_checksum_data(struct scrub_block *sblock);
 162static int scrub_checksum_tree_block(struct scrub_block *sblock);
 163static int scrub_checksum_super(struct scrub_block *sblock);
 164static void scrub_block_get(struct scrub_block *sblock);
 165static void scrub_block_put(struct scrub_block *sblock);
 166static int scrub_add_page_to_bio(struct scrub_dev *sdev,
 167				 struct scrub_page *spage);
 168static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
 169		       u64 physical, u64 flags, u64 gen, int mirror_num,
 170		       u8 *csum, int force);
 171static void scrub_bio_end_io(struct bio *bio, int err);
 172static void scrub_bio_end_io_worker(struct btrfs_work *work);
 173static void scrub_block_complete(struct scrub_block *sblock);
 174
 175
 176static void scrub_free_csums(struct scrub_dev *sdev)
 177{
 178	while (!list_empty(&sdev->csum_list)) {
 179		struct btrfs_ordered_sum *sum;
 180		sum = list_first_entry(&sdev->csum_list,
 181				       struct btrfs_ordered_sum, list);
 182		list_del(&sum->list);
 183		kfree(sum);
 184	}
 185}
 186
 187static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 188{
 189	int i;
 190
 191	if (!sdev)
 192		return;
 193
 194	/* this can happen when scrub is cancelled */
 195	if (sdev->curr != -1) {
 196		struct scrub_bio *sbio = sdev->bios[sdev->curr];
 197
 198		for (i = 0; i < sbio->page_count; i++) {
 199			BUG_ON(!sbio->pagev[i]);
 200			BUG_ON(!sbio->pagev[i]->page);
 201			scrub_block_put(sbio->pagev[i]->sblock);
 202		}
 203		bio_put(sbio->bio);
 204	}
 205
 206	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
 207		struct scrub_bio *sbio = sdev->bios[i];
 208
 209		if (!sbio)
 210			break;
 211		kfree(sbio);
 212	}
 213
 214	scrub_free_csums(sdev);
 215	kfree(sdev);
 216}
 217
 218static noinline_for_stack
 219struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
 220{
 221	struct scrub_dev *sdev;
 222	int		i;
 223	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 224	int pages_per_bio;
 225
 226	pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
 227			      bio_get_nr_vecs(dev->bdev));
 228	sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
 229	if (!sdev)
 
 230		goto nomem;
 231	sdev->dev = dev;
 232	sdev->pages_per_bio = pages_per_bio;
 233	sdev->curr = -1;
 234	for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
 235		struct scrub_bio *sbio;
 236
 237		sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
 238		if (!sbio)
 239			goto nomem;
 240		sdev->bios[i] = sbio;
 241
 242		sbio->index = i;
 243		sbio->sdev = sdev;
 244		sbio->page_count = 0;
 245		sbio->work.func = scrub_bio_end_io_worker;
 246
 247		if (i != SCRUB_BIOS_PER_DEV-1)
 248			sdev->bios[i]->next_free = i + 1;
 249		else
 250			sdev->bios[i]->next_free = -1;
 251	}
 252	sdev->first_free = 0;
 253	sdev->nodesize = dev->dev_root->nodesize;
 254	sdev->leafsize = dev->dev_root->leafsize;
 255	sdev->sectorsize = dev->dev_root->sectorsize;
 256	atomic_set(&sdev->in_flight, 0);
 257	atomic_set(&sdev->fixup_cnt, 0);
 258	atomic_set(&sdev->cancel_req, 0);
 259	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
 260	INIT_LIST_HEAD(&sdev->csum_list);
 261
 262	spin_lock_init(&sdev->list_lock);
 263	spin_lock_init(&sdev->stat_lock);
 264	init_waitqueue_head(&sdev->list_wait);
 265	return sdev;
 266
 267nomem:
 268	scrub_free_dev(sdev);
 269	return ERR_PTR(-ENOMEM);
 270}
 271
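    /*
     * callback for iterate_extent_inodes(): resolve one inode referencing
     * the errored extent and print a warning for every file path that
     * leads to it
     */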
 272static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
 273{
 274	u64 isize;
 275	u32 nlink;
 276	int ret;
 277	int i;
 278	struct extent_buffer *eb;
 279	struct btrfs_inode_item *inode_item;
 280	struct scrub_warning *swarn = ctx;
 281	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
 282	struct inode_fs_paths *ipath = NULL;
 283	struct btrfs_root *local_root;
 284	struct btrfs_key root_key;
 285
 286	root_key.objectid = root;
 287	root_key.type = BTRFS_ROOT_ITEM_KEY;
 288	root_key.offset = (u64)-1;
 289	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
 290	if (IS_ERR(local_root)) {
 291		ret = PTR_ERR(local_root);
 292		goto err;
 293	}
 294
 295	ret = inode_item_info(inum, 0, local_root, swarn->path);
 296	if (ret) {
 297		btrfs_release_path(swarn->path);
 298		goto err;
 299	}
 300
 301	eb = swarn->path->nodes[0];
 302	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
 303					struct btrfs_inode_item);
 304	isize = btrfs_inode_size(eb, inode_item);
 305	nlink = btrfs_inode_nlink(eb, inode_item);
 306	btrfs_release_path(swarn->path);
 307
 308	ipath = init_ipath(4096, local_root, swarn->path);
 309	if (IS_ERR(ipath)) {
 310		ret = PTR_ERR(ipath);
 311		ipath = NULL;
 312		goto err;
 313	}
 314	ret = paths_from_inode(inum, ipath);
 315
 316	if (ret < 0)
 317		goto err;
 318
 319	/*
 320	 * we deliberately ignore the fact that ipath might have been too
 321	 * small to hold all of the paths here
 322	 */
 323	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 324		printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
 325			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
 326			"length %llu, links %u (path: %s)\n", swarn->errstr,
 327			swarn->logical, rcu_str_deref(swarn->dev->name),
 328			(unsigned long long)swarn->sector, root, inum, offset,
 329			min(isize - offset, (u64)PAGE_SIZE), nlink,
 330			(char *)(unsigned long)ipath->fspath->val[i]);
 331
 332	free_ipath(ipath);
 333	return 0;
 334
 335err:
 336	printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
 337		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
 338		"resolving failed with ret=%d\n", swarn->errstr,
 339		swarn->logical, rcu_str_deref(swarn->dev->name),
 340		(unsigned long long)swarn->sector, root, inum, offset, ret);
 341
 342	free_ipath(ipath);
 343	return 0;
 344}
 345
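    /*
     * report where an errored block lives: tree backrefs for metadata
     * blocks, inode paths for data extents
     */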
 346static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 347{
 348	struct btrfs_device *dev = sblock->sdev->dev;
 349	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 350	struct btrfs_path *path;
 351	struct btrfs_key found_key;
 352	struct extent_buffer *eb;
 353	struct btrfs_extent_item *ei;
 354	struct scrub_warning swarn;
 355	u32 item_size;
 356	int ret;
 357	u64 ref_root;
 358	u8 ref_level;
 359	unsigned long ptr = 0;
 360	const int bufsize = 4096;
 361	u64 extent_item_pos;
 362
 363	path = btrfs_alloc_path();
 364
 365	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
 366	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
 367	BUG_ON(sblock->page_count < 1);
 368	swarn.sector = (sblock->pagev[0].physical) >> 9;
 369	swarn.logical = sblock->pagev[0].logical;
 370	swarn.errstr = errstr;
 371	swarn.dev = dev;
 372	swarn.msg_bufsize = bufsize;
 373	swarn.scratch_bufsize = bufsize;
 374
 375	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
 376		goto out;
 377
 378	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
 379	if (ret < 0)
 380		goto out;
 381
 382	extent_item_pos = swarn.logical - found_key.objectid;
 383	swarn.extent_item_size = found_key.offset;
 384
 385	eb = path->nodes[0];
 386	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
 387	item_size = btrfs_item_size_nr(eb, path->slots[0]);
 388	btrfs_release_path(path);
 389
 390	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 391		do {
 392			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
 393							&ref_root, &ref_level);
 394			printk_in_rcu(KERN_WARNING
 395				"btrfs: %s at logical %llu on dev %s, "
 396				"sector %llu: metadata %s (level %d) in tree "
 397				"%llu\n", errstr, swarn.logical,
 398				rcu_str_deref(dev->name),
 399				(unsigned long long)swarn.sector,
 400				ref_level ? "node" : "leaf",
 401				ret < 0 ? -1 : ref_level,
 402				ret < 0 ? -1 : ref_root);
 403		} while (ret != 1);
 404	} else {
 405		swarn.path = path;
 406		iterate_extent_inodes(fs_info, found_key.objectid,
 407					extent_item_pos, 1,
 408					scrub_print_warning_inode, &swarn);
 409	}
 410
 411out:
 412	btrfs_free_path(path);
 413	kfree(swarn.scratch_buf);
 414	kfree(swarn.msg_buf);
 415}
 416
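    /*
     * callback for iterate_inodes_from_logical(): rewrite the defect
     * sector from an uptodate page cache page, or force a read of the bad
     * mirror so the generic read-repair path corrects it; returns 1 to
     * stop the iteration once the sector is corrected
     */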
 417static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
 418{
 419	struct page *page = NULL;
 420	unsigned long index;
 421	struct scrub_fixup_nodatasum *fixup = ctx;
 422	int ret;
 423	int corrected = 0;
 424	struct btrfs_key key;
 425	struct inode *inode = NULL;
 426	u64 end = offset + PAGE_SIZE - 1;
 427	struct btrfs_root *local_root;
 428
 429	key.objectid = root;
 430	key.type = BTRFS_ROOT_ITEM_KEY;
 431	key.offset = (u64)-1;
 432	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
 433	if (IS_ERR(local_root))
 434		return PTR_ERR(local_root);
 435
 436	key.type = BTRFS_INODE_ITEM_KEY;
 437	key.objectid = inum;
 438	key.offset = 0;
 439	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
 440	if (IS_ERR(inode))
 441		return PTR_ERR(inode);
 442
 443	index = offset >> PAGE_CACHE_SHIFT;
 444
 445	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
 446	if (!page) {
 447		ret = -ENOMEM;
 448		goto out;
 449	}
 450
 451	if (PageUptodate(page)) {
 452		struct btrfs_mapping_tree *map_tree;
 453		if (PageDirty(page)) {
 454			/*
 455			 * we need to write the data to the defect sector. the
 456			 * data that was in that sector is not in memory,
 457			 * because the page was modified. we must not write the
 458			 * modified page to that sector.
 459			 *
 460			 * TODO: what could be done here: wait for the delalloc
 461			 *       runner to write out that page (might involve
 462			 *       COW) and see whether the sector is still
 463			 *       referenced afterwards.
 464			 *
 465			 * For the time being, we'll treat this error as
 466			 * uncorrectable, although there is a chance that a
 467			 * later scrub will find the bad sector again and that
 468			 * there's no dirty page in memory then.
 469			 */
 470			ret = -EIO;
 471			goto out;
 472		}
 473		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
 474		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
 475					fixup->logical, page,
 476					fixup->mirror_num);
 477		unlock_page(page);
 478		corrected = !ret;
 479	} else {
 480		/*
 481		 * we need to get good data first. the general readpage path
 482		 * will call repair_io_failure for us, we just have to make
 483		 * sure we read the bad mirror.
 484		 */
 485		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
 486					EXTENT_DAMAGED, GFP_NOFS);
 487		if (ret) {
 488			/* set_extent_bits should give proper error */
 489			WARN_ON(ret > 0);
 490			if (ret > 0)
 491				ret = -EFAULT;
 492			goto out;
 493		}
 494
 495		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
 496						btrfs_get_extent,
 497						fixup->mirror_num);
 498		wait_on_page_locked(page);
 499
 500		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
 501						end, EXTENT_DAMAGED, 0, NULL);
 502		if (!corrected)
 503			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
 504						EXTENT_DAMAGED, GFP_NOFS);
 505	}
 506
 507out:
 508	if (page)
 509		put_page(page);
 510	if (inode)
 511		iput(inode);
 512
 513	if (ret < 0)
 514		return ret;
 515
 516	if (ret == 0 && corrected) {
 517		/*
 518		 * we only need to call readpage for one of the inodes belonging
 519		 * to this extent. so make iterate_extent_inodes stop
 520		 */
 521		return 1;
 522	}
 523
 524	return -EIO;
 525}
 526
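    /*
     * worker to repair a data sector that carries no checksum: trigger
     * regular reads through all inodes referencing the logical address
     * and account the result in the scrub statistics
     */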
 527static void scrub_fixup_nodatasum(struct btrfs_work *work)
 528{
 529	int ret;
 530	struct scrub_fixup_nodatasum *fixup;
 531	struct scrub_dev *sdev;
 532	struct btrfs_trans_handle *trans = NULL;
 533	struct btrfs_fs_info *fs_info;
 534	struct btrfs_path *path;
 535	int uncorrectable = 0;
 536
 537	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
 538	sdev = fixup->sdev;
 539	fs_info = fixup->root->fs_info;
 540
 541	path = btrfs_alloc_path();
 542	if (!path) {
 543		spin_lock(&sdev->stat_lock);
 544		++sdev->stat.malloc_errors;
 545		spin_unlock(&sdev->stat_lock);
 546		uncorrectable = 1;
 547		goto out;
 548	}
 549
 550	trans = btrfs_join_transaction(fixup->root);
 551	if (IS_ERR(trans)) {
 552		uncorrectable = 1;
 553		goto out;
 554	}
 555
 556	/*
 557	 * the idea is to trigger a regular read through the standard path. we
 558	 * read a page from the (failed) logical address by specifying the
 559	 * corresponding copynum of the failed sector. thus, that readpage is
 560	 * expected to fail.
 561	 * that is the point where on-the-fly error correction will kick in
 562	 * (once it's finished) and rewrite the failed sector if a good copy
 563	 * can be found.
 564	 */
 565	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
 566						path, scrub_fixup_readpage,
 567						fixup);
 568	if (ret < 0) {
 569		uncorrectable = 1;
 570		goto out;
 571	}
 572	WARN_ON(ret != 1);
 573
 574	spin_lock(&sdev->stat_lock);
 575	++sdev->stat.corrected_errors;
 576	spin_unlock(&sdev->stat_lock);
 577
 578out:
 579	if (trans && !IS_ERR(trans))
 580		btrfs_end_transaction(trans, fixup->root);
 581	if (uncorrectable) {
 582		spin_lock(&sdev->stat_lock);
 583		++sdev->stat.uncorrectable_errors;
 584		spin_unlock(&sdev->stat_lock);
 585
 586		printk_ratelimited_in_rcu(KERN_ERR
 587			"btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
 588			(unsigned long long)fixup->logical,
 589			rcu_str_deref(sdev->dev->name));
 590	}
 591
 592	btrfs_free_path(path);
 593	kfree(fixup);
 594
 595	/* see the caller for why we pretend to be paused in the scrub counters */
 596	mutex_lock(&fs_info->scrub_lock);
 597	atomic_dec(&fs_info->scrubs_running);
 598	atomic_dec(&fs_info->scrubs_paused);
 599	mutex_unlock(&fs_info->scrub_lock);
 600	atomic_dec(&sdev->fixup_cnt);
 601	wake_up(&fs_info->scrub_pause_wait);
 602	wake_up(&sdev->list_wait);
 603}
 604
 605/*
 606 * scrub_handle_errored_block gets called when either verification of the
 607 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 608 * case, this function handles all pages in the bio, even though only one
 609 * may be bad.
 610 * The goal of this function is to repair the errored block by using the
 611 * contents of one of the mirrors.
 612 */
 613static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 614{
 615	struct scrub_dev *sdev = sblock_to_check->sdev;
 616	struct btrfs_fs_info *fs_info;
 617	u64 length;
 618	u64 logical;
 619	u64 generation;
 620	unsigned int failed_mirror_index;
 621	unsigned int is_metadata;
 622	unsigned int have_csum;
 623	u8 *csum;
 624	struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
 625	struct scrub_block *sblock_bad;
 626	int ret;
 627	int mirror_index;
 628	int page_num;
 629	int success;
 630	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
 631				      DEFAULT_RATELIMIT_BURST);
 632
 633	BUG_ON(sblock_to_check->page_count < 1);
 634	fs_info = sdev->dev->dev_root->fs_info;
 635	length = sblock_to_check->page_count * PAGE_SIZE;
 636	logical = sblock_to_check->pagev[0].logical;
 637	generation = sblock_to_check->pagev[0].generation;
 638	BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
 639	failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
 640	is_metadata = !(sblock_to_check->pagev[0].flags &
 641			BTRFS_EXTENT_FLAG_DATA);
 642	have_csum = sblock_to_check->pagev[0].have_csum;
 643	csum = sblock_to_check->pagev[0].csum;
 644
 645	/*
 646	 * read all mirrors one after the other. This includes
 647	 * re-reading the extent or metadata block that failed (which
 648	 * was the reason this fixup code was called), this time page
 649	 * by page, in order to know which pages caused I/O errors
 650	 * and which ones are good (for all mirrors).
 651	 * It is the goal to handle the situation when more than one
 652	 * mirror contains I/O errors, but the errors do not
 653	 * overlap, i.e. the data can be repaired by selecting the
 654	 * pages from those mirrors without I/O error on the
 655	 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
 656	 * would be that mirror #1 has an I/O error on the first page,
 657	 * the second page is good, and mirror #2 has an I/O error on
 658	 * the second page, but the first page is good.
 659	 * Then the first page of the first mirror can be repaired by
 660	 * taking the first page of the second mirror, and the
 661	 * second page of the second mirror can be repaired by
 662	 * copying the contents of the 2nd page of the 1st mirror.
 663	 * One more note: if the pages of one mirror contain I/O
 664	 * errors, the checksum cannot be verified. In order to get
 665	 * the best data for repairing, the first attempt is to find
 666	 * a mirror without I/O errors and with a validated checksum.
 667	 * Only if this is not possible, the pages are picked from
 668	 * mirrors with I/O errors without considering the checksum.
 669	 * If the latter is the case, at the end, the checksum of the
 670	 * repaired area is verified in order to correctly maintain
 671	 * the statistics.
 672	 */
 673
 674	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
 675				     sizeof(*sblocks_for_recheck),
 676				     GFP_NOFS);
 677	if (!sblocks_for_recheck) {
 678		spin_lock(&sdev->stat_lock);
 679		sdev->stat.malloc_errors++;
 680		sdev->stat.read_errors++;
 681		sdev->stat.uncorrectable_errors++;
 682		spin_unlock(&sdev->stat_lock);
 683		btrfs_dev_stat_inc_and_print(sdev->dev,
 684					     BTRFS_DEV_STAT_READ_ERRS);
 685		goto out;
 686	}
 687
 688	/* setup the context, map the logical blocks and alloc the pages */
 689	ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
 690					logical, sblocks_for_recheck);
 691	if (ret) {
 692		spin_lock(&sdev->stat_lock);
 693		sdev->stat.read_errors++;
 694		sdev->stat.uncorrectable_errors++;
 695		spin_unlock(&sdev->stat_lock);
 696		btrfs_dev_stat_inc_and_print(sdev->dev,
 697					     BTRFS_DEV_STAT_READ_ERRS);
 698		goto out;
 699	}
 700	BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
 701	sblock_bad = sblocks_for_recheck + failed_mirror_index;
 702
 703	/* build and submit the bios for the failed mirror, check checksums */
 704	ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
 705				  csum, generation, sdev->csum_size);
 706	if (ret) {
 707		spin_lock(&sdev->stat_lock);
 708		sdev->stat.read_errors++;
 709		sdev->stat.uncorrectable_errors++;
 710		spin_unlock(&sdev->stat_lock);
 711		btrfs_dev_stat_inc_and_print(sdev->dev,
 712					     BTRFS_DEV_STAT_READ_ERRS);
 713		goto out;
 714	}
 715
 716	if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
 717	    sblock_bad->no_io_error_seen) {
 718		/*
 719		 * the error disappeared after reading page by page, or
 720		 * the area was part of a huge bio and other parts of the
 721		 * bio caused I/O errors, or the block layer merged several
 722		 * read requests into one and the error is caused by a
 723		 * different bio (usually one of the two latter cases is
 724		 * the cause)
 725		 */
 726		spin_lock(&sdev->stat_lock);
 727		sdev->stat.unverified_errors++;
 728		spin_unlock(&sdev->stat_lock);
 729
 730		goto out;
 731	}
 732
 733	if (!sblock_bad->no_io_error_seen) {
 734		spin_lock(&sdev->stat_lock);
 735		sdev->stat.read_errors++;
 736		spin_unlock(&sdev->stat_lock);
 737		if (__ratelimit(&_rs))
 738			scrub_print_warning("i/o error", sblock_to_check);
 739		btrfs_dev_stat_inc_and_print(sdev->dev,
 740					     BTRFS_DEV_STAT_READ_ERRS);
 741	} else if (sblock_bad->checksum_error) {
 742		spin_lock(&sdev->stat_lock);
 743		sdev->stat.csum_errors++;
 744		spin_unlock(&sdev->stat_lock);
 745		if (__ratelimit(&_rs))
 746			scrub_print_warning("checksum error", sblock_to_check);
 747		btrfs_dev_stat_inc_and_print(sdev->dev,
 748					     BTRFS_DEV_STAT_CORRUPTION_ERRS);
 749	} else if (sblock_bad->header_error) {
 750		spin_lock(&sdev->stat_lock);
 751		sdev->stat.verify_errors++;
 752		spin_unlock(&sdev->stat_lock);
 753		if (__ratelimit(&_rs))
 754			scrub_print_warning("checksum/header error",
 755					    sblock_to_check);
 756		if (sblock_bad->generation_error)
 757			btrfs_dev_stat_inc_and_print(sdev->dev,
 758				BTRFS_DEV_STAT_GENERATION_ERRS);
 759		else
 760			btrfs_dev_stat_inc_and_print(sdev->dev,
 761				BTRFS_DEV_STAT_CORRUPTION_ERRS);
 762	}
 763
 764	if (sdev->readonly)
 765		goto did_not_correct_error;
 766
 767	if (!is_metadata && !have_csum) {
 768		struct scrub_fixup_nodatasum *fixup_nodatasum;
 769
 770		/*
 771		 * !is_metadata and !have_csum, this means that the data
 772		 * might not be COW'ed, that it might be modified
 773		 * concurrently. The general strategy to work on the
 774		 * commit root does not help in the case when COW is not
 775		 * used.
 776		 */
 777		fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
 778		if (!fixup_nodatasum)
 779			goto did_not_correct_error;
 780		fixup_nodatasum->sdev = sdev;
 781		fixup_nodatasum->logical = logical;
 782		fixup_nodatasum->root = fs_info->extent_root;
 783		fixup_nodatasum->mirror_num = failed_mirror_index + 1;
 784		/*
 785		 * increment scrubs_running to prevent cancel requests from
 786		 * completing as long as a fixup worker is running. we must also
 787		 * increment scrubs_paused to prevent deadlocking on pause
 788		 * requests used for transaction commits (as the worker uses a
 789		 * transaction context). it is safe to regard the fixup worker
 790		 * as paused for all practical matters. effectively, we only
 791		 * avoid cancellation requests from completing.
 792		 */
 793		mutex_lock(&fs_info->scrub_lock);
 794		atomic_inc(&fs_info->scrubs_running);
 795		atomic_inc(&fs_info->scrubs_paused);
 796		mutex_unlock(&fs_info->scrub_lock);
 797		atomic_inc(&sdev->fixup_cnt);
 798		fixup_nodatasum->work.func = scrub_fixup_nodatasum;
 799		btrfs_queue_worker(&fs_info->scrub_workers,
 800				   &fixup_nodatasum->work);
 801		goto out;
 802	}
 803
 804	/*
 805	 * now build and submit the bios for the other mirrors, check
 806	 * checksums
 807	 */
 808	for (mirror_index = 0;
 809	     mirror_index < BTRFS_MAX_MIRRORS &&
 810	     sblocks_for_recheck[mirror_index].page_count > 0;
 811	     mirror_index++) {
 812		if (mirror_index == failed_mirror_index)
 813			continue;
 814
 815		/* build and submit the bios, check checksums */
 816		ret = scrub_recheck_block(fs_info,
 817					  sblocks_for_recheck + mirror_index,
 818					  is_metadata, have_csum, csum,
 819					  generation, sdev->csum_size);
 820		if (ret)
 821			goto did_not_correct_error;
 822	}
 823
 824	/*
 825	 * first try to pick the mirror which is completely without I/O
 826	 * errors and also does not have a checksum error.
 827	 * If one is found, and if a checksum is present, the full block
 828	 * that is known to contain an error is rewritten. Afterwards
 829	 * the block is known to be corrected.
 830	 * If a mirror is found which is completely correct, and no
 831	 * checksum is present, only those pages are rewritten that had
 832	 * an I/O error in the block to be repaired, since it cannot be
 833	 * determined which copy of the other pages is better (and it
 834	 * could happen otherwise that a correct page would be
 835	 * overwritten by a bad one).
 836	 */
 837	for (mirror_index = 0;
 838	     mirror_index < BTRFS_MAX_MIRRORS &&
 839	     sblocks_for_recheck[mirror_index].page_count > 0;
 840	     mirror_index++) {
 841		struct scrub_block *sblock_other = sblocks_for_recheck +
 842						   mirror_index;
 843
 844		if (!sblock_other->header_error &&
 845		    !sblock_other->checksum_error &&
 846		    sblock_other->no_io_error_seen) {
 847			int force_write = is_metadata || have_csum;
 848
 849			ret = scrub_repair_block_from_good_copy(sblock_bad,
 850								sblock_other,
 851								force_write);
 852			if (0 == ret)
 853				goto corrected_error;
 854		}
 855	}
 856
 857	/*
 858	 * in case of I/O errors in the area that is supposed to be
 859	 * repaired, continue by picking good copies of those pages.
 860	 * Select the good pages from mirrors to rewrite bad pages from
 861	 * the area to fix. Afterwards verify the checksum of the block
 862	 * that is supposed to be repaired. This verification step is
 863	 * only done for the purpose of statistics counting and for the
 864	 * final scrub report on whether errors remain.
 865	 * A perfect algorithm could make use of the checksum and try
 866	 * all possible combinations of pages from the different mirrors
 867	 * until the checksum verification succeeds. For example, when
 868	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
 869	 * of mirror #2 is readable but the final checksum test fails,
 870	 * then the 2nd page of mirror #3 could be tried, to see whether
 871	 * the final checksum then succeeds. But this would be a rare
 872	 * exception and is therefore not implemented. At least it is
 873	 * avoided that the good copy is overwritten.
 874	 * A more useful improvement would be to pick the sectors
 875	 * without I/O error based on sector sizes (512 bytes on legacy
 876	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
 877	 * mirror could be repaired by taking 512 bytes of a different
 878	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
 879	 * area are unreadable.
 880	 */
 881
 882	/* can only fix I/O errors from here on */
 883	if (sblock_bad->no_io_error_seen)
 884		goto did_not_correct_error;
 885
 886	success = 1;
 887	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
 888		struct scrub_page *page_bad = sblock_bad->pagev + page_num;
 889
 890		if (!page_bad->io_error)
 891			continue;
 892
 893		for (mirror_index = 0;
 894		     mirror_index < BTRFS_MAX_MIRRORS &&
 895		     sblocks_for_recheck[mirror_index].page_count > 0;
 896		     mirror_index++) {
 897			struct scrub_block *sblock_other = sblocks_for_recheck +
 898							   mirror_index;
 899			struct scrub_page *page_other = sblock_other->pagev +
 900							page_num;
 901
 902			if (!page_other->io_error) {
 903				ret = scrub_repair_page_from_good_copy(
 904					sblock_bad, sblock_other, page_num, 0);
 905				if (0 == ret) {
 906					page_bad->io_error = 0;
 907					break; /* succeeded for this page */
 908				}
 909			}
 910		}
 911
 912		if (page_bad->io_error) {
 913			/* did not find a mirror to copy the page from */
 914			success = 0;
 915		}
 916	}
 917
 918	if (success) {
 919		if (is_metadata || have_csum) {
 920			/*
 921			 * need to verify the checksum now that all
 922			 * sectors on disk are repaired (the write
 923			 * request for data to be repaired is on its way).
 924			 * Just be lazy and use scrub_recheck_block()
 925			 * which re-reads the data before the checksum
 926			 * is verified, but most likely the data comes out
 927			 * of the page cache.
 928			 */
 929			ret = scrub_recheck_block(fs_info, sblock_bad,
 930						  is_metadata, have_csum, csum,
 931						  generation, sdev->csum_size);
 932			if (!ret && !sblock_bad->header_error &&
 933			    !sblock_bad->checksum_error &&
 934			    sblock_bad->no_io_error_seen)
 935				goto corrected_error;
 936			else
 937				goto did_not_correct_error;
 938		} else {
 939corrected_error:
 940			spin_lock(&sdev->stat_lock);
 941			sdev->stat.corrected_errors++;
 942			spin_unlock(&sdev->stat_lock);
 943			printk_ratelimited_in_rcu(KERN_ERR
 944				"btrfs: fixed up error at logical %llu on dev %s\n",
 945				(unsigned long long)logical,
 946				rcu_str_deref(sdev->dev->name));
 947		}
 948	} else {
 949did_not_correct_error:
 950		spin_lock(&sdev->stat_lock);
 951		sdev->stat.uncorrectable_errors++;
 952		spin_unlock(&sdev->stat_lock);
 953		printk_ratelimited_in_rcu(KERN_ERR
 954			"btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
 955			(unsigned long long)logical,
 956			rcu_str_deref(sdev->dev->name));
 957	}
 958
 959out:
 960	if (sblocks_for_recheck) {
 961		for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
 962		     mirror_index++) {
 963			struct scrub_block *sblock = sblocks_for_recheck +
 964						     mirror_index;
 965			int page_index;
 966
 967			for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
 968			     page_index++)
 969				if (sblock->pagev[page_index].page)
 970					__free_page(
 971						sblock->pagev[page_index].page);
 972		}
 973		kfree(sblocks_for_recheck);
 974	}
 975
 976	return 0;
 977}
 978
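    /*
     * map the logical range page by page and fill one scrub_block per
     * mirror with the physical locations and freshly allocated pages
     */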
 979static int scrub_setup_recheck_block(struct scrub_dev *sdev,
 980				     struct btrfs_mapping_tree *map_tree,
 981				     u64 length, u64 logical,
 982				     struct scrub_block *sblocks_for_recheck)
 983{
 984	int page_index;
 985	int mirror_index;
 986	int ret;
 987
 988	/*
 989	 * note: the three members sdev, ref_count and outstanding_pages
 990	 * are not used (and not set) in the blocks that are used for
 991	 * the recheck procedure
 992	 */
 993
 994	page_index = 0;
 995	while (length > 0) {
 996		u64 sublen = min_t(u64, length, PAGE_SIZE);
 997		u64 mapped_length = sublen;
 998		struct btrfs_bio *bbio = NULL;
 999
1000		/*
1001		 * with a length of PAGE_SIZE, each returned stripe
1002		 * represents one mirror
1003		 */
1004		ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
1005				      &bbio, 0);
1006		if (ret || !bbio || mapped_length < sublen) {
1007			kfree(bbio);
1008			return -EIO;
1009		}
1010
1011		BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
1012		for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
1013		     mirror_index++) {
1014			struct scrub_block *sblock;
1015			struct scrub_page *page;
1016
1017			if (mirror_index >= BTRFS_MAX_MIRRORS)
1018				continue;
1019
1020			sblock = sblocks_for_recheck + mirror_index;
1021			page = sblock->pagev + page_index;
1022			page->logical = logical;
1023			page->physical = bbio->stripes[mirror_index].physical;
1024			/* for missing devices, dev->bdev is NULL */
1025			page->dev = bbio->stripes[mirror_index].dev;
1026			page->mirror_num = mirror_index + 1;
1027			page->page = alloc_page(GFP_NOFS);
1028			if (!page->page) {
1029				spin_lock(&sdev->stat_lock);
1030				sdev->stat.malloc_errors++;
1031				spin_unlock(&sdev->stat_lock);
1032				return -ENOMEM;
1033			}
1034			sblock->page_count++;
1035		}
1036		kfree(bbio);
1037		length -= sublen;
1038		logical += sublen;
1039		page_index++;
1040	}
1041
1042	return 0;
1043}
1044
1045/*
1046 * this function will check the on disk data for checksum errors, header
1047 * errors and read I/O errors. If any I/O errors happen, the exact pages
1048 * which are errored are marked as being bad. The goal is to enable scrub
1049 * to take those pages that are not errored from all the mirrors so that
1050 * the pages that are errored in the just handled mirror can be repaired.
1051 */
1052static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1053			       struct scrub_block *sblock, int is_metadata,
1054			       int have_csum, u8 *csum, u64 generation,
1055			       u16 csum_size)
1056{
1057	int page_num;
1058
1059	sblock->no_io_error_seen = 1;
1060	sblock->header_error = 0;
1061	sblock->checksum_error = 0;
1062
1063	for (page_num = 0; page_num < sblock->page_count; page_num++) {
1064		struct bio *bio;
1065		int ret;
1066		struct scrub_page *page = sblock->pagev + page_num;
1067		DECLARE_COMPLETION_ONSTACK(complete);
1068
1069		if (page->dev->bdev == NULL) {
1070			page->io_error = 1;
1071			sblock->no_io_error_seen = 0;
1072			continue;
1073		}
1074
1075		BUG_ON(!page->page);
1076		bio = bio_alloc(GFP_NOFS, 1);
1077		if (!bio)
1078			return -EIO;
1079		bio->bi_bdev = page->dev->bdev;
1080		bio->bi_sector = page->physical >> 9;
1081		bio->bi_end_io = scrub_complete_bio_end_io;
1082		bio->bi_private = &complete;
1083
1084		ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
1085		if (PAGE_SIZE != ret) {
1086			bio_put(bio);
1087			return -EIO;
1088		}
1089		btrfsic_submit_bio(READ, bio);
1090
1091		/* this will also unplug the queue */
1092		wait_for_completion(&complete);
1093
1094		page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
1095		if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1096			sblock->no_io_error_seen = 0;
1097		bio_put(bio);
1098	}
1099
1100	if (sblock->no_io_error_seen)
1101		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1102					     have_csum, csum, generation,
1103					     csum_size);
1104
1105	return 0;
1106}
1107
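    /*
     * recompute the checksum of a block that was already read in and, for
     * metadata, validate the header fields against the expected values
     */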
1108static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1109					 struct scrub_block *sblock,
1110					 int is_metadata, int have_csum,
1111					 const u8 *csum, u64 generation,
1112					 u16 csum_size)
1113{
1114	int page_num;
1115	u8 calculated_csum[BTRFS_CSUM_SIZE];
1116	u32 crc = ~(u32)0;
1117	struct btrfs_root *root = fs_info->extent_root;
1118	void *mapped_buffer;
1119
1120	BUG_ON(!sblock->pagev[0].page);
1121	if (is_metadata) {
1122		struct btrfs_header *h;
1123
1124		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
1125		h = (struct btrfs_header *)mapped_buffer;
1126
1127		if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
1128		    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1129		    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1130			   BTRFS_UUID_SIZE)) {
1131			sblock->header_error = 1;
1132		} else if (generation != le64_to_cpu(h->generation)) {
1133			sblock->header_error = 1;
1134			sblock->generation_error = 1;
1135		}
1136		csum = h->csum;
1137	} else {
1138		if (!have_csum)
1139			return;
1140
1141		mapped_buffer = kmap_atomic(sblock->pagev[0].page);
1142	}
1143
1144	for (page_num = 0;;) {
1145		if (page_num == 0 && is_metadata)
1146			crc = btrfs_csum_data(root,
1147				((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1148				crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1149		else
1150			crc = btrfs_csum_data(root, mapped_buffer, crc,
1151					      PAGE_SIZE);
1152
1153		kunmap_atomic(mapped_buffer);
1154		page_num++;
1155		if (page_num >= sblock->page_count)
1156			break;
1157		BUG_ON(!sblock->pagev[page_num].page);
1158
1159		mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
1160	}
1161
1162	btrfs_csum_final(crc, calculated_csum);
1163	if (memcmp(calculated_csum, csum, csum_size))
1164		sblock->checksum_error = 1;
1165}
1166
1167static void scrub_complete_bio_end_io(struct bio *bio, int err)
1168{
1169	complete((struct completion *)bio->bi_private);
1170}
1171
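    /* rewrite the pages of the bad block from the corresponding good mirror */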
1172static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1173					     struct scrub_block *sblock_good,
1174					     int force_write)
1175{
1176	int page_num;
1177	int ret = 0;
1178
1179	for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1180		int ret_sub;
1181
1182		ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1183							   sblock_good,
1184							   page_num,
1185							   force_write);
1186		if (ret_sub)
1187			ret = ret_sub;
1188	}
1189
1190	return ret;
1191}
1192
1193static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1194					    struct scrub_block *sblock_good,
1195					    int page_num, int force_write)
1196{
1197	struct scrub_page *page_bad = sblock_bad->pagev + page_num;
1198	struct scrub_page *page_good = sblock_good->pagev + page_num;
1199
1200	BUG_ON(sblock_bad->pagev[page_num].page == NULL);
1201	BUG_ON(sblock_good->pagev[page_num].page == NULL);
1202	if (force_write || sblock_bad->header_error ||
1203	    sblock_bad->checksum_error || page_bad->io_error) {
1204		struct bio *bio;
1205		int ret;
1206		DECLARE_COMPLETION_ONSTACK(complete);
1207
1208		bio = bio_alloc(GFP_NOFS, 1);
1209		if (!bio)
1210			return -EIO;
1211		bio->bi_bdev = page_bad->dev->bdev;
1212		bio->bi_sector = page_bad->physical >> 9;
1213		bio->bi_end_io = scrub_complete_bio_end_io;
1214		bio->bi_private = &complete;
1215
1216		ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1217		if (PAGE_SIZE != ret) {
1218			bio_put(bio);
1219			return -EIO;
1220		}
1221		btrfsic_submit_bio(WRITE, bio);
1222
1223		/* this will also unplug the queue */
1224		wait_for_completion(&complete);
1225		if (!bio_flagged(bio, BIO_UPTODATE)) {
1226			btrfs_dev_stat_inc_and_print(page_bad->dev,
1227				BTRFS_DEV_STAT_WRITE_ERRS);
1228			bio_put(bio);
1229			return -EIO;
1230		}
1231		bio_put(bio);
1232	}
1233
1234	return 0;
1235}
1236
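    /*
     * dispatch checksum verification based on the extent flags of the
     * first page; blocks that fail are handed to
     * scrub_handle_errored_block()
     */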
1237static void scrub_checksum(struct scrub_block *sblock)
1238{
1239	u64 flags;
1240	int ret;
1241
1242	BUG_ON(sblock->page_count < 1);
1243	flags = sblock->pagev[0].flags;
1244	ret = 0;
1245	if (flags & BTRFS_EXTENT_FLAG_DATA)
1246		ret = scrub_checksum_data(sblock);
1247	else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1248		ret = scrub_checksum_tree_block(sblock);
1249	else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1250		(void)scrub_checksum_super(sblock);
1251	else
1252		WARN_ON(1);
1253	if (ret)
1254		scrub_handle_errored_block(sblock);
1255}
1256
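    /* verify the data checksum of the block, returns 1 on mismatch */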
1257static int scrub_checksum_data(struct scrub_block *sblock)
1258{
1259	struct scrub_dev *sdev = sblock->sdev;
1260	u8 csum[BTRFS_CSUM_SIZE];
1261	u8 *on_disk_csum;
1262	struct page *page;
1263	void *buffer;
1264	u32 crc = ~(u32)0;
1265	int fail = 0;
1266	struct btrfs_root *root = sdev->dev->dev_root;
1267	u64 len;
1268	int index;
1269
1270	BUG_ON(sblock->page_count < 1);
1271	if (!sblock->pagev[0].have_csum)
1272		return 0;
1273
1274	on_disk_csum = sblock->pagev[0].csum;
1275	page = sblock->pagev[0].page;
1276	buffer = kmap_atomic(page);
1277
1278	len = sdev->sectorsize;
1279	index = 0;
1280	for (;;) {
1281		u64 l = min_t(u64, len, PAGE_SIZE);
1282
1283		crc = btrfs_csum_data(root, buffer, crc, l);
1284		kunmap_atomic(buffer);
1285		len -= l;
1286		if (len == 0)
1287			break;
1288		index++;
1289		BUG_ON(index >= sblock->page_count);
1290		BUG_ON(!sblock->pagev[index].page);
1291		page = sblock->pagev[index].page;
1292		buffer = kmap_atomic(page);
1293	}
1294
1295	btrfs_csum_final(crc, csum);
1296	if (memcmp(csum, on_disk_csum, sdev->csum_size))
1297		fail = 1;
1298
1299	return fail;
1300}
1301
1302static int scrub_checksum_tree_block(struct scrub_block *sblock)
1303{
1304	struct scrub_dev *sdev = sblock->sdev;
1305	struct btrfs_header *h;
1306	struct btrfs_root *root = sdev->dev->dev_root;
1307	struct btrfs_fs_info *fs_info = root->fs_info;
1308	u8 calculated_csum[BTRFS_CSUM_SIZE];
1309	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1310	struct page *page;
1311	void *mapped_buffer;
1312	u64 mapped_size;
1313	void *p;
1314	u32 crc = ~(u32)0;
1315	int fail = 0;
1316	int crc_fail = 0;
1317	u64 len;
1318	int index;
1319
1320	BUG_ON(sblock->page_count < 1);
1321	page = sblock->pagev[0].page;
1322	mapped_buffer = kmap_atomic(page);
1323	h = (struct btrfs_header *)mapped_buffer;
1324	memcpy(on_disk_csum, h->csum, sdev->csum_size);
1325
1326	/*
1327	 * we don't use the getter functions here, as we
1328	 * a) don't have an extent buffer and
1329	 * b) the page is already kmapped
1330	 */
1331
1332	if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
1333		++fail;
1334
1335	if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
1336		++fail;
1337
1338	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1339		++fail;
1340
1341	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1342		   BTRFS_UUID_SIZE))
1343		++fail;
1344
1345	BUG_ON(sdev->nodesize != sdev->leafsize);
1346	len = sdev->nodesize - BTRFS_CSUM_SIZE;
1347	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1348	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1349	index = 0;
1350	for (;;) {
1351		u64 l = min_t(u64, len, mapped_size);
1352
1353		crc = btrfs_csum_data(root, p, crc, l);
1354		kunmap_atomic(mapped_buffer);
1355		len -= l;
1356		if (len == 0)
1357			break;
1358		index++;
1359		BUG_ON(index >= sblock->page_count);
1360		BUG_ON(!sblock->pagev[index].page);
1361		page = sblock->pagev[index].page;
1362		mapped_buffer = kmap_atomic(page);
1363		mapped_size = PAGE_SIZE;
1364		p = mapped_buffer;
1365	}
1366
1367	btrfs_csum_final(crc, calculated_csum);
1368	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1369		++crc_fail;
1370
1371	return fail || crc_fail;
1372}
1373
1374static int scrub_checksum_super(struct scrub_block *sblock)
1375{
1376	struct btrfs_super_block *s;
1377	struct scrub_dev *sdev = sblock->sdev;
1378	struct btrfs_root *root = sdev->dev->dev_root;
1379	struct btrfs_fs_info *fs_info = root->fs_info;
1380	u8 calculated_csum[BTRFS_CSUM_SIZE];
1381	u8 on_disk_csum[BTRFS_CSUM_SIZE];
1382	struct page *page;
1383	void *mapped_buffer;
1384	u64 mapped_size;
1385	void *p;
1386	u32 crc = ~(u32)0;
1387	int fail_gen = 0;
1388	int fail_cor = 0;
1389	u64 len;
1390	int index;
1391
1392	BUG_ON(sblock->page_count < 1);
1393	page = sblock->pagev[0].page;
1394	mapped_buffer = kmap_atomic(page);
1395	s = (struct btrfs_super_block *)mapped_buffer;
1396	memcpy(on_disk_csum, s->csum, sdev->csum_size);
1397
1398	if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
1399		++fail_cor;
1400
1401	if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
1402		++fail_gen;
1403
1404	if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1405		++fail_cor;
1406
1407	len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1408	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1409	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1410	index = 0;
1411	for (;;) {
1412		u64 l = min_t(u64, len, mapped_size);
1413
1414		crc = btrfs_csum_data(root, p, crc, l);
1415		kunmap_atomic(mapped_buffer);
1416		len -= l;
1417		if (len == 0)
1418			break;
1419		index++;
1420		BUG_ON(index >= sblock->page_count);
1421		BUG_ON(!sblock->pagev[index].page);
1422		page = sblock->pagev[index].page;
1423		mapped_buffer = kmap_atomic(page);
1424		mapped_size = PAGE_SIZE;
1425		p = mapped_buffer;
1426	}
1427
1428	btrfs_csum_final(crc, calculated_csum);
1429	if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1430		++fail_cor;
1431
1432	if (fail_cor + fail_gen) {
1433		/*
1434		 * if we find an error in a super block, we just report it.
1435		 * They will get written with the next transaction commit
1436		 * anyway
1437		 */
1438		spin_lock(&sdev->stat_lock);
1439		++sdev->stat.super_errors;
1440		spin_unlock(&sdev->stat_lock);
1441		if (fail_cor)
1442			btrfs_dev_stat_inc_and_print(sdev->dev,
1443				BTRFS_DEV_STAT_CORRUPTION_ERRS);
1444		else
1445			btrfs_dev_stat_inc_and_print(sdev->dev,
1446				BTRFS_DEV_STAT_GENERATION_ERRS);
1447	}
1448
1449	return fail_cor + fail_gen;
1450}
1451
1452static void scrub_block_get(struct scrub_block *sblock)
1453{
1454	atomic_inc(&sblock->ref_count);
1455}
1456
1457static void scrub_block_put(struct scrub_block *sblock)
1458{
1459	if (atomic_dec_and_test(&sblock->ref_count)) {
1460		int i;
1461
1462		for (i = 0; i < sblock->page_count; i++)
1463			if (sblock->pagev[i].page)
1464				__free_page(sblock->pagev[i].page);
1465		kfree(sblock);
1466	}
1467}
1468
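    /* submit the currently filled scrub bio of the device, if any */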
1469static void scrub_submit(struct scrub_dev *sdev)
1470{
1471	struct scrub_bio *sbio;
1472
1473	if (sdev->curr == -1)
1474		return;
1475
1476	sbio = sdev->bios[sdev->curr];
1477	sdev->curr = -1;
1478	atomic_inc(&sdev->in_flight);
1479
1480	btrfsic_submit_bio(READ, sbio->bio);
1481}
1482
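    /*
     * queue a page into the current scrub bio; a fresh bio is started when
     * none is active or when the page is not physically and logically
     * contiguous to it, and a full bio is submitted right away
     */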
1483static int scrub_add_page_to_bio(struct scrub_dev *sdev,
1484				 struct scrub_page *spage)
1485{
1486	struct scrub_block *sblock = spage->sblock;
1487	struct scrub_bio *sbio;
1488	int ret;
1489
1490again:
1491	/*
1492	 * grab a fresh bio or wait for one to become available
1493	 */
1494	while (sdev->curr == -1) {
1495		spin_lock(&sdev->list_lock);
1496		sdev->curr = sdev->first_free;
1497		if (sdev->curr != -1) {
1498			sdev->first_free = sdev->bios[sdev->curr]->next_free;
1499			sdev->bios[sdev->curr]->next_free = -1;
1500			sdev->bios[sdev->curr]->page_count = 0;
1501			spin_unlock(&sdev->list_lock);
1502		} else {
1503			spin_unlock(&sdev->list_lock);
1504			wait_event(sdev->list_wait, sdev->first_free != -1);
1505		}
1506	}
1507	sbio = sdev->bios[sdev->curr];
1508	if (sbio->page_count == 0) {
1509		struct bio *bio;
1510
1511		sbio->physical = spage->physical;
1512		sbio->logical = spage->logical;
1513		bio = sbio->bio;
1514		if (!bio) {
1515			bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
1516			if (!bio)
1517				return -ENOMEM;
1518			sbio->bio = bio;
1519		}
1520
1521		bio->bi_private = sbio;
1522		bio->bi_end_io = scrub_bio_end_io;
1523		bio->bi_bdev = sdev->dev->bdev;
1524		bio->bi_sector = spage->physical >> 9;
1525		sbio->err = 0;
1526	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1527		   spage->physical ||
1528		   sbio->logical + sbio->page_count * PAGE_SIZE !=
1529		   spage->logical) {
1530		scrub_submit(sdev);
1531		goto again;
1532	}
1533
1534	sbio->pagev[sbio->page_count] = spage;
1535	ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1536	if (ret != PAGE_SIZE) {
1537		if (sbio->page_count < 1) {
1538			bio_put(sbio->bio);
1539			sbio->bio = NULL;
1540			return -EIO;
1541		}
1542		scrub_submit(sdev);
1543		goto again;
1544	}
1545
1546	scrub_block_get(sblock); /* one for the added page */
1547	atomic_inc(&sblock->outstanding_pages);
1548	sbio->page_count++;
1549	if (sbio->page_count == sdev->pages_per_bio)
1550		scrub_submit(sdev);
1551
1552	return 0;
1553}
1554
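    /*
     * build a scrub_block for the given range, one page per PAGE_SIZE,
     * attach the checksum if one is given and queue all pages into bios
     */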
1555static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
1556		       u64 physical, u64 flags, u64 gen, int mirror_num,
1557		       u8 *csum, int force)
1558{
1559	struct scrub_block *sblock;
1560	int index;
1561
1562	sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
1563	if (!sblock) {
1564		spin_lock(&sdev->stat_lock);
1565		sdev->stat.malloc_errors++;
1566		spin_unlock(&sdev->stat_lock);
1567		return -ENOMEM;
1568	}
1569
1570	/* one ref inside this function, plus one for each page later on */
1571	atomic_set(&sblock->ref_count, 1);
1572	sblock->sdev = sdev;
1573	sblock->no_io_error_seen = 1;
1574
1575	for (index = 0; len > 0; index++) {
1576		struct scrub_page *spage = sblock->pagev + index;
1577		u64 l = min_t(u64, len, PAGE_SIZE);
1578
1579		BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
1580		spage->page = alloc_page(GFP_NOFS);
1581		if (!spage->page) {
1582			spin_lock(&sdev->stat_lock);
1583			sdev->stat.malloc_errors++;
1584			spin_unlock(&sdev->stat_lock);
1585			while (index > 0) {
1586				index--;
1587				__free_page(sblock->pagev[index].page);
1588			}
1589			kfree(sblock);
1590			return -ENOMEM;
1591		}
1592		spage->sblock = sblock;
1593		spage->dev = sdev->dev;
1594		spage->flags = flags;
1595		spage->generation = gen;
1596		spage->logical = logical;
1597		spage->physical = physical;
1598		spage->mirror_num = mirror_num;
1599		if (csum) {
1600			spage->have_csum = 1;
1601			memcpy(spage->csum, csum, sdev->csum_size);
1602		} else {
1603			spage->have_csum = 0;
1604		}
1605		sblock->page_count++;
1606		len -= l;
1607		logical += l;
1608		physical += l;
1609	}
1610
1611	BUG_ON(sblock->page_count == 0);
1612	for (index = 0; index < sblock->page_count; index++) {
1613		struct scrub_page *spage = sblock->pagev + index;
1614		int ret;
1615
1616		ret = scrub_add_page_to_bio(sdev, spage);
1617		if (ret) {
1618			scrub_block_put(sblock);
1619			return ret;
1620		}
1621	}
1622
1623	if (force)
1624		scrub_submit(sdev);
1625
1626	/* last one frees, either here or in bio completion for last page */
1627	scrub_block_put(sblock);
1628	return 0;
1629}
1630
1631static void scrub_bio_end_io(struct bio *bio, int err)
1632{
1633	struct scrub_bio *sbio = bio->bi_private;
1634	struct scrub_dev *sdev = sbio->sdev;
1635	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1636
1637	sbio->err = err;
1638	sbio->bio = bio;
1639
1640	btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
1641}
1642
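    /*
     * worker run after bio completion: mark I/O errors on the affected
     * pages, complete the scrub_blocks whose pages have all finished and
     * put the bio back onto the free list
     */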
1643static void scrub_bio_end_io_worker(struct btrfs_work *work)
1644{
1645	struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1646	struct scrub_dev *sdev = sbio->sdev;
1647	int i;
1648
1649	BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
1650	if (sbio->err) {
1651		for (i = 0; i < sbio->page_count; i++) {
1652			struct scrub_page *spage = sbio->pagev[i];
1653
1654			spage->io_error = 1;
1655			spage->sblock->no_io_error_seen = 0;
1656		}
1657	}
1658
1659	/* now complete the scrub_block items that have all pages completed */
1660	for (i = 0; i < sbio->page_count; i++) {
1661		struct scrub_page *spage = sbio->pagev[i];
1662		struct scrub_block *sblock = spage->sblock;
1663
1664		if (atomic_dec_and_test(&sblock->outstanding_pages))
1665			scrub_block_complete(sblock);
1666		scrub_block_put(sblock);
1667	}
1668
1669	if (sbio->err) {
1670		/* what is this good for??? */
1671		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1672		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
1673		sbio->bio->bi_phys_segments = 0;
1674		sbio->bio->bi_idx = 0;
1675
1676		for (i = 0; i < sbio->page_count; i++) {
1677			struct bio_vec *bi;
1678			bi = &sbio->bio->bi_io_vec[i];
1679			bi->bv_offset = 0;
1680			bi->bv_len = PAGE_SIZE;
1681		}
1682	}
1683
1684	bio_put(sbio->bio);
1685	sbio->bio = NULL;
1686	spin_lock(&sdev->list_lock);
1687	sbio->next_free = sdev->first_free;
1688	sdev->first_free = sbio->index;
1689	spin_unlock(&sdev->list_lock);
1690	atomic_dec(&sdev->in_flight);
1691	wake_up(&sdev->list_wait);
1692}
1693
1694static void scrub_block_complete(struct scrub_block *sblock)
1695{
1696	if (!sblock->no_io_error_seen)
1697		scrub_handle_errored_block(sblock);
1698	else
1699		scrub_checksum(sblock);
1700}
1701
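    /*
     * look up the checksum for the sector at the given logical address in
     * the pre-collected csum list; returns 1 and copies it to csum if
     * found, 0 otherwise
     */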
1702static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
1703			   u8 *csum)
1704{
1705	struct btrfs_ordered_sum *sum = NULL;
1706	int ret = 0;
1707	unsigned long i;
1708	unsigned long num_sectors;
1709
1710	while (!list_empty(&sdev->csum_list)) {
1711		sum = list_first_entry(&sdev->csum_list,
1712				       struct btrfs_ordered_sum, list);
1713		if (sum->bytenr > logical)
1714			return 0;
1715		if (sum->bytenr + sum->len > logical)
1716			break;
1717
1718		++sdev->stat.csum_discards;
1719		list_del(&sum->list);
1720		kfree(sum);
1721		sum = NULL;
1722	}
1723	if (!sum)
1724		return 0;
1725
1726	num_sectors = sum->len / sdev->sectorsize;
1727	for (i = 0; i < num_sectors; ++i) {
1728		if (sum->sums[i].bytenr == logical) {
1729			memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
1730			ret = 1;
1731			break;
1732		}
1733	}
1734	if (ret && i == num_sectors - 1) {
1735		list_del(&sum->list);
1736		kfree(sum);
1737	}
1738	return ret;
1739}
1740
1741/* scrub extent tries to collect up to 64 kB for each bio */
1742static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
1743			u64 physical, u64 flags, u64 gen, int mirror_num)
1744{
1745	int ret;
1746	u8 csum[BTRFS_CSUM_SIZE];
1747	u32 blocksize;
1748
1749	if (flags & BTRFS_EXTENT_FLAG_DATA) {
1750		blocksize = sdev->sectorsize;
1751		spin_lock(&sdev->stat_lock);
1752		sdev->stat.data_extents_scrubbed++;
1753		sdev->stat.data_bytes_scrubbed += len;
1754		spin_unlock(&sdev->stat_lock);
1755	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1756		BUG_ON(sdev->nodesize != sdev->leafsize);
1757		blocksize = sdev->nodesize;
1758		spin_lock(&sdev->stat_lock);
1759		sdev->stat.tree_extents_scrubbed++;
1760		sdev->stat.tree_bytes_scrubbed += len;
1761		spin_unlock(&sdev->stat_lock);
1762	} else {
1763		blocksize = sdev->sectorsize;
1764		BUG_ON(1);
1765	}
1766
1767	while (len) {
1768		u64 l = min_t(u64, len, blocksize);
1769		int have_csum = 0;
1770
1771		if (flags & BTRFS_EXTENT_FLAG_DATA) {
1772			/* push csums to sbio */
1773			have_csum = scrub_find_csum(sdev, logical, l, csum);
1774			if (have_csum == 0)
1775				++sdev->stat.no_csum;
1776		}
1777		ret = scrub_pages(sdev, logical, l, physical, flags, gen,
1778				  mirror_num, have_csum ? csum : NULL, 0);
1779		if (ret)
1780			return ret;
1781		len -= l;
1782		logical += l;
1783		physical += l;
1784	}
1785	return 0;
1786}
1787
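    /*
     * scrub one stripe of a chunk: walk the extent tree for the stripe,
     * collect the data checksums in advance and scrub every extent found,
     * honoring pause and cancel requests along the way
     */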
1788static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
1789	struct map_lookup *map, int num, u64 base, u64 length)
1790{
1791	struct btrfs_path *path;
1792	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1793	struct btrfs_root *root = fs_info->extent_root;
1794	struct btrfs_root *csum_root = fs_info->csum_root;
1795	struct btrfs_extent_item *extent;
1796	struct blk_plug plug;
1797	u64 flags;
1798	int ret;
1799	int slot;
1800	int i;
1801	u64 nstripes;
1802	struct extent_buffer *l;
1803	struct btrfs_key key;
1804	u64 physical;
1805	u64 logical;
1806	u64 generation;
1807	int mirror_num;
1808	struct reada_control *reada1;
1809	struct reada_control *reada2;
1810	struct btrfs_key key_start;
1811	struct btrfs_key key_end;
1812
1813	u64 increment = map->stripe_len;
1814	u64 offset;
1815
1816	nstripes = length;
1817	offset = 0;
1818	do_div(nstripes, map->stripe_len);
1819	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1820		offset = map->stripe_len * num;
1821		increment = map->stripe_len * map->num_stripes;
1822		mirror_num = 1;
1823	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1824		int factor = map->num_stripes / map->sub_stripes;
1825		offset = map->stripe_len * (num / map->sub_stripes);
1826		increment = map->stripe_len * factor;
1827		mirror_num = num % map->sub_stripes + 1;
1828	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1829		increment = map->stripe_len;
1830		mirror_num = num % map->num_stripes + 1;
1831	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1832		increment = map->stripe_len;
1833		mirror_num = num % map->num_stripes + 1;
1834	} else {
1835		increment = map->stripe_len;
1836		mirror_num = 1;
1837	}
1838
1839	path = btrfs_alloc_path();
1840	if (!path)
1841		return -ENOMEM;
1842
1843	/*
1844	 * work on commit root. The related disk blocks are static as
1845	 * long as COW is applied. This means it is safe to rewrite
1846	 * them to repair disk errors without any race conditions
1847	 */
1848	path->search_commit_root = 1;
1849	path->skip_locking = 1;
1850
1851	/*
1852	 * trigger the readahead for the extent tree and the csum tree and
1853	 * wait for completion. During readahead, the scrub is officially
1854	 * paused so as not to hold off transaction commits
1855	 */
1856	logical = base + offset;
1857
1858	wait_event(sdev->list_wait,
1859		   atomic_read(&sdev->in_flight) == 0);
1860	atomic_inc(&fs_info->scrubs_paused);
1861	wake_up(&fs_info->scrub_pause_wait);
1862
1863	/* FIXME it might be better to start readahead at commit root */
1864	key_start.objectid = logical;
1865	key_start.type = BTRFS_EXTENT_ITEM_KEY;
1866	key_start.offset = (u64)0;
1867	key_end.objectid = base + offset + nstripes * increment;
1868	key_end.type = BTRFS_EXTENT_ITEM_KEY;
1869	key_end.offset = (u64)0;
1870	reada1 = btrfs_reada_add(root, &key_start, &key_end);
1871
1872	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1873	key_start.type = BTRFS_EXTENT_CSUM_KEY;
1874	key_start.offset = logical;
1875	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1876	key_end.type = BTRFS_EXTENT_CSUM_KEY;
1877	key_end.offset = base + offset + nstripes * increment;
1878	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
1879
1880	if (!IS_ERR(reada1))
1881		btrfs_reada_wait(reada1);
1882	if (!IS_ERR(reada2))
1883		btrfs_reada_wait(reada2);
1884
1885	mutex_lock(&fs_info->scrub_lock);
1886	while (atomic_read(&fs_info->scrub_pause_req)) {
1887		mutex_unlock(&fs_info->scrub_lock);
1888		wait_event(fs_info->scrub_pause_wait,
1889		   atomic_read(&fs_info->scrub_pause_req) == 0);
1890		mutex_lock(&fs_info->scrub_lock);
1891	}
1892	atomic_dec(&fs_info->scrubs_paused);
1893	mutex_unlock(&fs_info->scrub_lock);
1894	wake_up(&fs_info->scrub_pause_wait);
1895
1896	/*
1897	 * collect all data csums for the stripe to avoid seeking during
1898	 * the scrub. This might currently (crc32) end up being about 1MB
1899	 */
1900	blk_start_plug(&plug);
1901
1902	/*
1903	 * now find all extents for each stripe and scrub them
1904	 */
1905	logical = base + offset;
1906	physical = map->stripes[num].physical;
1907	ret = 0;
1908	for (i = 0; i < nstripes; ++i) {
1909		/*
1910		 * canceled?
1911		 */
1912		if (atomic_read(&fs_info->scrub_cancel_req) ||
1913		    atomic_read(&sdev->cancel_req)) {
1914			ret = -ECANCELED;
1915			goto out;
1916		}
1917		/*
1918		 * check to see if we have to pause
1919		 */
1920		if (atomic_read(&fs_info->scrub_pause_req)) {
1921			/* push queued extents */
1922			scrub_submit(sdev);
1923			wait_event(sdev->list_wait,
1924				   atomic_read(&sdev->in_flight) == 0);
1925			atomic_inc(&fs_info->scrubs_paused);
1926			wake_up(&fs_info->scrub_pause_wait);
1927			mutex_lock(&fs_info->scrub_lock);
1928			while (atomic_read(&fs_info->scrub_pause_req)) {
1929				mutex_unlock(&fs_info->scrub_lock);
1930				wait_event(fs_info->scrub_pause_wait,
1931				   atomic_read(&fs_info->scrub_pause_req) == 0);
1932				mutex_lock(&fs_info->scrub_lock);
1933			}
1934			atomic_dec(&fs_info->scrubs_paused);
1935			mutex_unlock(&fs_info->scrub_lock);
1936			wake_up(&fs_info->scrub_pause_wait);
1937		}
1938
1939		ret = btrfs_lookup_csums_range(csum_root, logical,
1940					       logical + map->stripe_len - 1,
1941					       &sdev->csum_list, 1);
1942		if (ret)
1943			goto out;
1944
1945		key.objectid = logical;
1946		key.type = BTRFS_EXTENT_ITEM_KEY;
1947		key.offset = (u64)0;
1948
1949		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1950		if (ret < 0)
1951			goto out;
1952		if (ret > 0) {
1953			ret = btrfs_previous_item(root, path, 0,
1954						  BTRFS_EXTENT_ITEM_KEY);
1955			if (ret < 0)
1956				goto out;
1957			if (ret > 0) {
1958				/* there's no smaller item, so stick with the
1959				 * larger one */
1960				btrfs_release_path(path);
1961				ret = btrfs_search_slot(NULL, root, &key,
1962							path, 0, 0);
1963				if (ret < 0)
1964					goto out;
1965			}
1966		}
1967
1968		while (1) {
1969			l = path->nodes[0];
1970			slot = path->slots[0];
1971			if (slot >= btrfs_header_nritems(l)) {
1972				ret = btrfs_next_leaf(root, path);
1973				if (ret == 0)
1974					continue;
1975				if (ret < 0)
1976					goto out;
1977
1978				break;
1979			}
1980			btrfs_item_key_to_cpu(l, &key, slot);
1981
1982			if (key.objectid + key.offset <= logical)
1983				goto next;
1984
1985			if (key.objectid >= logical + map->stripe_len)
1986				break;
1987
1988			if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1989				goto next;
1990
1991			extent = btrfs_item_ptr(l, slot,
1992						struct btrfs_extent_item);
1993			flags = btrfs_extent_flags(l, extent);
1994			generation = btrfs_extent_generation(l, extent);
1995
1996			if (key.objectid < logical &&
1997			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
1998				printk(KERN_ERR
1999				       "btrfs scrub: tree block %llu spanning "
2000				       "stripes, ignored. logical=%llu\n",
2001				       (unsigned long long)key.objectid,
2002				       (unsigned long long)logical);
2003				goto next;
2004			}
2005
2006			/*
2007			 * trim extent to this stripe
2008			 */
2009			if (key.objectid < logical) {
2010				key.offset -= logical - key.objectid;
2011				key.objectid = logical;
2012			}
2013			if (key.objectid + key.offset >
2014			    logical + map->stripe_len) {
2015				key.offset = logical + map->stripe_len -
2016					     key.objectid;
2017			}
2018
2019			ret = scrub_extent(sdev, key.objectid, key.offset,
2020					   key.objectid - logical + physical,
2021					   flags, generation, mirror_num);
2022			if (ret)
2023				goto out;
2024
2025next:
2026			path->slots[0]++;
2027		}
2028		btrfs_release_path(path);
2029		logical += increment;
2030		physical += map->stripe_len;
2031		spin_lock(&sdev->stat_lock);
2032		sdev->stat.last_physical = physical;
2033		spin_unlock(&sdev->stat_lock);
2034	}
2035	/* push queued extents */
2036	scrub_submit(sdev);
2037
2038out:
2039	blk_finish_plug(&plug);
2040	btrfs_free_path(path);
2041	return ret < 0 ? ret : 0;
2042}
2043
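/*
 * Look up the chunk at @chunk_offset in the mapping tree and scrub every
 * stripe of it that is located on this device at @dev_offset.
 */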
2044static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
2045	u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
2046	u64 dev_offset)
2047{
2048	struct btrfs_mapping_tree *map_tree =
2049		&sdev->dev->dev_root->fs_info->mapping_tree;
2050	struct map_lookup *map;
2051	struct extent_map *em;
2052	int i;
2053	int ret = -EINVAL;
2054
2055	read_lock(&map_tree->map_tree.lock);
2056	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2057	read_unlock(&map_tree->map_tree.lock);
2058
2059	if (!em)
2060		return -EINVAL;
2061
2062	map = (struct map_lookup *)em->bdev;
2063	if (em->start != chunk_offset)
2064		goto out;
2065
2066	if (em->len < length)
2067		goto out;
2068
2069	for (i = 0; i < map->num_stripes; ++i) {
2070		if (map->stripes[i].dev == sdev->dev &&
2071		    map->stripes[i].physical == dev_offset) {
2072			ret = scrub_stripe(sdev, map, i, chunk_offset, length);
2073			if (ret)
2074				goto out;
2075		}
2076	}
2077out:
2078	free_extent_map(em);
2079
2080	return ret;
2081}
2082
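/*
 * Walk all dev extents of the scrubbed device that fall into [start, end)
 * and scrub the chunk each of them belongs to. A reference on the block
 * group pins the chunk while it is being scrubbed.
 */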
2083static noinline_for_stack
2084int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
2085{
2086	struct btrfs_dev_extent *dev_extent = NULL;
2087	struct btrfs_path *path;
2088	struct btrfs_root *root = sdev->dev->dev_root;
2089	struct btrfs_fs_info *fs_info = root->fs_info;
2090	u64 length;
2091	u64 chunk_tree;
2092	u64 chunk_objectid;
2093	u64 chunk_offset;
2094	int ret;
2095	int slot;
2096	struct extent_buffer *l;
2097	struct btrfs_key key;
2098	struct btrfs_key found_key;
2099	struct btrfs_block_group_cache *cache;
2100
2101	path = btrfs_alloc_path();
2102	if (!path)
2103		return -ENOMEM;
2104
2105	path->reada = 2;
2106	path->search_commit_root = 1;
2107	path->skip_locking = 1;
2108
2109	key.objectid = sdev->dev->devid;
2110	key.offset = 0ull;
2111	key.type = BTRFS_DEV_EXTENT_KEY;
2112
2113
2114	while (1) {
2115		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2116		if (ret < 0)
2117			break;
2118		if (ret > 0) {
2119			if (path->slots[0] >=
2120			    btrfs_header_nritems(path->nodes[0])) {
2121				ret = btrfs_next_leaf(root, path);
2122				if (ret)
2123					break;
2124			}
2125		}
2126
2127		l = path->nodes[0];
2128		slot = path->slots[0];
2129
2130		btrfs_item_key_to_cpu(l, &found_key, slot);
2131
2132		if (found_key.objectid != sdev->dev->devid)
2133			break;
2134
2135		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2136			break;
2137
2138		if (found_key.offset >= end)
2139			break;
2140
2141		if (found_key.offset < key.offset)
2142			break;
2143
2144		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2145		length = btrfs_dev_extent_length(l, dev_extent);
2146
2147		if (found_key.offset + length <= start) {
2148			key.offset = found_key.offset + length;
2149			btrfs_release_path(path);
2150			continue;
2151		}
2152
2153		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2154		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2155		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2156
2157		/*
2158		 * get a reference on the corresponding block group to prevent
2159		 * the chunk from going away while we scrub it
2160		 */
2161		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2162		if (!cache) {
2163			ret = -ENOENT;
2164			break;
2165		}
2166		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
2167				  chunk_offset, length, found_key.offset);
2168		btrfs_put_block_group(cache);
2169		if (ret)
2170			break;
2171
2172		key.offset = found_key.offset + length;
2173		btrfs_release_path(path);
2174	}
2175
2176	btrfs_free_path(path);
2177
2178	/*
2179	 * ret can still be 1 from btrfs_search_slot() or btrfs_next_leaf();
2180	 * that is not an error.
2181	 */
2182	return ret < 0 ? ret : 0;
2183}
2184
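/*
 * Scrub all super block mirrors that fit on the device. Super blocks have no
 * extent items, so they are read directly via scrub_pages(), expecting the
 * generation of the last committed transaction.
 */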
2185static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
2186{
2187	int	i;
2188	u64	bytenr;
2189	u64	gen;
2190	int	ret;
2191	struct btrfs_device *device = sdev->dev;
2192	struct btrfs_root *root = device->dev_root;
2193
2194	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2195		return -EIO;
2196
2197	gen = root->fs_info->last_trans_committed;
2198
2199	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2200		bytenr = btrfs_sb_offset(i);
2201		if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
2202			break;
2203
2204		ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2205				     BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
2206		if (ret)
2207			return ret;
2208	}
2209	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2210
2211	return 0;
2212}
2213
2214/*
2215 * Get a reference count on fs_info->scrub_workers. Start the workers if necessary.
2216 */
2217static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
2218{
2219	struct btrfs_fs_info *fs_info = root->fs_info;
2220	int ret = 0;
2221
2222	mutex_lock(&fs_info->scrub_lock);
2223	if (fs_info->scrub_workers_refcnt == 0) {
2224		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2225			   fs_info->thread_pool_size, &fs_info->generic_worker);
2226		fs_info->scrub_workers.idle_thresh = 4;
2227		ret = btrfs_start_workers(&fs_info->scrub_workers);
2228		if (ret)
2229			goto out;
2230	}
2231	++fs_info->scrub_workers_refcnt;
2232out:
2233	mutex_unlock(&fs_info->scrub_lock);
2234
2235	return ret;
2236}
2237
2238static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
2239{
2240	struct btrfs_fs_info *fs_info = root->fs_info;
2241
2242	mutex_lock(&fs_info->scrub_lock);
2243	if (--fs_info->scrub_workers_refcnt == 0)
2244		btrfs_stop_workers(&fs_info->scrub_workers);
2245	WARN_ON(fs_info->scrub_workers_refcnt < 0);
2246	mutex_unlock(&fs_info->scrub_lock);
2247}
2248
2249
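/*
 * Entry point for scrubbing one device: verify the size assumptions, set up
 * the worker threads and a scrub_dev context, then scrub the super blocks
 * followed by all chunks in [start, end).
 */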
2250int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
2251		    struct btrfs_scrub_progress *progress, int readonly)
2252{
2253	struct scrub_dev *sdev;
2254	struct btrfs_fs_info *fs_info = root->fs_info;
2255	int ret;
2256	struct btrfs_device *dev;
2257
2258	if (btrfs_fs_closing(root->fs_info))
2259		return -EINVAL;
2260
2261	/*
2262	 * check some assumptions
2263	 */
2264	if (root->nodesize != root->leafsize) {
2265		printk(KERN_ERR
2266		       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
2267		       root->nodesize, root->leafsize);
2268		return -EINVAL;
2269	}
2270
2271	if (root->nodesize > BTRFS_STRIPE_LEN) {
2272		/*
2273		 * The way scrub is implemented, it cannot verify the checksum
2274		 * of a tree block that exceeds BTRFS_STRIPE_LEN. Do not handle
2275		 * this situation at all because it won't ever happen.
2276		 */
2277		printk(KERN_ERR
2278		       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2279		       root->nodesize, BTRFS_STRIPE_LEN);
2280		return -EINVAL;
2281	}
2282
2283	if (root->sectorsize != PAGE_SIZE) {
2284		/* not supported for data w/o checksums */
2285		printk(KERN_ERR
2286		       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %llu) fails\n",
2287		       root->sectorsize, (unsigned long long)PAGE_SIZE);
2288		return -EINVAL;
2289	}
2290
2291	ret = scrub_workers_get(root);
2292	if (ret)
2293		return ret;
2294
2295	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2296	dev = btrfs_find_device(root, devid, NULL, NULL);
2297	if (!dev || dev->missing) {
2298		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2299		scrub_workers_put(root);
2300		return -ENODEV;
2301	}
2302	mutex_lock(&fs_info->scrub_lock);
2303
2304	if (!dev->in_fs_metadata) {
2305		mutex_unlock(&fs_info->scrub_lock);
2306		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2307		scrub_workers_put(root);
2308		return -ENODEV;
2309	}
2310
2311	if (dev->scrub_device) {
2312		mutex_unlock(&fs_info->scrub_lock);
2313		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2314		scrub_workers_put(root);
2315		return -EINPROGRESS;
2316	}
2317	sdev = scrub_setup_dev(dev);
2318	if (IS_ERR(sdev)) {
2319		mutex_unlock(&fs_info->scrub_lock);
2320		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2321		scrub_workers_put(root);
2322		return PTR_ERR(sdev);
2323	}
2324	sdev->readonly = readonly;
2325	dev->scrub_device = sdev;
2326
2327	atomic_inc(&fs_info->scrubs_running);
2328	mutex_unlock(&fs_info->scrub_lock);
2329	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2330
2331	down_read(&fs_info->scrub_super_lock);
2332	ret = scrub_supers(sdev);
2333	up_read(&fs_info->scrub_super_lock);
2334
2335	if (!ret)
2336		ret = scrub_enumerate_chunks(sdev, start, end);
2337
2338	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2339	atomic_dec(&fs_info->scrubs_running);
2340	wake_up(&fs_info->scrub_pause_wait);
2341
2342	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
2343
2344	if (progress)
2345		memcpy(progress, &sdev->stat, sizeof(*progress));
2346
2347	mutex_lock(&fs_info->scrub_lock);
2348	dev->scrub_device = NULL;
2349	mutex_unlock(&fs_info->scrub_lock);
2350
2351	scrub_free_dev(sdev);
2352	scrub_workers_put(root);
2353
2354	return ret;
2355}
2356
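/*
 * Ask all running scrubs to pause and wait until every one of them has
 * reached the paused state.
 */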
2357void btrfs_scrub_pause(struct btrfs_root *root)
2358{
2359	struct btrfs_fs_info *fs_info = root->fs_info;
2360
2361	mutex_lock(&fs_info->scrub_lock);
2362	atomic_inc(&fs_info->scrub_pause_req);
2363	while (atomic_read(&fs_info->scrubs_paused) !=
2364	       atomic_read(&fs_info->scrubs_running)) {
2365		mutex_unlock(&fs_info->scrub_lock);
2366		wait_event(fs_info->scrub_pause_wait,
2367			   atomic_read(&fs_info->scrubs_paused) ==
2368			   atomic_read(&fs_info->scrubs_running));
2369		mutex_lock(&fs_info->scrub_lock);
2370	}
2371	mutex_unlock(&fs_info->scrub_lock);
2372}
2373
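/*
 * Drop the pause request and wake the paused scrubs up again.
 */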
2374void btrfs_scrub_continue(struct btrfs_root *root)
2375{
2376	struct btrfs_fs_info *fs_info = root->fs_info;
2377
2378	atomic_dec(&fs_info->scrub_pause_req);
2379	wake_up(&fs_info->scrub_pause_wait);
2380}
2381
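/*
 * Take the super block scrub lock for writing; this excludes scrub_supers(),
 * which holds it for reading.
 */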
2382void btrfs_scrub_pause_super(struct btrfs_root *root)
2383{
2384	down_write(&root->fs_info->scrub_super_lock);
2385}
2386
2387void btrfs_scrub_continue_super(struct btrfs_root *root)
2388{
2389	up_write(&root->fs_info->scrub_super_lock);
2390}
2391
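/*
 * Cancel all running scrubs and wait for them to finish. Returns -ENOTCONN
 * if no scrub is running.
 */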
2392int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2393{
2394
2395	mutex_lock(&fs_info->scrub_lock);
2396	if (!atomic_read(&fs_info->scrubs_running)) {
2397		mutex_unlock(&fs_info->scrub_lock);
2398		return -ENOTCONN;
2399	}
2400
2401	atomic_inc(&fs_info->scrub_cancel_req);
2402	while (atomic_read(&fs_info->scrubs_running)) {
2403		mutex_unlock(&fs_info->scrub_lock);
2404		wait_event(fs_info->scrub_pause_wait,
2405			   atomic_read(&fs_info->scrubs_running) == 0);
2406		mutex_lock(&fs_info->scrub_lock);
2407	}
2408	atomic_dec(&fs_info->scrub_cancel_req);
2409	mutex_unlock(&fs_info->scrub_lock);
2410
2411	return 0;
2412}
2413
2414int btrfs_scrub_cancel(struct btrfs_root *root)
2415{
2416	return __btrfs_scrub_cancel(root->fs_info);
2417}
2418
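/*
 * Cancel the scrub running on @dev, if any, and wait until its scrub_dev
 * context has been torn down.
 */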
2419int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
2420{
2421	struct btrfs_fs_info *fs_info = root->fs_info;
2422	struct scrub_dev *sdev;
2423
2424	mutex_lock(&fs_info->scrub_lock);
2425	sdev = dev->scrub_device;
2426	if (!sdev) {
2427		mutex_unlock(&fs_info->scrub_lock);
2428		return -ENOTCONN;
2429	}
2430	atomic_inc(&sdev->cancel_req);
2431	while (dev->scrub_device) {
2432		mutex_unlock(&fs_info->scrub_lock);
2433		wait_event(fs_info->scrub_pause_wait,
2434			   dev->scrub_device == NULL);
2435		mutex_lock(&fs_info->scrub_lock);
2436	}
2437	mutex_unlock(&fs_info->scrub_lock);
2438
2439	return 0;
2440}
2441
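/*
 * Like btrfs_scrub_cancel_dev(), but looks the device up by @devid under the
 * device_list_mutex first.
 */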
2442int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
2443{
2444	struct btrfs_fs_info *fs_info = root->fs_info;
2445	struct btrfs_device *dev;
2446	int ret;
2447
2448	/*
2449	 * we have to hold the device_list_mutex here so the device
2450	 * does not go away in cancel_dev. FIXME: find a better solution
2451	 */
2452	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2453	dev = btrfs_find_device(root, devid, NULL, NULL);
2454	if (!dev) {
2455		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2456		return -ENODEV;
2457	}
2458	ret = btrfs_scrub_cancel_dev(root, dev);
2459	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2460
2461	return ret;
2462}
2463
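/*
 * Copy the current scrub statistics for @devid into @progress. Returns
 * -ENODEV if the device is not found and -ENOTCONN if it is not being
 * scrubbed.
 */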
2464int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
2465			 struct btrfs_scrub_progress *progress)
2466{
2467	struct btrfs_device *dev;
2468	struct scrub_dev *sdev = NULL;
2469
2470	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2471	dev = btrfs_find_device(root, devid, NULL, NULL);
2472	if (dev)
2473		sdev = dev->scrub_device;
2474	if (sdev)
2475		memcpy(progress, &sdev->stat, sizeof(*progress));
2476	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2477
2478	return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
2479}