// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"
#include "raid-stripe-tree.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies their checksums. In case a bad
 * checksum is found or the extent cannot be read, good data will be written
 * back if any can be found.
 *
 * Future enhancements:
 * - In case an unrepairable extent is encountered, track which files are
 *   affected and report them
 * - track and record media errors, throw out bad devices
 * - add a mode to also read unallocated space
 */

struct scrub_ctx;

/*
 * The following value only influences the performance.
 *
 * This determines how many stripes would be submitted in one go,
 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
 */
#define SCRUB_STRIPES_PER_GROUP		8

/*
 * How many groups we have for each sctx.
 *
 * This would be 8M per device, the same value as the old scrub in-flight bios
 * size limit.
 */
#define SCRUB_GROUPS_PER_SCTX		16

#define SCRUB_TOTAL_STRIPES	(SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
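
/*
 * With the fixed 64KiB BTRFS_STRIPE_LEN this means 8 * 16 = 128 stripes,
 * i.e. 8MiB worth of stripes in flight per scrub context.
 */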

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

/* Represent one sector and its needed info to verify the content. */
struct scrub_sector_verification {
	bool is_metadata;

	union {
		/*
		 * Csum pointer for data csum verification. Should point to a
		 * sector csum inside scrub_stripe::csums.
		 *
		 * NULL if this data sector has no csum.
		 */
		u8 *csum;

		/*
		 * Extra info for metadata verification. All sectors inside a
		 * tree block share the same generation.
		 */
		u64 generation;
	};
};

enum scrub_stripe_flags {
	/* Set when @mirror_num, @dev, @physical and @logical are set. */
	SCRUB_STRIPE_FLAG_INITIALIZED,

	/* Set when the read-repair is finished. */
	SCRUB_STRIPE_FLAG_REPAIR_DONE,

	/*
	 * Set for data stripes if it's triggered from P/Q stripe.
	 * During such scrub, we should not report errors in data stripes, nor
	 * update the accounting.
	 */
	SCRUB_STRIPE_FLAG_NO_REPORT,
};

#define SCRUB_STRIPE_PAGES		(BTRFS_STRIPE_LEN / PAGE_SIZE)
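
/*
 * E.g. with 4KiB pages one stripe is backed by 64KiB / 4KiB = 16 pages;
 * larger page sizes need proportionally fewer pages.
 */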

/*
 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 */
struct scrub_stripe {
	struct scrub_ctx *sctx;
	struct btrfs_block_group *bg;

	struct page *pages[SCRUB_STRIPE_PAGES];
	struct scrub_sector_verification *sectors;

	struct btrfs_device *dev;
	u64 logical;
	u64 physical;

	u16 mirror_num;

	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
	u16 nr_sectors;

	/*
	 * How many data/meta extents are in this stripe. Only for scrub status
	 * reporting purposes.
	 */
	u16 nr_data_extents;
	u16 nr_meta_extents;

	atomic_t pending_io;
	wait_queue_head_t io_wait;
	wait_queue_head_t repair_wait;

	/*
	 * Indicate the states of the stripe. Bits are defined in the
	 * scrub_stripe_flags enum.
	 */
	unsigned long state;

	/* Indicate which sectors are covered by extent items. */
	unsigned long extent_sector_bitmap;

	/*
	 * The errors hit during the initial read of the stripe.
	 *
	 * Used for error reporting and repair.
	 *
	 * The init_nr_* counters record the number of errors hit, and are
	 * only used by error reporting.
	 */
	unsigned long init_error_bitmap;
	unsigned int init_nr_io_errors;
	unsigned int init_nr_csum_errors;
	unsigned int init_nr_meta_errors;

	/*
	 * The following error bitmaps are all for the current status.
	 * Every time we submit a new read, these bitmaps may be updated.
	 *
	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
	 *
	 * IO and csum errors can happen for both metadata and data.
	 */
	unsigned long error_bitmap;
	unsigned long io_error_bitmap;
	unsigned long csum_error_bitmap;
	unsigned long meta_error_bitmap;

	/* For writeback (repair or replace) error reporting. */
	unsigned long write_error_bitmap;

	/* Writeback can be concurrent, thus we need to protect the bitmap. */
	spinlock_t write_error_lock;

	/*
	 * Checksum for the whole stripe if this stripe is inside a data block
	 * group.
	 */
	u8 *csums;

	struct work_struct work;
};

struct scrub_ctx {
	struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES];
	struct scrub_stripe *raid56_data_stripes;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path extent_path;
	struct btrfs_path csum_path;
	int first_free;
	int cur_stripe;
	atomic_t cancel_req;
	int readonly;

	/* State of IO submission throttling affecting the associated device. */
	ktime_t throttle_deadline;
	u64 throttle_sent;

	int is_dev_replace;
	u64 write_pointer;

	struct mutex wr_lock;
	struct btrfs_device *wr_tgtdev;

	/*
	 * Statistics.
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * drop their reference and do wakeup() calls on the wait queues when
	 * they finish; we must ensure the main scrub task doesn't free the
	 * scrub context before or while the workers are doing the wakeup()
	 * call.
	 */
	refcount_t refs;
};

struct scrub_warning {
	struct btrfs_path *path;
	u64 extent_item_size;
	const char *errstr;
	u64 physical;
	u64 logical;
	struct btrfs_device *dev;
};

static void release_scrub_stripe(struct scrub_stripe *stripe)
{
	if (!stripe)
		return;

	for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
		if (stripe->pages[i])
			__free_page(stripe->pages[i]);
		stripe->pages[i] = NULL;
	}
	kfree(stripe->sectors);
	kfree(stripe->csums);
	stripe->sectors = NULL;
	stripe->csums = NULL;
	stripe->sctx = NULL;
	stripe->state = 0;
}

static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
			     struct scrub_stripe *stripe)
{
	int ret;

	memset(stripe, 0, sizeof(*stripe));

	stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	stripe->state = 0;

	init_waitqueue_head(&stripe->io_wait);
	init_waitqueue_head(&stripe->repair_wait);
	atomic_set(&stripe->pending_io, 0);
	spin_lock_init(&stripe->write_error_lock);

	ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, false);
	if (ret < 0)
		goto error;

	stripe->sectors = kcalloc(stripe->nr_sectors,
				  sizeof(struct scrub_sector_verification),
				  GFP_KERNEL);
	if (!stripe->sectors)
		goto error;

	stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
				fs_info->csum_size, GFP_KERNEL);
	if (!stripe->csums)
		goto error;
	return 0;
error:
	release_scrub_stripe(stripe);
	return -ENOMEM;
}
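
/*
 * Example: with a 4KiB sector size init_scrub_stripe() sets nr_sectors to
 * 64KiB >> 12 = 16 and allocates 16 csum slots of csum_size bytes each.
 */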

static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
{
	wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
}

static void scrub_put_ctx(struct scrub_ctx *sctx);

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
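
/*
 * Pause handshake above: scrub_pause_on() marks this scrub as paused and
 * wakes up any waiter, while scrub_pause_off() blocks (dropping scrub_lock
 * while waiting) until scrub_pause_req returns to zero, then clears the
 * paused state.
 */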

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
		release_scrub_stripe(&sctx->stripes[i]);

	kvfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	/*
	 * Since sctx has 128 inline stripes, it can easily go beyond 64K.
	 * Use kvzalloc().
	 */
	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->fs_info = fs_info;
	sctx->extent_path.search_commit_root = 1;
	sctx->extent_path.skip_locking = 1;
	sctx->csum_path.search_commit_root = 1;
	sctx->csum_path.skip_locking = 1;
	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
		int ret;

		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
		if (ret < 0)
			goto nomem;
		sctx->stripes[i].sctx = sctx;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->stat_lock);
	sctx->throttle_deadline = 0;

	mutex_init(&sctx->wr_lock);
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
				     u64 root, void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * This makes the path point to (inum INODE_ITEM ioff).
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  btrfs_dev_name(swarn->dev),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  btrfs_dev_name(swarn->dev),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
				       bool is_super, u64 logical, u64 physical)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u64 flags = 0;
	u32 item_size;
	int ret;

	/* Super block error, no need to search extent tree. */
	if (is_super) {
		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
				  errstr, btrfs_dev_name(dev), physical);
		return;
	}
	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = physical;
	swarn.logical = logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u8 ref_level;
		u64 ref_root;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					   swarn.logical, ret);
				break;
			}
			if (ret > 0)
				break;
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical, btrfs_dev_name(dev),
				swarn.physical, (ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };

		btrfs_release_path(path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = swarn.logical - found_key.objectid;
		ctx.fs_info = fs_info;

		swarn.path = path;
		swarn.dev = dev;

		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

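/*
 * On zoned devices writes must be sequential within a zone: if the repair or
 * replace target @physical is ahead of the current write pointer, zero-fill
 * the gap so that the following write lands exactly at @physical.
 */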
static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
	int ret = 0;
	u64 length;

	if (!btrfs_is_zoned(sctx->fs_info))
		return 0;

	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
		return 0;

	if (sctx->write_pointer < physical) {
		length = physical - sctx->write_pointer;

		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
						sctx->write_pointer, length);
		if (!ret)
			sctx->write_pointer = physical;
	}
	return ret;
}

static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;

	return stripe->pages[page_index];
}

static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
						 int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;

	return offset_in_page(sector_nr << fs_info->sectorsize_bits);
}

static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
	const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
	const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct btrfs_header *header;

	/*
	 * Here we don't have a good way to attach the pages (and subpages)
	 * to a dummy extent buffer, thus we have to directly grab the members
	 * from pages.
	 */
	header = (struct btrfs_header *)(page_address(first_page) + first_off);
	memcpy(on_disk_csum, header->csum, fs_info->csum_size);

	if (logical != btrfs_stack_header_bytenr(header)) {
		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
			      logical, stripe->mirror_num,
			      btrfs_stack_header_bytenr(header), logical);
		return;
	}
	if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
		   BTRFS_FSID_SIZE) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
			      logical, stripe->mirror_num,
			      header->fsid, fs_info->fs_devices->fsid);
		return;
	}
	if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
			      logical, stripe->mirror_num,
			      header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
		return;
	}

	/* Now check tree block csum. */
	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, page_address(first_page) + first_off +
			    BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);

	for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
		struct page *page = scrub_stripe_get_page(stripe, i);
		unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);

		crypto_shash_update(shash, page_address(page) + page_off,
				    fs_info->sectorsize);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
			      logical, stripe->mirror_num,
			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
		return;
	}
	if (stripe->sectors[sector_nr].generation !=
	    btrfs_stack_header_generation(header)) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad generation, has %llu want %llu",
			      logical, stripe->mirror_num,
			      btrfs_stack_header_generation(header),
			      stripe->sectors[sector_nr].generation);
		return;
	}
	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
}

static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	struct page *page = scrub_stripe_get_page(stripe, sector_nr);
	unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
	u8 csum_buf[BTRFS_CSUM_SIZE];
	int ret;

	ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);

	/* Sector not utilized, skip it. */
	if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
		return;

	/* IO error, no need to check. */
	if (test_bit(sector_nr, &stripe->io_error_bitmap))
		return;

	/* Metadata, verify the full tree block. */
	if (sector->is_metadata) {
		/*
		 * Check if the tree block crosses the stripe boundary. If it
		 * does, we cannot verify it and can only give a warning.
		 *
		 * This can only happen on a very old filesystem where chunks
		 * are not ensured to be stripe aligned.
		 */
		if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
			btrfs_warn_rl(fs_info,
			"tree block at %llu crosses stripe boundary %llu",
				      stripe->logical +
				      (sector_nr << fs_info->sectorsize_bits),
				      stripe->logical);
			return;
		}
		scrub_verify_one_metadata(stripe, sector_nr);
		return;
	}

	/*
	 * Data is easier, we just verify the data csum (if we have it). For
	 * cases without csum, we have no other choice but to trust it.
	 */
	if (!sector->csum) {
		clear_bit(sector_nr, &stripe->error_bitmap);
		return;
	}

	ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
	if (ret < 0) {
		set_bit(sector_nr, &stripe->csum_error_bitmap);
		set_bit(sector_nr, &stripe->error_bitmap);
	} else {
		clear_bit(sector_nr, &stripe->csum_error_bitmap);
		clear_bit(sector_nr, &stripe->error_bitmap);
	}
}

/* Verify specified sectors of a stripe. */
static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	int sector_nr;

	for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
		scrub_verify_one_sector(stripe, sector_nr);
		if (stripe->sectors[sector_nr].is_metadata)
			sector_nr += sectors_per_tree - 1;
	}
}

static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
{
	int i;

	for (i = 0; i < stripe->nr_sectors; i++) {
		if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
		    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
			break;
	}
	ASSERT(i < stripe->nr_sectors);
	return i;
}

/*
 * Repair read is different from the regular read:
 *
 * - Only reads the failed sectors
 * - May have extra blocksize limits
 */
static void scrub_repair_read_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	u32 bio_size = 0;
	int i;

	ASSERT(sector_nr < stripe->nr_sectors);

	bio_for_each_bvec_all(bvec, &bbio->bio, i)
		bio_size += bvec->bv_len;

	if (bbio->bio.bi_status) {
		bitmap_set(&stripe->io_error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
		bitmap_set(&stripe->error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
	} else {
		bitmap_clear(&stripe->io_error_bitmap, sector_nr,
			     bio_size >> fs_info->sectorsize_bits);
	}
	bio_put(&bbio->bio);
	if (atomic_dec_and_test(&stripe->pending_io))
		wake_up(&stripe->io_wait);
}

static int calc_next_mirror(int mirror, int num_copies)
{
	ASSERT(mirror <= num_copies);
	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}
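
/*
 * E.g. with num_copies == 3 and an initial mirror of 1, the retry loop in
 * scrub_stripe_read_repair_worker() below tries mirrors 2, then 3, and
 * finally wraps back to 1.
 */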

static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
					    int mirror, int blocksize, bool wait)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	const unsigned long old_error_bitmap = stripe->error_bitmap;
	int i;

	ASSERT(stripe->mirror_num >= 1);
	ASSERT(atomic_read(&stripe->pending_io) == 0);

	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
		struct page *page;
		int pgoff;
		int ret;

		page = scrub_stripe_get_page(stripe, i);
		pgoff = scrub_stripe_get_page_offset(stripe, i);

		/* The current sector cannot be merged, submit the bio. */
		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
			     bbio->bio.bi_iter.bi_size >= blocksize)) {
			ASSERT(bbio->bio.bi_iter.bi_size);
			atomic_inc(&stripe->pending_io);
			btrfs_submit_bbio(bbio, mirror);
			if (wait)
				wait_scrub_stripe_io(stripe);
			bbio = NULL;
		}

		if (!bbio) {
			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
					       fs_info, scrub_repair_read_endio, stripe);
			bbio->bio.bi_iter.bi_sector = (stripe->logical +
				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
		}

		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		ASSERT(ret == fs_info->sectorsize);
	}
	if (bbio) {
		ASSERT(bbio->bio.bi_iter.bi_size);
		atomic_inc(&stripe->pending_io);
		btrfs_submit_bbio(bbio, mirror);
		if (wait)
			wait_scrub_stripe_io(stripe);
	}
}

static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
				       struct scrub_stripe *stripe)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_device *dev = NULL;
	u64 physical = 0;
	int nr_data_sectors = 0;
	int nr_meta_sectors = 0;
	int nr_nodatacsum_sectors = 0;
	int nr_repaired_sectors = 0;
	int sector_nr;

	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
		return;

	/*
	 * Initialize the info needed for error reporting.
	 *
	 * Although our scrub_stripe infrastructure is mostly based on
	 * btrfs_submit_bio() and thus needs no dev/physical itself, error
	 * reporting still needs dev and physical.
	 */
	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
		u64 mapped_len = fs_info->sectorsize;
		struct btrfs_io_context *bioc = NULL;
		int stripe_index = stripe->mirror_num - 1;
		int ret;

		/* For scrub, our mirror_num should always start at 1. */
		ASSERT(stripe->mirror_num >= 1);
		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				      stripe->logical, &mapped_len, &bioc,
				      NULL, NULL);
		/*
		 * If we failed, dev will be NULL, and later detailed reports
		 * will just be skipped.
		 */
		if (ret < 0)
			goto skip;
		physical = bioc->stripes[stripe_index].physical;
		dev = bioc->stripes[stripe_index].dev;
		btrfs_put_bioc(bioc);
	}

skip:
	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
		bool repaired = false;

		if (stripe->sectors[sector_nr].is_metadata) {
			nr_meta_sectors++;
		} else {
			nr_data_sectors++;
			if (!stripe->sectors[sector_nr].csum)
				nr_nodatacsum_sectors++;
		}

		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
		    !test_bit(sector_nr, &stripe->error_bitmap)) {
			nr_repaired_sectors++;
			repaired = true;
		}

		/* Good sector from the beginning, nothing needs to be done. */
		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
			continue;

		/*
		 * Report errors for the corrupted sectors. If a sector was
		 * repaired, just output a message saying it has been fixed.
		 */
		if (repaired) {
			if (dev) {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on dev %s physical %llu",
					stripe->logical, btrfs_dev_name(dev),
					physical);
			} else {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on mirror %u",
					stripe->logical, stripe->mirror_num);
			}
			continue;
		}

		/* The remaining are all for unrepaired. */
		if (dev) {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
				stripe->logical, btrfs_dev_name(dev),
				physical);
		} else {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on mirror %u",
				stripe->logical, stripe->mirror_num);
		}

		if (test_bit(sector_nr, &stripe->io_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("i/o error", dev, false,
						stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("checksum error", dev, false,
						stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("header error", dev, false,
						stripe->logical, physical);
	}

	spin_lock(&sctx->stat_lock);
	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
	sctx->stat.no_csum += nr_nodatacsum_sectors;
	sctx->stat.read_errors += stripe->init_nr_io_errors;
	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
	sctx->stat.uncorrectable_errors +=
		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
	sctx->stat.corrected_errors += nr_repaired_sectors;
	spin_unlock(&sctx->stat_lock);
}

static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
				unsigned long write_bitmap, bool dev_replace);

/*
 * The main entry point for all read related scrub work, including:
 *
 * - Wait for the initial read to finish
 * - Verify and locate any bad sectors
 * - Go through the remaining mirrors and try to read as large blocksize as
 *   possible
 * - Go through all mirrors (including the failed mirror) sector-by-sector
 * - Submit writeback for repaired sectors
 *
 * Writeback for dev-replace does not happen here, it needs extra
 * synchronization for zoned devices.
 */
static void scrub_stripe_read_repair_worker(struct work_struct *work)
{
	struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
	struct scrub_ctx *sctx = stripe->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
					  stripe->bg->length);
	unsigned long repaired;
	int mirror;
	int i;

	ASSERT(stripe->mirror_num > 0);

	wait_scrub_stripe_io(stripe);
	scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
	/* Save the initial failed bitmap for later repair and report usage. */
	stripe->init_error_bitmap = stripe->error_bitmap;
	stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
						  stripe->nr_sectors);
	stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
						    stripe->nr_sectors);
	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
						    stripe->nr_sectors);

	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
		goto out;

	/*
	 * Try all remaining mirrors.
	 *
	 * Here we still try to read as large blocks as possible, as this is
	 * faster and we have extra safety nets to rely on.
	 */
	for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
	     mirror != stripe->mirror_num;
	     mirror = calc_next_mirror(mirror, num_copies)) {
		const unsigned long old_error_bitmap = stripe->error_bitmap;

		scrub_stripe_submit_repair_read(stripe, mirror,
						BTRFS_STRIPE_LEN, false);
		wait_scrub_stripe_io(stripe);
		scrub_verify_one_stripe(stripe, old_error_bitmap);
		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			goto out;
	}

	/*
	 * Last safety net, try re-checking all mirrors, including the failed
	 * one, sector-by-sector.
	 *
	 * If one sector fails the drive's internal csum, the whole read
	 * containing the offending sector would be marked as an error.
	 * Thus here we do sector-by-sector reads.
	 *
	 * This can be slow, thus we only try it as the last resort.
	 */

	for (i = 0, mirror = stripe->mirror_num;
	     i < num_copies;
	     i++, mirror = calc_next_mirror(mirror, num_copies)) {
		const unsigned long old_error_bitmap = stripe->error_bitmap;

		scrub_stripe_submit_repair_read(stripe, mirror,
						fs_info->sectorsize, true);
		wait_scrub_stripe_io(stripe);
		scrub_verify_one_stripe(stripe, old_error_bitmap);
		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			goto out;
	}
out:
	/*
	 * Submit the repaired sectors. For the zoned case, we cannot do
	 * repair in-place, but queue the bg to be relocated.
	 */
	bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
		      stripe->nr_sectors);
	if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
		if (btrfs_is_zoned(fs_info)) {
			btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
		} else {
			scrub_write_sectors(sctx, stripe, repaired, false);
			wait_scrub_stripe_io(stripe);
		}
	}

	scrub_stripe_report_errors(sctx, stripe);
	set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
	wake_up(&stripe->repair_wait);
}

static void scrub_read_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	int num_sectors;
	u32 bio_size = 0;
	int i;

	ASSERT(sector_nr < stripe->nr_sectors);
	bio_for_each_bvec_all(bvec, &bbio->bio, i)
		bio_size += bvec->bv_len;
	num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;

	if (bbio->bio.bi_status) {
		bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
		bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
	} else {
		bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
	}
	bio_put(&bbio->bio);
	if (atomic_dec_and_test(&stripe->pending_io)) {
		wake_up(&stripe->io_wait);
		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
	}
}

static void scrub_write_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	u32 bio_size = 0;
	int i;

	bio_for_each_bvec_all(bvec, &bbio->bio, i)
		bio_size += bvec->bv_len;

	if (bbio->bio.bi_status) {
		unsigned long flags;

		spin_lock_irqsave(&stripe->write_error_lock, flags);
		bitmap_set(&stripe->write_error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
		spin_unlock_irqrestore(&stripe->write_error_lock, flags);
	}
	bio_put(&bbio->bio);

	if (atomic_dec_and_test(&stripe->pending_io))
		wake_up(&stripe->io_wait);
}

static void scrub_submit_write_bio(struct scrub_ctx *sctx,
				   struct scrub_stripe *stripe,
				   struct btrfs_bio *bbio, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u32 bio_len = bbio->bio.bi_iter.bi_size;
	u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
		      stripe->logical;

	fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
	atomic_inc(&stripe->pending_io);
	btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
	if (!btrfs_is_zoned(fs_info))
		return;
	/*
	 * For zoned writeback, queue depth must be 1, thus we must wait for
	 * the write to finish before the next write.
	 */
	wait_scrub_stripe_io(stripe);

	/*
	 * Also update the write pointer if the write finished successfully.
	 */
	if (!test_bit(bio_off >> fs_info->sectorsize_bits,
		      &stripe->write_error_bitmap))
		sctx->write_pointer += bio_len;
}

/*
 * Submit the write bio(s) for the sectors specified by @write_bitmap.
 *
 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
 *
 * - Only needs logical bytenr and mirror_num
 *   Just like the scrub read path
 *
 * - Would only result in writes to the specified mirror
 *   Unlike the regular writeback path, which would write back to all stripes
 *
 * - Handle dev-replace and read-repair writeback differently
 */
static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
				unsigned long write_bitmap, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	int sector_nr;

	for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
		struct page *page = scrub_stripe_get_page(stripe, sector_nr);
		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
		int ret;

		/* We should only writeback sectors covered by an extent. */
		ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));

		/* Cannot merge with previous sector, submit the current one. */
		if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
			scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
			bbio = NULL;
		}
		if (!bbio) {
			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
					       fs_info, scrub_write_endio, stripe);
			bbio->bio.bi_iter.bi_sector = (stripe->logical +
				(sector_nr << fs_info->sectorsize_bits)) >>
				SECTOR_SHIFT;
		}
		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		ASSERT(ret == fs_info->sectorsize);
	}
	if (bbio)
		scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second. The limit can be set via
 * /sys/fs/btrfs/UUID/devinfo/devid/scrub_speed_max.
 */
static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
				  unsigned int bio_size)
{
	const int time_slice = 1000;
	s64 delta;
	ktime_t now;
	u32 div;
	u64 bwlimit;

	bwlimit = READ_ONCE(device->scrub_speed_max);
	if (bwlimit == 0)
		return;

	/*
	 * Slice is divided into intervals when the IO is submitted, adjust by
	 * bwlimit and maximum of 64 intervals.
	 */
	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
	div = min_t(u32, 64, div);

	/* Start new epoch, set deadline. */
	now = ktime_get();
	if (sctx->throttle_deadline == 0) {
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	/* Still in the time to send? */
	if (ktime_before(now, sctx->throttle_deadline)) {
		/* If the current bio is within the limit, send it. */
		sctx->throttle_sent += bio_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;

		/* We're over the limit, sleep until the rest of the slice. */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		/* New request after deadline, start new epoch. */
		delta = 0;
	}

	if (delta) {
		long timeout;

		timeout = div_u64(delta * HZ, 1000);
		schedule_timeout_interruptible(timeout);
	}

	/* Next call will start the deadline period. */
	sctx->throttle_deadline = 0;
}
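
/*
 * Worked example: with scrub_speed_max set to 64MiB/s, div becomes
 * min(64, max(1, 64MiB / 16MiB)) = 4, so each epoch lasts 1000 / 4 = 250ms
 * with a budget of 64MiB / 4 = 16MiB; once more than 16MiB has been
 * submitted before the deadline, the submitter sleeps out the rest of the
 * epoch.
 */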

/*
 * Given a physical address, this will calculate its logical offset. If this
 * is a parity stripe, it will return the leftmost data stripe's logical
 * offset.
 *
 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct btrfs_chunk_map *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 last_offset;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		u32 stripe_nr;
		u32 stripe_index;
		u32 rot;

		*offset = last_offset + btrfs_stripe_nr_to_offset(i);

		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;

		/* Work out the disk rotation on this stripe-set. */
		rot = stripe_nr % map->num_stripes;
		/* Calculate which stripe this data is located on. */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
	return 1;
}
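
/*
 * E.g. for RAID5 on three devices (two data stripes): for the first physical
 * stripe, device 0 maps to logical offset 0 and device 1 to offset 64KiB
 * (both return 0), while device 2 holds the parity for that stripe-set and
 * gets 1 returned.
 */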

/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
				     u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
	u64 len;
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
	       key.type == BTRFS_METADATA_ITEM_KEY);
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		len = fs_info->nodesize;
	else
		len = key.offset;

	if (key.objectid + len <= search_start)
		return -1;
	if (key.objectid >= search_start + search_len)
		return 1;
	return 0;
}
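
/*
 * E.g. an EXTENT_ITEM with key (16M, EXTENT_ITEM, 128K) covers
 * [16M, 16M + 128K): a search range starting at 16M + 64K overlaps it
 * (returns 0), while a search range starting at or beyond 16M + 128K makes
 * the item fall before the range (returns -1).
 */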

/*
 * Locate one extent item which covers any byte in range
 * [@search_start, @search_start + @search_length)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item. This is for data extent crossing stripe boundary.
 *
 * Return 0 if we found such extent item, and @path will point to the extent item.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if hit fatal error, and @path will be released.
 */
static int find_first_extent_item(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct btrfs_key key;
	int ret;

	/* Continue using the existing path. */
	if (path->nodes[0])
		goto search_forward;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = search_start;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist an extent
		 * item with such offset, but this is out of the valid range.
		 */
		btrfs_release_path(path);
		return -EUCLEAN;
	}

	/*
	 * Here we intentionally pass 0 as @min_objectid, as there could be
	 * an extent item starting before @search_start.
	 */
	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret < 0)
		return ret;
	/*
	 * No matter whether we have found an extent item, the next loop will
	 * properly do every check on the key.
	 */
search_forward:
	while (true) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid >= search_start + search_len)
			break;
		if (key.type != BTRFS_METADATA_ITEM_KEY &&
		    key.type != BTRFS_EXTENT_ITEM_KEY)
			goto next;

		ret = compare_extent_item_range(path, search_start, search_len);
		if (ret == 0)
			return ret;
		if (ret > 0)
			break;
next:
		ret = btrfs_next_item(extent_root, path);
		if (ret) {
			/* Either no more items or a fatal error. */
			btrfs_release_path(path);
			return ret;
		}
	}
	btrfs_release_path(path);
	return 1;
}

static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
	       key.type == BTRFS_EXTENT_ITEM_KEY);
	*extent_start_ret = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		*size_ret = path->nodes[0]->fs_info->nodesize;
	else
		*size_ret = key.offset;
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
}

static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
					u64 physical, u64 physical_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int ret = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	mutex_lock(&sctx->wr_lock);
	if (sctx->write_pointer < physical_end) {
		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
						    physical,
						    sctx->write_pointer);
		if (ret)
			btrfs_err(fs_info,
				  "zoned: failed to recover write pointer");
	}
	mutex_unlock(&sctx->wr_lock);
	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);

	return ret;
}

static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
				 struct scrub_stripe *stripe,
				 u64 extent_start, u64 extent_len,
				 u64 extent_flags, u64 extent_gen)
{
	for (u64 cur_logical = max(stripe->logical, extent_start);
	     cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
			       extent_start + extent_len);
	     cur_logical += fs_info->sectorsize) {
		const int nr_sector = (cur_logical - stripe->logical) >>
				      fs_info->sectorsize_bits;
		struct scrub_sector_verification *sector =
						&stripe->sectors[nr_sector];

		set_bit(nr_sector, &stripe->extent_sector_bitmap);
		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			sector->is_metadata = true;
			sector->generation = extent_gen;
		}
	}
}
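
/*
 * E.g. for a stripe at logical 0 and an extent [60KiB, 72KiB), only the
 * overlap [60KiB, 64KiB) is marked in extent_sector_bitmap (sector 15 with
 * 4KiB sectors); the rest of the extent belongs to the next stripe.
 */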

static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
{
	stripe->extent_sector_bitmap = 0;
	stripe->init_error_bitmap = 0;
	stripe->init_nr_io_errors = 0;
	stripe->init_nr_csum_errors = 0;
	stripe->init_nr_meta_errors = 0;
	stripe->error_bitmap = 0;
	stripe->io_error_bitmap = 0;
	stripe->csum_error_bitmap = 0;
	stripe->meta_error_bitmap = 0;
}

/*
 * Locate one stripe which has at least one extent in its range.
 *
 * Return 0 if found such stripe, and store its info into @stripe.
 * Return >0 if there is no such stripe in the specified range.
 * Return <0 for error.
 */
static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
					struct btrfs_path *extent_path,
					struct btrfs_path *csum_path,
					struct btrfs_device *dev, u64 physical,
					int mirror_num, u64 logical_start,
					u32 logical_len,
					struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
	const u64 logical_end = logical_start + logical_len;
	u64 cur_logical = logical_start;
	u64 stripe_end;
	u64 extent_start;
	u64 extent_len;
	u64 extent_flags;
	u64 extent_gen;
	int ret;

	if (unlikely(!extent_root)) {
		btrfs_err(fs_info, "no valid extent root for scrub");
		return -EUCLEAN;
	}
	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
				   stripe->nr_sectors);
	scrub_stripe_reset_bitmaps(stripe);

	/* The range must be inside the bg. */
	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);

	ret = find_first_extent_item(extent_root, extent_path, logical_start,
				     logical_len);
	/* Either error or not found. */
	if (ret)
		goto out;
	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
			&extent_gen);
	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		stripe->nr_meta_extents++;
	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
		stripe->nr_data_extents++;
	cur_logical = max(extent_start, cur_logical);

	/*
	 * Round down to stripe boundary.
	 *
	 * The extra calculation against bg->start is to handle block groups
	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
	 */
	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
			  bg->start;
	stripe->physical = physical + stripe->logical - logical_start;
	stripe->dev = dev;
	stripe->bg = bg;
	stripe->mirror_num = mirror_num;
	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;

	/* Fill the first extent info into stripe->sectors[] array. */
	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
			     extent_flags, extent_gen);
	cur_logical = extent_start + extent_len;

	/* Fill the extent info for the remaining sectors. */
	while (cur_logical <= stripe_end) {
		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
					     stripe_end - cur_logical + 1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
		get_extent_info(extent_path, &extent_start, &extent_len,
				&extent_flags, &extent_gen);
		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			stripe->nr_meta_extents++;
		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
			stripe->nr_data_extents++;
		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
				     extent_flags, extent_gen);
		cur_logical = extent_start + extent_len;
	}

	/* Now fill the data csum. */
	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
		int sector_nr;
		unsigned long csum_bitmap = 0;

		/* Csum space should have already been allocated. */
		ASSERT(stripe->csums);

		/*
		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
		 * should contain at most 16 sectors.
		 */
		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);

		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
						stripe->logical, stripe_end,
						stripe->csums, &csum_bitmap);
		if (ret < 0)
			goto out;
		if (ret > 0)
			ret = 0;

		for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
			stripe->sectors[sector_nr].csum = stripe->csums +
				sector_nr * fs_info->csum_size;
		}
	}
	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
out:
	return ret;
}

static void scrub_reset_stripe(struct scrub_stripe *stripe)
{
	scrub_stripe_reset_bitmaps(stripe);

	stripe->nr_meta_extents = 0;
	stripe->nr_data_extents = 0;
	stripe->state = 0;

	for (int i = 0; i < stripe->nr_sectors; i++) {
		stripe->sectors[i].is_metadata = false;
		stripe->sectors[i].csum = NULL;
		stripe->sectors[i].generation = 0;
	}
}

static u32 stripe_length(const struct scrub_stripe *stripe)
{
	ASSERT(stripe->bg);

	return min(BTRFS_STRIPE_LEN,
		   stripe->bg->start + stripe->bg->length - stripe->logical);
}
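
/*
 * E.g. if the block group ends 32KiB past stripe->logical, stripe_length()
 * returns 32KiB instead of the full BTRFS_STRIPE_LEN, so we never scrub
 * beyond the chunk boundary.
 */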

static void scrub_submit_extent_sector_read(struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
	u64 stripe_len = BTRFS_STRIPE_LEN;
	int mirror = stripe->mirror_num;
	int i;

	atomic_inc(&stripe->pending_io);

	for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
		struct page *page = scrub_stripe_get_page(stripe, i);
		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);

		/* We're beyond the chunk boundary, no need to read anymore. */
		if (i >= nr_sectors)
			break;

		/* The current sector cannot be merged, submit the bio. */
		if (bbio &&
		    ((i > 0 &&
		      !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
		     bbio->bio.bi_iter.bi_size >= stripe_len)) {
			ASSERT(bbio->bio.bi_iter.bi_size);
			atomic_inc(&stripe->pending_io);
			btrfs_submit_bbio(bbio, mirror);
			bbio = NULL;
		}

		if (!bbio) {
			struct btrfs_io_stripe io_stripe = {};
			struct btrfs_io_context *bioc = NULL;
			const u64 logical = stripe->logical +
					    (i << fs_info->sectorsize_bits);
			int err;

			io_stripe.rst_search_commit_root = true;
			stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits;
			/*
			 * For RAID stripe tree (RST) cases, we need to
			 * manually split the bbio to follow the RST boundary.
			 */
			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
					      &stripe_len, &bioc, &io_stripe, &mirror);
			btrfs_put_bioc(bioc);
			if (err < 0) {
				if (err != -ENODATA) {
					/*
					 * -ENODATA from
					 * btrfs_get_raid_extent_offset() means
					 * there's no entry for the
					 * corresponding range in the stripe
					 * tree. But if it's in the extent
					 * tree, then it's a preallocated
					 * extent and not an error, so only
					 * treat other failures as errors.
					 */
					set_bit(i, &stripe->io_error_bitmap);
					set_bit(i, &stripe->error_bitmap);
				}
				continue;
			}

			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
					       fs_info, scrub_read_endio, stripe);
			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
		}

		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
	}

	if (bbio) {
		ASSERT(bbio->bio.bi_iter.bi_size);
		atomic_inc(&stripe->pending_io);
		btrfs_submit_bbio(bbio, mirror);
	}

	if (atomic_dec_and_test(&stripe->pending_io)) {
		wake_up(&stripe->io_wait);
		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
	}
}

static void scrub_submit_initial_read(struct scrub_ctx *sctx,
				      struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_bio *bbio;
	unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
	int mirror = stripe->mirror_num;

	ASSERT(stripe->bg);
	ASSERT(stripe->mirror_num > 0);
	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));

	if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
		scrub_submit_extent_sector_read(stripe);
		return;
	}

	bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
			       scrub_read_endio, stripe);

	bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
	/* Read the whole range inside the chunk boundary. */
	for (unsigned int cur = 0; cur < nr_sectors; cur++) {
		struct page *page = scrub_stripe_get_page(stripe, cur);
		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
		int ret;

		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		/* We should have allocated enough bio vectors. */
		ASSERT(ret == fs_info->sectorsize);
	}
	atomic_inc(&stripe->pending_io);

	/*
	 * For dev-replace, either the user asks to avoid the source dev, or
	 * the device is missing; in both cases we try the next mirror instead.
	 */
	if (sctx->is_dev_replace &&
	    (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
	     !stripe->dev->bdev)) {
		int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
						  stripe->bg->length);

		mirror = calc_next_mirror(mirror, num_copies);
	}
	btrfs_submit_bbio(bbio, mirror);
}
1794
1795static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1796{
1797 int i;
1798
1799 for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1800 if (stripe->sectors[i].is_metadata) {
1801 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1802
1803 btrfs_err(fs_info,
1804 "stripe %llu has unrepaired metadata sector at %llu",
1805 stripe->logical,
1806 stripe->logical + (i << fs_info->sectorsize_bits));
1807 return true;
1808 }
1809 }
1810 return false;
1811}
1812
1813static void submit_initial_group_read(struct scrub_ctx *sctx,
1814 unsigned int first_slot,
1815 unsigned int nr_stripes)
1816{
1817 struct blk_plug plug;
1818
1819 ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
1820 ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
1821
1822 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1823 btrfs_stripe_nr_to_offset(nr_stripes));
1824 blk_start_plug(&plug);
1825 for (int i = 0; i < nr_stripes; i++) {
1826 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1827
1828 /* Those stripes should be initialized. */
1829 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1830 scrub_submit_initial_read(sctx, stripe);
1831 }
1832 blk_finish_plug(&plug);
1833}
1834
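/*
 * Submit any populated-but-unsubmitted stripes, wait for their read-repair
 * to finish, write the good sectors to the target device for dev-replace,
 * then reset all slots for reuse.
 */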
static int flush_scrub_stripes(struct scrub_ctx *sctx)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct scrub_stripe *stripe;
	const int nr_stripes = sctx->cur_stripe;
	int ret = 0;

	if (!nr_stripes)
		return 0;

	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));

	/* Submit the stripes which are populated but not submitted. */
	if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
		const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);

		submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
	}

	for (int i = 0; i < nr_stripes; i++) {
		stripe = &sctx->stripes[i];

		wait_event(stripe->repair_wait,
			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
	}

	/* Submit for dev-replace. */
	if (sctx->is_dev_replace) {
		/*
		 * For dev-replace, if we know there is something wrong with
		 * metadata, we should immediately abort.
		 */
		for (int i = 0; i < nr_stripes; i++) {
			if (stripe_has_metadata_error(&sctx->stripes[i])) {
				ret = -EIO;
				goto out;
			}
		}
		for (int i = 0; i < nr_stripes; i++) {
			unsigned long good;

			stripe = &sctx->stripes[i];

			ASSERT(stripe->dev == fs_info->dev_replace.srcdev);

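			/*
			 * Only copy the sectors which are covered by an
			 * extent and were read (or repaired) successfully.
			 */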
			bitmap_andnot(&good, &stripe->extent_sector_bitmap,
				      &stripe->error_bitmap, stripe->nr_sectors);
			scrub_write_sectors(sctx, stripe, good, true);
		}
	}

	/* Wait for the above writebacks to finish. */
	for (int i = 0; i < nr_stripes; i++) {
		stripe = &sctx->stripes[i];

		wait_scrub_stripe_io(stripe);
		spin_lock(&sctx->stat_lock);
		sctx->stat.last_physical = stripe->physical + stripe_length(stripe);
		spin_unlock(&sctx->stat_lock);
		scrub_reset_stripe(stripe);
	}
out:
	sctx->cur_stripe = 0;
	return ret;
}

static void raid56_scrub_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

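/*
 * Queue one stripe starting at @logical for scrubbing.
 *
 * Return 0 if a stripe was queued, with *@found_logical_ret set to its
 * logical bytenr. Return >0 if there is no more extent to scrub in the
 * range, or <0 on error.
 */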
static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
			      struct btrfs_device *dev, int mirror_num,
			      u64 logical, u32 length, u64 physical,
			      u64 *found_logical_ret)
{
	struct scrub_stripe *stripe;
	int ret;

	/*
	 * There should always be one slot left, as the caller filling the
	 * last slot must flush all queued stripes.
	 */
	ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);

	/* @found_logical_ret must be specified. */
	ASSERT(found_logical_ret);

	stripe = &sctx->stripes[sctx->cur_stripe];
	scrub_reset_stripe(stripe);
	ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
					   &sctx->csum_path, dev, physical,
					   mirror_num, logical, length, stripe);
	/* Either >0 as no more extents or <0 for error. */
	if (ret)
		return ret;
	*found_logical_ret = stripe->logical;
	sctx->cur_stripe++;

	/* We filled one group, submit it. */
	if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
		const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;

		submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
	}

	/* Last slot used, flush them all. */
	if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
		return flush_scrub_stripes(sctx);
	return 0;
}

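/*
 * Scrub one RAID56 full stripe from the P/Q stripe's point of view.
 *
 * All data stripes are first read and verified (and repaired through other
 * mirrors if possible). Only when every data sector covered by an extent is
 * good do we hand the cached data over to the RAID56 machinery to verify
 * and regenerate the P/Q stripe.
 */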
static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
				      struct btrfs_device *scrub_dev,
				      struct btrfs_block_group *bg,
				      struct btrfs_chunk_map *map,
				      u64 full_stripe_start)
{
	DECLARE_COMPLETION_ONSTACK(io_done);
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_raid_bio *rbio;
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_path extent_path = { 0 };
	struct btrfs_path csum_path = { 0 };
	struct bio *bio;
	struct scrub_stripe *stripe;
	bool all_empty = true;
	const int data_stripes = nr_data_stripes(map);
	unsigned long extent_bitmap = 0;
	u64 length = btrfs_stripe_nr_to_offset(data_stripes);
	int ret;

	ASSERT(sctx->raid56_data_stripes);

	/*
	 * For the data stripe search, we cannot reuse the same extent/csum
	 * paths, as the data stripe bytenr may be smaller than the previous
	 * extent. Thus we have to use our own extent/csum paths.
	 */
	extent_path.search_commit_root = 1;
	extent_path.skip_locking = 1;
	csum_path.search_commit_root = 1;
	csum_path.skip_locking = 1;

	for (int i = 0; i < data_stripes; i++) {
		int stripe_index;
		int rot;
		u64 physical;

		stripe = &sctx->raid56_data_stripes[i];
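		/*
		 * @rot is the full stripe number inside the chunk, which is
		 * also how far the device rotation has advanced: data stripe
		 * @i lives in device slot (i + rot) % num_stripes.
		 */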
		rot = div_u64(full_stripe_start - bg->start,
			      data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
		stripe_index = (i + rot) % map->num_stripes;
		physical = map->stripes[stripe_index].physical +
			   btrfs_stripe_nr_to_offset(rot);

		scrub_reset_stripe(stripe);
		set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
		ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
				map->stripes[stripe_index].dev, physical, 1,
				full_stripe_start + btrfs_stripe_nr_to_offset(i),
				BTRFS_STRIPE_LEN, stripe);
		if (ret < 0)
			goto out;
		/*
		 * No extent in this data stripe; manually mark it initialized
		 * to make the later read submission happy.
		 */
		if (ret > 0) {
			stripe->logical = full_stripe_start +
					  btrfs_stripe_nr_to_offset(i);
			stripe->dev = map->stripes[stripe_index].dev;
			stripe->mirror_num = 1;
			set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
		}
	}

	/* Check if all data stripes are empty. */
	for (int i = 0; i < data_stripes; i++) {
		stripe = &sctx->raid56_data_stripes[i];
		if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
			all_empty = false;
			break;
		}
	}
	if (all_empty) {
		ret = 0;
		goto out;
	}

	for (int i = 0; i < data_stripes; i++) {
		stripe = &sctx->raid56_data_stripes[i];
		scrub_submit_initial_read(sctx, stripe);
	}
	for (int i = 0; i < data_stripes; i++) {
		stripe = &sctx->raid56_data_stripes[i];

		wait_event(stripe->repair_wait,
			   test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
	}
	/* For now, no zoned support for RAID56. */
	ASSERT(!btrfs_is_zoned(sctx->fs_info));

	/*
	 * Now all data stripes are properly verified. Check if we have any
	 * unrepaired sectors; if so, abort immediately, or we could further
	 * corrupt the P/Q stripes.
	 *
	 * During the loop, also populate extent_bitmap.
	 */
	for (int i = 0; i < data_stripes; i++) {
		unsigned long error;

		stripe = &sctx->raid56_data_stripes[i];

		/*
		 * Only check the errors where there is an extent, as we may
		 * hit an empty data stripe while its device is missing.
		 */
		bitmap_and(&error, &stripe->error_bitmap,
			   &stripe->extent_sector_bitmap, stripe->nr_sectors);
		if (!bitmap_empty(&error, stripe->nr_sectors)) {
			btrfs_err(fs_info,
"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
				  full_stripe_start, i, stripe->nr_sectors,
				  &error);
			ret = -EIO;
			goto out;
		}
		bitmap_or(&extent_bitmap, &extent_bitmap,
			  &stripe->extent_sector_bitmap, stripe->nr_sectors);
	}

	/* Now we can check and regenerate the P/Q stripe. */
	bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
	bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
	bio->bi_private = &io_done;
	bio->bi_end_io = raid56_scrub_wait_endio;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
			      &length, &bioc, NULL, NULL);
	if (ret < 0) {
		btrfs_put_bioc(bioc);
		btrfs_bio_counter_dec(fs_info);
		goto out;
	}
	rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
				BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
	btrfs_put_bioc(bioc);
	if (!rbio) {
		ret = -ENOMEM;
		btrfs_bio_counter_dec(fs_info);
		goto out;
	}
	/* Use the recovered stripes as cache to avoid reading them from disk again. */
	for (int i = 0; i < data_stripes; i++) {
		stripe = &sctx->raid56_data_stripes[i];

		raid56_parity_cache_data_pages(rbio, stripe->pages,
				full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
	}
	raid56_parity_submit_scrub_rbio(rbio);
	wait_for_completion_io(&io_done);
	ret = blk_status_to_errno(bio->bi_status);
	bio_put(bio);
	btrfs_bio_counter_dec(fs_info);

out:
	btrfs_release_path(&extent_path);
	btrfs_release_path(&csum_path);
	return ret;
}

/*
 * Scrub one range which can only have a simple mirror based profile.
 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
 * RAID0/RAID10).
 *
 * Since we may need to handle a subset of a block group, we need the
 * @logical_start and @logical_length parameters.
 */
static int scrub_simple_mirror(struct scrub_ctx *sctx,
			       struct btrfs_block_group *bg,
			       u64 logical_start, u64 logical_length,
			       struct btrfs_device *device,
			       u64 physical, int mirror_num)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	const u64 logical_end = logical_start + logical_length;
	u64 cur_logical = logical_start;
	int ret = 0;

	/* The range must be inside the bg. */
	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);

	/* Go through each extent item inside the logical range. */
	while (cur_logical < logical_end) {
		u64 found_logical = U64_MAX;
		u64 cur_physical = physical + cur_logical - logical_start;

		/* Canceled? */
		if (atomic_read(&fs_info->scrub_cancel_req) ||
		    atomic_read(&sctx->cancel_req)) {
			ret = -ECANCELED;
			break;
		}
		/* Paused? */
		if (atomic_read(&fs_info->scrub_pause_req)) {
			/* Push queued extents */
			scrub_blocked_if_needed(fs_info);
		}
		/* Block group removed? */
		spin_lock(&bg->lock);
		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
			spin_unlock(&bg->lock);
			ret = 0;
			break;
		}
		spin_unlock(&bg->lock);

		ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
					 cur_logical, logical_end - cur_logical,
					 cur_physical, &found_logical);
		if (ret > 0) {
			/* No more extents, just update the accounting. */
			spin_lock(&sctx->stat_lock);
			sctx->stat.last_physical = physical + logical_length;
			spin_unlock(&sctx->stat_lock);
			ret = 0;
			break;
		}
		if (ret < 0)
			break;

		/* queue_scrub_stripe() returned 0, @found_logical must be updated. */
		ASSERT(found_logical != U64_MAX);
		cur_logical = found_logical + BTRFS_STRIPE_LEN;

		/* Don't hold the CPU for too long. */
		cond_resched();
	}
	return ret;
}

/* Calculate the full stripe length for simple stripe based profiles */
static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
{
	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			    BTRFS_BLOCK_GROUP_RAID10));

	return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
}

/* Get the logical bytenr for the stripe */
static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
				     struct btrfs_block_group *bg,
				     int stripe_index)
{
	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			    BTRFS_BLOCK_GROUP_RAID10));
	ASSERT(stripe_index < map->num_stripes);

	/*
	 * (stripe_index / sub_stripes) gives how many data stripes we need to
	 * skip.
	 */
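	/*
	 * E.g. for RAID10 with num_stripes == 4 and sub_stripes == 2,
	 * stripe_index 0 and 1 map to the first BTRFS_STRIPE_LEN of the
	 * chunk, while stripe_index 2 and 3 map to the second one.
	 */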
	return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
	       bg->start;
}

/* Get the mirror number for the stripe */
static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
{
	ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			    BTRFS_BLOCK_GROUP_RAID10));
	ASSERT(stripe_index < map->num_stripes);

	/* For RAID0 it's fixed to 1, for RAID10 it alternates 1,2,1,2,... */
	return stripe_index % map->sub_stripes + 1;
}

static int scrub_simple_stripe(struct scrub_ctx *sctx,
			       struct btrfs_block_group *bg,
			       struct btrfs_chunk_map *map,
			       struct btrfs_device *device,
			       int stripe_index)
{
	const u64 logical_increment = simple_stripe_full_stripe_len(map);
	const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
	const u64 orig_physical = map->stripes[stripe_index].physical;
	const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
	u64 cur_logical = orig_logical;
	u64 cur_physical = orig_physical;
	int ret = 0;

	while (cur_logical < bg->start + bg->length) {
		/*
		 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
		 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
		 * this stripe.
		 */
		ret = scrub_simple_mirror(sctx, bg, cur_logical,
					  BTRFS_STRIPE_LEN, device, cur_physical,
					  mirror_num);
		if (ret)
			return ret;
		/* Skip to the next stripe which belongs to the target device. */
		cur_logical += logical_increment;
		/* For the physical offset, we just go to the next stripe. */
		cur_physical += BTRFS_STRIPE_LEN;
	}
	return ret;
}

static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
					   struct btrfs_block_group *bg,
					   struct btrfs_chunk_map *map,
					   struct btrfs_device *scrub_dev,
					   int stripe_index)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
	const u64 chunk_logical = bg->start;
	int ret;
	int ret2;
	u64 physical = map->stripes[stripe_index].physical;
	const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
	const u64 physical_end = physical + dev_stripe_len;
	u64 logical;
	u64 logic_end;
	/* The logical increment after finishing one stripe */
	u64 increment;
	/* Offset inside the chunk */
	u64 offset;
	u64 stripe_logical;

	/* The extent path should be released by now. */
	ASSERT(sctx->extent_path.nodes[0] == NULL);

	scrub_blocked_if_needed(fs_info);

	if (sctx->is_dev_replace &&
	    btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
		mutex_lock(&sctx->wr_lock);
		sctx->write_pointer = physical;
		mutex_unlock(&sctx->wr_lock);
	}

	/* Prepare the extra data stripes used by RAID56. */
	if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ASSERT(sctx->raid56_data_stripes == NULL);

		sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
						    sizeof(struct scrub_stripe),
						    GFP_KERNEL);
		if (!sctx->raid56_data_stripes) {
			ret = -ENOMEM;
			goto out;
		}
		for (int i = 0; i < nr_data_stripes(map); i++) {
			ret = init_scrub_stripe(fs_info,
						&sctx->raid56_data_stripes[i]);
			if (ret < 0)
				goto out;
			sctx->raid56_data_stripes[i].bg = bg;
			sctx->raid56_data_stripes[i].sctx = sctx;
		}
	}
	/*
	 * There used to be a big double loop to handle all profiles using the
	 * same routine, which grew larger and more convoluted over time.
	 *
	 * So here we handle each profile differently, so that simpler
	 * profiles have simpler scrubbing functions.
	 */
	if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
		/*
		 * The above check rules out all complex profiles; the
		 * remaining ones are SINGLE|DUP|RAID1|RAID1C*, which are
		 * simple mirrored duplication without striping.
		 *
		 * Only @physical and @mirror_num need to be calculated using
		 * @stripe_index.
		 */
		ret = scrub_simple_mirror(sctx, bg, bg->start, bg->length,
				scrub_dev, map->stripes[stripe_index].physical,
				stripe_index + 1);
		offset = 0;
		goto out;
	}
	if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
		ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
		offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
		goto out;
	}

	/* Only RAID56 goes through the old code */
	ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
	ret = 0;

	/* Calculate the logical end of the stripe */
	get_raid56_logic_offset(physical_end, stripe_index,
				map, &logic_end, NULL);
	logic_end += chunk_logical;

	/* Initialize @offset in case we need to go to the out: label. */
	get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
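	/*
	 * Advancing BTRFS_STRIPE_LEN physically covers one stripe of each
	 * data stripe, i.e. nr_data_stripes * BTRFS_STRIPE_LEN of logical
	 * space, as parity stripes own no logical addresses.
	 */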
	increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));

	/*
	 * Due to the rotation, for RAID56 it's better to iterate each stripe
	 * using its physical offset.
	 */
	while (physical < physical_end) {
		ret = get_raid56_logic_offset(physical, stripe_index, map,
					      &logical, &stripe_logical);
		logical += chunk_logical;
		if (ret) {
			/* It is a parity stripe. */
			stripe_logical += chunk_logical;
			ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
							 map, stripe_logical);
			spin_lock(&sctx->stat_lock);
			sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN,
						       physical_end);
			spin_unlock(&sctx->stat_lock);
			if (ret)
				goto out;
			goto next;
		}

		/*
		 * Now we're at a data stripe, scrub each extent in the range.
		 *
		 * At this stage, if we ignore the repair part, inside each
		 * data stripe it is no different from the SINGLE profile.
		 * We can reuse scrub_simple_mirror() here, as the repair part
		 * is still based on @mirror_num.
		 */
		ret = scrub_simple_mirror(sctx, bg, logical, BTRFS_STRIPE_LEN,
					  scrub_dev, physical, 1);
		if (ret < 0)
			goto out;
next:
		logical += increment;
		physical += BTRFS_STRIPE_LEN;
		spin_lock(&sctx->stat_lock);
		sctx->stat.last_physical = physical;
		spin_unlock(&sctx->stat_lock);
	}
out:
	ret2 = flush_scrub_stripes(sctx);
	if (!ret)
		ret = ret2;
	btrfs_release_path(&sctx->extent_path);
	btrfs_release_path(&sctx->csum_path);

	if (sctx->raid56_data_stripes) {
		for (int i = 0; i < nr_data_stripes(map); i++)
			release_scrub_stripe(&sctx->raid56_data_stripes[i]);
		kfree(sctx->raid56_data_stripes);
		sctx->raid56_data_stripes = NULL;
	}

	if (sctx->is_dev_replace && ret >= 0) {
		ret2 = sync_write_pointer_for_zoned(sctx,
				chunk_logical + offset,
				map->stripes[stripe_index].physical,
				physical_end);
		if (ret2)
			ret = ret2;
	}

	return ret < 0 ? ret : 0;
}

static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
					  struct btrfs_block_group *bg,
					  struct btrfs_device *scrub_dev,
					  u64 dev_offset,
					  u64 dev_extent_len)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_chunk_map *map;
	int i;
	int ret = 0;

	map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
	if (!map) {
		/*
		 * Might have been an unused block group deleted by the cleaner
		 * kthread or relocation.
		 */
		spin_lock(&bg->lock);
		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
			ret = -EINVAL;
		spin_unlock(&bg->lock);

		return ret;
	}
	if (map->start != bg->start)
		goto out;
	if (map->chunk_len < dev_extent_len)
		goto out;

	for (i = 0; i < map->num_stripes; ++i) {
		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
		    map->stripes[i].physical == dev_offset) {
			ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
			if (ret)
				goto out;
		}
	}
out:
	btrfs_free_chunk_map(map);

	return ret;
}

static int finish_extent_writes_for_zoned(struct btrfs_root *root,
					  struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	btrfs_wait_block_group_reservations(cache);
	btrfs_wait_nocow_writers(cache);
	btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);

	return btrfs_commit_current_transaction(root);
}

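/*
 * Walk all device extents of @scrub_dev inside [@start, @end) using the
 * commit root, look up the block group backing each device extent, mark it
 * RO (mandatory for dev-replace, best-effort for scrub) and scrub it one
 * chunk at a time.
 */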
static noinline_for_stack
int scrub_enumerate_chunks(struct scrub_ctx *sctx,
			   struct btrfs_device *scrub_dev, u64 start, u64 end)
{
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	u64 chunk_offset;
	int ret = 0;
	int ro_set;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_block_group *cache;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = scrub_dev->devid;
	key.offset = 0ull;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		u64 dev_extent_len;

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					break;
				if (ret > 0) {
					ret = 0;
					break;
				}
			} else {
				ret = 0;
			}
		}

		l = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.objectid != scrub_dev->devid)
			break;

		if (found_key.type != BTRFS_DEV_EXTENT_KEY)
			break;

		if (found_key.offset >= end)
			break;

		if (found_key.offset < key.offset)
			break;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		dev_extent_len = btrfs_dev_extent_length(l, dev_extent);

		if (found_key.offset + dev_extent_len <= start)
			goto skip;

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);

		/*
		 * Get a reference on the corresponding block group to prevent
		 * the chunk from going away while we scrub it.
		 */
		cache = btrfs_lookup_block_group(fs_info, chunk_offset);

		/*
		 * Some chunks are removed but not committed to disk yet,
		 * continue scrubbing.
		 */
		if (!cache)
			goto skip;

		ASSERT(cache->start <= chunk_offset);
		/*
		 * We are using the commit root to search for device extents, so
		 * that means we could have found a device extent item from a
		 * block group that was deleted in the current transaction. The
		 * logical start offset of the deleted block group, stored at
		 * @chunk_offset, might be part of the logical address range of
		 * a new block group (which uses different physical extents).
		 * In this case btrfs_lookup_block_group() has returned the new
		 * block group, and its start address is less than @chunk_offset.
		 *
		 * We skip such new block groups, because it's pointless to
		 * process them, as we won't find their extents because we search
		 * for them using the commit root of the extent tree. For a device
		 * replace it's also fine to skip it, we won't miss copying them
		 * to the target device because we have the write duplication
		 * setup through the regular write path (by btrfs_map_block()),
		 * and we have committed a transaction when we started the device
		 * replace, right after setting up the device replace state.
		 */
		if (cache->start < chunk_offset) {
			btrfs_put_block_group(cache);
			goto skip;
		}

		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
				btrfs_put_block_group(cache);
				goto skip;
			}
		}

		/*
		 * Make sure that while we are scrubbing the corresponding block
		 * group doesn't get its logical address and its device extents
		 * reused for another block group, which can possibly be of a
		 * different type and different profile. We do this to prevent
		 * false error detections and crashes due to bogus attempts to
		 * repair extents.
		 */
		spin_lock(&cache->lock);
		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
			spin_unlock(&cache->lock);
			btrfs_put_block_group(cache);
			goto skip;
		}
		btrfs_freeze_block_group(cache);
		spin_unlock(&cache->lock);

		/*
		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
		 * to avoid a deadlock caused by:
		 * btrfs_inc_block_group_ro()
		 * -> btrfs_wait_for_commit()
		 * -> btrfs_commit_transaction()
		 * -> btrfs_scrub_pause()
		 */
		scrub_pause_on(fs_info);

		/*
		 * Don't do chunk preallocation for scrub.
		 *
		 * This is especially important for SYSTEM bgs, or we can hit
		 * -EFBIG from btrfs_finish_chunk_alloc() like:
		 * 1. The only SYSTEM bg is marked RO.
		 *    Since SYSTEM bg is small, that's pretty common.
		 * 2. New SYSTEM bg will be allocated
		 *    Because the regular version will allocate a new chunk.
		 * 3. New SYSTEM bg is empty and will get cleaned up
		 *    Before cleanup really happens, it's marked RO again.
		 * 4. Empty SYSTEM bg gets scrubbed
		 *    We go back to 2.
		 *
		 * This can easily boost the amount of SYSTEM chunks if the
		 * cleaner thread can't be triggered fast enough, and use up
		 * all space of btrfs_super_block::sys_chunk_array.
		 *
		 * While for dev replace, we need to try our best to mark the
		 * block group RO, to prevent a race between:
		 * - Write duplication
		 *   Contains the latest data.
		 * - Scrub copy
		 *   Contains data from the commit tree.
		 *
		 * If the target block group is not marked RO, nocow writes can
		 * be overwritten by scrub copy, causing data corruption.
		 * So for dev-replace, it's not allowed to continue if a block
		 * group is not RO.
		 */
		ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
		if (!ret && sctx->is_dev_replace) {
			ret = finish_extent_writes_for_zoned(root, cache);
			if (ret) {
				btrfs_dec_block_group_ro(cache);
				scrub_pause_off(fs_info);
				btrfs_put_block_group(cache);
				break;
			}
		}

		if (ret == 0) {
			ro_set = 1;
		} else if (ret == -ENOSPC && !sctx->is_dev_replace &&
			   !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
			/*
			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
			 * failed to create a new chunk for metadata.
			 * That is not a problem for scrub, because metadata
			 * is always COWed, and our scrub pauses transaction
			 * commits.
			 *
			 * For RAID56 chunks, we have to mark them read-only
			 * for scrub, as later we would use our own cache
			 * out of the RAID56 realm.
			 * Thus we want the RAID56 bg to be marked RO to
			 * prevent RMW from screwing up our cache.
			 */
			ro_set = 0;
		} else if (ret == -ETXTBSY) {
			btrfs_warn(fs_info,
		   "skipping scrub of block group %llu due to active swapfile",
				   cache->start);
			scrub_pause_off(fs_info);
			ret = 0;
			goto skip_unfreeze;
		} else {
			btrfs_warn(fs_info,
				   "failed setting block group ro: %d", ret);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
			scrub_pause_off(fs_info);
			break;
		}

		/*
		 * Now the target block group is marked RO, wait for nocow
		 * writes to finish before dev-replace.
		 * COW is fine, as COW never overwrites extents in the commit
		 * tree.
		 */
		if (sctx->is_dev_replace) {
			btrfs_wait_nocow_writers(cache);
			btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
		}

		scrub_pause_off(fs_info);
		down_write(&dev_replace->rwsem);
		dev_replace->cursor_right = found_key.offset + dev_extent_len;
		dev_replace->cursor_left = found_key.offset;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
				  dev_extent_len);
		if (sctx->is_dev_replace &&
		    !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
						      cache, found_key.offset))
			ro_set = 0;

		down_write(&dev_replace->rwsem);
		dev_replace->cursor_left = dev_replace->cursor_right;
		dev_replace->item_needs_writeback = 1;
		up_write(&dev_replace->rwsem);

		if (ro_set)
			btrfs_dec_block_group_ro(cache);

		/*
		 * We might have prevented the cleaner kthread from deleting
		 * this block group if it was already unused because we raced
		 * and set it to RO mode first. So add it back to the unused
		 * list, otherwise it might not ever be deleted unless a manual
		 * balance is triggered or it becomes used and unused again.
		 */
		spin_lock(&cache->lock);
		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
			spin_unlock(&cache->lock);
			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
				btrfs_discard_queue_work(&fs_info->discard_ctl,
							 cache);
			else
				btrfs_mark_bg_unused(cache);
		} else {
			spin_unlock(&cache->lock);
		}
skip_unfreeze:
		btrfs_unfreeze_block_group(cache);
		btrfs_put_block_group(cache);
		if (ret)
			break;
		if (sctx->is_dev_replace &&
		    atomic64_read(&dev_replace->num_write_errors) > 0) {
			ret = -EIO;
			break;
		}
		if (sctx->stat.malloc_errors > 0) {
			ret = -ENOMEM;
			break;
		}
skip:
		key.offset = found_key.offset + dev_extent_len;
		btrfs_release_path(path);
	}

	btrfs_free_path(path);

	return ret;
}

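/*
 * Read one super block copy of @dev at @physical and verify its checksum,
 * generation and overall validity.
 */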
static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
			   struct page *page, u64 physical, u64 generation)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct bio_vec bvec;
	struct bio bio;
	struct btrfs_super_block *sb = page_address(page);
	int ret;

	bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
	__bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
	ret = submit_bio_wait(&bio);
	bio_uninit(&bio);

	if (ret < 0)
		return ret;
	ret = btrfs_check_super_csum(fs_info, sb);
	if (ret != 0) {
		btrfs_err_rl(fs_info,
			"super block at physical %llu devid %llu has bad csum",
			physical, dev->devid);
		return -EIO;
	}
	if (btrfs_super_generation(sb) != generation) {
		btrfs_err_rl(fs_info,
"super block at physical %llu devid %llu has bad generation %llu expect %llu",
			     physical, dev->devid,
			     btrfs_super_generation(sb), generation);
		return -EUCLEAN;
	}

	return btrfs_validate_super(fs_info, sb, -1);
}

static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
					   struct btrfs_device *scrub_dev)
{
	int i;
	u64 bytenr;
	u64 gen;
	int ret = 0;
	struct page *page;
	struct btrfs_fs_info *fs_info = sctx->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		spin_lock(&sctx->stat_lock);
		sctx->stat.malloc_errors++;
		spin_unlock(&sctx->stat_lock);
		return -ENOMEM;
	}

	/* Seed devices of a new filesystem have their own generation. */
	if (scrub_dev->fs_devices != fs_info->fs_devices)
		gen = scrub_dev->generation;
	else
		gen = btrfs_get_last_trans_committed(fs_info);

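	/*
	 * Check each super block copy of the device. Failures are only
	 * accounted in sctx->stat.super_errors; the scrub itself continues,
	 * hence the unconditional return 0 below.
	 */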
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
		if (ret == -ENOENT)
			break;

		if (ret) {
			spin_lock(&sctx->stat_lock);
			sctx->stat.super_errors++;
			spin_unlock(&sctx->stat_lock);
			continue;
		}

		if (bytenr + BTRFS_SUPER_INFO_SIZE >
		    scrub_dev->commit_total_bytes)
			break;
		if (!btrfs_check_super_location(scrub_dev, bytenr))
			continue;

		ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
		if (ret) {
			spin_lock(&sctx->stat_lock);
			sctx->stat.super_errors++;
			spin_unlock(&sctx->stat_lock);
		}
	}
	__free_page(page);
	return 0;
}

static void scrub_workers_put(struct btrfs_fs_info *fs_info)
{
	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
					&fs_info->scrub_lock)) {
		struct workqueue_struct *scrub_workers = fs_info->scrub_workers;

		fs_info->scrub_workers = NULL;
		mutex_unlock(&fs_info->scrub_lock);

		if (scrub_workers)
			destroy_workqueue(scrub_workers);
	}
}

/*
 * Get a reference count on fs_info->scrub_workers. Start the workers if
 * necessary.
 */
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
{
	struct workqueue_struct *scrub_workers = NULL;
	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
	int max_active = fs_info->thread_pool_size;

	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
		return 0;

	scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
	if (!scrub_workers)
		return -ENOMEM;

	mutex_lock(&fs_info->scrub_lock);
	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
		ASSERT(fs_info->scrub_workers == NULL);
		fs_info->scrub_workers = scrub_workers;
		refcount_set(&fs_info->scrub_workers_refcnt, 1);
		mutex_unlock(&fs_info->scrub_lock);
		return 0;
	}
	/* Other thread raced in and created the workers for us */
	refcount_inc(&fs_info->scrub_workers_refcnt);
	mutex_unlock(&fs_info->scrub_lock);

	destroy_workqueue(scrub_workers);
	return 0;
}

int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct scrub_ctx *sctx;
	int ret;
	struct btrfs_device *dev;
	unsigned int nofs_flag;
	bool need_commit = false;

	if (btrfs_fs_closing(fs_info))
		return -EAGAIN;

	/* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
	ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);

	/*
	 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
	 * value (max nodesize / min sectorsize), thus nodesize should always
	 * be fine.
	 */
	ASSERT(fs_info->nodesize <=
	       SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);

	/* Allocate outside of device_list_mutex */
	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
	if (IS_ERR(sctx))
		return PTR_ERR(sctx);

	ret = scrub_workers_get(fs_info);
	if (ret)
		goto out_free_ctx;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
		     !is_dev_replace)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -ENODEV;
		goto out;
	}

	if (!is_dev_replace && !readonly &&
	    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		btrfs_err_in_rcu(fs_info,
			"scrub on devid %llu: filesystem on %s is not writable",
			devid, btrfs_dev_name(dev));
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&fs_info->scrub_lock);
	if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EIO;
		goto out;
	}

	down_read(&fs_info->dev_replace.rwsem);
	if (dev->scrub_ctx ||
	    (!is_dev_replace &&
	     btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
		up_read(&fs_info->dev_replace.rwsem);
		mutex_unlock(&fs_info->scrub_lock);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		ret = -EINPROGRESS;
		goto out;
	}
	up_read(&fs_info->dev_replace.rwsem);

	sctx->readonly = readonly;
	dev->scrub_ctx = sctx;
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	/*
	 * By checking @scrub_pause_req here, we can avoid racing between
	 * committing a transaction and scrubbing.
	 */
	__scrub_blocked_if_needed(fs_info);
	atomic_inc(&fs_info->scrubs_running);
	mutex_unlock(&fs_info->scrub_lock);

	/*
	 * In order to avoid deadlock with reclaim when there is a transaction
	 * trying to pause scrub, make sure we use GFP_NOFS for all the
	 * allocations done by our callees. The pausing request is done when
	 * the transaction commit starts, and it blocks the transaction until
	 * scrub is paused (done at specific points at scrub_stripe() or
	 * right above before incrementing fs_info->scrubs_running).
	 */
	nofs_flag = memalloc_nofs_save();
	if (!is_dev_replace) {
		u64 old_super_errors;

		spin_lock(&sctx->stat_lock);
		old_super_errors = sctx->stat.super_errors;
		spin_unlock(&sctx->stat_lock);

		btrfs_info(fs_info, "scrub: started on devid %llu", devid);
		/*
		 * By holding the device list mutex, we can kick off writing
		 * the super blocks in log tree sync.
		 */
		mutex_lock(&fs_info->fs_devices->device_list_mutex);
		ret = scrub_supers(sctx, dev);
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);

		spin_lock(&sctx->stat_lock);
		/*
		 * Super block errors were found, but we cannot commit a
		 * transaction in the current context, since
		 * btrfs_commit_transaction() needs to pause the currently
		 * running scrub (held by ourselves).
		 */
		if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
			need_commit = true;
		spin_unlock(&sctx->stat_lock);
	}

	if (!ret)
		ret = scrub_enumerate_chunks(sctx, dev, start, end);
	memalloc_nofs_restore(nofs_flag);

	atomic_dec(&fs_info->scrubs_running);
	wake_up(&fs_info->scrub_pause_wait);

	if (progress)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	if (!is_dev_replace)
		btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
			   ret ? "not finished" : "finished", devid, ret);

	mutex_lock(&fs_info->scrub_lock);
	dev->scrub_ctx = NULL;
	mutex_unlock(&fs_info->scrub_lock);

	scrub_workers_put(fs_info);
	scrub_put_ctx(sctx);

	/*
	 * We found some super block errors earlier. Now that scrub has
	 * finished, try to force a transaction commit to fix them.
	 */
	if (need_commit) {
		struct btrfs_trans_handle *trans;

		trans = btrfs_start_transaction(fs_info->tree_root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_err(fs_info,
	"scrub: failed to start transaction to fix super block errors: %d", ret);
			return ret;
		}
		ret = btrfs_commit_transaction(trans);
		if (ret < 0)
			btrfs_err(fs_info,
	"scrub: failed to commit transaction to fix super block errors: %d", ret);
	}
	return ret;
out:
	scrub_workers_put(fs_info);
out_free_ctx:
	scrub_free_ctx(sctx);

	return ret;
}

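/* Block until every running scrub has reached its pause point. */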
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	atomic_inc(&fs_info->scrub_pause_req);
	while (atomic_read(&fs_info->scrubs_paused) !=
	       atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_paused) ==
			   atomic_read(&fs_info->scrubs_running));
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);
}

void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
	atomic_dec(&fs_info->scrub_pause_req);
	wake_up(&fs_info->scrub_pause_wait);
}

int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	if (!atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->scrub_cancel_req);
	while (atomic_read(&fs_info->scrubs_running)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrubs_running) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
	atomic_dec(&fs_info->scrub_cancel_req);
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct scrub_ctx *sctx;

	mutex_lock(&fs_info->scrub_lock);
	sctx = dev->scrub_ctx;
	if (!sctx) {
		mutex_unlock(&fs_info->scrub_lock);
		return -ENOTCONN;
	}
	atomic_inc(&sctx->cancel_req);
	while (dev->scrub_ctx) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   dev->scrub_ctx == NULL);
		mutex_lock(&fs_info->scrub_lock);
	}
	mutex_unlock(&fs_info->scrub_lock);

	return 0;
}

int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress)
{
	struct btrfs_dev_lookup_args args = { .devid = devid };
	struct btrfs_device *dev;
	struct scrub_ctx *sctx = NULL;

	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, &args);
	if (dev)
		sctx = dev->scrub_ctx;
	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
1/*
2 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/blkdev.h>
20#include <linux/ratelimit.h>
21#include "ctree.h"
22#include "volumes.h"
23#include "disk-io.h"
24#include "ordered-data.h"
25#include "transaction.h"
26#include "backref.h"
27#include "extent_io.h"
28#include "dev-replace.h"
29#include "check-integrity.h"
30#include "rcu-string.h"
31#include "raid56.h"
32
33/*
34 * This is only the first step towards a full-features scrub. It reads all
35 * extent and super block and verifies the checksums. In case a bad checksum
36 * is found or the extent cannot be read, good data will be written back if
37 * any can be found.
38 *
39 * Future enhancements:
40 * - In case an unrepairable extent is encountered, track which files are
41 * affected and report them
42 * - track and record media errors, throw out bad devices
43 * - add a mode to also read unallocated space
44 */
45
46struct scrub_block;
47struct scrub_ctx;
48
49/*
50 * the following three values only influence the performance.
51 * The last one configures the number of parallel and outstanding I/O
52 * operations. The first two values configure an upper limit for the number
53 * of (dynamically allocated) pages that are added to a bio.
54 */
55#define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */
56#define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */
57#define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */
58
59/*
60 * the following value times PAGE_SIZE needs to be large enough to match the
61 * largest node/leaf/sector size that shall be supported.
62 * Values larger than BTRFS_STRIPE_LEN are not supported.
63 */
64#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
65
66struct scrub_recover {
67 atomic_t refs;
68 struct btrfs_bio *bbio;
69 u64 map_length;
70};
71
72struct scrub_page {
73 struct scrub_block *sblock;
74 struct page *page;
75 struct btrfs_device *dev;
76 struct list_head list;
77 u64 flags; /* extent flags */
78 u64 generation;
79 u64 logical;
80 u64 physical;
81 u64 physical_for_dev_replace;
82 atomic_t refs;
83 struct {
84 unsigned int mirror_num:8;
85 unsigned int have_csum:1;
86 unsigned int io_error:1;
87 };
88 u8 csum[BTRFS_CSUM_SIZE];
89
90 struct scrub_recover *recover;
91};
92
93struct scrub_bio {
94 int index;
95 struct scrub_ctx *sctx;
96 struct btrfs_device *dev;
97 struct bio *bio;
98 int err;
99 u64 logical;
100 u64 physical;
101#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
102 struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
103#else
104 struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
105#endif
106 int page_count;
107 int next_free;
108 struct btrfs_work work;
109};
110
111struct scrub_block {
112 struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
113 int page_count;
114 atomic_t outstanding_pages;
115 atomic_t refs; /* free mem on transition to zero */
116 struct scrub_ctx *sctx;
117 struct scrub_parity *sparity;
118 struct {
119 unsigned int header_error:1;
120 unsigned int checksum_error:1;
121 unsigned int no_io_error_seen:1;
122 unsigned int generation_error:1; /* also sets header_error */
123
124 /* The following is for the data used to check parity */
125 /* It is for the data with checksum */
126 unsigned int data_corrected:1;
127 };
128 struct btrfs_work work;
129};
130
131/* Used for the chunks with parity stripe such RAID5/6 */
132struct scrub_parity {
133 struct scrub_ctx *sctx;
134
135 struct btrfs_device *scrub_dev;
136
137 u64 logic_start;
138
139 u64 logic_end;
140
141 int nsectors;
142
143 int stripe_len;
144
145 atomic_t refs;
146
147 struct list_head spages;
148
149 /* Work of parity check and repair */
150 struct btrfs_work work;
151
152 /* Mark the parity blocks which have data */
153 unsigned long *dbitmap;
154
155 /*
156 * Mark the parity blocks which have data, but errors happen when
157 * read data or check data
158 */
159 unsigned long *ebitmap;
160
161 unsigned long bitmap[0];
162};
163
164struct scrub_wr_ctx {
165 struct scrub_bio *wr_curr_bio;
166 struct btrfs_device *tgtdev;
167 int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
168 atomic_t flush_all_writes;
169 struct mutex wr_lock;
170};
171
172struct scrub_ctx {
173 struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
174 struct btrfs_root *dev_root;
175 int first_free;
176 int curr;
177 atomic_t bios_in_flight;
178 atomic_t workers_pending;
179 spinlock_t list_lock;
180 wait_queue_head_t list_wait;
181 u16 csum_size;
182 struct list_head csum_list;
183 atomic_t cancel_req;
184 int readonly;
185 int pages_per_rd_bio;
186 u32 sectorsize;
187 u32 nodesize;
188
189 int is_dev_replace;
190 struct scrub_wr_ctx wr_ctx;
191
192 /*
193 * statistics
194 */
195 struct btrfs_scrub_progress stat;
196 spinlock_t stat_lock;
197
198 /*
199 * Use a ref counter to avoid use-after-free issues. Scrub workers
200 * decrement bios_in_flight and workers_pending and then do a wakeup
201 * on the list_wait wait queue. We must ensure the main scrub task
202 * doesn't free the scrub context before or while the workers are
203 * doing the wakeup() call.
204 */
205 atomic_t refs;
206};
207
208struct scrub_fixup_nodatasum {
209 struct scrub_ctx *sctx;
210 struct btrfs_device *dev;
211 u64 logical;
212 struct btrfs_root *root;
213 struct btrfs_work work;
214 int mirror_num;
215};
216
217struct scrub_nocow_inode {
218 u64 inum;
219 u64 offset;
220 u64 root;
221 struct list_head list;
222};
223
224struct scrub_copy_nocow_ctx {
225 struct scrub_ctx *sctx;
226 u64 logical;
227 u64 len;
228 int mirror_num;
229 u64 physical_for_dev_replace;
230 struct list_head inodes;
231 struct btrfs_work work;
232};
233
234struct scrub_warning {
235 struct btrfs_path *path;
236 u64 extent_item_size;
237 const char *errstr;
238 sector_t sector;
239 u64 logical;
240 struct btrfs_device *dev;
241};
242
243static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
244static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
245static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
246static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
247static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
248static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
249 struct scrub_block *sblocks_for_recheck);
250static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
251 struct scrub_block *sblock,
252 int retry_failed_mirror);
253static void scrub_recheck_block_checksum(struct scrub_block *sblock);
254static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
255 struct scrub_block *sblock_good);
256static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
257 struct scrub_block *sblock_good,
258 int page_num, int force_write);
259static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
260static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
261 int page_num);
262static int scrub_checksum_data(struct scrub_block *sblock);
263static int scrub_checksum_tree_block(struct scrub_block *sblock);
264static int scrub_checksum_super(struct scrub_block *sblock);
265static void scrub_block_get(struct scrub_block *sblock);
266static void scrub_block_put(struct scrub_block *sblock);
267static void scrub_page_get(struct scrub_page *spage);
268static void scrub_page_put(struct scrub_page *spage);
269static void scrub_parity_get(struct scrub_parity *sparity);
270static void scrub_parity_put(struct scrub_parity *sparity);
271static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
272 struct scrub_page *spage);
273static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
274 u64 physical, struct btrfs_device *dev, u64 flags,
275 u64 gen, int mirror_num, u8 *csum, int force,
276 u64 physical_for_dev_replace);
277static void scrub_bio_end_io(struct bio *bio);
278static void scrub_bio_end_io_worker(struct btrfs_work *work);
279static void scrub_block_complete(struct scrub_block *sblock);
280static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
281 u64 extent_logical, u64 extent_len,
282 u64 *extent_physical,
283 struct btrfs_device **extent_dev,
284 int *extent_mirror_num);
285static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
286 struct scrub_wr_ctx *wr_ctx,
287 struct btrfs_fs_info *fs_info,
288 struct btrfs_device *dev,
289 int is_dev_replace);
290static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
291static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
292 struct scrub_page *spage);
293static void scrub_wr_submit(struct scrub_ctx *sctx);
294static void scrub_wr_bio_end_io(struct bio *bio);
295static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
296static int write_page_nocow(struct scrub_ctx *sctx,
297 u64 physical_for_dev_replace, struct page *page);
298static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
299 struct scrub_copy_nocow_ctx *ctx);
300static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
301 int mirror_num, u64 physical_for_dev_replace);
302static void copy_nocow_pages_worker(struct btrfs_work *work);
303static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
304static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
305static void scrub_put_ctx(struct scrub_ctx *sctx);
306
307
308static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
309{
310 atomic_inc(&sctx->refs);
311 atomic_inc(&sctx->bios_in_flight);
312}
313
314static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
315{
316 atomic_dec(&sctx->bios_in_flight);
317 wake_up(&sctx->list_wait);
318 scrub_put_ctx(sctx);
319}
320
321static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
322{
323 while (atomic_read(&fs_info->scrub_pause_req)) {
324 mutex_unlock(&fs_info->scrub_lock);
325 wait_event(fs_info->scrub_pause_wait,
326 atomic_read(&fs_info->scrub_pause_req) == 0);
327 mutex_lock(&fs_info->scrub_lock);
328 }
329}
330
331static void scrub_pause_on(struct btrfs_fs_info *fs_info)
332{
333 atomic_inc(&fs_info->scrubs_paused);
334 wake_up(&fs_info->scrub_pause_wait);
335}
336
337static void scrub_pause_off(struct btrfs_fs_info *fs_info)
338{
339 mutex_lock(&fs_info->scrub_lock);
340 __scrub_blocked_if_needed(fs_info);
341 atomic_dec(&fs_info->scrubs_paused);
342 mutex_unlock(&fs_info->scrub_lock);
343
344 wake_up(&fs_info->scrub_pause_wait);
345}
346
347static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
348{
349 scrub_pause_on(fs_info);
350 scrub_pause_off(fs_info);
351}
352
353/*
354 * used for workers that require transaction commits (i.e., for the
355 * NOCOW case)
356 */
357static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
358{
359 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
360
361 atomic_inc(&sctx->refs);
362 /*
363 * increment scrubs_running to prevent cancel requests from
364 * completing as long as a worker is running. we must also
365 * increment scrubs_paused to prevent deadlocking on pause
366 * requests used for transactions commits (as the worker uses a
367 * transaction context). it is safe to regard the worker
368 * as paused for all matters practical. effectively, we only
369 * avoid cancellation requests from completing.
370 */
371 mutex_lock(&fs_info->scrub_lock);
372 atomic_inc(&fs_info->scrubs_running);
373 atomic_inc(&fs_info->scrubs_paused);
374 mutex_unlock(&fs_info->scrub_lock);
375
376 /*
377 * check if @scrubs_running=@scrubs_paused condition
378 * inside wait_event() is not an atomic operation.
379 * which means we may inc/dec @scrub_running/paused
380 * at any time. Let's wake up @scrub_pause_wait as
381 * much as we can to let commit transaction blocked less.
382 */
383 wake_up(&fs_info->scrub_pause_wait);
384
385 atomic_inc(&sctx->workers_pending);
386}
387
388/* used for workers that require transaction commits */
389static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
390{
391 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
392
393 /*
394 * see scrub_pending_trans_workers_inc() why we're pretending
395 * to be paused in the scrub counters
396 */
397 mutex_lock(&fs_info->scrub_lock);
398 atomic_dec(&fs_info->scrubs_running);
399 atomic_dec(&fs_info->scrubs_paused);
400 mutex_unlock(&fs_info->scrub_lock);
401 atomic_dec(&sctx->workers_pending);
402 wake_up(&fs_info->scrub_pause_wait);
403 wake_up(&sctx->list_wait);
404 scrub_put_ctx(sctx);
405}
406
407static void scrub_free_csums(struct scrub_ctx *sctx)
408{
409 while (!list_empty(&sctx->csum_list)) {
410 struct btrfs_ordered_sum *sum;
411 sum = list_first_entry(&sctx->csum_list,
412 struct btrfs_ordered_sum, list);
413 list_del(&sum->list);
414 kfree(sum);
415 }
416}
417
418static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
419{
420 int i;
421
422 if (!sctx)
423 return;
424
425 scrub_free_wr_ctx(&sctx->wr_ctx);
426
427 /* this can happen when scrub is cancelled */
428 if (sctx->curr != -1) {
429 struct scrub_bio *sbio = sctx->bios[sctx->curr];
430
431 for (i = 0; i < sbio->page_count; i++) {
432 WARN_ON(!sbio->pagev[i]->page);
433 scrub_block_put(sbio->pagev[i]->sblock);
434 }
435 bio_put(sbio->bio);
436 }
437
438 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
439 struct scrub_bio *sbio = sctx->bios[i];
440
441 if (!sbio)
442 break;
443 kfree(sbio);
444 }
445
446 scrub_free_csums(sctx);
447 kfree(sctx);
448}
449
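/* Drop a context reference; the last put frees the context. */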
450static void scrub_put_ctx(struct scrub_ctx *sctx)
451{
452 if (atomic_dec_and_test(&sctx->refs))
453 scrub_free_ctx(sctx);
454}
455
456static noinline_for_stack
457struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
458{
459 struct scrub_ctx *sctx;
460 int i;
461 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
462 int ret;
463
464 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
465 if (!sctx)
466 goto nomem;
467 atomic_set(&sctx->refs, 1);
468 sctx->is_dev_replace = is_dev_replace;
469 sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
470 sctx->curr = -1;
471 sctx->dev_root = dev->dev_root;
472 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
473 struct scrub_bio *sbio;
474
475 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
476 if (!sbio)
477 goto nomem;
478 sctx->bios[i] = sbio;
479
480 sbio->index = i;
481 sbio->sctx = sctx;
482 sbio->page_count = 0;
483 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
484 scrub_bio_end_io_worker, NULL, NULL);
485
486 if (i != SCRUB_BIOS_PER_SCTX - 1)
487 sctx->bios[i]->next_free = i + 1;
488 else
489 sctx->bios[i]->next_free = -1;
490 }
491 sctx->first_free = 0;
492 sctx->nodesize = dev->dev_root->nodesize;
493 sctx->sectorsize = dev->dev_root->sectorsize;
494 atomic_set(&sctx->bios_in_flight, 0);
495 atomic_set(&sctx->workers_pending, 0);
496 atomic_set(&sctx->cancel_req, 0);
497 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
498 INIT_LIST_HEAD(&sctx->csum_list);
499
500 spin_lock_init(&sctx->list_lock);
501 spin_lock_init(&sctx->stat_lock);
502 init_waitqueue_head(&sctx->list_wait);
503
504 ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
505 fs_info->dev_replace.tgtdev, is_dev_replace);
506 if (ret) {
507 scrub_free_ctx(sctx);
508 return ERR_PTR(ret);
509 }
510 return sctx;
511
512nomem:
513 scrub_free_ctx(sctx);
514 return ERR_PTR(-ENOMEM);
515}
516
517static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
518 void *warn_ctx)
519{
520 u64 isize;
521 u32 nlink;
522 int ret;
523 int i;
524 struct extent_buffer *eb;
525 struct btrfs_inode_item *inode_item;
526 struct scrub_warning *swarn = warn_ctx;
527 struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
528 struct inode_fs_paths *ipath = NULL;
529 struct btrfs_root *local_root;
530 struct btrfs_key root_key;
531 struct btrfs_key key;
532
533 root_key.objectid = root;
534 root_key.type = BTRFS_ROOT_ITEM_KEY;
535 root_key.offset = (u64)-1;
536 local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
537 if (IS_ERR(local_root)) {
538 ret = PTR_ERR(local_root);
539 goto err;
540 }
541
542 /*
543 * this makes the path point to (inum INODE_ITEM ioff)
544 */
545 key.objectid = inum;
546 key.type = BTRFS_INODE_ITEM_KEY;
547 key.offset = 0;
548
549 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
550 if (ret) {
551 btrfs_release_path(swarn->path);
552 goto err;
553 }
554
555 eb = swarn->path->nodes[0];
556 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
557 struct btrfs_inode_item);
558 isize = btrfs_inode_size(eb, inode_item);
559 nlink = btrfs_inode_nlink(eb, inode_item);
560 btrfs_release_path(swarn->path);
561
562 ipath = init_ipath(4096, local_root, swarn->path);
563 if (IS_ERR(ipath)) {
564 ret = PTR_ERR(ipath);
565 ipath = NULL;
566 goto err;
567 }
568 ret = paths_from_inode(inum, ipath);
569
570 if (ret < 0)
571 goto err;
572
	/*
	 * We deliberately ignore the fact that ipath might have been too
	 * small to hold all of the paths here.
	 */
577 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
578 btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
579 "%s, sector %llu, root %llu, inode %llu, offset %llu, "
580 "length %llu, links %u (path: %s)", swarn->errstr,
581 swarn->logical, rcu_str_deref(swarn->dev->name),
582 (unsigned long long)swarn->sector, root, inum, offset,
583 min(isize - offset, (u64)PAGE_SIZE), nlink,
584 (char *)(unsigned long)ipath->fspath->val[i]);
585
586 free_ipath(ipath);
587 return 0;
588
589err:
590 btrfs_warn_in_rcu(fs_info, "%s at logical %llu on dev "
591 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
592 "resolving failed with ret=%d", swarn->errstr,
593 swarn->logical, rcu_str_deref(swarn->dev->name),
594 (unsigned long long)swarn->sector, root, inum, offset, ret);
595
596 free_ipath(ipath);
597 return 0;
598}
599
600static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
601{
602 struct btrfs_device *dev;
603 struct btrfs_fs_info *fs_info;
604 struct btrfs_path *path;
605 struct btrfs_key found_key;
606 struct extent_buffer *eb;
607 struct btrfs_extent_item *ei;
608 struct scrub_warning swarn;
609 unsigned long ptr = 0;
610 u64 extent_item_pos;
611 u64 flags = 0;
612 u64 ref_root;
613 u32 item_size;
614 u8 ref_level = 0;
615 int ret;
616
617 WARN_ON(sblock->page_count < 1);
618 dev = sblock->pagev[0]->dev;
619 fs_info = sblock->sctx->dev_root->fs_info;
620
621 path = btrfs_alloc_path();
622 if (!path)
623 return;
624
625 swarn.sector = (sblock->pagev[0]->physical) >> 9;
626 swarn.logical = sblock->pagev[0]->logical;
627 swarn.errstr = errstr;
628 swarn.dev = NULL;
629
630 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
631 &flags);
632 if (ret < 0)
633 goto out;
634
635 extent_item_pos = swarn.logical - found_key.objectid;
636 swarn.extent_item_size = found_key.offset;
637
638 eb = path->nodes[0];
639 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
640 item_size = btrfs_item_size_nr(eb, path->slots[0]);
641
642 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
643 do {
644 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
645 item_size, &ref_root,
646 &ref_level);
647 btrfs_warn_in_rcu(fs_info,
648 "%s at logical %llu on dev %s, "
649 "sector %llu: metadata %s (level %d) in tree "
650 "%llu", errstr, swarn.logical,
651 rcu_str_deref(dev->name),
652 (unsigned long long)swarn.sector,
653 ref_level ? "node" : "leaf",
654 ret < 0 ? -1 : ref_level,
655 ret < 0 ? -1 : ref_root);
656 } while (ret != 1);
657 btrfs_release_path(path);
658 } else {
659 btrfs_release_path(path);
660 swarn.path = path;
661 swarn.dev = dev;
662 iterate_extent_inodes(fs_info, found_key.objectid,
663 extent_item_pos, 1,
664 scrub_print_warning_inode, &swarn);
665 }
666
667out:
668 btrfs_free_path(path);
669}
670
671static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
672{
673 struct page *page = NULL;
674 unsigned long index;
675 struct scrub_fixup_nodatasum *fixup = fixup_ctx;
676 int ret;
677 int corrected = 0;
678 struct btrfs_key key;
679 struct inode *inode = NULL;
680 struct btrfs_fs_info *fs_info;
681 u64 end = offset + PAGE_SIZE - 1;
682 struct btrfs_root *local_root;
683 int srcu_index;
684
685 key.objectid = root;
686 key.type = BTRFS_ROOT_ITEM_KEY;
687 key.offset = (u64)-1;
688
689 fs_info = fixup->root->fs_info;
690 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
691
692 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
693 if (IS_ERR(local_root)) {
694 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
695 return PTR_ERR(local_root);
696 }
697
698 key.type = BTRFS_INODE_ITEM_KEY;
699 key.objectid = inum;
700 key.offset = 0;
701 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
702 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
703 if (IS_ERR(inode))
704 return PTR_ERR(inode);
705
706 index = offset >> PAGE_SHIFT;
707
708 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
709 if (!page) {
710 ret = -ENOMEM;
711 goto out;
712 }
713
714 if (PageUptodate(page)) {
715 if (PageDirty(page)) {
			/*
			 * We need to write the data to the defect sector. The
			 * data that was in that sector is not in memory,
			 * because the page was modified. We must not write the
			 * modified page to that sector.
			 *
			 * TODO: what could be done here: wait for the delalloc
			 *       runner to write out that page (might involve
			 *       COW) and see whether the sector is still
			 *       referenced afterwards.
			 *
			 * For the time being, we'll treat this error as
			 * uncorrectable, although there is a chance that a
			 * later scrub will find the bad sector again and that
			 * there's no dirty page in memory then.
			 */
732 ret = -EIO;
733 goto out;
734 }
735 ret = repair_io_failure(inode, offset, PAGE_SIZE,
736 fixup->logical, page,
737 offset - page_offset(page),
738 fixup->mirror_num);
739 unlock_page(page);
740 corrected = !ret;
741 } else {
		/*
		 * We need to get good data first. The general readpage path
		 * will call repair_io_failure() for us, we just have to make
		 * sure we read the bad mirror.
		 */
747 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
748 EXTENT_DAMAGED, GFP_NOFS);
749 if (ret) {
750 /* set_extent_bits should give proper error */
751 WARN_ON(ret > 0);
752 if (ret > 0)
753 ret = -EFAULT;
754 goto out;
755 }
756
757 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
758 btrfs_get_extent,
759 fixup->mirror_num);
760 wait_on_page_locked(page);
761
762 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
763 end, EXTENT_DAMAGED, 0, NULL);
764 if (!corrected)
765 clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
766 EXTENT_DAMAGED, GFP_NOFS);
767 }
768
769out:
770 if (page)
771 put_page(page);
772
773 iput(inode);
774
775 if (ret < 0)
776 return ret;
777
778 if (ret == 0 && corrected) {
		/*
		 * We only need to call readpage for one of the inodes that
		 * belong to this extent, so make iterate_extent_inodes() stop.
		 */
783 return 1;
784 }
785
786 return -EIO;
787}
788
789static void scrub_fixup_nodatasum(struct btrfs_work *work)
790{
791 int ret;
792 struct scrub_fixup_nodatasum *fixup;
793 struct scrub_ctx *sctx;
794 struct btrfs_trans_handle *trans = NULL;
795 struct btrfs_path *path;
796 int uncorrectable = 0;
797
798 fixup = container_of(work, struct scrub_fixup_nodatasum, work);
799 sctx = fixup->sctx;
800
801 path = btrfs_alloc_path();
802 if (!path) {
803 spin_lock(&sctx->stat_lock);
804 ++sctx->stat.malloc_errors;
805 spin_unlock(&sctx->stat_lock);
806 uncorrectable = 1;
807 goto out;
808 }
809
810 trans = btrfs_join_transaction(fixup->root);
811 if (IS_ERR(trans)) {
812 uncorrectable = 1;
813 goto out;
814 }
815
	/*
	 * The idea is to trigger a regular read through the standard path.
	 * We read a page from the (failed) logical address by specifying
	 * the corresponding copynum of the failed sector. Thus, that
	 * readpage is expected to fail. That is the point where on-the-fly
	 * error correction kicks in (once the read is finished) and
	 * rewrites the failed sector if a good copy can be found.
	 */
825 ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
826 path, scrub_fixup_readpage,
827 fixup);
828 if (ret < 0) {
829 uncorrectable = 1;
830 goto out;
831 }
832 WARN_ON(ret != 1);
833
834 spin_lock(&sctx->stat_lock);
835 ++sctx->stat.corrected_errors;
836 spin_unlock(&sctx->stat_lock);
837
838out:
839 if (trans && !IS_ERR(trans))
840 btrfs_end_transaction(trans, fixup->root);
841 if (uncorrectable) {
842 spin_lock(&sctx->stat_lock);
843 ++sctx->stat.uncorrectable_errors;
844 spin_unlock(&sctx->stat_lock);
845 btrfs_dev_replace_stats_inc(
846 &sctx->dev_root->fs_info->dev_replace.
847 num_uncorrectable_read_errors);
848 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
849 "unable to fixup (nodatasum) error at logical %llu on dev %s",
850 fixup->logical, rcu_str_deref(fixup->dev->name));
851 }
852
853 btrfs_free_path(path);
854 kfree(fixup);
855
856 scrub_pending_trans_workers_dec(sctx);
857}
858
859static inline void scrub_get_recover(struct scrub_recover *recover)
860{
861 atomic_inc(&recover->refs);
862}
863
864static inline void scrub_put_recover(struct scrub_recover *recover)
865{
866 if (atomic_dec_and_test(&recover->refs)) {
867 btrfs_put_bbio(recover->bbio);
868 kfree(recover);
869 }
870}
871
872/*
873 * scrub_handle_errored_block gets called when either verification of the
874 * pages failed or the bio failed to read, e.g. with EIO. In the latter
875 * case, this function handles all pages in the bio, even though only one
876 * may be bad.
877 * The goal of this function is to repair the errored block by using the
878 * contents of one of the mirrors.
879 */
880static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
881{
882 struct scrub_ctx *sctx = sblock_to_check->sctx;
883 struct btrfs_device *dev;
884 struct btrfs_fs_info *fs_info;
885 u64 length;
886 u64 logical;
887 unsigned int failed_mirror_index;
888 unsigned int is_metadata;
889 unsigned int have_csum;
890 struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
891 struct scrub_block *sblock_bad;
892 int ret;
893 int mirror_index;
894 int page_num;
895 int success;
896 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
897 DEFAULT_RATELIMIT_BURST);
898
899 BUG_ON(sblock_to_check->page_count < 1);
900 fs_info = sctx->dev_root->fs_info;
901 if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * Super blocks get rewritten with the next transaction
		 * commit anyway.
		 */
907 spin_lock(&sctx->stat_lock);
908 ++sctx->stat.super_errors;
909 spin_unlock(&sctx->stat_lock);
910 return 0;
911 }
912 length = sblock_to_check->page_count * PAGE_SIZE;
913 logical = sblock_to_check->pagev[0]->logical;
914 BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
915 failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
916 is_metadata = !(sblock_to_check->pagev[0]->flags &
917 BTRFS_EXTENT_FLAG_DATA);
918 have_csum = sblock_to_check->pagev[0]->have_csum;
919 dev = sblock_to_check->pagev[0]->dev;
920
921 if (sctx->is_dev_replace && !is_metadata && !have_csum) {
922 sblocks_for_recheck = NULL;
923 goto nodatasum_case;
924 }
925
	/*
	 * Read all mirrors one after the other. This includes re-reading
	 * the extent or metadata block that failed (and caused this fixup
	 * code to be called) one more time, page by page this time, in
	 * order to know which pages caused I/O errors and which ones are
	 * good (for all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the
	 * data can be repaired by selecting the pages from those mirrors
	 * without I/O error on the particular pages. One example (with
	 * blocks >= 2 * PAGE_SIZE) would be that mirror #1 has an I/O
	 * error on the first page, the second page is good, and mirror #2
	 * has an I/O error on the second page, but the first page is good.
	 * Then the first page of the first mirror can be repaired by
	 * taking the first page of the second mirror, and the second page
	 * of the second mirror can be repaired by copying the contents of
	 * the 2nd page of the 1st mirror.
	 * One more note: if the pages of one mirror contain I/O errors,
	 * the checksum cannot be verified. In order to get the best data
	 * for repairing, the first attempt is to find a mirror without
	 * I/O errors and with a validated checksum. Only if this is not
	 * possible, the pages are picked from mirrors with I/O errors
	 * without considering the checksum.
	 * If the latter is the case, at the end, the checksum of the
	 * repaired area is verified in order to correctly maintain the
	 * statistics.
	 */
954
955 sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
956 sizeof(*sblocks_for_recheck), GFP_NOFS);
957 if (!sblocks_for_recheck) {
958 spin_lock(&sctx->stat_lock);
959 sctx->stat.malloc_errors++;
960 sctx->stat.read_errors++;
961 sctx->stat.uncorrectable_errors++;
962 spin_unlock(&sctx->stat_lock);
963 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
964 goto out;
965 }
966
967 /* setup the context, map the logical blocks and alloc the pages */
968 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
969 if (ret) {
970 spin_lock(&sctx->stat_lock);
971 sctx->stat.read_errors++;
972 sctx->stat.uncorrectable_errors++;
973 spin_unlock(&sctx->stat_lock);
974 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
975 goto out;
976 }
977 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
978 sblock_bad = sblocks_for_recheck + failed_mirror_index;
979
980 /* build and submit the bios for the failed mirror, check checksums */
981 scrub_recheck_block(fs_info, sblock_bad, 1);
982
983 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
984 sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading page by page, or the
		 * area was part of a huge bio and other parts of the bio
		 * caused I/O errors, or the block layer merged several read
		 * requests into one and the error is caused by a different
		 * bio (usually one of the two latter cases is the cause).
		 */
993 spin_lock(&sctx->stat_lock);
994 sctx->stat.unverified_errors++;
995 sblock_to_check->data_corrected = 1;
996 spin_unlock(&sctx->stat_lock);
997
998 if (sctx->is_dev_replace)
999 scrub_write_block_to_dev_replace(sblock_bad);
1000 goto out;
1001 }
1002
1003 if (!sblock_bad->no_io_error_seen) {
1004 spin_lock(&sctx->stat_lock);
1005 sctx->stat.read_errors++;
1006 spin_unlock(&sctx->stat_lock);
1007 if (__ratelimit(&_rs))
1008 scrub_print_warning("i/o error", sblock_to_check);
1009 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1010 } else if (sblock_bad->checksum_error) {
1011 spin_lock(&sctx->stat_lock);
1012 sctx->stat.csum_errors++;
1013 spin_unlock(&sctx->stat_lock);
1014 if (__ratelimit(&_rs))
1015 scrub_print_warning("checksum error", sblock_to_check);
1016 btrfs_dev_stat_inc_and_print(dev,
1017 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1018 } else if (sblock_bad->header_error) {
1019 spin_lock(&sctx->stat_lock);
1020 sctx->stat.verify_errors++;
1021 spin_unlock(&sctx->stat_lock);
1022 if (__ratelimit(&_rs))
1023 scrub_print_warning("checksum/header error",
1024 sblock_to_check);
1025 if (sblock_bad->generation_error)
1026 btrfs_dev_stat_inc_and_print(dev,
1027 BTRFS_DEV_STAT_GENERATION_ERRS);
1028 else
1029 btrfs_dev_stat_inc_and_print(dev,
1030 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1031 }
1032
1033 if (sctx->readonly) {
1034 ASSERT(!sctx->is_dev_replace);
1035 goto out;
1036 }
1037
1038 if (!is_metadata && !have_csum) {
1039 struct scrub_fixup_nodatasum *fixup_nodatasum;
1040
1041 WARN_ON(sctx->is_dev_replace);
1042
1043nodatasum_case:
1044
		/*
		 * !is_metadata and !have_csum: this means that the data
		 * might not be COW'ed and might be modified concurrently.
		 * The general strategy of working on the commit root does
		 * not help in the case when COW is not used.
		 */
1052 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1053 if (!fixup_nodatasum)
1054 goto did_not_correct_error;
1055 fixup_nodatasum->sctx = sctx;
1056 fixup_nodatasum->dev = dev;
1057 fixup_nodatasum->logical = logical;
1058 fixup_nodatasum->root = fs_info->extent_root;
1059 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1060 scrub_pending_trans_workers_inc(sctx);
1061 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1062 scrub_fixup_nodatasum, NULL, NULL);
1063 btrfs_queue_work(fs_info->scrub_workers,
1064 &fixup_nodatasum->work);
1065 goto out;
1066 }
1067
	/*
	 * Now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick the mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards the
	 * block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had an
	 * I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and
	 * otherwise a correct page could be overwritten by a bad one).
	 */
1083 for (mirror_index = 0;
1084 mirror_index < BTRFS_MAX_MIRRORS &&
1085 sblocks_for_recheck[mirror_index].page_count > 0;
1086 mirror_index++) {
1087 struct scrub_block *sblock_other;
1088
1089 if (mirror_index == failed_mirror_index)
1090 continue;
1091 sblock_other = sblocks_for_recheck + mirror_index;
1092
1093 /* build and submit the bios, check checksums */
1094 scrub_recheck_block(fs_info, sblock_other, 0);
1095
1096 if (!sblock_other->header_error &&
1097 !sblock_other->checksum_error &&
1098 sblock_other->no_io_error_seen) {
1099 if (sctx->is_dev_replace) {
1100 scrub_write_block_to_dev_replace(sblock_other);
1101 goto corrected_error;
1102 } else {
1103 ret = scrub_repair_block_from_good_copy(
1104 sblock_bad, sblock_other);
1105 if (!ret)
1106 goto corrected_error;
1107 }
1108 }
1109 }
1110
1111 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1112 goto did_not_correct_error;
1113
	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those pages.
	 * Select the good pages from mirrors to rewrite bad pages from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is
	 * only done for the purpose of statistics counting and for the
	 * final scrub report, which states whether errors remain.
	 * A perfect algorithm could make use of the checksum and try
	 * all possible combinations of pages from the different mirrors
	 * until the checksum verification succeeds. For example, when
	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
	 * of mirror #2 is readable but the final checksum test fails,
	 * then the 2nd page of mirror #3 could be tried, to see whether
	 * the final checksum succeeds then. But this would be a rare
	 * exception and is therefore not implemented. At least it is
	 * avoided that the good copy is overwritten.
	 * A more useful improvement would be to pick the sectors
	 * without I/O error based on sector sizes (512 bytes on legacy
	 * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
	 * mirror could be repaired by taking 512 bytes of a different
	 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
	 * area are unreadable.
	 */
1138 success = 1;
1139 for (page_num = 0; page_num < sblock_bad->page_count;
1140 page_num++) {
1141 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1142 struct scrub_block *sblock_other = NULL;
1143
1144 /* skip no-io-error page in scrub */
1145 if (!page_bad->io_error && !sctx->is_dev_replace)
1146 continue;
1147
1148 /* try to find no-io-error page in mirrors */
1149 if (page_bad->io_error) {
1150 for (mirror_index = 0;
1151 mirror_index < BTRFS_MAX_MIRRORS &&
1152 sblocks_for_recheck[mirror_index].page_count > 0;
1153 mirror_index++) {
1154 if (!sblocks_for_recheck[mirror_index].
1155 pagev[page_num]->io_error) {
1156 sblock_other = sblocks_for_recheck +
1157 mirror_index;
1158 break;
1159 }
1160 }
1161 if (!sblock_other)
1162 success = 0;
1163 }
1164
1165 if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the page from.
			 * scrub_write_page_to_dev_replace() handles this
			 * case (page->io_error) by filling the block with
			 * zeros before submitting the write request.
			 */
1173 if (!sblock_other)
1174 sblock_other = sblock_bad;
1175
1176 if (scrub_write_page_to_dev_replace(sblock_other,
1177 page_num) != 0) {
1178 btrfs_dev_replace_stats_inc(
1179 &sctx->dev_root->
1180 fs_info->dev_replace.
1181 num_write_errors);
1182 success = 0;
1183 }
1184 } else if (sblock_other) {
1185 ret = scrub_repair_page_from_good_copy(sblock_bad,
1186 sblock_other,
1187 page_num, 0);
			if (ret == 0)
1189 page_bad->io_error = 0;
1190 else
1191 success = 0;
1192 }
1193 }
1194
1195 if (success && !sctx->is_dev_replace) {
1196 if (is_metadata || have_csum) {
1197 /*
1198 * need to verify the checksum now that all
1199 * sectors on disk are repaired (the write
1200 * request for data to be repaired is on its way).
1201 * Just be lazy and use scrub_recheck_block()
1202 * which re-reads the data before the checksum
1203 * is verified, but most likely the data comes out
1204 * of the page cache.
1205 */
1206 scrub_recheck_block(fs_info, sblock_bad, 1);
1207 if (!sblock_bad->header_error &&
1208 !sblock_bad->checksum_error &&
1209 sblock_bad->no_io_error_seen)
1210 goto corrected_error;
1211 else
1212 goto did_not_correct_error;
1213 } else {
1214corrected_error:
1215 spin_lock(&sctx->stat_lock);
1216 sctx->stat.corrected_errors++;
1217 sblock_to_check->data_corrected = 1;
1218 spin_unlock(&sctx->stat_lock);
1219 btrfs_err_rl_in_rcu(fs_info,
1220 "fixed up error at logical %llu on dev %s",
1221 logical, rcu_str_deref(dev->name));
1222 }
1223 } else {
1224did_not_correct_error:
1225 spin_lock(&sctx->stat_lock);
1226 sctx->stat.uncorrectable_errors++;
1227 spin_unlock(&sctx->stat_lock);
1228 btrfs_err_rl_in_rcu(fs_info,
1229 "unable to fixup (regular) error at logical %llu on dev %s",
1230 logical, rcu_str_deref(dev->name));
1231 }
1232
1233out:
1234 if (sblocks_for_recheck) {
1235 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1236 mirror_index++) {
1237 struct scrub_block *sblock = sblocks_for_recheck +
1238 mirror_index;
1239 struct scrub_recover *recover;
1240 int page_index;
1241
1242 for (page_index = 0; page_index < sblock->page_count;
1243 page_index++) {
1244 sblock->pagev[page_index]->sblock = NULL;
1245 recover = sblock->pagev[page_index]->recover;
1246 if (recover) {
1247 scrub_put_recover(recover);
1248 sblock->pagev[page_index]->recover =
1249 NULL;
1250 }
1251 scrub_page_put(sblock->pagev[page_index]);
1252 }
1253 }
1254 kfree(sblocks_for_recheck);
1255 }
1256
1257 return 0;
1258}
1259
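/*
 * A RAID5 block can be read in two ways (the data stripe itself, or a
 * reconstruction from parity), a RAID6 block in three, so those profiles
 * are treated as having 2 resp. 3 "mirrors".
 */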
1260static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1261{
1262 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1263 return 2;
1264 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1265 return 3;
1266 else
1267 return (int)bbio->num_stripes;
1268}
1269
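/*
 * Map a logical address to a stripe index and the offset within that
 * stripe. For RAID5/6 the index is found by scanning the raid_map while
 * skipping the P/Q stripes; for all other profiles the mirror number is
 * used as the stripe index and the offset is 0.
 */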
1270static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1271 u64 *raid_map,
1272 u64 mapped_length,
1273 int nstripes, int mirror,
1274 int *stripe_index,
1275 u64 *stripe_offset)
1276{
1277 int i;
1278
1279 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1280 /* RAID5/6 */
1281 for (i = 0; i < nstripes; i++) {
1282 if (raid_map[i] == RAID6_Q_STRIPE ||
1283 raid_map[i] == RAID5_P_STRIPE)
1284 continue;
1285
1286 if (logical >= raid_map[i] &&
1287 logical < raid_map[i] + mapped_length)
1288 break;
1289 }
1290
1291 *stripe_index = i;
1292 *stripe_offset = logical - raid_map[i];
1293 } else {
1294 /* The other RAID type */
1295 *stripe_index = mirror;
1296 *stripe_offset = 0;
1297 }
1298}
1299
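/*
 * Build one scrub_block per mirror for the block to be rechecked, mapping
 * each page of the original block to its physical location on every
 * mirror.
 */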
1300static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1301 struct scrub_block *sblocks_for_recheck)
1302{
1303 struct scrub_ctx *sctx = original_sblock->sctx;
1304 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
1305 u64 length = original_sblock->page_count * PAGE_SIZE;
1306 u64 logical = original_sblock->pagev[0]->logical;
1307 u64 generation = original_sblock->pagev[0]->generation;
1308 u64 flags = original_sblock->pagev[0]->flags;
1309 u64 have_csum = original_sblock->pagev[0]->have_csum;
1310 struct scrub_recover *recover;
1311 struct btrfs_bio *bbio;
1312 u64 sublen;
1313 u64 mapped_length;
1314 u64 stripe_offset;
1315 int stripe_index;
1316 int page_index = 0;
1317 int mirror_index;
1318 int nmirrors;
1319 int ret;
1320
1321 /*
1322 * note: the two members refs and outstanding_pages
1323 * are not used (and not set) in the blocks that are used for
1324 * the recheck procedure
1325 */
1326
1327 while (length > 0) {
1328 sublen = min_t(u64, length, PAGE_SIZE);
1329 mapped_length = sublen;
1330 bbio = NULL;
1331
1332 /*
1333 * with a length of PAGE_SIZE, each returned stripe
1334 * represents one mirror
1335 */
1336 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1337 &mapped_length, &bbio, 0, 1);
1338 if (ret || !bbio || mapped_length < sublen) {
1339 btrfs_put_bbio(bbio);
1340 return -EIO;
1341 }
1342
1343 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1344 if (!recover) {
1345 btrfs_put_bbio(bbio);
1346 return -ENOMEM;
1347 }
1348
1349 atomic_set(&recover->refs, 1);
1350 recover->bbio = bbio;
1351 recover->map_length = mapped_length;
1352
1353 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1354
1355 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1356
1357 for (mirror_index = 0; mirror_index < nmirrors;
1358 mirror_index++) {
1359 struct scrub_block *sblock;
1360 struct scrub_page *page;
1361
1362 sblock = sblocks_for_recheck + mirror_index;
1363 sblock->sctx = sctx;
1364
1365 page = kzalloc(sizeof(*page), GFP_NOFS);
1366 if (!page) {
1367leave_nomem:
1368 spin_lock(&sctx->stat_lock);
1369 sctx->stat.malloc_errors++;
1370 spin_unlock(&sctx->stat_lock);
1371 scrub_put_recover(recover);
1372 return -ENOMEM;
1373 }
1374 scrub_page_get(page);
1375 sblock->pagev[page_index] = page;
1376 page->sblock = sblock;
1377 page->flags = flags;
1378 page->generation = generation;
1379 page->logical = logical;
1380 page->have_csum = have_csum;
1381 if (have_csum)
1382 memcpy(page->csum,
1383 original_sblock->pagev[0]->csum,
1384 sctx->csum_size);
1385
1386 scrub_stripe_index_and_offset(logical,
1387 bbio->map_type,
1388 bbio->raid_map,
1389 mapped_length,
1390 bbio->num_stripes -
1391 bbio->num_tgtdevs,
1392 mirror_index,
1393 &stripe_index,
1394 &stripe_offset);
1395 page->physical = bbio->stripes[stripe_index].physical +
1396 stripe_offset;
1397 page->dev = bbio->stripes[stripe_index].dev;
1398
1399 BUG_ON(page_index >= original_sblock->page_count);
1400 page->physical_for_dev_replace =
1401 original_sblock->pagev[page_index]->
1402 physical_for_dev_replace;
1403 /* for missing devices, dev->bdev is NULL */
1404 page->mirror_num = mirror_index + 1;
1405 sblock->page_count++;
1406 page->page = alloc_page(GFP_NOFS);
1407 if (!page->page)
1408 goto leave_nomem;
1409
1410 scrub_get_recover(recover);
1411 page->recover = recover;
1412 }
1413 scrub_put_recover(recover);
1414 length -= sublen;
1415 logical += sublen;
1416 page_index++;
1417 }
1418
1419 return 0;
1420}
1421
1422struct scrub_bio_ret {
1423 struct completion event;
1424 int error;
1425};
1426
1427static void scrub_bio_wait_endio(struct bio *bio)
1428{
1429 struct scrub_bio_ret *ret = bio->bi_private;
1430
1431 ret->error = bio->bi_error;
1432 complete(&ret->event);
1433}
1434
1435static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1436{
1437 return page->recover &&
1438 (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
1439}
1440
1441static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1442 struct bio *bio,
1443 struct scrub_page *page)
1444{
1445 struct scrub_bio_ret done;
1446 int ret;
1447
1448 init_completion(&done.event);
1449 done.error = 0;
1450 bio->bi_iter.bi_sector = page->logical >> 9;
1451 bio->bi_private = &done;
1452 bio->bi_end_io = scrub_bio_wait_endio;
1453
1454 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1455 page->recover->map_length,
1456 page->mirror_num, 0);
1457 if (ret)
1458 return ret;
1459
1460 wait_for_completion(&done.event);
1461 if (done.error)
1462 return -EIO;
1463
1464 return 0;
1465}
1466
/*
 * This function checks the on-disk data for checksum errors, header errors
 * and read I/O errors. If any I/O error happens, the exact pages which
 * errored are marked as bad. The goal is to enable scrub to take those
 * pages that are not errored from all the mirrors so that the pages that
 * errored in the just handled mirror can be repaired.
 */
1474static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1475 struct scrub_block *sblock,
1476 int retry_failed_mirror)
1477{
1478 int page_num;
1479
1480 sblock->no_io_error_seen = 1;
1481
1482 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1483 struct bio *bio;
1484 struct scrub_page *page = sblock->pagev[page_num];
1485
1486 if (page->dev->bdev == NULL) {
1487 page->io_error = 1;
1488 sblock->no_io_error_seen = 0;
1489 continue;
1490 }
1491
1492 WARN_ON(!page->page);
1493 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1494 if (!bio) {
1495 page->io_error = 1;
1496 sblock->no_io_error_seen = 0;
1497 continue;
1498 }
1499 bio->bi_bdev = page->dev->bdev;
1500
1501 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1502 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1503 if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1504 sblock->no_io_error_seen = 0;
1505 } else {
1506 bio->bi_iter.bi_sector = page->physical >> 9;
1507
1508 if (btrfsic_submit_bio_wait(READ, bio))
1509 sblock->no_io_error_seen = 0;
1510 }
1511
1512 bio_put(bio);
1513 }
1514
1515 if (sblock->no_io_error_seen)
1516 scrub_recheck_block_checksum(sblock);
1517}
1518
1519static inline int scrub_check_fsid(u8 fsid[],
1520 struct scrub_page *spage)
1521{
1522 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1523 int ret;
1524
1525 ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1526 return !ret;
1527}
1528
1529static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1530{
1531 sblock->header_error = 0;
1532 sblock->checksum_error = 0;
1533 sblock->generation_error = 0;
1534
1535 if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1536 scrub_checksum_data(sblock);
1537 else
1538 scrub_checksum_tree_block(sblock);
1539}
1540
1541static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1542 struct scrub_block *sblock_good)
1543{
1544 int page_num;
1545 int ret = 0;
1546
1547 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1548 int ret_sub;
1549
1550 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1551 sblock_good,
1552 page_num, 1);
1553 if (ret_sub)
1554 ret = ret_sub;
1555 }
1556
1557 return ret;
1558}
1559
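/*
 * Rewrite one page of the bad mirror with the corresponding page of the
 * good mirror. Unless force_write is set, only pages known to be bad
 * (I/O, header or checksum error) are rewritten.
 */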
1560static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1561 struct scrub_block *sblock_good,
1562 int page_num, int force_write)
1563{
1564 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1565 struct scrub_page *page_good = sblock_good->pagev[page_num];
1566
1567 BUG_ON(page_bad->page == NULL);
1568 BUG_ON(page_good->page == NULL);
1569 if (force_write || sblock_bad->header_error ||
1570 sblock_bad->checksum_error || page_bad->io_error) {
1571 struct bio *bio;
1572 int ret;
1573
1574 if (!page_bad->dev->bdev) {
1575 btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
1576 "scrub_repair_page_from_good_copy(bdev == NULL) "
1577 "is unexpected");
1578 return -EIO;
1579 }
1580
1581 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1582 if (!bio)
1583 return -EIO;
1584 bio->bi_bdev = page_bad->dev->bdev;
1585 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1586
1587 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1588 if (PAGE_SIZE != ret) {
1589 bio_put(bio);
1590 return -EIO;
1591 }
1592
1593 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1594 btrfs_dev_stat_inc_and_print(page_bad->dev,
1595 BTRFS_DEV_STAT_WRITE_ERRS);
1596 btrfs_dev_replace_stats_inc(
1597 &sblock_bad->sctx->dev_root->fs_info->
1598 dev_replace.num_write_errors);
1599 bio_put(bio);
1600 return -EIO;
1601 }
1602 bio_put(bio);
1603 }
1604
1605 return 0;
1606}
1607
1608static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1609{
1610 int page_num;
1611
1612 /*
1613 * This block is used for the check of the parity on the source device,
1614 * so the data needn't be written into the destination device.
1615 */
1616 if (sblock->sparity)
1617 return;
1618
1619 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1620 int ret;
1621
1622 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1623 if (ret)
1624 btrfs_dev_replace_stats_inc(
1625 &sblock->sctx->dev_root->fs_info->dev_replace.
1626 num_write_errors);
1627 }
1628}
1629
1630static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1631 int page_num)
1632{
1633 struct scrub_page *spage = sblock->pagev[page_num];
1634
1635 BUG_ON(spage->page == NULL);
1636 if (spage->io_error) {
1637 void *mapped_buffer = kmap_atomic(spage->page);
1638
1639 memset(mapped_buffer, 0, PAGE_SIZE);
1640 flush_dcache_page(spage->page);
1641 kunmap_atomic(mapped_buffer);
1642 }
1643 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1644}
1645
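/*
 * Queue a page for writing to the dev-replace target. Pages are collected
 * into the current write bio as long as they stay physically and
 * logically contiguous; otherwise the bio is submitted and a new one is
 * started.
 */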
1646static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1647 struct scrub_page *spage)
1648{
1649 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1650 struct scrub_bio *sbio;
1651 int ret;
1652
1653 mutex_lock(&wr_ctx->wr_lock);
1654again:
1655 if (!wr_ctx->wr_curr_bio) {
1656 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1657 GFP_KERNEL);
1658 if (!wr_ctx->wr_curr_bio) {
1659 mutex_unlock(&wr_ctx->wr_lock);
1660 return -ENOMEM;
1661 }
1662 wr_ctx->wr_curr_bio->sctx = sctx;
1663 wr_ctx->wr_curr_bio->page_count = 0;
1664 }
1665 sbio = wr_ctx->wr_curr_bio;
1666 if (sbio->page_count == 0) {
1667 struct bio *bio;
1668
1669 sbio->physical = spage->physical_for_dev_replace;
1670 sbio->logical = spage->logical;
1671 sbio->dev = wr_ctx->tgtdev;
1672 bio = sbio->bio;
1673 if (!bio) {
1674 bio = btrfs_io_bio_alloc(GFP_KERNEL,
1675 wr_ctx->pages_per_wr_bio);
1676 if (!bio) {
1677 mutex_unlock(&wr_ctx->wr_lock);
1678 return -ENOMEM;
1679 }
1680 sbio->bio = bio;
1681 }
1682
1683 bio->bi_private = sbio;
1684 bio->bi_end_io = scrub_wr_bio_end_io;
1685 bio->bi_bdev = sbio->dev->bdev;
1686 bio->bi_iter.bi_sector = sbio->physical >> 9;
1687 sbio->err = 0;
1688 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1689 spage->physical_for_dev_replace ||
1690 sbio->logical + sbio->page_count * PAGE_SIZE !=
1691 spage->logical) {
1692 scrub_wr_submit(sctx);
1693 goto again;
1694 }
1695
1696 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1697 if (ret != PAGE_SIZE) {
1698 if (sbio->page_count < 1) {
1699 bio_put(sbio->bio);
1700 sbio->bio = NULL;
1701 mutex_unlock(&wr_ctx->wr_lock);
1702 return -EIO;
1703 }
1704 scrub_wr_submit(sctx);
1705 goto again;
1706 }
1707
1708 sbio->pagev[sbio->page_count] = spage;
1709 scrub_page_get(spage);
1710 sbio->page_count++;
1711 if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1712 scrub_wr_submit(sctx);
1713 mutex_unlock(&wr_ctx->wr_lock);
1714
1715 return 0;
1716}
1717
1718static void scrub_wr_submit(struct scrub_ctx *sctx)
1719{
1720 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1721 struct scrub_bio *sbio;
1722
1723 if (!wr_ctx->wr_curr_bio)
1724 return;
1725
1726 sbio = wr_ctx->wr_curr_bio;
1727 wr_ctx->wr_curr_bio = NULL;
1728 WARN_ON(!sbio->bio->bi_bdev);
1729 scrub_pending_bio_inc(sctx);
	/*
	 * Process all writes in a single worker thread. Then the block
	 * layer orders the requests before sending them to the driver,
	 * which doubled the write performance on spinning disks when
	 * measured with Linux 3.5.
	 */
1734 btrfsic_submit_bio(WRITE, sbio->bio);
1735}
1736
1737static void scrub_wr_bio_end_io(struct bio *bio)
1738{
1739 struct scrub_bio *sbio = bio->bi_private;
1740 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1741
1742 sbio->err = bio->bi_error;
1743 sbio->bio = bio;
1744
1745 btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1746 scrub_wr_bio_end_io_worker, NULL, NULL);
1747 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1748}
1749
1750static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1751{
1752 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1753 struct scrub_ctx *sctx = sbio->sctx;
1754 int i;
1755
1756 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1757 if (sbio->err) {
1758 struct btrfs_dev_replace *dev_replace =
1759 &sbio->sctx->dev_root->fs_info->dev_replace;
1760
1761 for (i = 0; i < sbio->page_count; i++) {
1762 struct scrub_page *spage = sbio->pagev[i];
1763
1764 spage->io_error = 1;
1765 btrfs_dev_replace_stats_inc(&dev_replace->
1766 num_write_errors);
1767 }
1768 }
1769
1770 for (i = 0; i < sbio->page_count; i++)
1771 scrub_page_put(sbio->pagev[i]);
1772
1773 bio_put(sbio->bio);
1774 kfree(sbio);
1775 scrub_pending_bio_dec(sctx);
1776}
1777
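/*
 * Verify a block according to its type: data checksum, tree block header
 * plus checksum, or super block. A failed data or tree block check kicks
 * off repair via scrub_handle_errored_block().
 */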
1778static int scrub_checksum(struct scrub_block *sblock)
1779{
1780 u64 flags;
1781 int ret;
1782
	/*
	 * No need to initialize these stats currently, because this
	 * function only uses the return value instead of these stat
	 * values.
	 *
	 * TODO: always use stats.
	 */
1791 sblock->header_error = 0;
1792 sblock->generation_error = 0;
1793 sblock->checksum_error = 0;
1794
1795 WARN_ON(sblock->page_count < 1);
1796 flags = sblock->pagev[0]->flags;
1797 ret = 0;
1798 if (flags & BTRFS_EXTENT_FLAG_DATA)
1799 ret = scrub_checksum_data(sblock);
1800 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1801 ret = scrub_checksum_tree_block(sblock);
1802 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1803 (void)scrub_checksum_super(sblock);
1804 else
1805 WARN_ON(1);
1806 if (ret)
1807 scrub_handle_errored_block(sblock);
1808
1809 return ret;
1810}
1811
1812static int scrub_checksum_data(struct scrub_block *sblock)
1813{
1814 struct scrub_ctx *sctx = sblock->sctx;
1815 u8 csum[BTRFS_CSUM_SIZE];
1816 u8 *on_disk_csum;
1817 struct page *page;
1818 void *buffer;
1819 u32 crc = ~(u32)0;
1820 u64 len;
1821 int index;
1822
1823 BUG_ON(sblock->page_count < 1);
1824 if (!sblock->pagev[0]->have_csum)
1825 return 0;
1826
1827 on_disk_csum = sblock->pagev[0]->csum;
1828 page = sblock->pagev[0]->page;
1829 buffer = kmap_atomic(page);
1830
1831 len = sctx->sectorsize;
1832 index = 0;
1833 for (;;) {
1834 u64 l = min_t(u64, len, PAGE_SIZE);
1835
1836 crc = btrfs_csum_data(buffer, crc, l);
1837 kunmap_atomic(buffer);
1838 len -= l;
1839 if (len == 0)
1840 break;
1841 index++;
1842 BUG_ON(index >= sblock->page_count);
1843 BUG_ON(!sblock->pagev[index]->page);
1844 page = sblock->pagev[index]->page;
1845 buffer = kmap_atomic(page);
1846 }
1847
1848 btrfs_csum_final(crc, csum);
1849 if (memcmp(csum, on_disk_csum, sctx->csum_size))
1850 sblock->checksum_error = 1;
1851
1852 return sblock->checksum_error;
1853}
1854
1855static int scrub_checksum_tree_block(struct scrub_block *sblock)
1856{
1857 struct scrub_ctx *sctx = sblock->sctx;
1858 struct btrfs_header *h;
1859 struct btrfs_root *root = sctx->dev_root;
1860 struct btrfs_fs_info *fs_info = root->fs_info;
1861 u8 calculated_csum[BTRFS_CSUM_SIZE];
1862 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1863 struct page *page;
1864 void *mapped_buffer;
1865 u64 mapped_size;
1866 void *p;
1867 u32 crc = ~(u32)0;
1868 u64 len;
1869 int index;
1870
1871 BUG_ON(sblock->page_count < 1);
1872 page = sblock->pagev[0]->page;
1873 mapped_buffer = kmap_atomic(page);
1874 h = (struct btrfs_header *)mapped_buffer;
1875 memcpy(on_disk_csum, h->csum, sctx->csum_size);
1876
1877 /*
1878 * we don't use the getter functions here, as we
1879 * a) don't have an extent buffer and
1880 * b) the page is already kmapped
1881 */
1882 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1883 sblock->header_error = 1;
1884
1885 if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h)) {
1886 sblock->header_error = 1;
1887 sblock->generation_error = 1;
1888 }
1889
1890 if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1891 sblock->header_error = 1;
1892
1893 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1894 BTRFS_UUID_SIZE))
1895 sblock->header_error = 1;
1896
1897 len = sctx->nodesize - BTRFS_CSUM_SIZE;
1898 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1899 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1900 index = 0;
1901 for (;;) {
1902 u64 l = min_t(u64, len, mapped_size);
1903
1904 crc = btrfs_csum_data(p, crc, l);
1905 kunmap_atomic(mapped_buffer);
1906 len -= l;
1907 if (len == 0)
1908 break;
1909 index++;
1910 BUG_ON(index >= sblock->page_count);
1911 BUG_ON(!sblock->pagev[index]->page);
1912 page = sblock->pagev[index]->page;
1913 mapped_buffer = kmap_atomic(page);
1914 mapped_size = PAGE_SIZE;
1915 p = mapped_buffer;
1916 }
1917
1918 btrfs_csum_final(crc, calculated_csum);
1919 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1920 sblock->checksum_error = 1;
1921
1922 return sblock->header_error || sblock->checksum_error;
1923}
1924
1925static int scrub_checksum_super(struct scrub_block *sblock)
1926{
1927 struct btrfs_super_block *s;
1928 struct scrub_ctx *sctx = sblock->sctx;
1929 u8 calculated_csum[BTRFS_CSUM_SIZE];
1930 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1931 struct page *page;
1932 void *mapped_buffer;
1933 u64 mapped_size;
1934 void *p;
1935 u32 crc = ~(u32)0;
1936 int fail_gen = 0;
1937 int fail_cor = 0;
1938 u64 len;
1939 int index;
1940
1941 BUG_ON(sblock->page_count < 1);
1942 page = sblock->pagev[0]->page;
1943 mapped_buffer = kmap_atomic(page);
1944 s = (struct btrfs_super_block *)mapped_buffer;
1945 memcpy(on_disk_csum, s->csum, sctx->csum_size);
1946
1947 if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
1948 ++fail_cor;
1949
1950 if (sblock->pagev[0]->generation != btrfs_super_generation(s))
1951 ++fail_gen;
1952
1953 if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
1954 ++fail_cor;
1955
1956 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1957 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1958 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1959 index = 0;
1960 for (;;) {
1961 u64 l = min_t(u64, len, mapped_size);
1962
1963 crc = btrfs_csum_data(p, crc, l);
1964 kunmap_atomic(mapped_buffer);
1965 len -= l;
1966 if (len == 0)
1967 break;
1968 index++;
1969 BUG_ON(index >= sblock->page_count);
1970 BUG_ON(!sblock->pagev[index]->page);
1971 page = sblock->pagev[index]->page;
1972 mapped_buffer = kmap_atomic(page);
1973 mapped_size = PAGE_SIZE;
1974 p = mapped_buffer;
1975 }
1976
1977 btrfs_csum_final(crc, calculated_csum);
1978 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1979 ++fail_cor;
1980
1981 if (fail_cor + fail_gen) {
		/*
		 * If we find an error in a super block, we just report it.
		 * Super blocks get rewritten with the next transaction
		 * commit anyway.
		 */
1987 spin_lock(&sctx->stat_lock);
1988 ++sctx->stat.super_errors;
1989 spin_unlock(&sctx->stat_lock);
1990 if (fail_cor)
1991 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1992 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1993 else
1994 btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
1995 BTRFS_DEV_STAT_GENERATION_ERRS);
1996 }
1997
1998 return fail_cor + fail_gen;
1999}
2000
2001static void scrub_block_get(struct scrub_block *sblock)
2002{
2003 atomic_inc(&sblock->refs);
2004}
2005
2006static void scrub_block_put(struct scrub_block *sblock)
2007{
2008 if (atomic_dec_and_test(&sblock->refs)) {
2009 int i;
2010
2011 if (sblock->sparity)
2012 scrub_parity_put(sblock->sparity);
2013
2014 for (i = 0; i < sblock->page_count; i++)
2015 scrub_page_put(sblock->pagev[i]);
2016 kfree(sblock);
2017 }
2018}
2019
2020static void scrub_page_get(struct scrub_page *spage)
2021{
2022 atomic_inc(&spage->refs);
2023}
2024
2025static void scrub_page_put(struct scrub_page *spage)
2026{
2027 if (atomic_dec_and_test(&spage->refs)) {
2028 if (spage->page)
2029 __free_page(spage->page);
2030 kfree(spage);
2031 }
2032}
2033
2034static void scrub_submit(struct scrub_ctx *sctx)
2035{
2036 struct scrub_bio *sbio;
2037
2038 if (sctx->curr == -1)
2039 return;
2040
2041 sbio = sctx->bios[sctx->curr];
2042 sctx->curr = -1;
2043 scrub_pending_bio_inc(sctx);
2044 btrfsic_submit_bio(READ, sbio->bio);
2045}
2046
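/*
 * Queue a page for reading. As on the write side, pages are appended to
 * the current read bio only while they stay contiguous on the same
 * device; otherwise the bio is submitted and a fresh one is grabbed.
 */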
2047static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2048 struct scrub_page *spage)
2049{
2050 struct scrub_block *sblock = spage->sblock;
2051 struct scrub_bio *sbio;
2052 int ret;
2053
2054again:
2055 /*
2056 * grab a fresh bio or wait for one to become available
2057 */
2058 while (sctx->curr == -1) {
2059 spin_lock(&sctx->list_lock);
2060 sctx->curr = sctx->first_free;
2061 if (sctx->curr != -1) {
2062 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2063 sctx->bios[sctx->curr]->next_free = -1;
2064 sctx->bios[sctx->curr]->page_count = 0;
2065 spin_unlock(&sctx->list_lock);
2066 } else {
2067 spin_unlock(&sctx->list_lock);
2068 wait_event(sctx->list_wait, sctx->first_free != -1);
2069 }
2070 }
2071 sbio = sctx->bios[sctx->curr];
2072 if (sbio->page_count == 0) {
2073 struct bio *bio;
2074
2075 sbio->physical = spage->physical;
2076 sbio->logical = spage->logical;
2077 sbio->dev = spage->dev;
2078 bio = sbio->bio;
2079 if (!bio) {
2080 bio = btrfs_io_bio_alloc(GFP_KERNEL,
2081 sctx->pages_per_rd_bio);
2082 if (!bio)
2083 return -ENOMEM;
2084 sbio->bio = bio;
2085 }
2086
2087 bio->bi_private = sbio;
2088 bio->bi_end_io = scrub_bio_end_io;
2089 bio->bi_bdev = sbio->dev->bdev;
2090 bio->bi_iter.bi_sector = sbio->physical >> 9;
2091 sbio->err = 0;
2092 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2093 spage->physical ||
2094 sbio->logical + sbio->page_count * PAGE_SIZE !=
2095 spage->logical ||
2096 sbio->dev != spage->dev) {
2097 scrub_submit(sctx);
2098 goto again;
2099 }
2100
2101 sbio->pagev[sbio->page_count] = spage;
2102 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2103 if (ret != PAGE_SIZE) {
2104 if (sbio->page_count < 1) {
2105 bio_put(sbio->bio);
2106 sbio->bio = NULL;
2107 return -EIO;
2108 }
2109 scrub_submit(sctx);
2110 goto again;
2111 }
2112
2113 scrub_block_get(sblock); /* one for the page added to the bio */
2114 atomic_inc(&sblock->outstanding_pages);
2115 sbio->page_count++;
2116 if (sbio->page_count == sctx->pages_per_rd_bio)
2117 scrub_submit(sctx);
2118
2119 return 0;
2120}
2121
2122static void scrub_missing_raid56_end_io(struct bio *bio)
2123{
2124 struct scrub_block *sblock = bio->bi_private;
2125 struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
2126
2127 if (bio->bi_error)
2128 sblock->no_io_error_seen = 0;
2129
2130 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2131}
2132
2133static void scrub_missing_raid56_worker(struct btrfs_work *work)
2134{
2135 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2136 struct scrub_ctx *sctx = sblock->sctx;
2137 u64 logical;
2138 struct btrfs_device *dev;
2139
2140 logical = sblock->pagev[0]->logical;
2141 dev = sblock->pagev[0]->dev;
2142
2143 if (sblock->no_io_error_seen)
2144 scrub_recheck_block_checksum(sblock);
2145
2146 if (!sblock->no_io_error_seen) {
2147 spin_lock(&sctx->stat_lock);
2148 sctx->stat.read_errors++;
2149 spin_unlock(&sctx->stat_lock);
2150 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
2151 "IO error rebuilding logical %llu for dev %s",
2152 logical, rcu_str_deref(dev->name));
2153 } else if (sblock->header_error || sblock->checksum_error) {
2154 spin_lock(&sctx->stat_lock);
2155 sctx->stat.uncorrectable_errors++;
2156 spin_unlock(&sctx->stat_lock);
2157 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
2158 "failed to rebuild valid logical %llu for dev %s",
2159 logical, rcu_str_deref(dev->name));
2160 } else {
2161 scrub_write_block_to_dev_replace(sblock);
2162 }
2163
2164 scrub_block_put(sblock);
2165
2166 if (sctx->is_dev_replace &&
2167 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2168 mutex_lock(&sctx->wr_ctx.wr_lock);
2169 scrub_wr_submit(sctx);
2170 mutex_unlock(&sctx->wr_ctx.wr_lock);
2171 }
2172
2173 scrub_pending_bio_dec(sctx);
2174}
2175
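/*
 * Pages on a missing device cannot be read directly; rebuild them from
 * the remaining RAID5/6 stripes and feed the result through the normal
 * verification path in the worker.
 */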
2176static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2177{
2178 struct scrub_ctx *sctx = sblock->sctx;
2179 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2180 u64 length = sblock->page_count * PAGE_SIZE;
2181 u64 logical = sblock->pagev[0]->logical;
2182 struct btrfs_bio *bbio;
2183 struct bio *bio;
2184 struct btrfs_raid_bio *rbio;
2185 int ret;
2186 int i;
2187
2188 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
2189 &bbio, 0, 1);
2190 if (ret || !bbio || !bbio->raid_map)
2191 goto bbio_out;
2192
2193 if (WARN_ON(!sctx->is_dev_replace ||
2194 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2195 /*
2196 * We shouldn't be scrubbing a missing device. Even for dev
2197 * replace, we should only get here for RAID 5/6. We either
2198 * managed to mount something with no mirrors remaining or
2199 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2200 */
2201 goto bbio_out;
2202 }
2203
2204 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2205 if (!bio)
2206 goto bbio_out;
2207
2208 bio->bi_iter.bi_sector = logical >> 9;
2209 bio->bi_private = sblock;
2210 bio->bi_end_io = scrub_missing_raid56_end_io;
2211
2212 rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
2213 if (!rbio)
2214 goto rbio_out;
2215
2216 for (i = 0; i < sblock->page_count; i++) {
2217 struct scrub_page *spage = sblock->pagev[i];
2218
2219 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2220 }
2221
2222 btrfs_init_work(&sblock->work, btrfs_scrub_helper,
2223 scrub_missing_raid56_worker, NULL, NULL);
2224 scrub_block_get(sblock);
2225 scrub_pending_bio_inc(sctx);
2226 raid56_submit_missing_rbio(rbio);
2227 return;
2228
2229rbio_out:
2230 bio_put(bio);
2231bbio_out:
2232 btrfs_put_bbio(bbio);
2233 spin_lock(&sctx->stat_lock);
2234 sctx->stat.malloc_errors++;
2235 spin_unlock(&sctx->stat_lock);
2236}
2237
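/*
 * Split the range [logical, logical + len) into PAGE_SIZE pieces, attach
 * them to a freshly allocated scrub_block and queue every page for
 * reading (or, for a missing device, for RAID5/6 reconstruction).
 */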
2238static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2239 u64 physical, struct btrfs_device *dev, u64 flags,
2240 u64 gen, int mirror_num, u8 *csum, int force,
2241 u64 physical_for_dev_replace)
2242{
2243 struct scrub_block *sblock;
2244 int index;
2245
2246 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2247 if (!sblock) {
2248 spin_lock(&sctx->stat_lock);
2249 sctx->stat.malloc_errors++;
2250 spin_unlock(&sctx->stat_lock);
2251 return -ENOMEM;
2252 }
2253
2254 /* one ref inside this function, plus one for each page added to
2255 * a bio later on */
2256 atomic_set(&sblock->refs, 1);
2257 sblock->sctx = sctx;
2258 sblock->no_io_error_seen = 1;
2259
2260 for (index = 0; len > 0; index++) {
2261 struct scrub_page *spage;
2262 u64 l = min_t(u64, len, PAGE_SIZE);
2263
2264 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2265 if (!spage) {
2266leave_nomem:
2267 spin_lock(&sctx->stat_lock);
2268 sctx->stat.malloc_errors++;
2269 spin_unlock(&sctx->stat_lock);
2270 scrub_block_put(sblock);
2271 return -ENOMEM;
2272 }
2273 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2274 scrub_page_get(spage);
2275 sblock->pagev[index] = spage;
2276 spage->sblock = sblock;
2277 spage->dev = dev;
2278 spage->flags = flags;
2279 spage->generation = gen;
2280 spage->logical = logical;
2281 spage->physical = physical;
2282 spage->physical_for_dev_replace = physical_for_dev_replace;
2283 spage->mirror_num = mirror_num;
2284 if (csum) {
2285 spage->have_csum = 1;
2286 memcpy(spage->csum, csum, sctx->csum_size);
2287 } else {
2288 spage->have_csum = 0;
2289 }
2290 sblock->page_count++;
2291 spage->page = alloc_page(GFP_KERNEL);
2292 if (!spage->page)
2293 goto leave_nomem;
2294 len -= l;
2295 logical += l;
2296 physical += l;
2297 physical_for_dev_replace += l;
2298 }
2299
2300 WARN_ON(sblock->page_count == 0);
2301 if (dev->missing) {
2302 /*
2303 * This case should only be hit for RAID 5/6 device replace. See
2304 * the comment in scrub_missing_raid56_pages() for details.
2305 */
2306 scrub_missing_raid56_pages(sblock);
2307 } else {
2308 for (index = 0; index < sblock->page_count; index++) {
2309 struct scrub_page *spage = sblock->pagev[index];
2310 int ret;
2311
2312 ret = scrub_add_page_to_rd_bio(sctx, spage);
2313 if (ret) {
2314 scrub_block_put(sblock);
2315 return ret;
2316 }
2317 }
2318
2319 if (force)
2320 scrub_submit(sctx);
2321 }
2322
2323 /* last one frees, either here or in bio completion for last page */
2324 scrub_block_put(sblock);
2325 return 0;
2326}
2327
2328static void scrub_bio_end_io(struct bio *bio)
2329{
2330 struct scrub_bio *sbio = bio->bi_private;
2331 struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2332
2333 sbio->err = bio->bi_error;
2334 sbio->bio = bio;
2335
2336 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2337}
2338
2339static void scrub_bio_end_io_worker(struct btrfs_work *work)
2340{
2341 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2342 struct scrub_ctx *sctx = sbio->sctx;
2343 int i;
2344
2345 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2346 if (sbio->err) {
2347 for (i = 0; i < sbio->page_count; i++) {
2348 struct scrub_page *spage = sbio->pagev[i];
2349
2350 spage->io_error = 1;
2351 spage->sblock->no_io_error_seen = 0;
2352 }
2353 }
2354
2355	/* Now complete the scrub_block items whose pages have all completed */
2356 for (i = 0; i < sbio->page_count; i++) {
2357 struct scrub_page *spage = sbio->pagev[i];
2358 struct scrub_block *sblock = spage->sblock;
2359
2360 if (atomic_dec_and_test(&sblock->outstanding_pages))
2361 scrub_block_complete(sblock);
2362 scrub_block_put(sblock);
2363 }
2364
2365 bio_put(sbio->bio);
2366 sbio->bio = NULL;
2367 spin_lock(&sctx->list_lock);
2368 sbio->next_free = sctx->first_free;
2369 sctx->first_free = sbio->index;
2370 spin_unlock(&sctx->list_lock);
2371
2372 if (sctx->is_dev_replace &&
2373 atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2374 mutex_lock(&sctx->wr_ctx.wr_lock);
2375 scrub_wr_submit(sctx);
2376 mutex_unlock(&sctx->wr_ctx.wr_lock);
2377 }
2378
2379 scrub_pending_bio_dec(sctx);
2380}
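
/*
 * Note (added for exposition): finished sbios are recycled through an
 * index-linked free list rather than a pointer-linked one. sbio->next_free
 * stores the index of the next free sbio in the sctx's bio array (with -1
 * terminating the list) and sctx->first_free holds the index of the head,
 * so the two assignments above push this sbio under sctx->list_lock.
 */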
2381
2382static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2383 unsigned long *bitmap,
2384 u64 start, u64 len)
2385{
2386 u32 offset;
2387 int nsectors;
2388 int sectorsize = sparity->sctx->dev_root->sectorsize;
2389
2390 if (len >= sparity->stripe_len) {
2391 bitmap_set(bitmap, 0, sparity->nsectors);
2392 return;
2393 }
2394
2395 start -= sparity->logic_start;
2396 start = div_u64_rem(start, sparity->stripe_len, &offset);
2397 offset /= sectorsize;
2398 nsectors = (int)len / sectorsize;
2399
2400 if (offset + nsectors <= sparity->nsectors) {
2401 bitmap_set(bitmap, offset, nsectors);
2402 return;
2403 }
2404
2405 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2406 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2407}
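
/*
 * Worked example (illustrative, assuming a 64KiB stripe_len and 4KiB
 * sectorsize, i.e. nsectors = 16): marking start = logic_start + 56KiB
 * with len = 16KiB gives offset = 14 and a 4-sector range. Since
 * 14 + 4 > 16 the range wraps around the stripe, and the two calls
 * above reduce to:
 *
 *	bitmap_set(bitmap, 14, 2);
 *	bitmap_set(bitmap, 0, 2);
 */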
2408
2409static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2410 u64 start, u64 len)
2411{
2412 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2413}
2414
2415static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2416 u64 start, u64 len)
2417{
2418 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2419}
2420
2421static void scrub_block_complete(struct scrub_block *sblock)
2422{
2423 int corrupted = 0;
2424
2425 if (!sblock->no_io_error_seen) {
2426 corrupted = 1;
2427 scrub_handle_errored_block(sblock);
2428 } else {
2429 /*
2430		 * In the dev-replace case, a block with a checksum error is
2431		 * rewritten via the repair mechanism; otherwise a good block
2432		 * is copied to the target device right here.
2433 */
2434 corrupted = scrub_checksum(sblock);
2435 if (!corrupted && sblock->sctx->is_dev_replace)
2436 scrub_write_block_to_dev_replace(sblock);
2437 }
2438
2439 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2440 u64 start = sblock->pagev[0]->logical;
2441 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2442 PAGE_SIZE;
2443
2444 scrub_parity_mark_sectors_error(sblock->sparity,
2445 start, end - start);
2446 }
2447}
2448
2449static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2450{
2451 struct btrfs_ordered_sum *sum = NULL;
2452 unsigned long index;
2453 unsigned long num_sectors;
2454
2455 while (!list_empty(&sctx->csum_list)) {
2456 sum = list_first_entry(&sctx->csum_list,
2457 struct btrfs_ordered_sum, list);
2458 if (sum->bytenr > logical)
2459 return 0;
2460 if (sum->bytenr + sum->len > logical)
2461 break;
2462
2463 ++sctx->stat.csum_discards;
2464 list_del(&sum->list);
2465 kfree(sum);
2466 sum = NULL;
2467 }
2468 if (!sum)
2469 return 0;
2470
2471 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2472 num_sectors = sum->len / sctx->sectorsize;
2473 memcpy(csum, sum->sums + index, sctx->csum_size);
2474 if (index == num_sectors - 1) {
2475 list_del(&sum->list);
2476 kfree(sum);
2477 }
2478 return 1;
2479}
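
/*
 * Walk-through (added for exposition): since extents are scrubbed in
 * increasing logical order, the head of sctx->csum_list either lies
 * entirely below @logical (stale: freed and counted in csum_discards),
 * covers @logical (the per-sector csum is copied out, and the sum is
 * freed once its last sector has been consumed), or starts above
 * @logical (the sector has no csum and 0 is returned).
 */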
2480
2481 /* scrub_extent() tries to collect up to 64 KiB for each bio */
2482static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2483 u64 physical, struct btrfs_device *dev, u64 flags,
2484 u64 gen, int mirror_num, u64 physical_for_dev_replace)
2485{
2486 int ret;
2487 u8 csum[BTRFS_CSUM_SIZE];
2488 u32 blocksize;
2489
2490 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2491 blocksize = sctx->sectorsize;
2492 spin_lock(&sctx->stat_lock);
2493 sctx->stat.data_extents_scrubbed++;
2494 sctx->stat.data_bytes_scrubbed += len;
2495 spin_unlock(&sctx->stat_lock);
2496 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2497 blocksize = sctx->nodesize;
2498 spin_lock(&sctx->stat_lock);
2499 sctx->stat.tree_extents_scrubbed++;
2500 sctx->stat.tree_bytes_scrubbed += len;
2501 spin_unlock(&sctx->stat_lock);
2502 } else {
2503 blocksize = sctx->sectorsize;
2504 WARN_ON(1);
2505 }
2506
2507 while (len) {
2508 u64 l = min_t(u64, len, blocksize);
2509 int have_csum = 0;
2510
2511 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2512 /* push csums to sbio */
2513 have_csum = scrub_find_csum(sctx, logical, csum);
2514 if (have_csum == 0)
2515 ++sctx->stat.no_csum;
2516 if (sctx->is_dev_replace && !have_csum) {
2517 ret = copy_nocow_pages(sctx, logical, l,
2518 mirror_num,
2519 physical_for_dev_replace);
2520 goto behind_scrub_pages;
2521 }
2522 }
2523 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2524 mirror_num, have_csum ? csum : NULL, 0,
2525 physical_for_dev_replace);
2526behind_scrub_pages:
2527 if (ret)
2528 return ret;
2529 len -= l;
2530 logical += l;
2531 physical += l;
2532 physical_for_dev_replace += l;
2533 }
2534 return 0;
2535}
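
/*
 * Worked example (illustrative): with a 4KiB sectorsize, a 12KiB data
 * extent is processed as three 4KiB blocks. Each iteration looks up the
 * csum at the advancing @logical and issues one scrub_pages() call, so
 * the extent becomes three independently verifiable scrub_blocks.
 */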
2536
2537static int scrub_pages_for_parity(struct scrub_parity *sparity,
2538 u64 logical, u64 len,
2539 u64 physical, struct btrfs_device *dev,
2540 u64 flags, u64 gen, int mirror_num, u8 *csum)
2541{
2542 struct scrub_ctx *sctx = sparity->sctx;
2543 struct scrub_block *sblock;
2544 int index;
2545
2546 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2547 if (!sblock) {
2548 spin_lock(&sctx->stat_lock);
2549 sctx->stat.malloc_errors++;
2550 spin_unlock(&sctx->stat_lock);
2551 return -ENOMEM;
2552 }
2553
2554 /* one ref inside this function, plus one for each page added to
2555 * a bio later on */
2556 atomic_set(&sblock->refs, 1);
2557 sblock->sctx = sctx;
2558 sblock->no_io_error_seen = 1;
2559 sblock->sparity = sparity;
2560 scrub_parity_get(sparity);
2561
2562 for (index = 0; len > 0; index++) {
2563 struct scrub_page *spage;
2564 u64 l = min_t(u64, len, PAGE_SIZE);
2565
2566 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2567 if (!spage) {
2568leave_nomem:
2569 spin_lock(&sctx->stat_lock);
2570 sctx->stat.malloc_errors++;
2571 spin_unlock(&sctx->stat_lock);
2572 scrub_block_put(sblock);
2573 return -ENOMEM;
2574 }
2575 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2576 /* For scrub block */
2577 scrub_page_get(spage);
2578 sblock->pagev[index] = spage;
2579 /* For scrub parity */
2580 scrub_page_get(spage);
2581 list_add_tail(&spage->list, &sparity->spages);
2582 spage->sblock = sblock;
2583 spage->dev = dev;
2584 spage->flags = flags;
2585 spage->generation = gen;
2586 spage->logical = logical;
2587 spage->physical = physical;
2588 spage->mirror_num = mirror_num;
2589 if (csum) {
2590 spage->have_csum = 1;
2591 memcpy(spage->csum, csum, sctx->csum_size);
2592 } else {
2593 spage->have_csum = 0;
2594 }
2595 sblock->page_count++;
2596 spage->page = alloc_page(GFP_KERNEL);
2597 if (!spage->page)
2598 goto leave_nomem;
2599 len -= l;
2600 logical += l;
2601 physical += l;
2602 }
2603
2604 WARN_ON(sblock->page_count == 0);
2605 for (index = 0; index < sblock->page_count; index++) {
2606 struct scrub_page *spage = sblock->pagev[index];
2607 int ret;
2608
2609 ret = scrub_add_page_to_rd_bio(sctx, spage);
2610 if (ret) {
2611 scrub_block_put(sblock);
2612 return ret;
2613 }
2614 }
2615
2616 /* last one frees, either here or in bio completion for last page */
2617 scrub_block_put(sblock);
2618 return 0;
2619}
2620
2621static int scrub_extent_for_parity(struct scrub_parity *sparity,
2622 u64 logical, u64 len,
2623 u64 physical, struct btrfs_device *dev,
2624 u64 flags, u64 gen, int mirror_num)
2625{
2626 struct scrub_ctx *sctx = sparity->sctx;
2627 int ret;
2628 u8 csum[BTRFS_CSUM_SIZE];
2629 u32 blocksize;
2630
2631 if (dev->missing) {
2632 scrub_parity_mark_sectors_error(sparity, logical, len);
2633 return 0;
2634 }
2635
2636 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2637 blocksize = sctx->sectorsize;
2638 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2639 blocksize = sctx->nodesize;
2640 } else {
2641 blocksize = sctx->sectorsize;
2642 WARN_ON(1);
2643 }
2644
2645 while (len) {
2646 u64 l = min_t(u64, len, blocksize);
2647 int have_csum = 0;
2648
2649 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2650 /* push csums to sbio */
2651 have_csum = scrub_find_csum(sctx, logical, csum);
2652 if (have_csum == 0)
2653 goto skip;
2654 }
2655 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2656 flags, gen, mirror_num,
2657 have_csum ? csum : NULL);
2658 if (ret)
2659 return ret;
2660skip:
2661 len -= l;
2662 logical += l;
2663 physical += l;
2664 }
2665 return 0;
2666}
2667
2668/*
2669 * Given a physical address, this will calculate its
2670 * logical offset. If this is a parity stripe, it will return
2671 * the leftmost data stripe's logical offset.
2672 *
2673 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2674 */
2675static int get_raid56_logic_offset(u64 physical, int num,
2676 struct map_lookup *map, u64 *offset,
2677 u64 *stripe_start)
2678{
2679 int i;
2680 int j = 0;
2681 u64 stripe_nr;
2682 u64 last_offset;
2683 u32 stripe_index;
2684 u32 rot;
2685
2686 last_offset = (physical - map->stripes[num].physical) *
2687 nr_data_stripes(map);
2688 if (stripe_start)
2689 *stripe_start = last_offset;
2690
2691 *offset = last_offset;
2692 for (i = 0; i < nr_data_stripes(map); i++) {
2693 *offset = last_offset + i * map->stripe_len;
2694
2695 stripe_nr = div_u64(*offset, map->stripe_len);
2696 stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
2697
2698 /* Work out the disk rotation on this stripe-set */
2699 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2700		/* Calculate which stripe this data is located on */
2701 rot += i;
2702 stripe_index = rot % map->num_stripes;
2703 if (stripe_index == num)
2704 return 0;
2705 if (stripe_index < num)
2706 j++;
2707 }
2708 *offset = last_offset + j * map->stripe_len;
2709 return 1;
2710}
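
/*
 * Worked example (illustrative, assuming a 3-device RAID5, i.e. two data
 * stripes plus parity, with a 64KiB stripe_len): for num = 0 and a
 * physical offset of 128KiB into that device, last_offset = 256KiB.
 * Candidate i = 0 (logical offset 256KiB) rotates onto device 2, but
 * i = 1 (logical offset 320KiB) gives rot = 2 + 1 and stripe_index =
 * 3 % 3 = 0 == num, so the function returns 0 with *offset = 320KiB.
 * If no candidate matches, the physical sector holds parity and 1 is
 * returned instead.
 */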
2711
2712static void scrub_free_parity(struct scrub_parity *sparity)
2713{
2714 struct scrub_ctx *sctx = sparity->sctx;
2715 struct scrub_page *curr, *next;
2716 int nbits;
2717
2718 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2719 if (nbits) {
2720 spin_lock(&sctx->stat_lock);
2721 sctx->stat.read_errors += nbits;
2722 sctx->stat.uncorrectable_errors += nbits;
2723 spin_unlock(&sctx->stat_lock);
2724 }
2725
2726 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2727 list_del_init(&curr->list);
2728 scrub_page_put(curr);
2729 }
2730
2731 kfree(sparity);
2732}
2733
2734static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2735{
2736 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2737 work);
2738 struct scrub_ctx *sctx = sparity->sctx;
2739
2740 scrub_free_parity(sparity);
2741 scrub_pending_bio_dec(sctx);
2742}
2743
2744static void scrub_parity_bio_endio(struct bio *bio)
2745{
2746 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2747
2748 if (bio->bi_error)
2749 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2750 sparity->nsectors);
2751
2752 bio_put(bio);
2753
2754 btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
2755 scrub_parity_bio_endio_worker, NULL, NULL);
2756 btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
2757 &sparity->work);
2758}
2759
2760static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2761{
2762 struct scrub_ctx *sctx = sparity->sctx;
2763 struct bio *bio;
2764 struct btrfs_raid_bio *rbio;
2765 struct scrub_page *spage;
2766 struct btrfs_bio *bbio = NULL;
2767 u64 length;
2768 int ret;
2769
2770 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2771 sparity->nsectors))
2772 goto out;
2773
2774 length = sparity->logic_end - sparity->logic_start;
2775 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2776 sparity->logic_start,
2777 &length, &bbio, 0, 1);
2778 if (ret || !bbio || !bbio->raid_map)
2779 goto bbio_out;
2780
2781 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2782 if (!bio)
2783 goto bbio_out;
2784
2785 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2786 bio->bi_private = sparity;
2787 bio->bi_end_io = scrub_parity_bio_endio;
2788
2789 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2790 length, sparity->scrub_dev,
2791 sparity->dbitmap,
2792 sparity->nsectors);
2793 if (!rbio)
2794 goto rbio_out;
2795
2796 list_for_each_entry(spage, &sparity->spages, list)
2797 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2798
2799 scrub_pending_bio_inc(sctx);
2800 raid56_parity_submit_scrub_rbio(rbio);
2801 return;
2802
2803rbio_out:
2804 bio_put(bio);
2805bbio_out:
2806 btrfs_put_bbio(bbio);
2807 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2808 sparity->nsectors);
2809 spin_lock(&sctx->stat_lock);
2810 sctx->stat.malloc_errors++;
2811 spin_unlock(&sctx->stat_lock);
2812out:
2813 scrub_free_parity(sparity);
2814}
2815
2816static inline int scrub_calc_parity_bitmap_len(int nsectors)
2817{
2818 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2819}
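
/*
 * Worked example (illustrative): with a 64KiB stripe_len and 4KiB
 * sectors, nsectors = 16 and DIV_ROUND_UP(16, 64) = 1 long, i.e. 8
 * bytes on a 64-bit machine. scrub_raid56_parity() below allocates
 * 2 * bitmap_len so that dbitmap and ebitmap share a single allocation.
 */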
2820
2821static void scrub_parity_get(struct scrub_parity *sparity)
2822{
2823 atomic_inc(&sparity->refs);
2824}
2825
2826static void scrub_parity_put(struct scrub_parity *sparity)
2827{
2828 if (!atomic_dec_and_test(&sparity->refs))
2829 return;
2830
2831 scrub_parity_check_and_repair(sparity);
2832}
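
/*
 * Note (added for exposition): the sparity refcount follows the usual
 * "initial ref plus one per outstanding user" pattern: scrub_raid56_parity()
 * starts the count at 1, scrub_pages_for_parity() takes one reference per
 * sblock, and whoever drops the last reference kicks off the
 * check-and-repair pass above, which eventually frees the structure.
 */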
2833
2834static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2835 struct map_lookup *map,
2836 struct btrfs_device *sdev,
2837 struct btrfs_path *path,
2838 u64 logic_start,
2839 u64 logic_end)
2840{
2841 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2842 struct btrfs_root *root = fs_info->extent_root;
2843 struct btrfs_root *csum_root = fs_info->csum_root;
2844 struct btrfs_extent_item *extent;
2845 struct btrfs_bio *bbio = NULL;
2846 u64 flags;
2847 int ret;
2848 int slot;
2849 struct extent_buffer *l;
2850 struct btrfs_key key;
2851 u64 generation;
2852 u64 extent_logical;
2853 u64 extent_physical;
2854 u64 extent_len;
2855 u64 mapped_length;
2856 struct btrfs_device *extent_dev;
2857 struct scrub_parity *sparity;
2858 int nsectors;
2859 int bitmap_len;
2860 int extent_mirror_num;
2861 int stop_loop = 0;
2862
2863 nsectors = map->stripe_len / root->sectorsize;
2864 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2865 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2866 GFP_NOFS);
2867 if (!sparity) {
2868 spin_lock(&sctx->stat_lock);
2869 sctx->stat.malloc_errors++;
2870 spin_unlock(&sctx->stat_lock);
2871 return -ENOMEM;
2872 }
2873
2874 sparity->stripe_len = map->stripe_len;
2875 sparity->nsectors = nsectors;
2876 sparity->sctx = sctx;
2877 sparity->scrub_dev = sdev;
2878 sparity->logic_start = logic_start;
2879 sparity->logic_end = logic_end;
2880 atomic_set(&sparity->refs, 1);
2881 INIT_LIST_HEAD(&sparity->spages);
2882 sparity->dbitmap = sparity->bitmap;
2883 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2884
2885 ret = 0;
2886 while (logic_start < logic_end) {
2887 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2888 key.type = BTRFS_METADATA_ITEM_KEY;
2889 else
2890 key.type = BTRFS_EXTENT_ITEM_KEY;
2891 key.objectid = logic_start;
2892 key.offset = (u64)-1;
2893
2894 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2895 if (ret < 0)
2896 goto out;
2897
2898 if (ret > 0) {
2899 ret = btrfs_previous_extent_item(root, path, 0);
2900 if (ret < 0)
2901 goto out;
2902 if (ret > 0) {
2903 btrfs_release_path(path);
2904 ret = btrfs_search_slot(NULL, root, &key,
2905 path, 0, 0);
2906 if (ret < 0)
2907 goto out;
2908 }
2909 }
2910
2911 stop_loop = 0;
2912 while (1) {
2913 u64 bytes;
2914
2915 l = path->nodes[0];
2916 slot = path->slots[0];
2917 if (slot >= btrfs_header_nritems(l)) {
2918 ret = btrfs_next_leaf(root, path);
2919 if (ret == 0)
2920 continue;
2921 if (ret < 0)
2922 goto out;
2923
2924 stop_loop = 1;
2925 break;
2926 }
2927 btrfs_item_key_to_cpu(l, &key, slot);
2928
2929 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2930 key.type != BTRFS_METADATA_ITEM_KEY)
2931 goto next;
2932
2933 if (key.type == BTRFS_METADATA_ITEM_KEY)
2934 bytes = root->nodesize;
2935 else
2936 bytes = key.offset;
2937
2938 if (key.objectid + bytes <= logic_start)
2939 goto next;
2940
2941 if (key.objectid >= logic_end) {
2942 stop_loop = 1;
2943 break;
2944 }
2945
2946 while (key.objectid >= logic_start + map->stripe_len)
2947 logic_start += map->stripe_len;
2948
2949 extent = btrfs_item_ptr(l, slot,
2950 struct btrfs_extent_item);
2951 flags = btrfs_extent_flags(l, extent);
2952 generation = btrfs_extent_generation(l, extent);
2953
2954 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
2955 (key.objectid < logic_start ||
2956 key.objectid + bytes >
2957 logic_start + map->stripe_len)) {
2958 btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2959 key.objectid, logic_start);
2960 spin_lock(&sctx->stat_lock);
2961 sctx->stat.uncorrectable_errors++;
2962 spin_unlock(&sctx->stat_lock);
2963 goto next;
2964 }
2965again:
2966 extent_logical = key.objectid;
2967 extent_len = bytes;
2968
2969 if (extent_logical < logic_start) {
2970 extent_len -= logic_start - extent_logical;
2971 extent_logical = logic_start;
2972 }
2973
2974 if (extent_logical + extent_len >
2975 logic_start + map->stripe_len)
2976 extent_len = logic_start + map->stripe_len -
2977 extent_logical;
2978
2979 scrub_parity_mark_sectors_data(sparity, extent_logical,
2980 extent_len);
2981
2982 mapped_length = extent_len;
2983 ret = btrfs_map_block(fs_info, READ, extent_logical,
2984 &mapped_length, &bbio, 0);
2985 if (!ret) {
2986 if (!bbio || mapped_length < extent_len)
2987 ret = -EIO;
2988 }
2989 if (ret) {
2990 btrfs_put_bbio(bbio);
2991 goto out;
2992 }
2993 extent_physical = bbio->stripes[0].physical;
2994 extent_mirror_num = bbio->mirror_num;
2995 extent_dev = bbio->stripes[0].dev;
2996 btrfs_put_bbio(bbio);
2997
2998 ret = btrfs_lookup_csums_range(csum_root,
2999 extent_logical,
3000 extent_logical + extent_len - 1,
3001 &sctx->csum_list, 1);
3002 if (ret)
3003 goto out;
3004
3005 ret = scrub_extent_for_parity(sparity, extent_logical,
3006 extent_len,
3007 extent_physical,
3008 extent_dev, flags,
3009 generation,
3010 extent_mirror_num);
3011
3012 scrub_free_csums(sctx);
3013
3014 if (ret)
3015 goto out;
3016
3017 if (extent_logical + extent_len <
3018 key.objectid + bytes) {
3019 logic_start += map->stripe_len;
3020
3021 if (logic_start >= logic_end) {
3022 stop_loop = 1;
3023 break;
3024 }
3025
3026 if (logic_start < key.objectid + bytes) {
3027 cond_resched();
3028 goto again;
3029 }
3030 }
3031next:
3032 path->slots[0]++;
3033 }
3034
3035 btrfs_release_path(path);
3036
3037 if (stop_loop)
3038 break;
3039
3040 logic_start += map->stripe_len;
3041 }
3042out:
3043 if (ret < 0)
3044 scrub_parity_mark_sectors_error(sparity, logic_start,
3045 logic_end - logic_start);
3046 scrub_parity_put(sparity);
3047 scrub_submit(sctx);
3048 mutex_lock(&sctx->wr_ctx.wr_lock);
3049 scrub_wr_submit(sctx);
3050 mutex_unlock(&sctx->wr_ctx.wr_lock);
3051
3052 btrfs_release_path(path);
3053 return ret < 0 ? ret : 0;
3054}
3055
3056static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3057 struct map_lookup *map,
3058 struct btrfs_device *scrub_dev,
3059 int num, u64 base, u64 length,
3060 int is_dev_replace)
3061{
3062 struct btrfs_path *path, *ppath;
3063 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3064 struct btrfs_root *root = fs_info->extent_root;
3065 struct btrfs_root *csum_root = fs_info->csum_root;
3066 struct btrfs_extent_item *extent;
3067 struct blk_plug plug;
3068 u64 flags;
3069 int ret;
3070 int slot;
3071 u64 nstripes;
3072 struct extent_buffer *l;
3073 struct btrfs_key key;
3074 u64 physical;
3075 u64 logical;
3076 u64 logic_end;
3077 u64 physical_end;
3078 u64 generation;
3079 int mirror_num;
3080 struct reada_control *reada1;
3081 struct reada_control *reada2;
3082 struct btrfs_key key_start;
3083 struct btrfs_key key_end;
3084 u64 increment = map->stripe_len;
3085 u64 offset;
3086 u64 extent_logical;
3087 u64 extent_physical;
3088 u64 extent_len;
3089 u64 stripe_logical;
3090 u64 stripe_end;
3091 struct btrfs_device *extent_dev;
3092 int extent_mirror_num;
3093 int stop_loop = 0;
3094
3095 physical = map->stripes[num].physical;
3096 offset = 0;
3097 nstripes = div_u64(length, map->stripe_len);
3098 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3099 offset = map->stripe_len * num;
3100 increment = map->stripe_len * map->num_stripes;
3101 mirror_num = 1;
3102 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3103 int factor = map->num_stripes / map->sub_stripes;
3104 offset = map->stripe_len * (num / map->sub_stripes);
3105 increment = map->stripe_len * factor;
3106 mirror_num = num % map->sub_stripes + 1;
3107 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3108 increment = map->stripe_len;
3109 mirror_num = num % map->num_stripes + 1;
3110 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3111 increment = map->stripe_len;
3112 mirror_num = num % map->num_stripes + 1;
3113 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3114 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3115 increment = map->stripe_len * nr_data_stripes(map);
3116 mirror_num = 1;
3117 } else {
3118 increment = map->stripe_len;
3119 mirror_num = 1;
3120 }
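
/*
 * Worked example (illustrative): for RAID10 with num_stripes = 4 and
 * sub_stripes = 2, scrubbing stripe num = 3 gives factor = 2, hence
 * offset = stripe_len * 1, increment = stripe_len * 2 and
 * mirror_num = 3 % 2 + 1 = 2: this device holds the second copy of
 * every other stripe_len-sized chunk.
 */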
3121
3122 path = btrfs_alloc_path();
3123 if (!path)
3124 return -ENOMEM;
3125
3126 ppath = btrfs_alloc_path();
3127 if (!ppath) {
3128 btrfs_free_path(path);
3129 return -ENOMEM;
3130 }
3131
3132 /*
3133	 * Work on the commit root. The related disk blocks are static as
3134	 * long as COW is applied. This means it is safe to rewrite
3135	 * them to repair disk errors without any race conditions.
3136 */
3137 path->search_commit_root = 1;
3138 path->skip_locking = 1;
3139
3140 ppath->search_commit_root = 1;
3141 ppath->skip_locking = 1;
3142 /*
3143	 * Trigger readahead for the extent tree and csum tree and wait for
3144	 * completion. During readahead, the scrub is officially paused
3145	 * so as not to hold off transaction commits.
3146 */
3147 logical = base + offset;
3148 physical_end = physical + nstripes * map->stripe_len;
3149 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3150 get_raid56_logic_offset(physical_end, num,
3151 map, &logic_end, NULL);
3152 logic_end += base;
3153 } else {
3154 logic_end = logical + increment * nstripes;
3155 }
3156 wait_event(sctx->list_wait,
3157 atomic_read(&sctx->bios_in_flight) == 0);
3158 scrub_blocked_if_needed(fs_info);
3159
3160 /* FIXME it might be better to start readahead at commit root */
3161 key_start.objectid = logical;
3162 key_start.type = BTRFS_EXTENT_ITEM_KEY;
3163 key_start.offset = (u64)0;
3164 key_end.objectid = logic_end;
3165 key_end.type = BTRFS_METADATA_ITEM_KEY;
3166 key_end.offset = (u64)-1;
3167 reada1 = btrfs_reada_add(root, &key_start, &key_end);
3168
3169 key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3170 key_start.type = BTRFS_EXTENT_CSUM_KEY;
3171 key_start.offset = logical;
3172 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3173 key_end.type = BTRFS_EXTENT_CSUM_KEY;
3174 key_end.offset = logic_end;
3175 reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
3176
3177 if (!IS_ERR(reada1))
3178 btrfs_reada_wait(reada1);
3179 if (!IS_ERR(reada2))
3180 btrfs_reada_wait(reada2);
3181
3183 /*
3184	 * Collect all data csums for the stripe to avoid seeking during
3185	 * the scrub. This might currently (with crc32) end up being about 1MiB.
3186 */
3187 blk_start_plug(&plug);
3188
3189 /*
3190 * now find all extents for each stripe and scrub them
3191 */
3192 ret = 0;
3193 while (physical < physical_end) {
3194 /*
3195 * canceled?
3196 */
3197 if (atomic_read(&fs_info->scrub_cancel_req) ||
3198 atomic_read(&sctx->cancel_req)) {
3199 ret = -ECANCELED;
3200 goto out;
3201 }
3202 /*
3203 * check to see if we have to pause
3204 */
3205 if (atomic_read(&fs_info->scrub_pause_req)) {
3206 /* push queued extents */
3207 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3208 scrub_submit(sctx);
3209 mutex_lock(&sctx->wr_ctx.wr_lock);
3210 scrub_wr_submit(sctx);
3211 mutex_unlock(&sctx->wr_ctx.wr_lock);
3212 wait_event(sctx->list_wait,
3213 atomic_read(&sctx->bios_in_flight) == 0);
3214 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3215 scrub_blocked_if_needed(fs_info);
3216 }
3217
3218 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3219 ret = get_raid56_logic_offset(physical, num, map,
3220 &logical,
3221 &stripe_logical);
3222 logical += base;
3223 if (ret) {
3224				/* it is a parity stripe */
3225 stripe_logical += base;
3226 stripe_end = stripe_logical + increment;
3227 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3228 ppath, stripe_logical,
3229 stripe_end);
3230 if (ret)
3231 goto out;
3232 goto skip;
3233 }
3234 }
3235
3236 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3237 key.type = BTRFS_METADATA_ITEM_KEY;
3238 else
3239 key.type = BTRFS_EXTENT_ITEM_KEY;
3240 key.objectid = logical;
3241 key.offset = (u64)-1;
3242
3243 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3244 if (ret < 0)
3245 goto out;
3246
3247 if (ret > 0) {
3248 ret = btrfs_previous_extent_item(root, path, 0);
3249 if (ret < 0)
3250 goto out;
3251 if (ret > 0) {
3252 /* there's no smaller item, so stick with the
3253 * larger one */
3254 btrfs_release_path(path);
3255 ret = btrfs_search_slot(NULL, root, &key,
3256 path, 0, 0);
3257 if (ret < 0)
3258 goto out;
3259 }
3260 }
3261
3262 stop_loop = 0;
3263 while (1) {
3264 u64 bytes;
3265
3266 l = path->nodes[0];
3267 slot = path->slots[0];
3268 if (slot >= btrfs_header_nritems(l)) {
3269 ret = btrfs_next_leaf(root, path);
3270 if (ret == 0)
3271 continue;
3272 if (ret < 0)
3273 goto out;
3274
3275 stop_loop = 1;
3276 break;
3277 }
3278 btrfs_item_key_to_cpu(l, &key, slot);
3279
3280 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3281 key.type != BTRFS_METADATA_ITEM_KEY)
3282 goto next;
3283
3284 if (key.type == BTRFS_METADATA_ITEM_KEY)
3285 bytes = root->nodesize;
3286 else
3287 bytes = key.offset;
3288
3289 if (key.objectid + bytes <= logical)
3290 goto next;
3291
3292 if (key.objectid >= logical + map->stripe_len) {
3293 /* out of this device extent */
3294 if (key.objectid >= logic_end)
3295 stop_loop = 1;
3296 break;
3297 }
3298
3299 extent = btrfs_item_ptr(l, slot,
3300 struct btrfs_extent_item);
3301 flags = btrfs_extent_flags(l, extent);
3302 generation = btrfs_extent_generation(l, extent);
3303
3304 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3305 (key.objectid < logical ||
3306 key.objectid + bytes >
3307 logical + map->stripe_len)) {
3308				btrfs_err(fs_info,
3309					   "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3310					   key.objectid, logical);
3312 spin_lock(&sctx->stat_lock);
3313 sctx->stat.uncorrectable_errors++;
3314 spin_unlock(&sctx->stat_lock);
3315 goto next;
3316 }
3317
3318again:
3319 extent_logical = key.objectid;
3320 extent_len = bytes;
3321
3322 /*
3323 * trim extent to this stripe
3324 */
3325 if (extent_logical < logical) {
3326 extent_len -= logical - extent_logical;
3327 extent_logical = logical;
3328 }
3329 if (extent_logical + extent_len >
3330 logical + map->stripe_len) {
3331 extent_len = logical + map->stripe_len -
3332 extent_logical;
3333 }
3334
3335 extent_physical = extent_logical - logical + physical;
3336 extent_dev = scrub_dev;
3337 extent_mirror_num = mirror_num;
3338 if (is_dev_replace)
3339 scrub_remap_extent(fs_info, extent_logical,
3340 extent_len, &extent_physical,
3341 &extent_dev,
3342 &extent_mirror_num);
3343
3344 ret = btrfs_lookup_csums_range(csum_root,
3345 extent_logical,
3346 extent_logical +
3347 extent_len - 1,
3348 &sctx->csum_list, 1);
3349 if (ret)
3350 goto out;
3351
3352 ret = scrub_extent(sctx, extent_logical, extent_len,
3353 extent_physical, extent_dev, flags,
3354 generation, extent_mirror_num,
3355 extent_logical - logical + physical);
3356
3357 scrub_free_csums(sctx);
3358
3359 if (ret)
3360 goto out;
3361
3362 if (extent_logical + extent_len <
3363 key.objectid + bytes) {
3364 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3365 /*
3366 * loop until we find next data stripe
3367 * or we have finished all stripes.
3368 */
3369loop:
3370 physical += map->stripe_len;
3371 ret = get_raid56_logic_offset(physical,
3372 num, map, &logical,
3373 &stripe_logical);
3374 logical += base;
3375
3376 if (ret && physical < physical_end) {
3377 stripe_logical += base;
3378 stripe_end = stripe_logical +
3379 increment;
3380 ret = scrub_raid56_parity(sctx,
3381 map, scrub_dev, ppath,
3382 stripe_logical,
3383 stripe_end);
3384 if (ret)
3385 goto out;
3386 goto loop;
3387 }
3388 } else {
3389 physical += map->stripe_len;
3390 logical += increment;
3391 }
3392 if (logical < key.objectid + bytes) {
3393 cond_resched();
3394 goto again;
3395 }
3396
3397 if (physical >= physical_end) {
3398 stop_loop = 1;
3399 break;
3400 }
3401 }
3402next:
3403 path->slots[0]++;
3404 }
3405 btrfs_release_path(path);
3406skip:
3407 logical += increment;
3408 physical += map->stripe_len;
3409 spin_lock(&sctx->stat_lock);
3410 if (stop_loop)
3411 sctx->stat.last_physical = map->stripes[num].physical +
3412 length;
3413 else
3414 sctx->stat.last_physical = physical;
3415 spin_unlock(&sctx->stat_lock);
3416 if (stop_loop)
3417 break;
3418 }
3419out:
3420 /* push queued extents */
3421 scrub_submit(sctx);
3422 mutex_lock(&sctx->wr_ctx.wr_lock);
3423 scrub_wr_submit(sctx);
3424 mutex_unlock(&sctx->wr_ctx.wr_lock);
3425
3426 blk_finish_plug(&plug);
3427 btrfs_free_path(path);
3428 btrfs_free_path(ppath);
3429 return ret < 0 ? ret : 0;
3430}
3431
3432static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3433 struct btrfs_device *scrub_dev,
3434 u64 chunk_offset, u64 length,
3435 u64 dev_offset,
3436 struct btrfs_block_group_cache *cache,
3437 int is_dev_replace)
3438{
3439 struct btrfs_mapping_tree *map_tree =
3440 &sctx->dev_root->fs_info->mapping_tree;
3441 struct map_lookup *map;
3442 struct extent_map *em;
3443 int i;
3444 int ret = 0;
3445
3446 read_lock(&map_tree->map_tree.lock);
3447 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3448 read_unlock(&map_tree->map_tree.lock);
3449
3450 if (!em) {
3451 /*
3452 * Might have been an unused block group deleted by the cleaner
3453 * kthread or relocation.
3454 */
3455 spin_lock(&cache->lock);
3456 if (!cache->removed)
3457 ret = -EINVAL;
3458 spin_unlock(&cache->lock);
3459
3460 return ret;
3461 }
3462
3463 map = em->map_lookup;
3464 if (em->start != chunk_offset)
3465 goto out;
3466
3467 if (em->len < length)
3468 goto out;
3469
3470 for (i = 0; i < map->num_stripes; ++i) {
3471 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3472 map->stripes[i].physical == dev_offset) {
3473 ret = scrub_stripe(sctx, map, scrub_dev, i,
3474 chunk_offset, length,
3475 is_dev_replace);
3476 if (ret)
3477 goto out;
3478 }
3479 }
3480out:
3481 free_extent_map(em);
3482
3483 return ret;
3484}
3485
3486static noinline_for_stack
3487int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3488 struct btrfs_device *scrub_dev, u64 start, u64 end,
3489 int is_dev_replace)
3490{
3491 struct btrfs_dev_extent *dev_extent = NULL;
3492 struct btrfs_path *path;
3493 struct btrfs_root *root = sctx->dev_root;
3494 struct btrfs_fs_info *fs_info = root->fs_info;
3495 u64 length;
3496 u64 chunk_offset;
3497 int ret = 0;
3498 int ro_set;
3499 int slot;
3500 struct extent_buffer *l;
3501 struct btrfs_key key;
3502 struct btrfs_key found_key;
3503 struct btrfs_block_group_cache *cache;
3504 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3505
3506 path = btrfs_alloc_path();
3507 if (!path)
3508 return -ENOMEM;
3509
3510 path->reada = READA_FORWARD;
3511 path->search_commit_root = 1;
3512 path->skip_locking = 1;
3513
3514 key.objectid = scrub_dev->devid;
3515 key.offset = 0ull;
3516 key.type = BTRFS_DEV_EXTENT_KEY;
3517
3518 while (1) {
3519 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3520 if (ret < 0)
3521 break;
3522 if (ret > 0) {
3523 if (path->slots[0] >=
3524 btrfs_header_nritems(path->nodes[0])) {
3525 ret = btrfs_next_leaf(root, path);
3526 if (ret < 0)
3527 break;
3528 if (ret > 0) {
3529 ret = 0;
3530 break;
3531 }
3532 } else {
3533 ret = 0;
3534 }
3535 }
3536
3537 l = path->nodes[0];
3538 slot = path->slots[0];
3539
3540 btrfs_item_key_to_cpu(l, &found_key, slot);
3541
3542 if (found_key.objectid != scrub_dev->devid)
3543 break;
3544
3545 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3546 break;
3547
3548 if (found_key.offset >= end)
3549 break;
3550
3551 if (found_key.offset < key.offset)
3552 break;
3553
3554 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3555 length = btrfs_dev_extent_length(l, dev_extent);
3556
3557 if (found_key.offset + length <= start)
3558 goto skip;
3559
3560 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3561
3562 /*
3563 * get a reference on the corresponding block group to prevent
3564 * the chunk from going away while we scrub it
3565 */
3566 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3567
3568 /* some chunks are removed but not committed to disk yet,
3569 * continue scrubbing */
3570 if (!cache)
3571 goto skip;
3572
3573 /*
3574		 * We need to call btrfs_inc_block_group_ro() with scrubs_paused
3575		 * to avoid a deadlock caused by:
3576 * btrfs_inc_block_group_ro()
3577 * -> btrfs_wait_for_commit()
3578 * -> btrfs_commit_transaction()
3579 * -> btrfs_scrub_pause()
3580 */
3581 scrub_pause_on(fs_info);
3582 ret = btrfs_inc_block_group_ro(root, cache);
3583 scrub_pause_off(fs_info);
3584
3585 if (ret == 0) {
3586 ro_set = 1;
3587 } else if (ret == -ENOSPC) {
3588 /*
3589			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
3590			 * fails to create a new chunk for metadata.
3591			 * This is not a problem for scrub/replace, because
3592			 * metadata is always COWed, and our scrub pauses
3593			 * transaction commits.
3594 */
3595 ro_set = 0;
3596 } else {
3597			btrfs_warn(fs_info, "failed setting block group ro, ret=%d",
3598 ret);
3599 btrfs_put_block_group(cache);
3600 break;
3601 }
3602
3603 dev_replace->cursor_right = found_key.offset + length;
3604 dev_replace->cursor_left = found_key.offset;
3605 dev_replace->item_needs_writeback = 1;
3606 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3607 found_key.offset, cache, is_dev_replace);
3608
3609 /*
3610		 * Flush and submit all pending read and write bios, then
3611		 * wait for them.
3612 * Note that in the dev replace case, a read request causes
3613 * write requests that are submitted in the read completion
3614 * worker. Therefore in the current situation, it is required
3615 * that all write requests are flushed, so that all read and
3616 * write requests are really completed when bios_in_flight
3617 * changes to 0.
3618 */
3619 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3620 scrub_submit(sctx);
3621 mutex_lock(&sctx->wr_ctx.wr_lock);
3622 scrub_wr_submit(sctx);
3623 mutex_unlock(&sctx->wr_ctx.wr_lock);
3624
3625 wait_event(sctx->list_wait,
3626 atomic_read(&sctx->bios_in_flight) == 0);
3627
3628 scrub_pause_on(fs_info);
3629
3630 /*
3631		 * Must be called before we decrease @scrub_paused.
3632		 * Make sure we don't block transaction commit while
3633		 * we are waiting for pending workers to finish.
3634 */
3635 wait_event(sctx->list_wait,
3636 atomic_read(&sctx->workers_pending) == 0);
3637 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3638
3639 scrub_pause_off(fs_info);
3640
3641 if (ro_set)
3642 btrfs_dec_block_group_ro(root, cache);
3643
3644 /*
3645 * We might have prevented the cleaner kthread from deleting
3646 * this block group if it was already unused because we raced
3647 * and set it to RO mode first. So add it back to the unused
3648 * list, otherwise it might not ever be deleted unless a manual
3649 * balance is triggered or it becomes used and unused again.
3650 */
3651 spin_lock(&cache->lock);
3652 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3653 btrfs_block_group_used(&cache->item) == 0) {
3654 spin_unlock(&cache->lock);
3655 spin_lock(&fs_info->unused_bgs_lock);
3656 if (list_empty(&cache->bg_list)) {
3657 btrfs_get_block_group(cache);
3658 list_add_tail(&cache->bg_list,
3659 &fs_info->unused_bgs);
3660 }
3661 spin_unlock(&fs_info->unused_bgs_lock);
3662 } else {
3663 spin_unlock(&cache->lock);
3664 }
3665
3666 btrfs_put_block_group(cache);
3667 if (ret)
3668 break;
3669 if (is_dev_replace &&
3670 atomic64_read(&dev_replace->num_write_errors) > 0) {
3671 ret = -EIO;
3672 break;
3673 }
3674 if (sctx->stat.malloc_errors > 0) {
3675 ret = -ENOMEM;
3676 break;
3677 }
3678
3679 dev_replace->cursor_left = dev_replace->cursor_right;
3680 dev_replace->item_needs_writeback = 1;
3681skip:
3682 key.offset = found_key.offset + length;
3683 btrfs_release_path(path);
3684 }
3685
3686 btrfs_free_path(path);
3687
3688 return ret;
3689}
3690
3691static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3692 struct btrfs_device *scrub_dev)
3693{
3694 int i;
3695 u64 bytenr;
3696 u64 gen;
3697 int ret;
3698 struct btrfs_root *root = sctx->dev_root;
3699
3700 if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
3701 return -EIO;
3702
3703	/* Seed devices of a new filesystem have their own generation. */
3704 if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3705 gen = scrub_dev->generation;
3706 else
3707 gen = root->fs_info->last_trans_committed;
3708
3709 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3710 bytenr = btrfs_sb_offset(i);
3711 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3712 scrub_dev->commit_total_bytes)
3713 break;
3714
3715 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3716 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3717 NULL, 1, bytenr);
3718 if (ret)
3719 return ret;
3720 }
3721 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3722
3723 return 0;
3724}
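
/*
 * Note (added for exposition): btrfs_sb_offset(i) yields the fixed
 * superblock mirror locations, conventionally 64KiB, 64MiB and 256GiB
 * for the three mirrors of BTRFS_SUPER_MIRROR_MAX; mirrors that would
 * lie beyond commit_total_bytes are skipped by the bounds check above.
 */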
3725
3726/*
3727 * Get a reference count on fs_info->scrub_workers; start the workers if necessary.
3728 */
3729static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3730 int is_dev_replace)
3731{
3732 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3733 int max_active = fs_info->thread_pool_size;
3734
3735 if (fs_info->scrub_workers_refcnt == 0) {
3736 if (is_dev_replace)
3737 fs_info->scrub_workers =
3738 btrfs_alloc_workqueue("scrub", flags,
3739 1, 4);
3740 else
3741 fs_info->scrub_workers =
3742 btrfs_alloc_workqueue("scrub", flags,
3743 max_active, 4);
3744 if (!fs_info->scrub_workers)
3745 goto fail_scrub_workers;
3746
3747 fs_info->scrub_wr_completion_workers =
3748 btrfs_alloc_workqueue("scrubwrc", flags,
3749 max_active, 2);
3750 if (!fs_info->scrub_wr_completion_workers)
3751 goto fail_scrub_wr_completion_workers;
3752
3753 fs_info->scrub_nocow_workers =
3754 btrfs_alloc_workqueue("scrubnc", flags, 1, 0);
3755 if (!fs_info->scrub_nocow_workers)
3756 goto fail_scrub_nocow_workers;
3757 fs_info->scrub_parity_workers =
3758 btrfs_alloc_workqueue("scrubparity", flags,
3759 max_active, 2);
3760 if (!fs_info->scrub_parity_workers)
3761 goto fail_scrub_parity_workers;
3762 }
3763 ++fs_info->scrub_workers_refcnt;
3764 return 0;
3765
3766fail_scrub_parity_workers:
3767 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3768fail_scrub_nocow_workers:
3769 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3770fail_scrub_wr_completion_workers:
3771 btrfs_destroy_workqueue(fs_info->scrub_workers);
3772fail_scrub_workers:
3773 return -ENOMEM;
3774}
3775
3776static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3777{
3778 if (--fs_info->scrub_workers_refcnt == 0) {
3779 btrfs_destroy_workqueue(fs_info->scrub_workers);
3780 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3781 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3782 btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
3783 }
3784 WARN_ON(fs_info->scrub_workers_refcnt < 0);
3785}
3786
3787int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3788 u64 end, struct btrfs_scrub_progress *progress,
3789 int readonly, int is_dev_replace)
3790{
3791 struct scrub_ctx *sctx;
3792 int ret;
3793 struct btrfs_device *dev;
3794 struct rcu_string *name;
3795
3796 if (btrfs_fs_closing(fs_info))
3797 return -EINVAL;
3798
3799 if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
3800 /*
3801		 * The way scrub is implemented, it is unable to calculate
3802		 * the checksum in this case. Do not handle this
3803		 * situation at all because it won't ever happen.
3804 */
3805 btrfs_err(fs_info,
3806 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3807 fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
3808 return -EINVAL;
3809 }
3810
3811 if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3812 /* not supported for data w/o checksums */
3813 btrfs_err(fs_info,
3814 "scrub: size assumption sectorsize != PAGE_SIZE "
3815 "(%d != %lu) fails",
3816 fs_info->chunk_root->sectorsize, PAGE_SIZE);
3817 return -EINVAL;
3818 }
3819
3820 if (fs_info->chunk_root->nodesize >
3821 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3822 fs_info->chunk_root->sectorsize >
3823 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3824 /*
3825 * would exhaust the array bounds of pagev member in
3826 * struct scrub_block
3827 */
3828 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3829 "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3830 fs_info->chunk_root->nodesize,
3831 SCRUB_MAX_PAGES_PER_BLOCK,
3832 fs_info->chunk_root->sectorsize,
3833 SCRUB_MAX_PAGES_PER_BLOCK);
3834 return -EINVAL;
3835 }
3836
3838 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3839 dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3840 if (!dev || (dev->missing && !is_dev_replace)) {
3841 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3842 return -ENODEV;
3843 }
3844
3845 if (!is_dev_replace && !readonly && !dev->writeable) {
3846 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3847 rcu_read_lock();
3848 name = rcu_dereference(dev->name);
3849 btrfs_err(fs_info, "scrub: device %s is not writable",
3850 name->str);
3851 rcu_read_unlock();
3852 return -EROFS;
3853 }
3854
3855 mutex_lock(&fs_info->scrub_lock);
3856 if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3857 mutex_unlock(&fs_info->scrub_lock);
3858 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3859 return -EIO;
3860 }
3861
3862 btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
3863 if (dev->scrub_device ||
3864 (!is_dev_replace &&
3865 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3866 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3867 mutex_unlock(&fs_info->scrub_lock);
3868 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3869 return -EINPROGRESS;
3870 }
3871 btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
3872
3873 ret = scrub_workers_get(fs_info, is_dev_replace);
3874 if (ret) {
3875 mutex_unlock(&fs_info->scrub_lock);
3876 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3877 return ret;
3878 }
3879
3880 sctx = scrub_setup_ctx(dev, is_dev_replace);
3881 if (IS_ERR(sctx)) {
3882 mutex_unlock(&fs_info->scrub_lock);
3883 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3884 scrub_workers_put(fs_info);
3885 return PTR_ERR(sctx);
3886 }
3887 sctx->readonly = readonly;
3888 dev->scrub_device = sctx;
3889 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3890
3891 /*
3892	 * By checking @scrub_pause_req here, we can avoid a
3893	 * race between transaction commit and scrubbing.
3894 */
3895 __scrub_blocked_if_needed(fs_info);
3896 atomic_inc(&fs_info->scrubs_running);
3897 mutex_unlock(&fs_info->scrub_lock);
3898
3899 if (!is_dev_replace) {
3900 /*
3901		 * By holding the device list mutex, we avoid racing with
3902		 * the superblock writes kicked off by a log tree sync.
3903 */
3904 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3905 ret = scrub_supers(sctx, dev);
3906 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3907 }
3908
3909 if (!ret)
3910 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3911 is_dev_replace);
3912
3913 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3914 atomic_dec(&fs_info->scrubs_running);
3915 wake_up(&fs_info->scrub_pause_wait);
3916
3917 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3918
3919 if (progress)
3920 memcpy(progress, &sctx->stat, sizeof(*progress));
3921
3922 mutex_lock(&fs_info->scrub_lock);
3923 dev->scrub_device = NULL;
3924 scrub_workers_put(fs_info);
3925 mutex_unlock(&fs_info->scrub_lock);
3926
3927 scrub_put_ctx(sctx);
3928
3929 return ret;
3930}
3931
3932void btrfs_scrub_pause(struct btrfs_root *root)
3933{
3934 struct btrfs_fs_info *fs_info = root->fs_info;
3935
3936 mutex_lock(&fs_info->scrub_lock);
3937 atomic_inc(&fs_info->scrub_pause_req);
3938 while (atomic_read(&fs_info->scrubs_paused) !=
3939 atomic_read(&fs_info->scrubs_running)) {
3940 mutex_unlock(&fs_info->scrub_lock);
3941 wait_event(fs_info->scrub_pause_wait,
3942 atomic_read(&fs_info->scrubs_paused) ==
3943 atomic_read(&fs_info->scrubs_running));
3944 mutex_lock(&fs_info->scrub_lock);
3945 }
3946 mutex_unlock(&fs_info->scrub_lock);
3947}
3948
3949void btrfs_scrub_continue(struct btrfs_root *root)
3950{
3951 struct btrfs_fs_info *fs_info = root->fs_info;
3952
3953 atomic_dec(&fs_info->scrub_pause_req);
3954 wake_up(&fs_info->scrub_pause_wait);
3955}
3956
3957int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3958{
3959 mutex_lock(&fs_info->scrub_lock);
3960 if (!atomic_read(&fs_info->scrubs_running)) {
3961 mutex_unlock(&fs_info->scrub_lock);
3962 return -ENOTCONN;
3963 }
3964
3965 atomic_inc(&fs_info->scrub_cancel_req);
3966 while (atomic_read(&fs_info->scrubs_running)) {
3967 mutex_unlock(&fs_info->scrub_lock);
3968 wait_event(fs_info->scrub_pause_wait,
3969 atomic_read(&fs_info->scrubs_running) == 0);
3970 mutex_lock(&fs_info->scrub_lock);
3971 }
3972 atomic_dec(&fs_info->scrub_cancel_req);
3973 mutex_unlock(&fs_info->scrub_lock);
3974
3975 return 0;
3976}
3977
3978int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3979 struct btrfs_device *dev)
3980{
3981 struct scrub_ctx *sctx;
3982
3983 mutex_lock(&fs_info->scrub_lock);
3984 sctx = dev->scrub_device;
3985 if (!sctx) {
3986 mutex_unlock(&fs_info->scrub_lock);
3987 return -ENOTCONN;
3988 }
3989 atomic_inc(&sctx->cancel_req);
3990 while (dev->scrub_device) {
3991 mutex_unlock(&fs_info->scrub_lock);
3992 wait_event(fs_info->scrub_pause_wait,
3993 dev->scrub_device == NULL);
3994 mutex_lock(&fs_info->scrub_lock);
3995 }
3996 mutex_unlock(&fs_info->scrub_lock);
3997
3998 return 0;
3999}
4000
4001int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
4002 struct btrfs_scrub_progress *progress)
4003{
4004 struct btrfs_device *dev;
4005 struct scrub_ctx *sctx = NULL;
4006
4007 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
4008 dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
4009 if (dev)
4010 sctx = dev->scrub_device;
4011 if (sctx)
4012 memcpy(progress, &sctx->stat, sizeof(*progress));
4013 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
4014
4015 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4016}
4017
4018static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4019 u64 extent_logical, u64 extent_len,
4020 u64 *extent_physical,
4021 struct btrfs_device **extent_dev,
4022 int *extent_mirror_num)
4023{
4024 u64 mapped_length;
4025 struct btrfs_bio *bbio = NULL;
4026 int ret;
4027
4028 mapped_length = extent_len;
4029 ret = btrfs_map_block(fs_info, READ, extent_logical,
4030 &mapped_length, &bbio, 0);
4031 if (ret || !bbio || mapped_length < extent_len ||
4032 !bbio->stripes[0].dev->bdev) {
4033 btrfs_put_bbio(bbio);
4034 return;
4035 }
4036
4037 *extent_physical = bbio->stripes[0].physical;
4038 *extent_mirror_num = bbio->mirror_num;
4039 *extent_dev = bbio->stripes[0].dev;
4040 btrfs_put_bbio(bbio);
4041}
4042
4043static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
4044 struct scrub_wr_ctx *wr_ctx,
4045 struct btrfs_fs_info *fs_info,
4046 struct btrfs_device *dev,
4047 int is_dev_replace)
4048{
4049 WARN_ON(wr_ctx->wr_curr_bio != NULL);
4050
4051 mutex_init(&wr_ctx->wr_lock);
4052 wr_ctx->wr_curr_bio = NULL;
4053 if (!is_dev_replace)
4054 return 0;
4055
4056 WARN_ON(!dev->bdev);
4057 wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
4058 wr_ctx->tgtdev = dev;
4059 atomic_set(&wr_ctx->flush_all_writes, 0);
4060 return 0;
4061}
4062
4063static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
4064{
4065 mutex_lock(&wr_ctx->wr_lock);
4066 kfree(wr_ctx->wr_curr_bio);
4067 wr_ctx->wr_curr_bio = NULL;
4068 mutex_unlock(&wr_ctx->wr_lock);
4069}
4070
4071static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
4072 int mirror_num, u64 physical_for_dev_replace)
4073{
4074 struct scrub_copy_nocow_ctx *nocow_ctx;
4075 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
4076
4077 nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
4078 if (!nocow_ctx) {
4079 spin_lock(&sctx->stat_lock);
4080 sctx->stat.malloc_errors++;
4081 spin_unlock(&sctx->stat_lock);
4082 return -ENOMEM;
4083 }
4084
4085 scrub_pending_trans_workers_inc(sctx);
4086
4087 nocow_ctx->sctx = sctx;
4088 nocow_ctx->logical = logical;
4089 nocow_ctx->len = len;
4090 nocow_ctx->mirror_num = mirror_num;
4091 nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
4092 btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
4093 copy_nocow_pages_worker, NULL, NULL);
4094 INIT_LIST_HEAD(&nocow_ctx->inodes);
4095 btrfs_queue_work(fs_info->scrub_nocow_workers,
4096 &nocow_ctx->work);
4097
4098 return 0;
4099}
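
/*
 * Note (added for exposition): this is the asynchronous entry point of
 * the nocow copy path used by dev-replace for data without checksums.
 * The work item queued here runs copy_nocow_pages_worker(), which looks
 * up every inode referencing @logical and copies the pages through the
 * page cache to @physical_for_dev_replace on the target device.
 */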
4100
4101static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
4102{
4103 struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
4104 struct scrub_nocow_inode *nocow_inode;
4105
4106 nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
4107 if (!nocow_inode)
4108 return -ENOMEM;
4109 nocow_inode->inum = inum;
4110 nocow_inode->offset = offset;
4111 nocow_inode->root = root;
4112 list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
4113 return 0;
4114}
4115
4116#define COPY_COMPLETE 1
4117
4118static void copy_nocow_pages_worker(struct btrfs_work *work)
4119{
4120 struct scrub_copy_nocow_ctx *nocow_ctx =
4121 container_of(work, struct scrub_copy_nocow_ctx, work);
4122 struct scrub_ctx *sctx = nocow_ctx->sctx;
4123 u64 logical = nocow_ctx->logical;
4124 u64 len = nocow_ctx->len;
4125 int mirror_num = nocow_ctx->mirror_num;
4126 u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4127 int ret;
4128 struct btrfs_trans_handle *trans = NULL;
4129 struct btrfs_fs_info *fs_info;
4130 struct btrfs_path *path;
4131 struct btrfs_root *root;
4132 int not_written = 0;
4133
4134 fs_info = sctx->dev_root->fs_info;
4135 root = fs_info->extent_root;
4136
4137 path = btrfs_alloc_path();
4138 if (!path) {
4139 spin_lock(&sctx->stat_lock);
4140 sctx->stat.malloc_errors++;
4141 spin_unlock(&sctx->stat_lock);
4142 not_written = 1;
4143 goto out;
4144 }
4145
4146 trans = btrfs_join_transaction(root);
4147 if (IS_ERR(trans)) {
4148 not_written = 1;
4149 goto out;
4150 }
4151
4152 ret = iterate_inodes_from_logical(logical, fs_info, path,
4153 record_inode_for_nocow, nocow_ctx);
4154 if (ret != 0 && ret != -ENOENT) {
4155 btrfs_warn(fs_info, "iterate_inodes_from_logical() failed: log %llu, "
4156 "phys %llu, len %llu, mir %u, ret %d",
4157 logical, physical_for_dev_replace, len, mirror_num,
4158 ret);
4159 not_written = 1;
4160 goto out;
4161 }
4162
4163 btrfs_end_transaction(trans, root);
4164 trans = NULL;
4165 while (!list_empty(&nocow_ctx->inodes)) {
4166 struct scrub_nocow_inode *entry;
4167 entry = list_first_entry(&nocow_ctx->inodes,
4168 struct scrub_nocow_inode,
4169 list);
4170 list_del_init(&entry->list);
4171 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4172 entry->root, nocow_ctx);
4173 kfree(entry);
4174 if (ret == COPY_COMPLETE) {
4175 ret = 0;
4176 break;
4177 } else if (ret) {
4178 break;
4179 }
4180 }
4181out:
4182 while (!list_empty(&nocow_ctx->inodes)) {
4183 struct scrub_nocow_inode *entry;
4184 entry = list_first_entry(&nocow_ctx->inodes,
4185 struct scrub_nocow_inode,
4186 list);
4187 list_del_init(&entry->list);
4188 kfree(entry);
4189 }
4190 if (trans && !IS_ERR(trans))
4191 btrfs_end_transaction(trans, root);
4192 if (not_written)
4193 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4194 num_uncorrectable_read_errors);
4195
4196 btrfs_free_path(path);
4197 kfree(nocow_ctx);
4198
4199 scrub_pending_trans_workers_dec(sctx);
4200}
4201
4202static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4203 u64 logical)
4204{
4205 struct extent_state *cached_state = NULL;
4206 struct btrfs_ordered_extent *ordered;
4207 struct extent_io_tree *io_tree;
4208 struct extent_map *em;
4209 u64 lockstart = start, lockend = start + len - 1;
4210 int ret = 0;
4211
4212 io_tree = &BTRFS_I(inode)->io_tree;
4213
4214 lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
4215 ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4216 if (ordered) {
4217 btrfs_put_ordered_extent(ordered);
4218 ret = 1;
4219 goto out_unlock;
4220 }
4221
4222 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4223 if (IS_ERR(em)) {
4224 ret = PTR_ERR(em);
4225 goto out_unlock;
4226 }
4227
4228 /*
4229 * This extent does not actually cover the logical extent anymore,
4230 * move on to the next inode.
4231 */
4232 if (em->block_start > logical ||
4233 em->block_start + em->block_len < logical + len) {
4234 free_extent_map(em);
4235 ret = 1;
4236 goto out_unlock;
4237 }
4238 free_extent_map(em);
4239
4240out_unlock:
4241 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4242 GFP_NOFS);
4243 return ret;
4244}
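
/*
 * Note (added for exposition): the return convention is 0 when the
 * extent still maps cleanly onto @logical and copying is safe, 1 when
 * the caller should skip this inode (an ordered extent is pending or
 * the mapping has moved), and a negative errno on failure; callers
 * translate ret > 0 into "skip" accordingly.
 */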
4245
4246static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4247 struct scrub_copy_nocow_ctx *nocow_ctx)
4248{
4249 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
4250 struct btrfs_key key;
4251 struct inode *inode;
4252 struct page *page;
4253 struct btrfs_root *local_root;
4254 struct extent_io_tree *io_tree;
4255 u64 physical_for_dev_replace;
4256 u64 nocow_ctx_logical;
4257 u64 len = nocow_ctx->len;
4258 unsigned long index;
4259 int srcu_index;
4260 int ret = 0;
4261 int err = 0;
4262
4263 key.objectid = root;
4264 key.type = BTRFS_ROOT_ITEM_KEY;
4265 key.offset = (u64)-1;
4266
4267 srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4268
4269 local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4270 if (IS_ERR(local_root)) {
4271 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4272 return PTR_ERR(local_root);
4273 }
4274
4275 key.type = BTRFS_INODE_ITEM_KEY;
4276 key.objectid = inum;
4277 key.offset = 0;
4278 inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4279 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4280 if (IS_ERR(inode))
4281 return PTR_ERR(inode);
4282
4283	/* Avoid truncate/dio/punch hole... */
4284 inode_lock(inode);
4285 inode_dio_wait(inode);
4286
4287 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4288 io_tree = &BTRFS_I(inode)->io_tree;
4289 nocow_ctx_logical = nocow_ctx->logical;
4290
4291 ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4292 if (ret) {
4293 ret = ret > 0 ? 0 : ret;
4294 goto out;
4295 }
4296
4297 while (len >= PAGE_SIZE) {
4298 index = offset >> PAGE_SHIFT;
4299again:
4300 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4301 if (!page) {
4302 btrfs_err(fs_info, "find_or_create_page() failed");
4303 ret = -ENOMEM;
4304 goto out;
4305 }
4306
4307 if (PageUptodate(page)) {
4308 if (PageDirty(page))
4309 goto next_page;
4310 } else {
4311 ClearPageError(page);
4312 err = extent_read_full_page(io_tree, page,
4313 btrfs_get_extent,
4314 nocow_ctx->mirror_num);
4315 if (err) {
4316 ret = err;
4317 goto next_page;
4318 }
4319
4320 lock_page(page);
4321 /*
4322			 * If the page has been removed from the page cache,
4323			 * the data on it is meaningless, because it may be
4324			 * stale; the new data may have been written into a new
4325			 * page in the page cache.
4326 */
4327 if (page->mapping != inode->i_mapping) {
4328 unlock_page(page);
4329 put_page(page);
4330 goto again;
4331 }
4332 if (!PageUptodate(page)) {
4333 ret = -EIO;
4334 goto next_page;
4335 }
4336 }
4337
4338 ret = check_extent_to_block(inode, offset, len,
4339 nocow_ctx_logical);
4340 if (ret) {
4341 ret = ret > 0 ? 0 : ret;
4342 goto next_page;
4343 }
4344
4345 err = write_page_nocow(nocow_ctx->sctx,
4346 physical_for_dev_replace, page);
4347 if (err)
4348 ret = err;
4349next_page:
4350 unlock_page(page);
4351 put_page(page);
4352
4353 if (ret)
4354 break;
4355
4356 offset += PAGE_SIZE;
4357 physical_for_dev_replace += PAGE_SIZE;
4358 nocow_ctx_logical += PAGE_SIZE;
4359 len -= PAGE_SIZE;
4360 }
4361 ret = COPY_COMPLETE;
4362out:
4363 inode_unlock(inode);
4364 iput(inode);
4365 return ret;
4366}
4367
4368static int write_page_nocow(struct scrub_ctx *sctx,
4369 u64 physical_for_dev_replace, struct page *page)
4370{
4371 struct bio *bio;
4372 struct btrfs_device *dev;
4373 int ret;
4374
4375 dev = sctx->wr_ctx.tgtdev;
4376 if (!dev)
4377 return -EIO;
4378 if (!dev->bdev) {
4379 btrfs_warn_rl(dev->dev_root->fs_info,
4380 "scrub write_page_nocow(bdev == NULL) is unexpected");
4381 return -EIO;
4382 }
4383 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4384 if (!bio) {
4385 spin_lock(&sctx->stat_lock);
4386 sctx->stat.malloc_errors++;
4387 spin_unlock(&sctx->stat_lock);
4388 return -ENOMEM;
4389 }
4390 bio->bi_iter.bi_size = 0;
4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4392 bio->bi_bdev = dev->bdev;
4393 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
4394 if (ret != PAGE_SIZE) {
4395leave_with_eio:
4396 bio_put(bio);
4397 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4398 return -EIO;
4399 }
4400
4401 if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
4402 goto leave_with_eio;
4403
4404 bio_put(bio);
4405 return 0;
4406}