// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "discard.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"
#include "raid-stripe-tree.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 * - In case an unrepairable extent is encountered, track which files are
 *   affected and report them
 * - track and record media errors, throw out bad devices
 * - add a mode to also read unallocated space
 */

struct scrub_ctx;

/*
 * The following value only influences the performance.
 *
 * This determines how many stripes would be submitted in one go,
 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
 */
#define SCRUB_STRIPES_PER_GROUP 8
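
/*
 * Worked example (illustrative, not from the original source): with the
 * usual BTRFS_STRIPE_LEN of 64KiB, one group submits 8 * 64KiB = 512KiB
 * of reads in one go, matching the comment above.
 */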

/*
 * How many groups we have for each sctx.
 *
 * This would be 8M per device, the same value as the old scrub in-flight bios
 * size limit.
 */
#define SCRUB_GROUPS_PER_SCTX 16

#define SCRUB_TOTAL_STRIPES (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
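
/*
 * Illustrative arithmetic (assuming BTRFS_STRIPE_LEN == 64KiB):
 * SCRUB_TOTAL_STRIPES = 16 * 8 = 128 stripes, i.e. 128 * 64KiB = 8MiB of
 * in-flight IO per scrub context, matching the old in-flight bios size
 * limit mentioned above.
 */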

/*
 * The following value times SZ_4K needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)

/* Represent one sector and its needed info to verify the content. */
struct scrub_sector_verification {
	bool is_metadata;

	union {
		/*
		 * Csum pointer for data csum verification. Should point to a
		 * sector csum inside scrub_stripe::csums.
		 *
		 * NULL if this data sector has no csum.
		 */
		u8 *csum;

		/*
		 * Extra info for metadata verification. All sectors inside a
		 * tree block share the same generation.
		 */
		u64 generation;
	};
};

enum scrub_stripe_flags {
	/* Set when @mirror_num, @dev, @physical and @logical are set. */
	SCRUB_STRIPE_FLAG_INITIALIZED,

	/* Set when the read-repair is finished. */
	SCRUB_STRIPE_FLAG_REPAIR_DONE,

	/*
	 * Set for data stripes if the scrub is triggered from a P/Q stripe.
	 * During such a scrub, we should not report errors in data stripes,
	 * nor update the accounting.
	 */
	SCRUB_STRIPE_FLAG_NO_REPORT,
};

#define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE)

/*
 * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
 */
struct scrub_stripe {
	struct scrub_ctx *sctx;
	struct btrfs_block_group *bg;

	struct page *pages[SCRUB_STRIPE_PAGES];
	struct scrub_sector_verification *sectors;

	struct btrfs_device *dev;
	u64 logical;
	u64 physical;

	u16 mirror_num;

	/* Should be BTRFS_STRIPE_LEN / sectorsize. */
	u16 nr_sectors;

	/*
	 * How many data/meta extents are in this stripe. Only for scrub status
	 * reporting purposes.
	 */
	u16 nr_data_extents;
	u16 nr_meta_extents;

	atomic_t pending_io;
	wait_queue_head_t io_wait;
	wait_queue_head_t repair_wait;

	/*
	 * Indicate the states of the stripe. Bits are defined in
	 * scrub_stripe_flags enum.
	 */
	unsigned long state;

	/* Indicate which sectors are covered by extent items. */
	unsigned long extent_sector_bitmap;

	/*
	 * The errors hit during the initial read of the stripe.
	 *
	 * Would be utilized for error reporting and repair.
	 *
	 * The remaining init_nr_* records the number of errors hit, only used
	 * by error reporting.
	 */
	unsigned long init_error_bitmap;
	unsigned int init_nr_io_errors;
	unsigned int init_nr_csum_errors;
	unsigned int init_nr_meta_errors;

	/*
	 * The following error bitmaps are all for the current status.
	 * Every time we submit a new read, these bitmaps may be updated.
	 *
	 * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
	 *
	 * IO and csum errors can happen for both metadata and data.
	 */
	unsigned long error_bitmap;
	unsigned long io_error_bitmap;
	unsigned long csum_error_bitmap;
	unsigned long meta_error_bitmap;
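
	/*
	 * Example (illustrative only): if sector 1 hit an IO error and
	 * sector 2 a csum mismatch, then io_error_bitmap == 0b0010,
	 * csum_error_bitmap == 0b0100 and error_bitmap == 0b0110, per the
	 * relation documented above.
	 */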

	/* For writeback (repair or replace) error reporting. */
	unsigned long write_error_bitmap;

	/* Writeback can be concurrent, thus we need to protect the bitmap. */
	spinlock_t write_error_lock;

	/*
	 * Checksum for the whole stripe if this stripe is inside a data block
	 * group.
	 */
	u8 *csums;

	struct work_struct work;
};

struct scrub_ctx {
	struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES];
	struct scrub_stripe *raid56_data_stripes;
	struct btrfs_fs_info *fs_info;
	struct btrfs_path extent_path;
	struct btrfs_path csum_path;
	int first_free;
	int cur_stripe;
	atomic_t cancel_req;
	int readonly;

	/* State of IO submission throttling affecting the associated device */
	ktime_t throttle_deadline;
	u64 throttle_sent;

	int is_dev_replace;
	u64 write_pointer;

	struct mutex wr_lock;
	struct btrfs_device *wr_tgtdev;

	/*
	 * statistics
	 */
	struct btrfs_scrub_progress stat;
	spinlock_t stat_lock;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t refs;
};

struct scrub_warning {
	struct btrfs_path *path;
	u64 extent_item_size;
	const char *errstr;
	u64 physical;
	u64 logical;
	struct btrfs_device *dev;
};

static void release_scrub_stripe(struct scrub_stripe *stripe)
{
	if (!stripe)
		return;

	for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
		if (stripe->pages[i])
			__free_page(stripe->pages[i]);
		stripe->pages[i] = NULL;
	}
	kfree(stripe->sectors);
	kfree(stripe->csums);
	stripe->sectors = NULL;
	stripe->csums = NULL;
	stripe->sctx = NULL;
	stripe->state = 0;
}

static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
			     struct scrub_stripe *stripe)
{
	int ret;

	memset(stripe, 0, sizeof(*stripe));

	stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	stripe->state = 0;

	init_waitqueue_head(&stripe->io_wait);
	init_waitqueue_head(&stripe->repair_wait);
	atomic_set(&stripe->pending_io, 0);
	spin_lock_init(&stripe->write_error_lock);

	ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages, 0);
	if (ret < 0)
		goto error;

	stripe->sectors = kcalloc(stripe->nr_sectors,
				  sizeof(struct scrub_sector_verification),
				  GFP_KERNEL);
	if (!stripe->sectors)
		goto error;

	stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
				fs_info->csum_size, GFP_KERNEL);
	if (!stripe->csums)
		goto error;
	return 0;
error:
	release_scrub_stripe(stripe);
	return -ENOMEM;
}

static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
{
	wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
}

static void scrub_put_ctx(struct scrub_ctx *sctx);

static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	while (atomic_read(&fs_info->scrub_pause_req)) {
		mutex_unlock(&fs_info->scrub_lock);
		wait_event(fs_info->scrub_pause_wait,
			   atomic_read(&fs_info->scrub_pause_req) == 0);
		mutex_lock(&fs_info->scrub_lock);
	}
}

static void scrub_pause_on(struct btrfs_fs_info *fs_info)
{
	atomic_inc(&fs_info->scrubs_paused);
	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_pause_off(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->scrub_lock);
	__scrub_blocked_if_needed(fs_info);
	atomic_dec(&fs_info->scrubs_paused);
	mutex_unlock(&fs_info->scrub_lock);

	wake_up(&fs_info->scrub_pause_wait);
}

static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
{
	scrub_pause_on(fs_info);
	scrub_pause_off(fs_info);
}
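
/*
 * Usage sketch (not part of the original source): long-running scrub
 * loops are expected to call scrub_blocked_if_needed() periodically.
 * It marks the scrub as paused, waits until fs_info->scrub_pause_req
 * drops back to zero, then resumes.
 */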

static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
{
	int i;

	if (!sctx)
		return;

	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
		release_scrub_stripe(&sctx->stripes[i]);

	kvfree(sctx);
}

static void scrub_put_ctx(struct scrub_ctx *sctx)
{
	if (refcount_dec_and_test(&sctx->refs))
		scrub_free_ctx(sctx);
}

static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
		struct btrfs_fs_info *fs_info, int is_dev_replace)
{
	struct scrub_ctx *sctx;
	int i;

	/*
	 * Since sctx has inline 128 stripes, it can go beyond 64K easily.
	 * Use kvzalloc().
	 */
	sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
	if (!sctx)
		goto nomem;
	refcount_set(&sctx->refs, 1);
	sctx->is_dev_replace = is_dev_replace;
	sctx->fs_info = fs_info;
	sctx->extent_path.search_commit_root = 1;
	sctx->extent_path.skip_locking = 1;
	sctx->csum_path.search_commit_root = 1;
	sctx->csum_path.skip_locking = 1;
	for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
		int ret;

		ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
		if (ret < 0)
			goto nomem;
		sctx->stripes[i].sctx = sctx;
	}
	sctx->first_free = 0;
	atomic_set(&sctx->cancel_req, 0);

	spin_lock_init(&sctx->stat_lock);
	sctx->throttle_deadline = 0;

	mutex_init(&sctx->wr_lock);
	if (is_dev_replace) {
		WARN_ON(!fs_info->dev_replace.tgtdev);
		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
	}

	return sctx;

nomem:
	scrub_free_ctx(sctx);
	return ERR_PTR(-ENOMEM);
}

static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
				     u64 root, void *warn_ctx)
{
	u32 nlink;
	int ret;
	int i;
	unsigned nofs_flag;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct scrub_warning *swarn = warn_ctx;
	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/*
	 * This makes the path point to (inum INODE_ITEM ioff).
	 */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(swarn->path);
		goto err;
	}

	eb = swarn->path->nodes[0];
	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
				    struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(swarn->path);

	/*
	 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL. Scrub
	 * uses GFP_NOFS in this context, so we keep it consistent but it does
	 * not seem to be strictly necessary.
	 */
	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, swarn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		goto err;
	}
	ret = paths_from_inode(inum, ipath);

	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore the fact that ipath might have been too small
	 * to hold all of the paths here.
	 */
	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
		btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
				  swarn->errstr, swarn->logical,
				  btrfs_dev_name(swarn->dev),
				  swarn->physical,
				  root, inum, offset,
				  fs_info->sectorsize, nlink,
				  (char *)(unsigned long)ipath->fspath->val[i]);

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_in_rcu(fs_info,
			  "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
			  swarn->errstr, swarn->logical,
			  btrfs_dev_name(swarn->dev),
			  swarn->physical,
			  root, inum, offset, ret);

	free_ipath(ipath);
	return 0;
}

static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
				       bool is_super, u64 logical, u64 physical)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct scrub_warning swarn;
	u64 flags = 0;
	u32 item_size;
	int ret;

	/* Super block error, no need to search extent tree. */
	if (is_super) {
		btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
				  errstr, btrfs_dev_name(dev), physical);
		return;
	}
	path = btrfs_alloc_path();
	if (!path)
		return;

	swarn.physical = physical;
	swarn.logical = logical;
	swarn.errstr = errstr;
	swarn.dev = NULL;

	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
				  &flags);
	if (ret < 0)
		goto out;

	swarn.extent_item_size = found_key.offset;

	eb = path->nodes[0];
	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path->slots[0]);

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u8 ref_level;
		u64 ref_root;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					   swarn.logical, ret);
				break;
			}
			if (ret > 0)
				break;
			btrfs_warn_in_rcu(fs_info,
"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
				errstr, swarn.logical, btrfs_dev_name(dev),
				swarn.physical, (ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };

		btrfs_release_path(path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = swarn.logical - found_key.objectid;
		ctx.fs_info = fs_info;

		swarn.path = path;
		swarn.dev = dev;

		iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
	}

out:
	btrfs_free_path(path);
}

static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
{
	int ret = 0;
	u64 length;

	if (!btrfs_is_zoned(sctx->fs_info))
		return 0;

	if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
		return 0;

	if (sctx->write_pointer < physical) {
		length = physical - sctx->write_pointer;

		ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
						sctx->write_pointer, length);
		if (!ret)
			sctx->write_pointer = physical;
	}
	return ret;
}
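
/*
 * Example (hypothetical numbers): on a zoned target with
 * sctx->write_pointer at 16MiB and a repair write destined for physical
 * 17MiB, the 1MiB gap is zeroed out first so the device's sequential
 * write pointer matches the next submission.
 */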

static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;

	return stripe->pages[page_index];
}

static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
						 int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;

	return offset_in_page(sector_nr << fs_info->sectorsize_bits);
}
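
/*
 * Example (illustrative, assuming 4KiB sectors and 4KiB pages):
 * sector_nr 5 maps to byte offset 5 << 12 == 20480, i.e. pages[5] with
 * an in-page offset of 0. With 16KiB pages the same sector would land
 * in pages[1] at offset 4096.
 */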

static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
	const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
	const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	u8 on_disk_csum[BTRFS_CSUM_SIZE];
	u8 calculated_csum[BTRFS_CSUM_SIZE];
	struct btrfs_header *header;

	/*
	 * Here we don't have a good way to attach the pages (and subpages)
	 * to a dummy extent buffer, thus we have to directly grab the members
	 * from pages.
	 */
	header = (struct btrfs_header *)(page_address(first_page) + first_off);
	memcpy(on_disk_csum, header->csum, fs_info->csum_size);

	if (logical != btrfs_stack_header_bytenr(header)) {
		bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
			      logical, stripe->mirror_num,
			      btrfs_stack_header_bytenr(header), logical);
		return;
	}
	if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
		   BTRFS_FSID_SIZE) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
			      logical, stripe->mirror_num,
			      header->fsid, fs_info->fs_devices->fsid);
		return;
	}
	if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
		   BTRFS_UUID_SIZE) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
			      logical, stripe->mirror_num,
			      header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
		return;
	}

	/* Now check tree block csum. */
	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	crypto_shash_update(shash, page_address(first_page) + first_off +
			    BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);

	for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
		struct page *page = scrub_stripe_get_page(stripe, i);
		unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);

		crypto_shash_update(shash, page_address(page) + page_off,
				    fs_info->sectorsize);
	}

	crypto_shash_final(shash, calculated_csum);
	if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
			      logical, stripe->mirror_num,
			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
		return;
	}
	if (stripe->sectors[sector_nr].generation !=
	    btrfs_stack_header_generation(header)) {
		bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
		bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
		btrfs_warn_rl(fs_info,
		"tree block %llu mirror %u has bad generation, has %llu want %llu",
			      logical, stripe->mirror_num,
			      btrfs_stack_header_generation(header),
			      stripe->sectors[sector_nr].generation);
		return;
	}
	bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
	bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
	bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
}

static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	struct page *page = scrub_stripe_get_page(stripe, sector_nr);
	unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
	u8 csum_buf[BTRFS_CSUM_SIZE];
	int ret;

	ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);

	/* Sector not utilized, skip it. */
	if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
		return;

	/* IO error, no need to check. */
	if (test_bit(sector_nr, &stripe->io_error_bitmap))
		return;

	/* Metadata, verify the full tree block. */
	if (sector->is_metadata) {
		/*
		 * Check if the tree block crosses the stripe boundary. If it
		 * does, we cannot verify it and can only give a warning.
		 *
		 * This can only happen on a very old filesystem where chunks
		 * are not ensured to be stripe aligned.
		 */
		if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
			btrfs_warn_rl(fs_info,
			"tree block at %llu crosses stripe boundary %llu",
				      stripe->logical +
				      (sector_nr << fs_info->sectorsize_bits),
				      stripe->logical);
			return;
		}
		scrub_verify_one_metadata(stripe, sector_nr);
		return;
	}

	/*
	 * Data is easier, we just verify the data csum (if we have it). For
	 * cases without csum, we have no other choice but to trust it.
	 */
	if (!sector->csum) {
		clear_bit(sector_nr, &stripe->error_bitmap);
		return;
	}

	ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
	if (ret < 0) {
		set_bit(sector_nr, &stripe->csum_error_bitmap);
		set_bit(sector_nr, &stripe->error_bitmap);
	} else {
		clear_bit(sector_nr, &stripe->csum_error_bitmap);
		clear_bit(sector_nr, &stripe->error_bitmap);
	}
}

/* Verify specified sectors of a stripe. */
static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
	int sector_nr;

	for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
		scrub_verify_one_sector(stripe, sector_nr);
		if (stripe->sectors[sector_nr].is_metadata)
			sector_nr += sectors_per_tree - 1;
	}
}
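
/*
 * Example (illustrative): with a 16KiB nodesize and 4KiB sectors,
 * sectors_per_tree is 4; after verifying a metadata block starting at
 * sector 0, the loop above skips ahead to sector 4, as sectors 1-3 are
 * covered by the same tree block.
 */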

static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
{
	int i;

	for (i = 0; i < stripe->nr_sectors; i++) {
		if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
		    scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
			break;
	}
	ASSERT(i < stripe->nr_sectors);
	return i;
}

/*
 * Repair read is different from the regular read:
 *
 * - Only reads the failed sectors
 * - May have extra blocksize limits
 */
static void scrub_repair_read_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	u32 bio_size = 0;
	int i;

	ASSERT(sector_nr < stripe->nr_sectors);

	bio_for_each_bvec_all(bvec, &bbio->bio, i)
		bio_size += bvec->bv_len;

	if (bbio->bio.bi_status) {
		bitmap_set(&stripe->io_error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
		bitmap_set(&stripe->error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
	} else {
		bitmap_clear(&stripe->io_error_bitmap, sector_nr,
			     bio_size >> fs_info->sectorsize_bits);
	}
	bio_put(&bbio->bio);
	if (atomic_dec_and_test(&stripe->pending_io))
		wake_up(&stripe->io_wait);
}

static int calc_next_mirror(int mirror, int num_copies)
{
	ASSERT(mirror <= num_copies);
	return (mirror + 1 > num_copies) ? 1 : mirror + 1;
}
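
/*
 * Example (illustrative): with num_copies == 3 the mirrors cycle
 * 1 -> 2 -> 3 -> 1, so calc_next_mirror(3, 3) wraps back to mirror 1.
 */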

static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
					    int mirror, int blocksize, bool wait)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	const unsigned long old_error_bitmap = stripe->error_bitmap;
	int i;

	ASSERT(stripe->mirror_num >= 1);
	ASSERT(atomic_read(&stripe->pending_io) == 0);

	for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
		struct page *page;
		int pgoff;
		int ret;

		page = scrub_stripe_get_page(stripe, i);
		pgoff = scrub_stripe_get_page_offset(stripe, i);

		/* The current sector cannot be merged, submit the bio. */
		if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
			     bbio->bio.bi_iter.bi_size >= blocksize)) {
			ASSERT(bbio->bio.bi_iter.bi_size);
			atomic_inc(&stripe->pending_io);
			btrfs_submit_bio(bbio, mirror);
			if (wait)
				wait_scrub_stripe_io(stripe);
			bbio = NULL;
		}

		if (!bbio) {
			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
					       fs_info, scrub_repair_read_endio, stripe);
			bbio->bio.bi_iter.bi_sector = (stripe->logical +
				(i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
		}

		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		ASSERT(ret == fs_info->sectorsize);
	}
	if (bbio) {
		ASSERT(bbio->bio.bi_iter.bi_size);
		atomic_inc(&stripe->pending_io);
		btrfs_submit_bio(bbio, mirror);
		if (wait)
			wait_scrub_stripe_io(stripe);
	}
}

static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
				       struct scrub_stripe *stripe)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_device *dev = NULL;
	u64 physical = 0;
	int nr_data_sectors = 0;
	int nr_meta_sectors = 0;
	int nr_nodatacsum_sectors = 0;
	int nr_repaired_sectors = 0;
	int sector_nr;

	if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
		return;

	/*
	 * Initialize the info needed for error reporting.
	 *
	 * Although our scrub_stripe infrastructure is mostly based on
	 * btrfs_submit_bio() and thus does not need dev/physical itself,
	 * error reporting still needs both of them.
	 */
	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
		u64 mapped_len = fs_info->sectorsize;
		struct btrfs_io_context *bioc = NULL;
		int stripe_index = stripe->mirror_num - 1;
		int ret;

		/* For scrub, our mirror_num should always start at 1. */
		ASSERT(stripe->mirror_num >= 1);
		ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				      stripe->logical, &mapped_len, &bioc,
				      NULL, NULL);
		/*
		 * If we failed, dev will be NULL, and later detailed reports
		 * will just be skipped.
		 */
		if (ret < 0)
			goto skip;
		physical = bioc->stripes[stripe_index].physical;
		dev = bioc->stripes[stripe_index].dev;
		btrfs_put_bioc(bioc);
	}

skip:
	for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
		bool repaired = false;

		if (stripe->sectors[sector_nr].is_metadata) {
			nr_meta_sectors++;
		} else {
			nr_data_sectors++;
			if (!stripe->sectors[sector_nr].csum)
				nr_nodatacsum_sectors++;
		}

		if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
		    !test_bit(sector_nr, &stripe->error_bitmap)) {
			nr_repaired_sectors++;
			repaired = true;
		}

		/* Good sector from the beginning, nothing needs to be done. */
		if (!test_bit(sector_nr, &stripe->init_error_bitmap))
			continue;

		/*
		 * Report errors for the corrupted sectors. If a sector was
		 * repaired, just output a message saying it was fixed up.
		 */
		if (repaired) {
			if (dev) {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on dev %s physical %llu",
					    stripe->logical, btrfs_dev_name(dev),
					    physical);
			} else {
				btrfs_err_rl_in_rcu(fs_info,
			"fixed up error at logical %llu on mirror %u",
					    stripe->logical, stripe->mirror_num);
			}
			continue;
		}

		/* The remaining are all for unrepaired sectors. */
		if (dev) {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on dev %s physical %llu",
					    stripe->logical, btrfs_dev_name(dev),
					    physical);
		} else {
			btrfs_err_rl_in_rcu(fs_info,
	"unable to fixup (regular) error at logical %llu on mirror %u",
					    stripe->logical, stripe->mirror_num);
		}

		if (test_bit(sector_nr, &stripe->io_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("i/o error", dev, false,
						stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->csum_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("checksum error", dev, false,
						stripe->logical, physical);
		if (test_bit(sector_nr, &stripe->meta_error_bitmap))
			if (__ratelimit(&rs) && dev)
				scrub_print_common_warning("header error", dev, false,
						stripe->logical, physical);
	}

	spin_lock(&sctx->stat_lock);
	sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
	sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
	sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
	sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
	sctx->stat.no_csum += nr_nodatacsum_sectors;
	sctx->stat.read_errors += stripe->init_nr_io_errors;
	sctx->stat.csum_errors += stripe->init_nr_csum_errors;
	sctx->stat.verify_errors += stripe->init_nr_meta_errors;
	sctx->stat.uncorrectable_errors +=
		bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
	sctx->stat.corrected_errors += nr_repaired_sectors;
	spin_unlock(&sctx->stat_lock);
}

static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
				unsigned long write_bitmap, bool dev_replace);

/*
 * The main entry point for all read-related scrub work, including:
 *
 * - Wait for the initial read to finish
 * - Verify and locate any bad sectors
 * - Go through the remaining mirrors and try to read as large a blocksize as
 *   possible
 * - Go through all mirrors (including the failed mirror) sector-by-sector
 * - Submit writeback for repaired sectors
 *
 * Writeback for dev-replace does not happen here, it needs extra
 * synchronization for zoned devices.
 */
static void scrub_stripe_read_repair_worker(struct work_struct *work)
{
	struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
	struct scrub_ctx *sctx = stripe->sctx;
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
					  stripe->bg->length);
	int mirror;
	int i;

	ASSERT(stripe->mirror_num > 0);

	wait_scrub_stripe_io(stripe);
	scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
	/* Save the initial failed bitmap for later repair and report usage. */
	stripe->init_error_bitmap = stripe->error_bitmap;
	stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
						  stripe->nr_sectors);
	stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
						    stripe->nr_sectors);
	stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
						    stripe->nr_sectors);

	if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
		goto out;

	/*
	 * Try all remaining mirrors.
	 *
	 * Here we still try to read as large a block as possible, as this is
	 * faster and we have extra safety nets to rely on.
	 */
	for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
	     mirror != stripe->mirror_num;
	     mirror = calc_next_mirror(mirror, num_copies)) {
		const unsigned long old_error_bitmap = stripe->error_bitmap;

		scrub_stripe_submit_repair_read(stripe, mirror,
						BTRFS_STRIPE_LEN, false);
		wait_scrub_stripe_io(stripe);
		scrub_verify_one_stripe(stripe, old_error_bitmap);
		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			goto out;
	}

	/*
	 * Last safety net, try re-checking all mirrors, including the failed
	 * one, sector-by-sector.
	 *
	 * If one sector fails the drive's internal csum, the whole read
	 * containing the offending sector would be marked as an error.
	 * Thus here we do sector-by-sector reads.
	 *
	 * This can be slow, thus we only try it as the last resort.
	 */

	for (i = 0, mirror = stripe->mirror_num;
	     i < num_copies;
	     i++, mirror = calc_next_mirror(mirror, num_copies)) {
		const unsigned long old_error_bitmap = stripe->error_bitmap;

		scrub_stripe_submit_repair_read(stripe, mirror,
						fs_info->sectorsize, true);
		wait_scrub_stripe_io(stripe);
		scrub_verify_one_stripe(stripe, old_error_bitmap);
		if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			goto out;
	}
out:
	/*
	 * Submit the repaired sectors. For the zoned case, we cannot do
	 * repair in-place, but queue the bg to be relocated.
	 */
	if (btrfs_is_zoned(fs_info)) {
		if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
			btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
	} else if (!sctx->readonly) {
		unsigned long repaired;

		bitmap_andnot(&repaired, &stripe->init_error_bitmap,
			      &stripe->error_bitmap, stripe->nr_sectors);
		scrub_write_sectors(sctx, stripe, repaired, false);
		wait_scrub_stripe_io(stripe);
	}

	scrub_stripe_report_errors(sctx, stripe);
	set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
	wake_up(&stripe->repair_wait);
}

static void scrub_read_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	int num_sectors;
	u32 bio_size = 0;
	int i;

	ASSERT(sector_nr < stripe->nr_sectors);
	bio_for_each_bvec_all(bvec, &bbio->bio, i)
		bio_size += bvec->bv_len;
	num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;

	if (bbio->bio.bi_status) {
		bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
		bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
	} else {
		bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
	}
	bio_put(&bbio->bio);
	if (atomic_dec_and_test(&stripe->pending_io)) {
		wake_up(&stripe->io_wait);
		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
	}
}

static void scrub_write_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	u32 bio_size = 0;
	int i;

	bio_for_each_bvec_all(bvec, &bbio->bio, i)
		bio_size += bvec->bv_len;

	if (bbio->bio.bi_status) {
		unsigned long flags;

		spin_lock_irqsave(&stripe->write_error_lock, flags);
		bitmap_set(&stripe->write_error_bitmap, sector_nr,
			   bio_size >> fs_info->sectorsize_bits);
		spin_unlock_irqrestore(&stripe->write_error_lock, flags);
	}
	bio_put(&bbio->bio);

	if (atomic_dec_and_test(&stripe->pending_io))
		wake_up(&stripe->io_wait);
}

static void scrub_submit_write_bio(struct scrub_ctx *sctx,
				   struct scrub_stripe *stripe,
				   struct btrfs_bio *bbio, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	u32 bio_len = bbio->bio.bi_iter.bi_size;
	u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
		      stripe->logical;

	fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
	atomic_inc(&stripe->pending_io);
	btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
	if (!btrfs_is_zoned(fs_info))
		return;
	/*
	 * For zoned writeback, queue depth must be 1, thus we must wait for
	 * the write to finish before the next write.
	 */
	wait_scrub_stripe_io(stripe);

	/*
	 * Also update the write pointer if the write finished successfully.
	 */
	if (!test_bit(bio_off >> fs_info->sectorsize_bits,
		      &stripe->write_error_bitmap))
		sctx->write_pointer += bio_len;
}

/*
 * Submit the write bio(s) for the sectors specified by @write_bitmap.
 *
 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
 *
 * - Only needs logical bytenr and mirror_num
 *   Just like the scrub read path
 *
 * - Would only result in writes to the specified mirror
 *   Unlike the regular writeback path, which would write back to all stripes
 *
 * - Handle dev-replace and read-repair writeback differently
 */
static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
				unsigned long write_bitmap, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	int sector_nr;

	for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
		struct page *page = scrub_stripe_get_page(stripe, sector_nr);
		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
		int ret;

		/* We should only writeback sectors covered by an extent. */
		ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));

		/* Cannot merge with previous sector, submit the current one. */
		if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
			scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
			bbio = NULL;
		}
		if (!bbio) {
			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
					       fs_info, scrub_write_endio, stripe);
			bbio->bio.bi_iter.bi_sector = (stripe->logical +
				(sector_nr << fs_info->sectorsize_bits)) >>
				SECTOR_SHIFT;
		}
		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		ASSERT(ret == fs_info->sectorsize);
	}
	if (bbio)
		scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
}

/*
 * Throttling of IO submission, bandwidth-limit based, the timeslice is 1
 * second. Limit can be set via
 * /sys/fs/btrfs/<UUID>/devinfo/<devid>/scrub_speed_max.
 */
static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
				  unsigned int bio_size)
{
	const int time_slice = 1000;
	s64 delta;
	ktime_t now;
	u32 div;
	u64 bwlimit;

	bwlimit = READ_ONCE(device->scrub_speed_max);
	if (bwlimit == 0)
		return;

	/*
	 * Slice is divided into intervals when the IO is submitted, adjust by
	 * bwlimit and maximum of 64 intervals.
	 */
	div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
	div = min_t(u32, 64, div);

	/* Start new epoch, set deadline */
	now = ktime_get();
	if (sctx->throttle_deadline == 0) {
		sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
		sctx->throttle_sent = 0;
	}

	/* Still in the time to send? */
	if (ktime_before(now, sctx->throttle_deadline)) {
		/* If current bio is within the limit, send it */
		sctx->throttle_sent += bio_size;
		if (sctx->throttle_sent <= div_u64(bwlimit, div))
			return;

		/* We're over the limit, sleep until the rest of the slice */
		delta = ktime_ms_delta(sctx->throttle_deadline, now);
	} else {
		/* New request after deadline, start new epoch */
		delta = 0;
	}

	if (delta) {
		long timeout;

		timeout = div_u64(delta * HZ, 1000);
		schedule_timeout_interruptible(timeout);
	}

	/* Next call will start the deadline period */
	sctx->throttle_deadline = 0;
}
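
/*
 * Worked example (hypothetical numbers): with scrub_speed_max set to
 * 64MiB/s, div = min(64, max(1, 64MiB / 16MiB)) = 4, so each epoch
 * lasts 1000 / 4 = 250ms and permits 64MiB / 4 = 16MiB of submitted IO
 * before the task sleeps until the deadline.
 */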

/*
 * Given a physical address, this will calculate its logical offset.
 * If this is a parity stripe, it will return the left-most data stripe's
 * logical offset.
 *
 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct btrfs_chunk_map *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 last_offset;
	const int data_stripes = nr_data_stripes(map);

	last_offset = (physical - map->stripes[num].physical) * data_stripes;
	if (stripe_start)
		*stripe_start = last_offset;

	*offset = last_offset;
	for (i = 0; i < data_stripes; i++) {
		u32 stripe_nr;
		u32 stripe_index;
		u32 rot;

		*offset = last_offset + btrfs_stripe_nr_to_offset(i);

		stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;

		/* Work out the disk rotation on this stripe-set. */
		rot = stripe_nr % map->num_stripes;
		/* Calculate which stripe this data is located on. */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
	return 1;
}
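
/*
 * Worked example (illustrative): RAID5 over 3 devices (2 data + 1
 * parity), num == 0 and physical one BTRFS_STRIPE_LEN into the chunk.
 * Then last_offset = 2 * 64KiB = 128KiB; for both data stripes the
 * rotation lands on devices 1 and 2, so the loop never matches num and
 * the function returns 1: this 64KiB on device 0 holds parity for the
 * full stripe starting at logical offset 128KiB.
 */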

/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
				     u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
	u64 len;
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
	       key.type == BTRFS_METADATA_ITEM_KEY);
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		len = fs_info->nodesize;
	else
		len = key.offset;

	if (key.objectid + len <= search_start)
		return -1;
	if (key.objectid >= search_start + search_len)
		return 1;
	return 0;
}
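
/*
 * Example (illustrative): searching [1MiB, 1MiB + 64KiB) while the path
 * points at a data extent item with objectid 1MiB - 16KiB and offset
 * (length) 32KiB returns 0, as the extent overlaps the first 16KiB of
 * the search range.
 */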

/*
 * Locate one extent item which covers any byte in range
 * [@search_start, @search_start + @search_len)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item. This is for data extents crossing stripe
 * boundaries.
 *
 * Return 0 if we found such extent item, and @path will point to the extent item.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if hit fatal error, and @path will be released.
 */
static int find_first_extent_item(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct btrfs_key key;
	int ret;

	/* Continue using the existing path. */
	if (path->nodes[0])
		goto search_forward;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = search_start;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ASSERT(ret > 0);
	/*
	 * Here we intentionally pass 0 as @min_objectid, as there could be
	 * an extent item starting before @search_start.
	 */
	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret < 0)
		return ret;
	/*
	 * No matter whether we have found an extent item, the next loop will
	 * properly do every check on the key.
	 */
search_forward:
	while (true) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid >= search_start + search_len)
			break;
		if (key.type != BTRFS_METADATA_ITEM_KEY &&
		    key.type != BTRFS_EXTENT_ITEM_KEY)
			goto next;

		ret = compare_extent_item_range(path, search_start, search_len);
		if (ret == 0)
			return ret;
		if (ret > 0)
			break;
next:
		ret = btrfs_next_item(extent_root, path);
		if (ret) {
			/* Either no more items or a fatal error. */
			btrfs_release_path(path);
			return ret;
		}
	}
	btrfs_release_path(path);
	return 1;
}

static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
			    u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
	       key.type == BTRFS_EXTENT_ITEM_KEY);
	*extent_start_ret = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		*size_ret = path->nodes[0]->fs_info->nodesize;
	else
		*size_ret = key.offset;
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
	*flags_ret = btrfs_extent_flags(path->nodes[0], ei);
	*generation_ret = btrfs_extent_generation(path->nodes[0], ei);
}

static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
					u64 physical, u64 physical_end)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	int ret = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	mutex_lock(&sctx->wr_lock);
	if (sctx->write_pointer < physical_end) {
		ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
						    physical,
						    sctx->write_pointer);
		if (ret)
			btrfs_err(fs_info,
				  "zoned: failed to recover write pointer");
	}
	mutex_unlock(&sctx->wr_lock);
	btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);

	return ret;
}

static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
				 struct scrub_stripe *stripe,
				 u64 extent_start, u64 extent_len,
				 u64 extent_flags, u64 extent_gen)
{
	for (u64 cur_logical = max(stripe->logical, extent_start);
	     cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
			       extent_start + extent_len);
	     cur_logical += fs_info->sectorsize) {
		const int nr_sector = (cur_logical - stripe->logical) >>
				      fs_info->sectorsize_bits;
		struct scrub_sector_verification *sector =
						&stripe->sectors[nr_sector];

		set_bit(nr_sector, &stripe->extent_sector_bitmap);
		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			sector->is_metadata = true;
			sector->generation = extent_gen;
		}
	}
}
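
/*
 * Example (illustrative): with 4KiB sectors, a stripe at logical 64MiB
 * and a data extent [64MiB - 4KiB, 64MiB + 8KiB), only the overlap is
 * recorded: cur_logical starts clamped to 64MiB and sectors 0 and 1 of
 * the stripe are set in extent_sector_bitmap.
 */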

static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
{
	stripe->extent_sector_bitmap = 0;
	stripe->init_error_bitmap = 0;
	stripe->init_nr_io_errors = 0;
	stripe->init_nr_csum_errors = 0;
	stripe->init_nr_meta_errors = 0;
	stripe->error_bitmap = 0;
	stripe->io_error_bitmap = 0;
	stripe->csum_error_bitmap = 0;
	stripe->meta_error_bitmap = 0;
}

/*
 * Locate one stripe which has at least one extent in its range.
 *
 * Return 0 if found such stripe, and store its info into @stripe.
 * Return >0 if there is no such stripe in the specified range.
 * Return <0 for error.
 */
static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
					struct btrfs_path *extent_path,
					struct btrfs_path *csum_path,
					struct btrfs_device *dev, u64 physical,
					int mirror_num, u64 logical_start,
					u32 logical_len,
					struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
	const u64 logical_end = logical_start + logical_len;
	u64 cur_logical = logical_start;
	u64 stripe_end;
	u64 extent_start;
	u64 extent_len;
	u64 extent_flags;
	u64 extent_gen;
	int ret;

	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
				   stripe->nr_sectors);
	scrub_stripe_reset_bitmaps(stripe);

	/* The range must be inside the bg. */
	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);

	ret = find_first_extent_item(extent_root, extent_path, logical_start,
				     logical_len);
	/* Either error or not found. */
	if (ret)
		goto out;
	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
			&extent_gen);
	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		stripe->nr_meta_extents++;
	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
		stripe->nr_data_extents++;
	cur_logical = max(extent_start, cur_logical);

	/*
	 * Round down to stripe boundary.
	 *
	 * The extra calculation against bg->start is to handle block groups
	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
	 */
	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
			  bg->start;
	stripe->physical = physical + stripe->logical - logical_start;
	stripe->dev = dev;
	stripe->bg = bg;
	stripe->mirror_num = mirror_num;
	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;

	/* Fill the first extent info into stripe->sectors[] array. */
	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
			     extent_flags, extent_gen);
	cur_logical = extent_start + extent_len;

	/* Fill the extent info for the remaining sectors. */
	while (cur_logical <= stripe_end) {
		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
					     stripe_end - cur_logical + 1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
		get_extent_info(extent_path, &extent_start, &extent_len,
				&extent_flags, &extent_gen);
		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			stripe->nr_meta_extents++;
		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
			stripe->nr_data_extents++;
		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
				     extent_flags, extent_gen);
		cur_logical = extent_start + extent_len;
	}

	/* Now fill the data csum. */
	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
		int sector_nr;
		unsigned long csum_bitmap = 0;

		/* Csum space should have already been allocated. */
		ASSERT(stripe->csums);

		/*
		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
		 * should contain at most 16 sectors.
		 */
		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);

		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
						stripe->logical, stripe_end,
						stripe->csums, &csum_bitmap);
		if (ret < 0)
			goto out;
		if (ret > 0)
			ret = 0;

		for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
			stripe->sectors[sector_nr].csum = stripe->csums +
				sector_nr * fs_info->csum_size;
		}
	}
	set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
out:
	return ret;
}

static void scrub_reset_stripe(struct scrub_stripe *stripe)
{
	scrub_stripe_reset_bitmaps(stripe);

	stripe->nr_meta_extents = 0;
	stripe->nr_data_extents = 0;
	stripe->state = 0;

	for (int i = 0; i < stripe->nr_sectors; i++) {
		stripe->sectors[i].is_metadata = false;
		stripe->sectors[i].csum = NULL;
		stripe->sectors[i].generation = 0;
	}
}

static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
					    struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
				      stripe->bg->length - stripe->logical) >>
				  fs_info->sectorsize_bits;
	u64 stripe_len = BTRFS_STRIPE_LEN;
	int mirror = stripe->mirror_num;
	int i;

	atomic_inc(&stripe->pending_io);

	for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
		struct page *page = scrub_stripe_get_page(stripe, i);
		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);

		/* We're beyond the chunk boundary, no need to read anymore. */
		if (i >= nr_sectors)
			break;

		/* The current sector cannot be merged, submit the bio. */
		if (bbio &&
		    ((i > 0 &&
		      !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
		     bbio->bio.bi_iter.bi_size >= stripe_len)) {
			ASSERT(bbio->bio.bi_iter.bi_size);
			atomic_inc(&stripe->pending_io);
			btrfs_submit_bio(bbio, mirror);
			bbio = NULL;
		}

		if (!bbio) {
			struct btrfs_io_stripe io_stripe = {};
			struct btrfs_io_context *bioc = NULL;
			const u64 logical = stripe->logical +
					    (i << fs_info->sectorsize_bits);
			int err;

			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
					       fs_info, scrub_read_endio, stripe);
			bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;

			io_stripe.is_scrub = true;
			err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
					      &stripe_len, &bioc, &io_stripe,
					      &mirror);
			btrfs_put_bioc(bioc);
			if (err) {
				btrfs_bio_end_io(bbio,
						 errno_to_blk_status(err));
				return;
			}
		}

		__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
	}

	if (bbio) {
		ASSERT(bbio->bio.bi_iter.bi_size);
		atomic_inc(&stripe->pending_io);
		btrfs_submit_bio(bbio, mirror);
	}

	if (atomic_dec_and_test(&stripe->pending_io)) {
		wake_up(&stripe->io_wait);
		INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
		queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
	}
}

static void scrub_submit_initial_read(struct scrub_ctx *sctx,
				      struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = sctx->fs_info;
	struct btrfs_bio *bbio;
	unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
				      stripe->bg->length - stripe->logical) >>
				  fs_info->sectorsize_bits;
	int mirror = stripe->mirror_num;

	ASSERT(stripe->bg);
	ASSERT(stripe->mirror_num > 0);
	ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));

	if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
		scrub_submit_extent_sector_read(sctx, stripe);
		return;
	}

	bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
			       scrub_read_endio, stripe);

	bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
	/* Read the whole range inside the chunk boundary. */
	for (unsigned int cur = 0; cur < nr_sectors; cur++) {
		struct page *page = scrub_stripe_get_page(stripe, cur);
		unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
		int ret;

		ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
		/* We should have allocated enough bio vectors. */
		ASSERT(ret == fs_info->sectorsize);
	}
	atomic_inc(&stripe->pending_io);

	/*
	 * For dev-replace, if the user asks to avoid the source dev, or
	 * the device is missing, we try the next mirror instead.
	 */
	if (sctx->is_dev_replace &&
	    (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
	     !stripe->dev->bdev)) {
		int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
						  stripe->bg->length);

		mirror = calc_next_mirror(mirror, num_copies);
	}
	btrfs_submit_bio(bbio, mirror);
}

static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
{
	int i;

	for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
		if (stripe->sectors[i].is_metadata) {
			struct btrfs_fs_info *fs_info = stripe->bg->fs_info;

			btrfs_err(fs_info,
			"stripe %llu has unrepaired metadata sector at %llu",
				  stripe->logical,
				  stripe->logical + (i << fs_info->sectorsize_bits));
			return true;
		}
	}
	return false;
}

static void submit_initial_group_read(struct scrub_ctx *sctx,
				      unsigned int first_slot,
				      unsigned int nr_stripes)
{
	struct blk_plug plug;

	ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
	ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);

	scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
			      btrfs_stripe_nr_to_offset(nr_stripes));
	blk_start_plug(&plug);
	for (int i = 0; i < nr_stripes; i++) {
		struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];

		/* Those stripes should be initialized. */
		ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
		scrub_submit_initial_read(sctx, stripe);
	}
	blk_finish_plug(&plug);
}
1806
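/*
 * Flush all queued stripes: submit any group that is populated but not
 * yet submitted, wait for read-repair to finish, and for dev-replace
 * write back all good sectors, before resetting the stripes.
 */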
1807static int flush_scrub_stripes(struct scrub_ctx *sctx)
1808{
1809 struct btrfs_fs_info *fs_info = sctx->fs_info;
1810 struct scrub_stripe *stripe;
1811 const int nr_stripes = sctx->cur_stripe;
1812 int ret = 0;
1813
1814 if (!nr_stripes)
1815 return 0;
1816
1817 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1818
1819 /* Submit the stripes which are populated but not submitted. */
1820 if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
1821 const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
1822
1823 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1824 }
1825
1826 for (int i = 0; i < nr_stripes; i++) {
1827 stripe = &sctx->stripes[i];
1828
1829 wait_event(stripe->repair_wait,
1830 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1831 }
1832
1833 /* Submit for dev-replace. */
1834 if (sctx->is_dev_replace) {
1835 /*
1836 * For dev-replace, if we know there is something wrong with
1837 * metadata, we should immediately abort.
1838 */
1839 for (int i = 0; i < nr_stripes; i++) {
1840 if (stripe_has_metadata_error(&sctx->stripes[i])) {
1841 ret = -EIO;
1842 goto out;
1843 }
1844 }
1845 for (int i = 0; i < nr_stripes; i++) {
1846 unsigned long good;
1847
1848 stripe = &sctx->stripes[i];
1849
1850 ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1851
1852 bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1853 &stripe->error_bitmap, stripe->nr_sectors);
1854 scrub_write_sectors(sctx, stripe, good, true);
1855 }
1856 }
1857
1858 /* Wait for the above writebacks to finish. */
1859 for (int i = 0; i < nr_stripes; i++) {
1860 stripe = &sctx->stripes[i];
1861
1862 wait_scrub_stripe_io(stripe);
1863 scrub_reset_stripe(stripe);
1864 }
1865out:
1866 sctx->cur_stripe = 0;
1867 return ret;
1868}
1869
1870static void raid56_scrub_wait_endio(struct bio *bio)
1871{
1872 complete(bio->bi_private);
1873}
1874
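/*
 * Queue the next stripe at or after @logical for scrubbing.
 *
 * Return >0 if there is no more extent in the range, <0 on error, and 0
 * if a stripe was queued (with *@found_logical_ret updated).
 */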
1875static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1876 struct btrfs_device *dev, int mirror_num,
1877 u64 logical, u32 length, u64 physical,
1878 u64 *found_logical_ret)
1879{
1880 struct scrub_stripe *stripe;
1881 int ret;
1882
1883 /*
1884	 * There should always be one slot left, as the caller filling the
1885	 * last slot should flush them all.
1886 */
1887 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1888
1889 /* @found_logical_ret must be specified. */
1890 ASSERT(found_logical_ret);
1891
1892 stripe = &sctx->stripes[sctx->cur_stripe];
1893 scrub_reset_stripe(stripe);
1894 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1895 &sctx->csum_path, dev, physical,
1896 mirror_num, logical, length, stripe);
1897	/* Either >0 meaning no more extents, or <0 for error. */
1898 if (ret)
1899 return ret;
1900 *found_logical_ret = stripe->logical;
1901 sctx->cur_stripe++;
1902
1903 /* We filled one group, submit it. */
1904 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1905 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1906
1907 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1908 }
1909
1910 /* Last slot used, flush them all. */
1911 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1912 return flush_scrub_stripes(sctx);
1913 return 0;
1914}
1915
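/*
 * Scrub one RAID56 P/Q stripe.
 *
 * All data stripes of the full stripe are read and repaired first, then
 * the repaired data is handed to the raid56 code as a cache, so the
 * parity can be checked and regenerated without re-reading the data
 * stripes from disk.
 */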
1916static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1917 struct btrfs_device *scrub_dev,
1918 struct btrfs_block_group *bg,
1919 struct btrfs_chunk_map *map,
1920 u64 full_stripe_start)
1921{
1922 DECLARE_COMPLETION_ONSTACK(io_done);
1923 struct btrfs_fs_info *fs_info = sctx->fs_info;
1924 struct btrfs_raid_bio *rbio;
1925 struct btrfs_io_context *bioc = NULL;
1926 struct btrfs_path extent_path = { 0 };
1927 struct btrfs_path csum_path = { 0 };
1928 struct bio *bio;
1929 struct scrub_stripe *stripe;
1930 bool all_empty = true;
1931 const int data_stripes = nr_data_stripes(map);
1932 unsigned long extent_bitmap = 0;
1933 u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1934 int ret;
1935
1936 ASSERT(sctx->raid56_data_stripes);
1937
1938 /*
1939 * For data stripe search, we cannot re-use the same extent/csum paths,
1940	 * as the data stripe bytenr may be smaller than that of the previous
1941	 * extent. Thus we have to use our own extent/csum paths.
1942 */
1943 extent_path.search_commit_root = 1;
1944 extent_path.skip_locking = 1;
1945 csum_path.search_commit_root = 1;
1946 csum_path.skip_locking = 1;
1947
1948 for (int i = 0; i < data_stripes; i++) {
1949 int stripe_index;
1950 int rot;
1951 u64 physical;
1952
1953 stripe = &sctx->raid56_data_stripes[i];
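		/*
		 * Map data stripe @i of this full stripe to its device slot
		 * and physical offset.  @rot is the number of full stripes
		 * between bg->start and @full_stripe_start.  An illustrative
		 * example: with data_stripes == 2 and @full_stripe_start at
		 * bg->start + 128KiB (one full stripe of 2 * 64KiB), rot == 1
		 * and data stripe 0 lives on map slot (0 + 1) % num_stripes.
		 */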
1954 rot = div_u64(full_stripe_start - bg->start,
1955 data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1956 stripe_index = (i + rot) % map->num_stripes;
1957 physical = map->stripes[stripe_index].physical +
1958 btrfs_stripe_nr_to_offset(rot);
1959
1960 scrub_reset_stripe(stripe);
1961 set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1962 ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
1963 map->stripes[stripe_index].dev, physical, 1,
1964 full_stripe_start + btrfs_stripe_nr_to_offset(i),
1965 BTRFS_STRIPE_LEN, stripe);
1966 if (ret < 0)
1967 goto out;
1968 /*
1969	 * No extent in this data stripe, need to manually mark it
1970	 * initialized to make the later read submission happy.
1971 */
1972 if (ret > 0) {
1973 stripe->logical = full_stripe_start +
1974 btrfs_stripe_nr_to_offset(i);
1975 stripe->dev = map->stripes[stripe_index].dev;
1976 stripe->mirror_num = 1;
1977 set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1978 }
1979 }
1980
1981 /* Check if all data stripes are empty. */
1982 for (int i = 0; i < data_stripes; i++) {
1983 stripe = &sctx->raid56_data_stripes[i];
1984 if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1985 all_empty = false;
1986 break;
1987 }
1988 }
1989 if (all_empty) {
1990 ret = 0;
1991 goto out;
1992 }
1993
1994 for (int i = 0; i < data_stripes; i++) {
1995 stripe = &sctx->raid56_data_stripes[i];
1996 scrub_submit_initial_read(sctx, stripe);
1997 }
1998 for (int i = 0; i < data_stripes; i++) {
1999 stripe = &sctx->raid56_data_stripes[i];
2000
2001 wait_event(stripe->repair_wait,
2002 test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
2003 }
2004 /* For now, no zoned support for RAID56. */
2005 ASSERT(!btrfs_is_zoned(sctx->fs_info));
2006
2007 /*
2008 * Now all data stripes are properly verified. Check if we have any
2009	 * unrepaired sectors; if so, abort immediately or we could further
2010	 * corrupt the P/Q stripes.
2011 *
2012 * During the loop, also populate extent_bitmap.
2013 */
2014 for (int i = 0; i < data_stripes; i++) {
2015 unsigned long error;
2016
2017 stripe = &sctx->raid56_data_stripes[i];
2018
2019 /*
2020	 * We should only check the errors where there is an extent, as we
2021	 * may hit an empty data stripe on a missing device.
2022 */
2023 bitmap_and(&error, &stripe->error_bitmap,
2024 &stripe->extent_sector_bitmap, stripe->nr_sectors);
2025 if (!bitmap_empty(&error, stripe->nr_sectors)) {
2026 btrfs_err(fs_info,
2027"unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
2028 full_stripe_start, i, stripe->nr_sectors,
2029 &error);
2030 ret = -EIO;
2031 goto out;
2032 }
2033 bitmap_or(&extent_bitmap, &extent_bitmap,
2034 &stripe->extent_sector_bitmap, stripe->nr_sectors);
2035 }
2036
2037 /* Now we can check and regenerate the P/Q stripe. */
2038 bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
2039 bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
2040 bio->bi_private = &io_done;
2041 bio->bi_end_io = raid56_scrub_wait_endio;
2042
2043 btrfs_bio_counter_inc_blocked(fs_info);
2044 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
2045 &length, &bioc, NULL, NULL);
2046 if (ret < 0) {
2047 btrfs_put_bioc(bioc);
2048 btrfs_bio_counter_dec(fs_info);
2049 goto out;
2050 }
2051 rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
2052 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
2053 btrfs_put_bioc(bioc);
2054 if (!rbio) {
2055 ret = -ENOMEM;
2056 btrfs_bio_counter_dec(fs_info);
2057 goto out;
2058 }
2059	/* Use the recovered stripes as cache to avoid reading them from disk again. */
2060 for (int i = 0; i < data_stripes; i++) {
2061 stripe = &sctx->raid56_data_stripes[i];
2062
2063 raid56_parity_cache_data_pages(rbio, stripe->pages,
2064 full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
2065 }
2066 raid56_parity_submit_scrub_rbio(rbio);
2067 wait_for_completion_io(&io_done);
2068 ret = blk_status_to_errno(bio->bi_status);
2069 bio_put(bio);
2070 btrfs_bio_counter_dec(fs_info);
2071
2072 btrfs_release_path(&extent_path);
2073 btrfs_release_path(&csum_path);
2074out:
2075 return ret;
2076}
2077
2078/*
2079 * Scrub one range which can only have a simple mirror based profile.
2080 * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
2081 * RAID0/RAID10).
2082 *
2083 * Since we may need to handle a subset of a block group, we need the
2084 * @logical_start and @logical_length parameters.
2085 */
2086static int scrub_simple_mirror(struct scrub_ctx *sctx,
2087 struct btrfs_block_group *bg,
2088 struct btrfs_chunk_map *map,
2089 u64 logical_start, u64 logical_length,
2090 struct btrfs_device *device,
2091 u64 physical, int mirror_num)
2092{
2093 struct btrfs_fs_info *fs_info = sctx->fs_info;
2094 const u64 logical_end = logical_start + logical_length;
2095 u64 cur_logical = logical_start;
2096 int ret;
2097
2098 /* The range must be inside the bg */
2099 ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
2100
2101	/* Go through each extent item inside the logical range */
2102 while (cur_logical < logical_end) {
2103 u64 found_logical = U64_MAX;
2104 u64 cur_physical = physical + cur_logical - logical_start;
2105
2106 /* Canceled? */
2107 if (atomic_read(&fs_info->scrub_cancel_req) ||
2108 atomic_read(&sctx->cancel_req)) {
2109 ret = -ECANCELED;
2110 break;
2111 }
2112 /* Paused? */
2113 if (atomic_read(&fs_info->scrub_pause_req)) {
2114 /* Push queued extents */
2115 scrub_blocked_if_needed(fs_info);
2116 }
2117 /* Block group removed? */
2118 spin_lock(&bg->lock);
2119 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
2120 spin_unlock(&bg->lock);
2121 ret = 0;
2122 break;
2123 }
2124 spin_unlock(&bg->lock);
2125
2126 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2127 cur_logical, logical_end - cur_logical,
2128 cur_physical, &found_logical);
2129 if (ret > 0) {
2130	/* No more extents, just update the accounting */
2131 sctx->stat.last_physical = physical + logical_length;
2132 ret = 0;
2133 break;
2134 }
2135 if (ret < 0)
2136 break;
2137
2138 /* queue_scrub_stripe() returned 0, @found_logical must be updated. */
2139 ASSERT(found_logical != U64_MAX);
2140 cur_logical = found_logical + BTRFS_STRIPE_LEN;
2141
2142	/* Don't hold the CPU for too long */
2143 cond_resched();
2144 }
2145 return ret;
2146}
2147
2148/* Calculate the full stripe length for simple stripe based profiles */
2149static u64 simple_stripe_full_stripe_len(const struct btrfs_chunk_map *map)
2150{
2151 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2152 BTRFS_BLOCK_GROUP_RAID10));
2153
2154 return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2155}
2156
2157/* Get the logical bytenr for the stripe */
2158static u64 simple_stripe_get_logical(struct btrfs_chunk_map *map,
2159 struct btrfs_block_group *bg,
2160 int stripe_index)
2161{
2162 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2163 BTRFS_BLOCK_GROUP_RAID10));
2164 ASSERT(stripe_index < map->num_stripes);
2165
2166 /*
2167 * (stripe_index / sub_stripes) gives how many data stripes we need to
2168 * skip.
2169 */
2170 return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2171 bg->start;
2172}
2173
2174/* Get the mirror number for the stripe */
2175static int simple_stripe_mirror_num(struct btrfs_chunk_map *map, int stripe_index)
2176{
2177 ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2178 BTRFS_BLOCK_GROUP_RAID10));
2179 ASSERT(stripe_index < map->num_stripes);
2180
2181	/* For RAID0 it's fixed to 1, for RAID10 the mirror alternates 1,2,1,2... */
2182 return stripe_index % map->sub_stripes + 1;
2183}
2184
2185static int scrub_simple_stripe(struct scrub_ctx *sctx,
2186 struct btrfs_block_group *bg,
2187 struct btrfs_chunk_map *map,
2188 struct btrfs_device *device,
2189 int stripe_index)
2190{
2191 const u64 logical_increment = simple_stripe_full_stripe_len(map);
2192 const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2193 const u64 orig_physical = map->stripes[stripe_index].physical;
2194 const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2195 u64 cur_logical = orig_logical;
2196 u64 cur_physical = orig_physical;
2197 int ret = 0;
2198
2199 while (cur_logical < bg->start + bg->length) {
2200 /*
2201 * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2202 * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2203 * this stripe.
2204 */
2205 ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2206 BTRFS_STRIPE_LEN, device, cur_physical,
2207 mirror_num);
2208 if (ret)
2209 return ret;
2210 /* Skip to next stripe which belongs to the target device */
2211 cur_logical += logical_increment;
2212 /* For physical offset, we just go to next stripe */
2213 cur_physical += BTRFS_STRIPE_LEN;
2214 }
2215 return ret;
2216}
2217
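/*
 * Scrub one device stripe (dev extent) of a chunk.
 *
 * Simple profiles (SINGLE/DUP/RAID1/RAID1C*) and RAID0/RAID10 are
 * dispatched to their helpers; only RAID56 goes through the physical
 * offset iteration below, where parity stripes are handled separately.
 */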
2218static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2219 struct btrfs_block_group *bg,
2220 struct btrfs_chunk_map *map,
2221 struct btrfs_device *scrub_dev,
2222 int stripe_index)
2223{
2224 struct btrfs_fs_info *fs_info = sctx->fs_info;
2225 const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2226 const u64 chunk_logical = bg->start;
2227 int ret;
2228 int ret2;
2229 u64 physical = map->stripes[stripe_index].physical;
2230 const u64 dev_stripe_len = btrfs_calc_stripe_length(map);
2231 const u64 physical_end = physical + dev_stripe_len;
2232 u64 logical;
2233 u64 logic_end;
2234 /* The logical increment after finishing one stripe */
2235 u64 increment;
2236 /* Offset inside the chunk */
2237 u64 offset;
2238 u64 stripe_logical;
2239 int stop_loop = 0;
2240
2241 /* Extent_path should be released by now. */
2242 ASSERT(sctx->extent_path.nodes[0] == NULL);
2243
2244 scrub_blocked_if_needed(fs_info);
2245
2246 if (sctx->is_dev_replace &&
2247 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2248 mutex_lock(&sctx->wr_lock);
2249 sctx->write_pointer = physical;
2250 mutex_unlock(&sctx->wr_lock);
2251 }
2252
2253 /* Prepare the extra data stripes used by RAID56. */
2254 if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2255 ASSERT(sctx->raid56_data_stripes == NULL);
2256
2257 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2258 sizeof(struct scrub_stripe),
2259 GFP_KERNEL);
2260 if (!sctx->raid56_data_stripes) {
2261 ret = -ENOMEM;
2262 goto out;
2263 }
2264 for (int i = 0; i < nr_data_stripes(map); i++) {
2265 ret = init_scrub_stripe(fs_info,
2266 &sctx->raid56_data_stripes[i]);
2267 if (ret < 0)
2268 goto out;
2269 sctx->raid56_data_stripes[i].bg = bg;
2270 sctx->raid56_data_stripes[i].sctx = sctx;
2271 }
2272 }
2273 /*
2274	 * There used to be a big double loop handling all profiles with the
2275	 * same routine, which grew larger and more convoluted over time.
2276	 *
2277	 * So here we handle each profile differently, so that simpler profiles
2278	 * have a simpler scrubbing function.
2279 */
2280 if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2281 BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2282 /*
2283		 * The above check rules out all complex profiles, the remaining
2284		 * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2285		 * mirrored duplication without striping.
2286		 *
2287		 * Only @physical and @mirror_num need to be calculated using
2288		 * @stripe_index.
2289 */
2290 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2291 scrub_dev, map->stripes[stripe_index].physical,
2292 stripe_index + 1);
2293 offset = 0;
2294 goto out;
2295 }
2296 if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2297 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2298 offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2299 goto out;
2300 }
2301
2302 /* Only RAID56 goes through the old code */
2303 ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2304 ret = 0;
2305
2306 /* Calculate the logical end of the stripe */
2307 get_raid56_logic_offset(physical_end, stripe_index,
2308 map, &logic_end, NULL);
2309 logic_end += chunk_logical;
2310
2311 /* Initialize @offset in case we need to go to out: label */
2312 get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2313 increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
2314
2315 /*
2316	 * Due to the rotation, for RAID56 it's better to iterate the stripes
2317	 * using their physical offsets.
2318 */
2319 while (physical < physical_end) {
2320 ret = get_raid56_logic_offset(physical, stripe_index, map,
2321 &logical, &stripe_logical);
2322 logical += chunk_logical;
2323 if (ret) {
2324			/* It is a parity stripe */
2325 stripe_logical += chunk_logical;
2326 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2327 map, stripe_logical);
2328 if (ret)
2329 goto out;
2330 goto next;
2331 }
2332
2333 /*
2334		 * Now we're at a data stripe, scrub each extent in the range.
2335		 *
2336		 * At this stage, if we ignore the repair part, inside each data
2337		 * stripe it is no different from the SINGLE profile.
2338 * We can reuse scrub_simple_mirror() here, as the repair part
2339 * is still based on @mirror_num.
2340 */
2341 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2342 scrub_dev, physical, 1);
2343 if (ret < 0)
2344 goto out;
2345next:
2346 logical += increment;
2347 physical += BTRFS_STRIPE_LEN;
2348 spin_lock(&sctx->stat_lock);
2349 if (stop_loop)
2350 sctx->stat.last_physical =
2351 map->stripes[stripe_index].physical + dev_stripe_len;
2352 else
2353 sctx->stat.last_physical = physical;
2354 spin_unlock(&sctx->stat_lock);
2355 if (stop_loop)
2356 break;
2357 }
2358out:
2359 ret2 = flush_scrub_stripes(sctx);
2360 if (!ret)
2361 ret = ret2;
2362 btrfs_release_path(&sctx->extent_path);
2363 btrfs_release_path(&sctx->csum_path);
2364
2365 if (sctx->raid56_data_stripes) {
2366 for (int i = 0; i < nr_data_stripes(map); i++)
2367 release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2368 kfree(sctx->raid56_data_stripes);
2369 sctx->raid56_data_stripes = NULL;
2370 }
2371
2372 if (sctx->is_dev_replace && ret >= 0) {
2373 int ret2;
2374
2375 ret2 = sync_write_pointer_for_zoned(sctx,
2376 chunk_logical + offset,
2377 map->stripes[stripe_index].physical,
2378 physical_end);
2379 if (ret2)
2380 ret = ret2;
2381 }
2382
2383 return ret < 0 ? ret : 0;
2384}
2385
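/*
 * Scrub the part of the chunk that lives on @scrub_dev at @dev_offset,
 * by finding the matching stripe(s) in the chunk map.
 */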
2386static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2387 struct btrfs_block_group *bg,
2388 struct btrfs_device *scrub_dev,
2389 u64 dev_offset,
2390 u64 dev_extent_len)
2391{
2392 struct btrfs_fs_info *fs_info = sctx->fs_info;
2393 struct btrfs_chunk_map *map;
2394 int i;
2395 int ret = 0;
2396
2397 map = btrfs_find_chunk_map(fs_info, bg->start, bg->length);
2398 if (!map) {
2399 /*
2400 * Might have been an unused block group deleted by the cleaner
2401 * kthread or relocation.
2402 */
2403 spin_lock(&bg->lock);
2404 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2405 ret = -EINVAL;
2406 spin_unlock(&bg->lock);
2407
2408 return ret;
2409 }
2410 if (map->start != bg->start)
2411 goto out;
2412 if (map->chunk_len < dev_extent_len)
2413 goto out;
2414
2415 for (i = 0; i < map->num_stripes; ++i) {
2416 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2417 map->stripes[i].physical == dev_offset) {
2418 ret = scrub_stripe(sctx, bg, map, scrub_dev, i);
2419 if (ret)
2420 goto out;
2421 }
2422 }
2423out:
2424 btrfs_free_chunk_map(map);
2425
2426 return ret;
2427}
2428
2429static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2430 struct btrfs_block_group *cache)
2431{
2432 struct btrfs_fs_info *fs_info = cache->fs_info;
2433 struct btrfs_trans_handle *trans;
2434
2435 if (!btrfs_is_zoned(fs_info))
2436 return 0;
2437
2438 btrfs_wait_block_group_reservations(cache);
2439 btrfs_wait_nocow_writers(cache);
2440 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2441
2442 trans = btrfs_join_transaction(root);
2443 if (IS_ERR(trans))
2444 return PTR_ERR(trans);
2445 return btrfs_commit_transaction(trans);
2446}
2447
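/*
 * Walk all dev extents of @scrub_dev in [@start, @end) using the commit
 * root, mark each corresponding block group RO (mandatory for
 * dev-replace, best effort for scrub), and scrub the chunk.
 */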
2448static noinline_for_stack
2449int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2450 struct btrfs_device *scrub_dev, u64 start, u64 end)
2451{
2452 struct btrfs_dev_extent *dev_extent = NULL;
2453 struct btrfs_path *path;
2454 struct btrfs_fs_info *fs_info = sctx->fs_info;
2455 struct btrfs_root *root = fs_info->dev_root;
2456 u64 chunk_offset;
2457 int ret = 0;
2458 int ro_set;
2459 int slot;
2460 struct extent_buffer *l;
2461 struct btrfs_key key;
2462 struct btrfs_key found_key;
2463 struct btrfs_block_group *cache;
2464 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2465
2466 path = btrfs_alloc_path();
2467 if (!path)
2468 return -ENOMEM;
2469
2470 path->reada = READA_FORWARD;
2471 path->search_commit_root = 1;
2472 path->skip_locking = 1;
2473
2474 key.objectid = scrub_dev->devid;
2475 key.offset = 0ull;
2476 key.type = BTRFS_DEV_EXTENT_KEY;
2477
2478 while (1) {
2479 u64 dev_extent_len;
2480
2481 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2482 if (ret < 0)
2483 break;
2484 if (ret > 0) {
2485 if (path->slots[0] >=
2486 btrfs_header_nritems(path->nodes[0])) {
2487 ret = btrfs_next_leaf(root, path);
2488 if (ret < 0)
2489 break;
2490 if (ret > 0) {
2491 ret = 0;
2492 break;
2493 }
2494 } else {
2495 ret = 0;
2496 }
2497 }
2498
2499 l = path->nodes[0];
2500 slot = path->slots[0];
2501
2502 btrfs_item_key_to_cpu(l, &found_key, slot);
2503
2504 if (found_key.objectid != scrub_dev->devid)
2505 break;
2506
2507 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2508 break;
2509
2510 if (found_key.offset >= end)
2511 break;
2512
2513 if (found_key.offset < key.offset)
2514 break;
2515
2516 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2517 dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2518
2519 if (found_key.offset + dev_extent_len <= start)
2520 goto skip;
2521
2522 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2523
2524 /*
2525 * get a reference on the corresponding block group to prevent
2526 * the chunk from going away while we scrub it
2527 */
2528 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2529
2530		/* Some chunks are removed but not committed to disk yet,
2531		 * continue scrubbing. */
2532 if (!cache)
2533 goto skip;
2534
2535 ASSERT(cache->start <= chunk_offset);
2536 /*
2537 * We are using the commit root to search for device extents, so
2538 * that means we could have found a device extent item from a
2539 * block group that was deleted in the current transaction. The
2540 * logical start offset of the deleted block group, stored at
2541 * @chunk_offset, might be part of the logical address range of
2542 * a new block group (which uses different physical extents).
2543 * In this case btrfs_lookup_block_group() has returned the new
2544 * block group, and its start address is less than @chunk_offset.
2545 *
2546 * We skip such new block groups, because it's pointless to
2547 * process them, as we won't find their extents because we search
2548 * for them using the commit root of the extent tree. For a device
2549 * replace it's also fine to skip it, we won't miss copying them
2550 * to the target device because we have the write duplication
2551 * setup through the regular write path (by btrfs_map_block()),
2552 * and we have committed a transaction when we started the device
2553 * replace, right after setting up the device replace state.
2554 */
2555 if (cache->start < chunk_offset) {
2556 btrfs_put_block_group(cache);
2557 goto skip;
2558 }
2559
2560 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2561 if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2562 btrfs_put_block_group(cache);
2563 goto skip;
2564 }
2565 }
2566
2567 /*
2568 * Make sure that while we are scrubbing the corresponding block
2569 * group doesn't get its logical address and its device extents
2570 * reused for another block group, which can possibly be of a
2571 * different type and different profile. We do this to prevent
2572 * false error detections and crashes due to bogus attempts to
2573 * repair extents.
2574 */
2575 spin_lock(&cache->lock);
2576 if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2577 spin_unlock(&cache->lock);
2578 btrfs_put_block_group(cache);
2579 goto skip;
2580 }
2581 btrfs_freeze_block_group(cache);
2582 spin_unlock(&cache->lock);
2583
2584 /*
2585		 * We need to call btrfs_inc_block_group_ro() with scrub paused,
2586 * to avoid deadlock caused by:
2587 * btrfs_inc_block_group_ro()
2588 * -> btrfs_wait_for_commit()
2589 * -> btrfs_commit_transaction()
2590 * -> btrfs_scrub_pause()
2591 */
2592 scrub_pause_on(fs_info);
2593
2594 /*
2595 * Don't do chunk preallocation for scrub.
2596 *
2597 * This is especially important for SYSTEM bgs, or we can hit
2598 * -EFBIG from btrfs_finish_chunk_alloc() like:
2599 * 1. The only SYSTEM bg is marked RO.
2600 * Since SYSTEM bg is small, that's pretty common.
2601		 * 2. A new SYSTEM bg will be allocated
2602		 *    Because the regular RO-marking path allocates a new chunk.
2603		 * 3. The new SYSTEM bg is empty and will get cleaned up
2604		 *    Before the cleanup really happens, it's marked RO again.
2605		 * 4. The empty SYSTEM bg gets scrubbed
2606		 *    We go back to 2.
2607		 *
2608		 * This can easily inflate the number of SYSTEM chunks if the
2609		 * cleaner thread can't be triggered fast enough, and use up all
2610		 * the space of btrfs_super_block::sys_chunk_array.
2611 *
2612 * While for dev replace, we need to try our best to mark block
2613 * group RO, to prevent race between:
2614 * - Write duplication
2615 * Contains latest data
2616 * - Scrub copy
2617 * Contains data from commit tree
2618 *
2619 * If target block group is not marked RO, nocow writes can
2620 * be overwritten by scrub copy, causing data corruption.
2621 * So for dev-replace, it's not allowed to continue if a block
2622 * group is not RO.
2623 */
2624 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2625 if (!ret && sctx->is_dev_replace) {
2626 ret = finish_extent_writes_for_zoned(root, cache);
2627 if (ret) {
2628 btrfs_dec_block_group_ro(cache);
2629 scrub_pause_off(fs_info);
2630 btrfs_put_block_group(cache);
2631 break;
2632 }
2633 }
2634
2635 if (ret == 0) {
2636 ro_set = 1;
2637 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2638 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2639 /*
2640			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
2641			 * fails to create a new chunk for metadata.
2642			 * That is not a problem for scrub, because metadata
2643			 * is always COWed, and our scrub pauses transaction
2644			 * commits.
2645			 *
2646			 * For RAID56 chunks, we have to mark them read-only
2647			 * for scrub, as later we would use our own cache
2648			 * outside the RAID56 realm.
2649			 * Thus we want the RAID56 bg to be marked RO to
2650			 * prevent RMW from screwing up our cache.
2651 */
2652 ro_set = 0;
2653 } else if (ret == -ETXTBSY) {
2654 btrfs_warn(fs_info,
2655 "skipping scrub of block group %llu due to active swapfile",
2656 cache->start);
2657 scrub_pause_off(fs_info);
2658 ret = 0;
2659 goto skip_unfreeze;
2660 } else {
2661 btrfs_warn(fs_info,
2662 "failed setting block group ro: %d", ret);
2663 btrfs_unfreeze_block_group(cache);
2664 btrfs_put_block_group(cache);
2665 scrub_pause_off(fs_info);
2666 break;
2667 }
2668
2669 /*
2670		 * Now the target block group is marked RO, wait for nocow writes to
2671 * finish before dev-replace.
2672 * COW is fine, as COW never overwrites extents in commit tree.
2673 */
2674 if (sctx->is_dev_replace) {
2675 btrfs_wait_nocow_writers(cache);
2676 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2677 cache->length);
2678 }
2679
2680 scrub_pause_off(fs_info);
2681 down_write(&dev_replace->rwsem);
2682 dev_replace->cursor_right = found_key.offset + dev_extent_len;
2683 dev_replace->cursor_left = found_key.offset;
2684 dev_replace->item_needs_writeback = 1;
2685 up_write(&dev_replace->rwsem);
2686
2687 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2688 dev_extent_len);
2689 if (sctx->is_dev_replace &&
2690 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2691 cache, found_key.offset))
2692 ro_set = 0;
2693
2694 down_write(&dev_replace->rwsem);
2695 dev_replace->cursor_left = dev_replace->cursor_right;
2696 dev_replace->item_needs_writeback = 1;
2697 up_write(&dev_replace->rwsem);
2698
2699 if (ro_set)
2700 btrfs_dec_block_group_ro(cache);
2701
2702 /*
2703 * We might have prevented the cleaner kthread from deleting
2704 * this block group if it was already unused because we raced
2705 * and set it to RO mode first. So add it back to the unused
2706 * list, otherwise it might not ever be deleted unless a manual
2707 * balance is triggered or it becomes used and unused again.
2708 */
2709 spin_lock(&cache->lock);
2710 if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2711 !cache->ro && cache->reserved == 0 && cache->used == 0) {
2712 spin_unlock(&cache->lock);
2713 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2714 btrfs_discard_queue_work(&fs_info->discard_ctl,
2715 cache);
2716 else
2717 btrfs_mark_bg_unused(cache);
2718 } else {
2719 spin_unlock(&cache->lock);
2720 }
2721skip_unfreeze:
2722 btrfs_unfreeze_block_group(cache);
2723 btrfs_put_block_group(cache);
2724 if (ret)
2725 break;
2726 if (sctx->is_dev_replace &&
2727 atomic64_read(&dev_replace->num_write_errors) > 0) {
2728 ret = -EIO;
2729 break;
2730 }
2731 if (sctx->stat.malloc_errors > 0) {
2732 ret = -ENOMEM;
2733 break;
2734 }
2735skip:
2736 key.offset = found_key.offset + dev_extent_len;
2737 btrfs_release_path(path);
2738 }
2739
2740 btrfs_free_path(path);
2741
2742 return ret;
2743}
2744
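/*
 * Read and verify one super block copy: check its csum, that its
 * generation matches @generation, and that it passes the generic super
 * block validation.
 */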
2745static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2746 struct page *page, u64 physical, u64 generation)
2747{
2748 struct btrfs_fs_info *fs_info = sctx->fs_info;
2749 struct bio_vec bvec;
2750 struct bio bio;
2751 struct btrfs_super_block *sb = page_address(page);
2752 int ret;
2753
2754 bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2755 bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2756 __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2757 ret = submit_bio_wait(&bio);
2758 bio_uninit(&bio);
2759
2760 if (ret < 0)
2761 return ret;
2762 ret = btrfs_check_super_csum(fs_info, sb);
2763 if (ret != 0) {
2764 btrfs_err_rl(fs_info,
2765 "super block at physical %llu devid %llu has bad csum",
2766 physical, dev->devid);
2767 return -EIO;
2768 }
2769 if (btrfs_super_generation(sb) != generation) {
2770 btrfs_err_rl(fs_info,
2771"super block at physical %llu devid %llu has bad generation %llu expect %llu",
2772 physical, dev->devid,
2773 btrfs_super_generation(sb), generation);
2774 return -EUCLEAN;
2775 }
2776
2777 return btrfs_validate_super(fs_info, sb, -1);
2778}
2779
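/*
 * Verify all super block copies of @scrub_dev that fit within the
 * device's committed size.  Errors are only accounted in the stats,
 * they don't fail the scrub.
 */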
2780static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2781 struct btrfs_device *scrub_dev)
2782{
2783 int i;
2784 u64 bytenr;
2785 u64 gen;
2786 int ret = 0;
2787 struct page *page;
2788 struct btrfs_fs_info *fs_info = sctx->fs_info;
2789
2790 if (BTRFS_FS_ERROR(fs_info))
2791 return -EROFS;
2792
2793 page = alloc_page(GFP_KERNEL);
2794 if (!page) {
2795 spin_lock(&sctx->stat_lock);
2796 sctx->stat.malloc_errors++;
2797 spin_unlock(&sctx->stat_lock);
2798 return -ENOMEM;
2799 }
2800
2801	/* Seed devices of a new filesystem have their own generation. */
2802 if (scrub_dev->fs_devices != fs_info->fs_devices)
2803 gen = scrub_dev->generation;
2804 else
2805 gen = btrfs_get_last_trans_committed(fs_info);
2806
2807 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2808 bytenr = btrfs_sb_offset(i);
2809 if (bytenr + BTRFS_SUPER_INFO_SIZE >
2810 scrub_dev->commit_total_bytes)
2811 break;
2812 if (!btrfs_check_super_location(scrub_dev, bytenr))
2813 continue;
2814
2815 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2816 if (ret) {
2817 spin_lock(&sctx->stat_lock);
2818 sctx->stat.super_errors++;
2819 spin_unlock(&sctx->stat_lock);
2820 }
2821 }
2822 __free_page(page);
2823 return 0;
2824}
2825
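/* Drop a reference on the scrub workqueue, destroying it on the last put. */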
2826static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2827{
2828 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2829 &fs_info->scrub_lock)) {
2830 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2831
2832 fs_info->scrub_workers = NULL;
2833 mutex_unlock(&fs_info->scrub_lock);
2834
2835 if (scrub_workers)
2836 destroy_workqueue(scrub_workers);
2837 }
2838}
2839
2840/*
2841 * Get a reference count on fs_info->scrub_workers; start the workers if necessary.
2842 */
2843static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2844{
2845 struct workqueue_struct *scrub_workers = NULL;
2846 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2847 int max_active = fs_info->thread_pool_size;
2848 int ret = -ENOMEM;
2849
2850 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2851 return 0;
2852
2853 scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2854 if (!scrub_workers)
2855 return -ENOMEM;
2856
2857 mutex_lock(&fs_info->scrub_lock);
2858 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2859 ASSERT(fs_info->scrub_workers == NULL);
2860 fs_info->scrub_workers = scrub_workers;
2861 refcount_set(&fs_info->scrub_workers_refcnt, 1);
2862 mutex_unlock(&fs_info->scrub_lock);
2863 return 0;
2864 }
2865	/* Another thread raced in and created the workers for us. */
2866 refcount_inc(&fs_info->scrub_workers_refcnt);
2867 mutex_unlock(&fs_info->scrub_lock);
2868
2869 ret = 0;
2870
2871 destroy_workqueue(scrub_workers);
2872 return ret;
2873}
2874
2875int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2876 u64 end, struct btrfs_scrub_progress *progress,
2877 int readonly, int is_dev_replace)
2878{
2879 struct btrfs_dev_lookup_args args = { .devid = devid };
2880 struct scrub_ctx *sctx;
2881 int ret;
2882 struct btrfs_device *dev;
2883 unsigned int nofs_flag;
2884 bool need_commit = false;
2885
2886 if (btrfs_fs_closing(fs_info))
2887 return -EAGAIN;
2888
2889 /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2890 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2891
2892 /*
2893 * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2894 * value (max nodesize / min sectorsize), thus nodesize should always
2895 * be fine.
2896 */
2897 ASSERT(fs_info->nodesize <=
2898 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2899
2900 /* Allocate outside of device_list_mutex */
2901 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2902 if (IS_ERR(sctx))
2903 return PTR_ERR(sctx);
2904
2905 ret = scrub_workers_get(fs_info);
2906 if (ret)
2907 goto out_free_ctx;
2908
2909 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2910 dev = btrfs_find_device(fs_info->fs_devices, &args);
2911 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2912 !is_dev_replace)) {
2913 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2914 ret = -ENODEV;
2915 goto out;
2916 }
2917
2918 if (!is_dev_replace && !readonly &&
2919 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2920 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2921 btrfs_err_in_rcu(fs_info,
2922 "scrub on devid %llu: filesystem on %s is not writable",
2923 devid, btrfs_dev_name(dev));
2924 ret = -EROFS;
2925 goto out;
2926 }
2927
2928 mutex_lock(&fs_info->scrub_lock);
2929 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2930 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2931 mutex_unlock(&fs_info->scrub_lock);
2932 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2933 ret = -EIO;
2934 goto out;
2935 }
2936
2937 down_read(&fs_info->dev_replace.rwsem);
2938 if (dev->scrub_ctx ||
2939 (!is_dev_replace &&
2940 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2941 up_read(&fs_info->dev_replace.rwsem);
2942 mutex_unlock(&fs_info->scrub_lock);
2943 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2944 ret = -EINPROGRESS;
2945 goto out;
2946 }
2947 up_read(&fs_info->dev_replace.rwsem);
2948
2949 sctx->readonly = readonly;
2950 dev->scrub_ctx = sctx;
2951 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2952
2953 /*
2954	 * By checking @scrub_pause_req here, we can avoid a
2955	 * race between a committing transaction and scrubbing.
2956 */
2957 __scrub_blocked_if_needed(fs_info);
2958 atomic_inc(&fs_info->scrubs_running);
2959 mutex_unlock(&fs_info->scrub_lock);
2960
2961 /*
2962 * In order to avoid deadlock with reclaim when there is a transaction
2963 * trying to pause scrub, make sure we use GFP_NOFS for all the
2964 * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2965 * invoked by our callees. The pausing request is done when the
2966 * transaction commit starts, and it blocks the transaction until scrub
2967 * is paused (done at specific points at scrub_stripe() or right above
2968 * before incrementing fs_info->scrubs_running).
2969 */
2970 nofs_flag = memalloc_nofs_save();
2971 if (!is_dev_replace) {
2972 u64 old_super_errors;
2973
2974 spin_lock(&sctx->stat_lock);
2975 old_super_errors = sctx->stat.super_errors;
2976 spin_unlock(&sctx->stat_lock);
2977
2978 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2979 /*
2980		 * By holding the device list mutex, we avoid racing with
2981		 * super block writes kicked off by a log tree sync.
2982 */
2983 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2984 ret = scrub_supers(sctx, dev);
2985 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2986
2987 spin_lock(&sctx->stat_lock);
2988 /*
2989		 * Super block errors found, but we cannot commit a transaction
2990		 * in the current context, since btrfs_commit_transaction() needs
2991		 * to pause the currently running scrub (held by ourselves).
2992 */
2993 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2994 need_commit = true;
2995 spin_unlock(&sctx->stat_lock);
2996 }
2997
2998 if (!ret)
2999 ret = scrub_enumerate_chunks(sctx, dev, start, end);
3000 memalloc_nofs_restore(nofs_flag);
3001
3002 atomic_dec(&fs_info->scrubs_running);
3003 wake_up(&fs_info->scrub_pause_wait);
3004
3005 if (progress)
3006 memcpy(progress, &sctx->stat, sizeof(*progress));
3007
3008 if (!is_dev_replace)
3009 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
3010 ret ? "not finished" : "finished", devid, ret);
3011
3012 mutex_lock(&fs_info->scrub_lock);
3013 dev->scrub_ctx = NULL;
3014 mutex_unlock(&fs_info->scrub_lock);
3015
3016 scrub_workers_put(fs_info);
3017 scrub_put_ctx(sctx);
3018
3019 /*
3020 * We found some super block errors before, now try to force a
3021 * transaction commit, as scrub has finished.
3022 */
3023 if (need_commit) {
3024 struct btrfs_trans_handle *trans;
3025
3026 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3027 if (IS_ERR(trans)) {
3028 ret = PTR_ERR(trans);
3029 btrfs_err(fs_info,
3030 "scrub: failed to start transaction to fix super block errors: %d", ret);
3031 return ret;
3032 }
3033 ret = btrfs_commit_transaction(trans);
3034 if (ret < 0)
3035 btrfs_err(fs_info,
3036 "scrub: failed to commit transaction to fix super block errors: %d", ret);
3037 }
3038 return ret;
3039out:
3040 scrub_workers_put(fs_info);
3041out_free_ctx:
3042 scrub_free_ctx(sctx);
3043
3044 return ret;
3045}
3046
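/*
 * Request all running scrubs to pause and wait until they all reach the
 * paused state.  Paired with btrfs_scrub_continue().
 */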
3047void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
3048{
3049 mutex_lock(&fs_info->scrub_lock);
3050 atomic_inc(&fs_info->scrub_pause_req);
3051 while (atomic_read(&fs_info->scrubs_paused) !=
3052 atomic_read(&fs_info->scrubs_running)) {
3053 mutex_unlock(&fs_info->scrub_lock);
3054 wait_event(fs_info->scrub_pause_wait,
3055 atomic_read(&fs_info->scrubs_paused) ==
3056 atomic_read(&fs_info->scrubs_running));
3057 mutex_lock(&fs_info->scrub_lock);
3058 }
3059 mutex_unlock(&fs_info->scrub_lock);
3060}
3061
3062void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
3063{
3064 atomic_dec(&fs_info->scrub_pause_req);
3065 wake_up(&fs_info->scrub_pause_wait);
3066}
3067
3068int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3069{
3070 mutex_lock(&fs_info->scrub_lock);
3071 if (!atomic_read(&fs_info->scrubs_running)) {
3072 mutex_unlock(&fs_info->scrub_lock);
3073 return -ENOTCONN;
3074 }
3075
3076 atomic_inc(&fs_info->scrub_cancel_req);
3077 while (atomic_read(&fs_info->scrubs_running)) {
3078 mutex_unlock(&fs_info->scrub_lock);
3079 wait_event(fs_info->scrub_pause_wait,
3080 atomic_read(&fs_info->scrubs_running) == 0);
3081 mutex_lock(&fs_info->scrub_lock);
3082 }
3083 atomic_dec(&fs_info->scrub_cancel_req);
3084 mutex_unlock(&fs_info->scrub_lock);
3085
3086 return 0;
3087}
3088
3089int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
3090{
3091 struct btrfs_fs_info *fs_info = dev->fs_info;
3092 struct scrub_ctx *sctx;
3093
3094 mutex_lock(&fs_info->scrub_lock);
3095 sctx = dev->scrub_ctx;
3096 if (!sctx) {
3097 mutex_unlock(&fs_info->scrub_lock);
3098 return -ENOTCONN;
3099 }
3100 atomic_inc(&sctx->cancel_req);
3101 while (dev->scrub_ctx) {
3102 mutex_unlock(&fs_info->scrub_lock);
3103 wait_event(fs_info->scrub_pause_wait,
3104 dev->scrub_ctx == NULL);
3105 mutex_lock(&fs_info->scrub_lock);
3106 }
3107 mutex_unlock(&fs_info->scrub_lock);
3108
3109 return 0;
3110}
3111
3112int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3113 struct btrfs_scrub_progress *progress)
3114{
3115 struct btrfs_dev_lookup_args args = { .devid = devid };
3116 struct btrfs_device *dev;
3117 struct scrub_ctx *sctx = NULL;
3118
3119 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3120 dev = btrfs_find_device(fs_info->fs_devices, &args);
3121 if (dev)
3122 sctx = dev->scrub_ctx;
3123 if (sctx)
3124 memcpy(progress, &sctx->stat, sizeof(*progress));
3125 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3126
3127 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3128}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 */
5
6#include <linux/blkdev.h>
7#include <linux/ratelimit.h>
8#include <linux/sched/mm.h>
9#include <crypto/hash.h>
10#include "ctree.h"
11#include "discard.h"
12#include "volumes.h"
13#include "disk-io.h"
14#include "ordered-data.h"
15#include "transaction.h"
16#include "backref.h"
17#include "extent_io.h"
18#include "dev-replace.h"
19#include "check-integrity.h"
20#include "rcu-string.h"
21#include "raid56.h"
22#include "block-group.h"
23#include "zoned.h"
24
25/*
26 * This is only the first step towards a full-features scrub. It reads all
27 * extent and super block and verifies the checksums. In case a bad checksum
28 * is found or the extent cannot be read, good data will be written back if
29 * any can be found.
30 *
31 * Future enhancements:
32 * - In case an unrepairable extent is encountered, track which files are
33 * affected and report them
34 * - track and record media errors, throw out bad devices
35 * - add a mode to also read unallocated space
36 */
37
38struct scrub_block;
39struct scrub_ctx;
40
41/*
42 * the following three values only influence the performance.
43 * The last one configures the number of parallel and outstanding I/O
44 * operations. The first two values configure an upper limit for the number
45 * of (dynamically allocated) pages that are added to a bio.
46 */
47#define SCRUB_PAGES_PER_RD_BIO 32 /* 128k per bio */
48#define SCRUB_PAGES_PER_WR_BIO 32 /* 128k per bio */
49#define SCRUB_BIOS_PER_SCTX 64 /* 8MB per device in flight */
50
51/*
52 * the following value times PAGE_SIZE needs to be large enough to match the
53 * largest node/leaf/sector size that shall be supported.
54 * Values larger than BTRFS_STRIPE_LEN are not supported.
55 */
56#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
57
58struct scrub_recover {
59 refcount_t refs;
60 struct btrfs_bio *bbio;
61 u64 map_length;
62};
63
64struct scrub_page {
65 struct scrub_block *sblock;
66 struct page *page;
67 struct btrfs_device *dev;
68 struct list_head list;
69 u64 flags; /* extent flags */
70 u64 generation;
71 u64 logical;
72 u64 physical;
73 u64 physical_for_dev_replace;
74 atomic_t refs;
75 u8 mirror_num;
76 int have_csum:1;
77 int io_error:1;
78 u8 csum[BTRFS_CSUM_SIZE];
79
80 struct scrub_recover *recover;
81};
82
83struct scrub_bio {
84 int index;
85 struct scrub_ctx *sctx;
86 struct btrfs_device *dev;
87 struct bio *bio;
88 blk_status_t status;
89 u64 logical;
90 u64 physical;
91#if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
92 struct scrub_page *pagev[SCRUB_PAGES_PER_WR_BIO];
93#else
94 struct scrub_page *pagev[SCRUB_PAGES_PER_RD_BIO];
95#endif
96 int page_count;
97 int next_free;
98 struct btrfs_work work;
99};
100
101struct scrub_block {
102 struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
103 int page_count;
104 atomic_t outstanding_pages;
105 refcount_t refs; /* free mem on transition to zero */
106 struct scrub_ctx *sctx;
107 struct scrub_parity *sparity;
108 struct {
109 unsigned int header_error:1;
110 unsigned int checksum_error:1;
111 unsigned int no_io_error_seen:1;
112 unsigned int generation_error:1; /* also sets header_error */
113
114 /* The following is for the data used to check parity */
115 /* It is for the data with checksum */
116 unsigned int data_corrected:1;
117 };
118 struct btrfs_work work;
119};
120
121/* Used for the chunks with parity stripe such RAID5/6 */
122struct scrub_parity {
123 struct scrub_ctx *sctx;
124
125 struct btrfs_device *scrub_dev;
126
127 u64 logic_start;
128
129 u64 logic_end;
130
131 int nsectors;
132
133 u32 stripe_len;
134
135 refcount_t refs;
136
137 struct list_head spages;
138
139 /* Work of parity check and repair */
140 struct btrfs_work work;
141
142 /* Mark the parity blocks which have data */
143 unsigned long *dbitmap;
144
145 /*
146 * Mark the parity blocks which have data, but errors happen when
147 * read data or check data
148 */
149 unsigned long *ebitmap;
150
151 unsigned long bitmap[];
152};
153
154struct scrub_ctx {
155 struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
156 struct btrfs_fs_info *fs_info;
157 int first_free;
158 int curr;
159 atomic_t bios_in_flight;
160 atomic_t workers_pending;
161 spinlock_t list_lock;
162 wait_queue_head_t list_wait;
163 struct list_head csum_list;
164 atomic_t cancel_req;
165 int readonly;
166 int pages_per_rd_bio;
167
168 /* State of IO submission throttling affecting the associated device */
169 ktime_t throttle_deadline;
170 u64 throttle_sent;
171
172 int is_dev_replace;
173 u64 write_pointer;
174
175 struct scrub_bio *wr_curr_bio;
176 struct mutex wr_lock;
177 int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
178 struct btrfs_device *wr_tgtdev;
179 bool flush_all_writes;
180
181 /*
182 * statistics
183 */
184 struct btrfs_scrub_progress stat;
185 spinlock_t stat_lock;
186
187 /*
188 * Use a ref counter to avoid use-after-free issues. Scrub workers
189 * decrement bios_in_flight and workers_pending and then do a wakeup
190 * on the list_wait wait queue. We must ensure the main scrub task
191 * doesn't free the scrub context before or while the workers are
192 * doing the wakeup() call.
193 */
194 refcount_t refs;
195};
196
197struct scrub_warning {
198 struct btrfs_path *path;
199 u64 extent_item_size;
200 const char *errstr;
201 u64 physical;
202 u64 logical;
203 struct btrfs_device *dev;
204};
205
206struct full_stripe_lock {
207 struct rb_node node;
208 u64 logical;
209 u64 refs;
210 struct mutex mutex;
211};
212
213static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
214 struct scrub_block *sblocks_for_recheck);
215static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
216 struct scrub_block *sblock,
217 int retry_failed_mirror);
218static void scrub_recheck_block_checksum(struct scrub_block *sblock);
219static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
220 struct scrub_block *sblock_good);
221static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
222 struct scrub_block *sblock_good,
223 int page_num, int force_write);
224static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
225static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
226 int page_num);
227static int scrub_checksum_data(struct scrub_block *sblock);
228static int scrub_checksum_tree_block(struct scrub_block *sblock);
229static int scrub_checksum_super(struct scrub_block *sblock);
230static void scrub_block_put(struct scrub_block *sblock);
231static void scrub_page_get(struct scrub_page *spage);
232static void scrub_page_put(struct scrub_page *spage);
233static void scrub_parity_get(struct scrub_parity *sparity);
234static void scrub_parity_put(struct scrub_parity *sparity);
235static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
236 u64 physical, struct btrfs_device *dev, u64 flags,
237 u64 gen, int mirror_num, u8 *csum,
238 u64 physical_for_dev_replace);
239static void scrub_bio_end_io(struct bio *bio);
240static void scrub_bio_end_io_worker(struct btrfs_work *work);
241static void scrub_block_complete(struct scrub_block *sblock);
242static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
243 u64 extent_logical, u32 extent_len,
244 u64 *extent_physical,
245 struct btrfs_device **extent_dev,
246 int *extent_mirror_num);
247static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
248 struct scrub_page *spage);
249static void scrub_wr_submit(struct scrub_ctx *sctx);
250static void scrub_wr_bio_end_io(struct bio *bio);
251static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
252static void scrub_put_ctx(struct scrub_ctx *sctx);
253
254static inline int scrub_is_page_on_raid56(struct scrub_page *spage)
255{
256 return spage->recover &&
257 (spage->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
258}
259
260static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
261{
262 refcount_inc(&sctx->refs);
263 atomic_inc(&sctx->bios_in_flight);
264}
265
266static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
267{
268 atomic_dec(&sctx->bios_in_flight);
269 wake_up(&sctx->list_wait);
270 scrub_put_ctx(sctx);
271}
272
273static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
274{
275 while (atomic_read(&fs_info->scrub_pause_req)) {
276 mutex_unlock(&fs_info->scrub_lock);
277 wait_event(fs_info->scrub_pause_wait,
278 atomic_read(&fs_info->scrub_pause_req) == 0);
279 mutex_lock(&fs_info->scrub_lock);
280 }
281}
282
283static void scrub_pause_on(struct btrfs_fs_info *fs_info)
284{
285 atomic_inc(&fs_info->scrubs_paused);
286 wake_up(&fs_info->scrub_pause_wait);
287}
288
289static void scrub_pause_off(struct btrfs_fs_info *fs_info)
290{
291 mutex_lock(&fs_info->scrub_lock);
292 __scrub_blocked_if_needed(fs_info);
293 atomic_dec(&fs_info->scrubs_paused);
294 mutex_unlock(&fs_info->scrub_lock);
295
296 wake_up(&fs_info->scrub_pause_wait);
297}
298
299static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
300{
301 scrub_pause_on(fs_info);
302 scrub_pause_off(fs_info);
303}
304
305/*
306 * Insert new full stripe lock into full stripe locks tree
307 *
308 * Return pointer to existing or newly inserted full_stripe_lock structure if
309 * everything works well.
310 * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
311 *
312 * NOTE: caller must hold full_stripe_locks_root->lock before calling this
313 * function
314 */
315static struct full_stripe_lock *insert_full_stripe_lock(
316 struct btrfs_full_stripe_locks_tree *locks_root,
317 u64 fstripe_logical)
318{
319 struct rb_node **p;
320 struct rb_node *parent = NULL;
321 struct full_stripe_lock *entry;
322 struct full_stripe_lock *ret;
323
324 lockdep_assert_held(&locks_root->lock);
325
326 p = &locks_root->root.rb_node;
327 while (*p) {
328 parent = *p;
329 entry = rb_entry(parent, struct full_stripe_lock, node);
330 if (fstripe_logical < entry->logical) {
331 p = &(*p)->rb_left;
332 } else if (fstripe_logical > entry->logical) {
333 p = &(*p)->rb_right;
334 } else {
335 entry->refs++;
336 return entry;
337 }
338 }
339
340 /*
341 * Insert new lock.
342 */
343 ret = kmalloc(sizeof(*ret), GFP_KERNEL);
344 if (!ret)
345 return ERR_PTR(-ENOMEM);
346 ret->logical = fstripe_logical;
347 ret->refs = 1;
348 mutex_init(&ret->mutex);
349
350 rb_link_node(&ret->node, parent, p);
351 rb_insert_color(&ret->node, &locks_root->root);
352 return ret;
353}
354
355/*
356 * Search for a full stripe lock of a block group
357 *
358 * Return pointer to existing full stripe lock if found
359 * Return NULL if not found
360 */
361static struct full_stripe_lock *search_full_stripe_lock(
362 struct btrfs_full_stripe_locks_tree *locks_root,
363 u64 fstripe_logical)
364{
365 struct rb_node *node;
366 struct full_stripe_lock *entry;
367
368 lockdep_assert_held(&locks_root->lock);
369
370 node = locks_root->root.rb_node;
371 while (node) {
372 entry = rb_entry(node, struct full_stripe_lock, node);
373 if (fstripe_logical < entry->logical)
374 node = node->rb_left;
375 else if (fstripe_logical > entry->logical)
376 node = node->rb_right;
377 else
378 return entry;
379 }
380 return NULL;
381}
382
383/*
384 * Helper to get full stripe logical from a normal bytenr.
385 *
386 * Caller must ensure @cache is a RAID56 block group.
387 */
388static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
389{
390 u64 ret;
391
392 /*
393 * Due to chunk item size limit, full stripe length should not be
394 * larger than U32_MAX. Just a sanity check here.
395 */
396 WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
397
398 /*
399 * round_down() can only handle power of 2, while RAID56 full
400 * stripe length can be 64KiB * n, so we need to manually round down.
401 */
402 ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
403 cache->full_stripe_len + cache->start;
404 return ret;
405}
406
407/*
408 * Lock a full stripe to avoid concurrency of recovery and read
409 *
410 * It's only used for profiles with parities (RAID5/6), for other profiles it
411 * does nothing.
412 *
413 * Return 0 if we locked full stripe covering @bytenr, with a mutex held.
414 * So caller must call unlock_full_stripe() at the same context.
415 *
416 * Return <0 if encounters error.
417 */
418static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
419 bool *locked_ret)
420{
421 struct btrfs_block_group *bg_cache;
422 struct btrfs_full_stripe_locks_tree *locks_root;
423 struct full_stripe_lock *existing;
424 u64 fstripe_start;
425 int ret = 0;
426
427 *locked_ret = false;
428 bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
429 if (!bg_cache) {
430 ASSERT(0);
431 return -ENOENT;
432 }
433
434 /* Profiles not based on parity don't need full stripe lock */
435 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
436 goto out;
437 locks_root = &bg_cache->full_stripe_locks_root;
438
439 fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
440
441 /* Now insert the full stripe lock */
442 mutex_lock(&locks_root->lock);
443 existing = insert_full_stripe_lock(locks_root, fstripe_start);
444 mutex_unlock(&locks_root->lock);
445 if (IS_ERR(existing)) {
446 ret = PTR_ERR(existing);
447 goto out;
448 }
449 mutex_lock(&existing->mutex);
450 *locked_ret = true;
451out:
452 btrfs_put_block_group(bg_cache);
453 return ret;
454}
455
456/*
457 * Unlock a full stripe.
458 *
459 * NOTE: Caller must ensure it's the same context calling corresponding
460 * lock_full_stripe().
461 *
462 * Return 0 if we unlock full stripe without problem.
463 * Return <0 for error
464 */
465static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
466 bool locked)
467{
468 struct btrfs_block_group *bg_cache;
469 struct btrfs_full_stripe_locks_tree *locks_root;
470 struct full_stripe_lock *fstripe_lock;
471 u64 fstripe_start;
472 bool freeit = false;
473 int ret = 0;
474
475 /* If we didn't acquire full stripe lock, no need to continue */
476 if (!locked)
477 return 0;
478
479 bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
480 if (!bg_cache) {
481 ASSERT(0);
482 return -ENOENT;
483 }
484 if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
485 goto out;
486
487 locks_root = &bg_cache->full_stripe_locks_root;
488 fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
489
490 mutex_lock(&locks_root->lock);
491 fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
492 /* Unpaired unlock_full_stripe() detected */
493 if (!fstripe_lock) {
494 WARN_ON(1);
495 ret = -ENOENT;
496 mutex_unlock(&locks_root->lock);
497 goto out;
498 }
499
500 if (fstripe_lock->refs == 0) {
501 WARN_ON(1);
502 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
503 fstripe_lock->logical);
504 } else {
505 fstripe_lock->refs--;
506 }
507
508 if (fstripe_lock->refs == 0) {
509 rb_erase(&fstripe_lock->node, &locks_root->root);
510 freeit = true;
511 }
512 mutex_unlock(&locks_root->lock);
513
514 mutex_unlock(&fstripe_lock->mutex);
515 if (freeit)
516 kfree(fstripe_lock);
517out:
518 btrfs_put_block_group(bg_cache);
519 return ret;
520}
521
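/*
 * A typical lock/unlock pairing, as done by scrub_handle_errored_block()
 * below (sketch):
 *
 *	bool locked;
 *
 *	ret = lock_full_stripe(fs_info, logical, &locked);
 *	if (ret < 0)
 *		return ret;
 *	... repair the errored block ...
 *	ret = unlock_full_stripe(fs_info, logical, locked);
 */
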
522static void scrub_free_csums(struct scrub_ctx *sctx)
523{
524 while (!list_empty(&sctx->csum_list)) {
525 struct btrfs_ordered_sum *sum;
526 sum = list_first_entry(&sctx->csum_list,
527 struct btrfs_ordered_sum, list);
528 list_del(&sum->list);
529 kfree(sum);
530 }
531}
532
533static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
534{
535 int i;
536
537 if (!sctx)
538 return;
539
540 /* this can happen when scrub is cancelled */
541 if (sctx->curr != -1) {
542 struct scrub_bio *sbio = sctx->bios[sctx->curr];
543
544 for (i = 0; i < sbio->page_count; i++) {
545 WARN_ON(!sbio->pagev[i]->page);
546 scrub_block_put(sbio->pagev[i]->sblock);
547 }
548 bio_put(sbio->bio);
549 }
550
551 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
552 struct scrub_bio *sbio = sctx->bios[i];
553
554 if (!sbio)
555 break;
556 kfree(sbio);
557 }
558
559 kfree(sctx->wr_curr_bio);
560 scrub_free_csums(sctx);
561 kfree(sctx);
562}
563
564static void scrub_put_ctx(struct scrub_ctx *sctx)
565{
566 if (refcount_dec_and_test(&sctx->refs))
567 scrub_free_ctx(sctx);
568}
569
570static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
571 struct btrfs_fs_info *fs_info, int is_dev_replace)
572{
573 struct scrub_ctx *sctx;
574 int i;
575
576 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
577 if (!sctx)
578 goto nomem;
579 refcount_set(&sctx->refs, 1);
580 sctx->is_dev_replace = is_dev_replace;
581 sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
582 sctx->curr = -1;
583 sctx->fs_info = fs_info;
584 INIT_LIST_HEAD(&sctx->csum_list);
585 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
586 struct scrub_bio *sbio;
587
588 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
589 if (!sbio)
590 goto nomem;
591 sctx->bios[i] = sbio;
592
593 sbio->index = i;
594 sbio->sctx = sctx;
595 sbio->page_count = 0;
596 btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
597 NULL);
598
599 if (i != SCRUB_BIOS_PER_SCTX - 1)
600 sctx->bios[i]->next_free = i + 1;
601 else
602 sctx->bios[i]->next_free = -1;
603 }
604 sctx->first_free = 0;
605 atomic_set(&sctx->bios_in_flight, 0);
606 atomic_set(&sctx->workers_pending, 0);
607 atomic_set(&sctx->cancel_req, 0);
608
609 spin_lock_init(&sctx->list_lock);
610 spin_lock_init(&sctx->stat_lock);
611 init_waitqueue_head(&sctx->list_wait);
612 sctx->throttle_deadline = 0;
613
614 WARN_ON(sctx->wr_curr_bio != NULL);
615 mutex_init(&sctx->wr_lock);
616 sctx->wr_curr_bio = NULL;
617 if (is_dev_replace) {
618 WARN_ON(!fs_info->dev_replace.tgtdev);
619 sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
620 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
621 sctx->flush_all_writes = false;
622 }
623
624 return sctx;
625
626nomem:
627 scrub_free_ctx(sctx);
628 return ERR_PTR(-ENOMEM);
629}
630
631static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
632 void *warn_ctx)
633{
634 u32 nlink;
635 int ret;
636 int i;
637 unsigned nofs_flag;
638 struct extent_buffer *eb;
639 struct btrfs_inode_item *inode_item;
640 struct scrub_warning *swarn = warn_ctx;
641 struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
642 struct inode_fs_paths *ipath = NULL;
643 struct btrfs_root *local_root;
644 struct btrfs_key key;
645
646 local_root = btrfs_get_fs_root(fs_info, root, true);
647 if (IS_ERR(local_root)) {
648 ret = PTR_ERR(local_root);
649 goto err;
650 }
651
	/* This makes the path point to (inum INODE_ITEM ioff). */
655 key.objectid = inum;
656 key.type = BTRFS_INODE_ITEM_KEY;
657 key.offset = 0;
658
659 ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
660 if (ret) {
661 btrfs_put_root(local_root);
662 btrfs_release_path(swarn->path);
663 goto err;
664 }
665
666 eb = swarn->path->nodes[0];
667 inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
668 struct btrfs_inode_item);
669 nlink = btrfs_inode_nlink(eb, inode_item);
670 btrfs_release_path(swarn->path);
671
	/*
	 * init_ipath() might indirectly call vmalloc, or use GFP_KERNEL.
	 * Scrub uses GFP_NOFS in this context, so we keep it consistent but
	 * it does not seem to be strictly necessary.
	 */
677 nofs_flag = memalloc_nofs_save();
678 ipath = init_ipath(4096, local_root, swarn->path);
679 memalloc_nofs_restore(nofs_flag);
680 if (IS_ERR(ipath)) {
681 btrfs_put_root(local_root);
682 ret = PTR_ERR(ipath);
683 ipath = NULL;
684 goto err;
685 }
686 ret = paths_from_inode(inum, ipath);
687
688 if (ret < 0)
689 goto err;
690
	/*
	 * We deliberately ignore that ipath might have been too small to
	 * hold all of the paths here.
	 */
695 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
696 btrfs_warn_in_rcu(fs_info,
697"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
698 swarn->errstr, swarn->logical,
699 rcu_str_deref(swarn->dev->name),
700 swarn->physical,
701 root, inum, offset,
702 fs_info->sectorsize, nlink,
703 (char *)(unsigned long)ipath->fspath->val[i]);
704
705 btrfs_put_root(local_root);
706 free_ipath(ipath);
707 return 0;
708
709err:
710 btrfs_warn_in_rcu(fs_info,
711 "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
712 swarn->errstr, swarn->logical,
713 rcu_str_deref(swarn->dev->name),
714 swarn->physical,
715 root, inum, offset, ret);
716
717 free_ipath(ipath);
718 return 0;
719}
720
721static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
722{
723 struct btrfs_device *dev;
724 struct btrfs_fs_info *fs_info;
725 struct btrfs_path *path;
726 struct btrfs_key found_key;
727 struct extent_buffer *eb;
728 struct btrfs_extent_item *ei;
729 struct scrub_warning swarn;
730 unsigned long ptr = 0;
731 u64 extent_item_pos;
732 u64 flags = 0;
733 u64 ref_root;
734 u32 item_size;
735 u8 ref_level = 0;
736 int ret;
737
738 WARN_ON(sblock->page_count < 1);
739 dev = sblock->pagev[0]->dev;
740 fs_info = sblock->sctx->fs_info;
741
742 path = btrfs_alloc_path();
743 if (!path)
744 return;
745
746 swarn.physical = sblock->pagev[0]->physical;
747 swarn.logical = sblock->pagev[0]->logical;
748 swarn.errstr = errstr;
749 swarn.dev = NULL;
750
751 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
752 &flags);
753 if (ret < 0)
754 goto out;
755
756 extent_item_pos = swarn.logical - found_key.objectid;
757 swarn.extent_item_size = found_key.offset;
758
759 eb = path->nodes[0];
760 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
761 item_size = btrfs_item_size_nr(eb, path->slots[0]);
762
763 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
764 do {
765 ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
766 item_size, &ref_root,
767 &ref_level);
768 btrfs_warn_in_rcu(fs_info,
769"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
770 errstr, swarn.logical,
771 rcu_str_deref(dev->name),
772 swarn.physical,
773 ref_level ? "node" : "leaf",
774 ret < 0 ? -1 : ref_level,
775 ret < 0 ? -1 : ref_root);
776 } while (ret != 1);
777 btrfs_release_path(path);
778 } else {
779 btrfs_release_path(path);
780 swarn.path = path;
781 swarn.dev = dev;
782 iterate_extent_inodes(fs_info, found_key.objectid,
783 extent_item_pos, 1,
784 scrub_print_warning_inode, &swarn, false);
785 }
786
787out:
788 btrfs_free_path(path);
789}
790
791static inline void scrub_get_recover(struct scrub_recover *recover)
792{
793 refcount_inc(&recover->refs);
794}
795
796static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
797 struct scrub_recover *recover)
798{
799 if (refcount_dec_and_test(&recover->refs)) {
800 btrfs_bio_counter_dec(fs_info);
801 btrfs_put_bbio(recover->bbio);
802 kfree(recover);
803 }
804}
805
806/*
807 * scrub_handle_errored_block gets called when either verification of the
808 * pages failed or the bio failed to read, e.g. with EIO. In the latter
809 * case, this function handles all pages in the bio, even though only one
810 * may be bad.
811 * The goal of this function is to repair the errored block by using the
812 * contents of one of the mirrors.
813 */
814static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
815{
816 struct scrub_ctx *sctx = sblock_to_check->sctx;
817 struct btrfs_device *dev;
818 struct btrfs_fs_info *fs_info;
819 u64 logical;
820 unsigned int failed_mirror_index;
821 unsigned int is_metadata;
822 unsigned int have_csum;
823 struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
824 struct scrub_block *sblock_bad;
825 int ret;
826 int mirror_index;
827 int page_num;
828 int success;
829 bool full_stripe_locked;
830 unsigned int nofs_flag;
831 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
832 DEFAULT_RATELIMIT_BURST);
833
834 BUG_ON(sblock_to_check->page_count < 1);
835 fs_info = sctx->fs_info;
836 if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
		/*
		 * If we find an error in a super block, we just report it.
		 * Super blocks get rewritten with the next transaction
		 * commit anyway.
		 */
842 spin_lock(&sctx->stat_lock);
843 ++sctx->stat.super_errors;
844 spin_unlock(&sctx->stat_lock);
845 return 0;
846 }
847 logical = sblock_to_check->pagev[0]->logical;
848 BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
849 failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
850 is_metadata = !(sblock_to_check->pagev[0]->flags &
851 BTRFS_EXTENT_FLAG_DATA);
852 have_csum = sblock_to_check->pagev[0]->have_csum;
853 dev = sblock_to_check->pagev[0]->dev;
854
855 if (btrfs_is_zoned(fs_info) && !sctx->is_dev_replace)
856 return btrfs_repair_one_zone(fs_info, logical);
857
	/*
	 * We must use GFP_NOFS because the scrub task might be waiting for a
	 * worker task executing this function and in turn a transaction commit
	 * might be waiting for the scrub task to pause (which needs to wait for
	 * all the worker tasks to complete before pausing).
	 * We do allocations in the workers through insert_full_stripe_lock()
	 * and scrub_add_page_to_wr_bio(), which happen down the call chain of
	 * this function.
	 */
867 nofs_flag = memalloc_nofs_save();
	/*
	 * For RAID5/6, a race can happen between scrub threads of different
	 * devices: on data corruption, both the parity and the data stripe
	 * threads will try to recover the data.
	 * The race can lead to doubly added csum errors, or even an
	 * unrecoverable error.
	 */
875 ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
876 if (ret < 0) {
877 memalloc_nofs_restore(nofs_flag);
878 spin_lock(&sctx->stat_lock);
879 if (ret == -ENOMEM)
880 sctx->stat.malloc_errors++;
881 sctx->stat.read_errors++;
882 sctx->stat.uncorrectable_errors++;
883 spin_unlock(&sctx->stat_lock);
884 return ret;
885 }
886
	/*
	 * Read all mirrors one after the other. This includes re-reading the
	 * extent or metadata block that failed (which is the reason this
	 * fixup code is called), this time sector by sector, in order to
	 * know which sectors caused I/O errors and which ones are good (for
	 * all mirrors).
	 * The goal is to handle the situation when more than one mirror
	 * contains I/O errors, but the errors do not overlap, i.e. the data
	 * can be repaired by selecting the sectors from those mirrors without
	 * I/O error on the particular sectors. One example (with blocks >=
	 * 2 * sectorsize) would be that mirror #1 has an I/O error on the
	 * first sector, the second sector is good, and mirror #2 has an I/O
	 * error on the second sector, but the first sector is good.
	 * Then the first sector of the first mirror can be repaired by taking
	 * the first sector of the second mirror, and the second sector of the
	 * second mirror can be repaired by copying the contents of the 2nd
	 * sector of the 1st mirror.
	 * One more note: if the sectors of one mirror contain I/O errors, the
	 * checksum cannot be verified. In order to get the best data for
	 * repairing, the first attempt is to find a mirror without I/O errors
	 * and with a validated checksum. Only if this is not possible are the
	 * sectors picked from mirrors with I/O errors, without considering
	 * the checksum.
	 * If the latter is the case, at the end the checksum of the repaired
	 * area is verified in order to correctly maintain the statistics.
	 */
915
916 sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
917 sizeof(*sblocks_for_recheck), GFP_KERNEL);
918 if (!sblocks_for_recheck) {
919 spin_lock(&sctx->stat_lock);
920 sctx->stat.malloc_errors++;
921 sctx->stat.read_errors++;
922 sctx->stat.uncorrectable_errors++;
923 spin_unlock(&sctx->stat_lock);
924 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
925 goto out;
926 }
927
	/* Set up the context, map the logical blocks and alloc the pages. */
929 ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
930 if (ret) {
931 spin_lock(&sctx->stat_lock);
932 sctx->stat.read_errors++;
933 sctx->stat.uncorrectable_errors++;
934 spin_unlock(&sctx->stat_lock);
935 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
936 goto out;
937 }
938 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
939 sblock_bad = sblocks_for_recheck + failed_mirror_index;
940
941 /* build and submit the bios for the failed mirror, check checksums */
942 scrub_recheck_block(fs_info, sblock_bad, 1);
943
944 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
945 sblock_bad->no_io_error_seen) {
		/*
		 * The error disappeared after reading page by page, or
		 * the area was part of a huge bio and other parts of the
		 * bio caused I/O errors, or the block layer merged several
		 * read requests into one and the error is caused by a
		 * different bio (usually one of the latter two cases is
		 * the cause).
		 */
954 spin_lock(&sctx->stat_lock);
955 sctx->stat.unverified_errors++;
956 sblock_to_check->data_corrected = 1;
957 spin_unlock(&sctx->stat_lock);
958
959 if (sctx->is_dev_replace)
960 scrub_write_block_to_dev_replace(sblock_bad);
961 goto out;
962 }
963
964 if (!sblock_bad->no_io_error_seen) {
965 spin_lock(&sctx->stat_lock);
966 sctx->stat.read_errors++;
967 spin_unlock(&sctx->stat_lock);
968 if (__ratelimit(&rs))
969 scrub_print_warning("i/o error", sblock_to_check);
970 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
971 } else if (sblock_bad->checksum_error) {
972 spin_lock(&sctx->stat_lock);
973 sctx->stat.csum_errors++;
974 spin_unlock(&sctx->stat_lock);
975 if (__ratelimit(&rs))
976 scrub_print_warning("checksum error", sblock_to_check);
977 btrfs_dev_stat_inc_and_print(dev,
978 BTRFS_DEV_STAT_CORRUPTION_ERRS);
979 } else if (sblock_bad->header_error) {
980 spin_lock(&sctx->stat_lock);
981 sctx->stat.verify_errors++;
982 spin_unlock(&sctx->stat_lock);
983 if (__ratelimit(&rs))
984 scrub_print_warning("checksum/header error",
985 sblock_to_check);
986 if (sblock_bad->generation_error)
987 btrfs_dev_stat_inc_and_print(dev,
988 BTRFS_DEV_STAT_GENERATION_ERRS);
989 else
990 btrfs_dev_stat_inc_and_print(dev,
991 BTRFS_DEV_STAT_CORRUPTION_ERRS);
992 }
993
994 if (sctx->readonly) {
995 ASSERT(!sctx->is_dev_replace);
996 goto out;
997 }
998
	/*
	 * Now build and submit the bios for the other mirrors, check
	 * checksums.
	 * First try to pick a mirror which is completely without I/O
	 * errors and also does not have a checksum error.
	 * If one is found, and if a checksum is present, the full block
	 * that is known to contain an error is rewritten. Afterwards
	 * the block is known to be corrected.
	 * If a mirror is found which is completely correct, and no
	 * checksum is present, only those pages are rewritten that had
	 * an I/O error in the block to be repaired, since it cannot be
	 * determined which copy of the other pages is better (and
	 * otherwise a correct page could be overwritten by a bad one).
	 */
1014 for (mirror_index = 0; ;mirror_index++) {
1015 struct scrub_block *sblock_other;
1016
1017 if (mirror_index == failed_mirror_index)
1018 continue;
1019
1020 /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
1021 if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
1022 if (mirror_index >= BTRFS_MAX_MIRRORS)
1023 break;
1024 if (!sblocks_for_recheck[mirror_index].page_count)
1025 break;
1026
1027 sblock_other = sblocks_for_recheck + mirror_index;
1028 } else {
1029 struct scrub_recover *r = sblock_bad->pagev[0]->recover;
1030 int max_allowed = r->bbio->num_stripes -
1031 r->bbio->num_tgtdevs;
1032
1033 if (mirror_index >= max_allowed)
1034 break;
1035 if (!sblocks_for_recheck[1].page_count)
1036 break;
1037
1038 ASSERT(failed_mirror_index == 0);
1039 sblock_other = sblocks_for_recheck + 1;
1040 sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
1041 }
1042
1043 /* build and submit the bios, check checksums */
1044 scrub_recheck_block(fs_info, sblock_other, 0);
1045
1046 if (!sblock_other->header_error &&
1047 !sblock_other->checksum_error &&
1048 sblock_other->no_io_error_seen) {
1049 if (sctx->is_dev_replace) {
1050 scrub_write_block_to_dev_replace(sblock_other);
1051 goto corrected_error;
1052 } else {
1053 ret = scrub_repair_block_from_good_copy(
1054 sblock_bad, sblock_other);
1055 if (!ret)
1056 goto corrected_error;
1057 }
1058 }
1059 }
1060
1061 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1062 goto did_not_correct_error;
1063
	/*
	 * In case of I/O errors in the area that is supposed to be
	 * repaired, continue by picking good copies of those sectors.
	 * Select the good sectors from mirrors to rewrite bad sectors from
	 * the area to fix. Afterwards verify the checksum of the block
	 * that is supposed to be repaired. This verification step is only
	 * done for the purpose of statistics counting and for the final
	 * scrub report on whether errors remain.
	 * A perfect algorithm could make use of the checksum and try all
	 * possible combinations of sectors from the different mirrors until
	 * the checksum verification succeeds. For example, when the 2nd
	 * sector of mirror #1 faces I/O errors, and the 2nd sector of
	 * mirror #2 is readable but the final checksum test fails, then
	 * the 2nd sector of mirror #3 could be tried to see whether the
	 * final checksum then succeeds. But this would be a rare exception
	 * and is therefore not implemented. At least overwriting the good
	 * copy is avoided.
	 * A more useful improvement would be to pick the sectors without
	 * I/O error based on sector sizes (512 bytes on legacy disks)
	 * instead of on sectorsize. Then maybe 512 bytes of one mirror
	 * could be repaired by taking 512 bytes of a different mirror,
	 * even if other 512 byte sectors in the same sectorsize area are
	 * unreadable.
	 */
1088 success = 1;
1089 for (page_num = 0; page_num < sblock_bad->page_count;
1090 page_num++) {
1091 struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
1092 struct scrub_block *sblock_other = NULL;
1093
		/* Skip pages without I/O error, unless doing dev replace. */
1095 if (!spage_bad->io_error && !sctx->is_dev_replace)
1096 continue;
1097
1098 if (scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
			/*
			 * In case of dev replace, if the raid56 rebuild
			 * process didn't produce correct data, then copy the
			 * content in sblock_bad to make sure the target
			 * device is identical to the source device, instead
			 * of writing garbage data from the sblocks_for_recheck
			 * array to the target device.
			 */
1106 sblock_other = NULL;
1107 } else if (spage_bad->io_error) {
1108 /* try to find no-io-error page in mirrors */
1109 for (mirror_index = 0;
1110 mirror_index < BTRFS_MAX_MIRRORS &&
1111 sblocks_for_recheck[mirror_index].page_count > 0;
1112 mirror_index++) {
1113 if (!sblocks_for_recheck[mirror_index].
1114 pagev[page_num]->io_error) {
1115 sblock_other = sblocks_for_recheck +
1116 mirror_index;
1117 break;
1118 }
1119 }
1120 if (!sblock_other)
1121 success = 0;
1122 }
1123
1124 if (sctx->is_dev_replace) {
			/*
			 * Did not find a mirror to fetch the page from.
			 * scrub_write_page_to_dev_replace() handles this case
			 * (page->io_error) by filling the block with zeros
			 * before submitting the write request.
			 */
1132 if (!sblock_other)
1133 sblock_other = sblock_bad;
1134
1135 if (scrub_write_page_to_dev_replace(sblock_other,
1136 page_num) != 0) {
1137 atomic64_inc(
1138 &fs_info->dev_replace.num_write_errors);
1139 success = 0;
1140 }
1141 } else if (sblock_other) {
1142 ret = scrub_repair_page_from_good_copy(sblock_bad,
1143 sblock_other,
1144 page_num, 0);
			if (ret == 0)
1146 spage_bad->io_error = 0;
1147 else
1148 success = 0;
1149 }
1150 }
1151
1152 if (success && !sctx->is_dev_replace) {
1153 if (is_metadata || have_csum) {
1154 /*
1155 * need to verify the checksum now that all
1156 * sectors on disk are repaired (the write
1157 * request for data to be repaired is on its way).
1158 * Just be lazy and use scrub_recheck_block()
1159 * which re-reads the data before the checksum
1160 * is verified, but most likely the data comes out
1161 * of the page cache.
1162 */
1163 scrub_recheck_block(fs_info, sblock_bad, 1);
1164 if (!sblock_bad->header_error &&
1165 !sblock_bad->checksum_error &&
1166 sblock_bad->no_io_error_seen)
1167 goto corrected_error;
1168 else
1169 goto did_not_correct_error;
1170 } else {
1171corrected_error:
1172 spin_lock(&sctx->stat_lock);
1173 sctx->stat.corrected_errors++;
1174 sblock_to_check->data_corrected = 1;
1175 spin_unlock(&sctx->stat_lock);
1176 btrfs_err_rl_in_rcu(fs_info,
1177 "fixed up error at logical %llu on dev %s",
1178 logical, rcu_str_deref(dev->name));
1179 }
1180 } else {
1181did_not_correct_error:
1182 spin_lock(&sctx->stat_lock);
1183 sctx->stat.uncorrectable_errors++;
1184 spin_unlock(&sctx->stat_lock);
1185 btrfs_err_rl_in_rcu(fs_info,
1186 "unable to fixup (regular) error at logical %llu on dev %s",
1187 logical, rcu_str_deref(dev->name));
1188 }
1189
1190out:
1191 if (sblocks_for_recheck) {
1192 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1193 mirror_index++) {
1194 struct scrub_block *sblock = sblocks_for_recheck +
1195 mirror_index;
1196 struct scrub_recover *recover;
1197 int page_index;
1198
1199 for (page_index = 0; page_index < sblock->page_count;
1200 page_index++) {
1201 sblock->pagev[page_index]->sblock = NULL;
1202 recover = sblock->pagev[page_index]->recover;
1203 if (recover) {
1204 scrub_put_recover(fs_info, recover);
1205 sblock->pagev[page_index]->recover =
1206 NULL;
1207 }
1208 scrub_page_put(sblock->pagev[page_index]);
1209 }
1210 }
1211 kfree(sblocks_for_recheck);
1212 }
1213
1214 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
1215 memalloc_nofs_restore(nofs_flag);
1216 if (ret < 0)
1217 return ret;
1218 return 0;
1219}
1220
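/*
 * Number of ways a sector can be read during recovery: for RAID5 either
 * directly or rebuilt from parity (2), for RAID6 additionally from the
 * second parity (3); for other profiles every stripe is a plain mirror.
 */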
1221static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1222{
1223 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1224 return 2;
1225 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1226 return 3;
1227 else
1228 return (int)bbio->num_stripes;
1229}
1230
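/*
 * Map @logical back to a stripe index and the offset into that stripe: for
 * RAID5/6 find the data stripe (skipping the P/Q stripes) whose raid_map
 * range contains @logical; for the other RAID types the mirror number
 * directly indexes the stripe array.
 */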
1231static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
1232 u64 *raid_map,
1233 u64 mapped_length,
1234 int nstripes, int mirror,
1235 int *stripe_index,
1236 u64 *stripe_offset)
1237{
1238 int i;
1239
1240 if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
1241 /* RAID5/6 */
1242 for (i = 0; i < nstripes; i++) {
1243 if (raid_map[i] == RAID6_Q_STRIPE ||
1244 raid_map[i] == RAID5_P_STRIPE)
1245 continue;
1246
1247 if (logical >= raid_map[i] &&
1248 logical < raid_map[i] + mapped_length)
1249 break;
1250 }
1251
1252 *stripe_index = i;
1253 *stripe_offset = logical - raid_map[i];
1254 } else {
1255 /* The other RAID type */
1256 *stripe_index = mirror;
1257 *stripe_offset = 0;
1258 }
1259}
1260
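/*
 * Set up one scrub_block per mirror for the range covered by
 * @original_sblock, mapping every sector to its physical location on each
 * mirror, so that the recheck code can re-read any sector from any mirror
 * individually.
 */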
1261static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
1262 struct scrub_block *sblocks_for_recheck)
1263{
1264 struct scrub_ctx *sctx = original_sblock->sctx;
1265 struct btrfs_fs_info *fs_info = sctx->fs_info;
1266 u64 length = original_sblock->page_count * fs_info->sectorsize;
1267 u64 logical = original_sblock->pagev[0]->logical;
1268 u64 generation = original_sblock->pagev[0]->generation;
1269 u64 flags = original_sblock->pagev[0]->flags;
1270 u64 have_csum = original_sblock->pagev[0]->have_csum;
1271 struct scrub_recover *recover;
1272 struct btrfs_bio *bbio;
1273 u64 sublen;
1274 u64 mapped_length;
1275 u64 stripe_offset;
1276 int stripe_index;
1277 int page_index = 0;
1278 int mirror_index;
1279 int nmirrors;
1280 int ret;
1281
	/*
	 * Note: the two members refs and outstanding_pages are not used (and
	 * not set) in the blocks that are used for the recheck procedure.
	 */
1287
1288 while (length > 0) {
1289 sublen = min_t(u64, length, fs_info->sectorsize);
1290 mapped_length = sublen;
1291 bbio = NULL;
1292
1293 /*
1294 * With a length of sectorsize, each returned stripe represents
1295 * one mirror
1296 */
1297 btrfs_bio_counter_inc_blocked(fs_info);
1298 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
1299 logical, &mapped_length, &bbio);
1300 if (ret || !bbio || mapped_length < sublen) {
1301 btrfs_put_bbio(bbio);
1302 btrfs_bio_counter_dec(fs_info);
1303 return -EIO;
1304 }
1305
1306 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1307 if (!recover) {
1308 btrfs_put_bbio(bbio);
1309 btrfs_bio_counter_dec(fs_info);
1310 return -ENOMEM;
1311 }
1312
1313 refcount_set(&recover->refs, 1);
1314 recover->bbio = bbio;
1315 recover->map_length = mapped_length;
1316
1317 BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK);
1318
1319 nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
1320
1321 for (mirror_index = 0; mirror_index < nmirrors;
1322 mirror_index++) {
1323 struct scrub_block *sblock;
1324 struct scrub_page *spage;
1325
1326 sblock = sblocks_for_recheck + mirror_index;
1327 sblock->sctx = sctx;
1328
1329 spage = kzalloc(sizeof(*spage), GFP_NOFS);
1330 if (!spage) {
1331leave_nomem:
1332 spin_lock(&sctx->stat_lock);
1333 sctx->stat.malloc_errors++;
1334 spin_unlock(&sctx->stat_lock);
1335 scrub_put_recover(fs_info, recover);
1336 return -ENOMEM;
1337 }
1338 scrub_page_get(spage);
1339 sblock->pagev[page_index] = spage;
1340 spage->sblock = sblock;
1341 spage->flags = flags;
1342 spage->generation = generation;
1343 spage->logical = logical;
1344 spage->have_csum = have_csum;
1345 if (have_csum)
1346 memcpy(spage->csum,
1347 original_sblock->pagev[0]->csum,
1348 sctx->fs_info->csum_size);
1349
1350 scrub_stripe_index_and_offset(logical,
1351 bbio->map_type,
1352 bbio->raid_map,
1353 mapped_length,
1354 bbio->num_stripes -
1355 bbio->num_tgtdevs,
1356 mirror_index,
1357 &stripe_index,
1358 &stripe_offset);
1359 spage->physical = bbio->stripes[stripe_index].physical +
1360 stripe_offset;
1361 spage->dev = bbio->stripes[stripe_index].dev;
1362
1363 BUG_ON(page_index >= original_sblock->page_count);
1364 spage->physical_for_dev_replace =
1365 original_sblock->pagev[page_index]->
1366 physical_for_dev_replace;
1367 /* for missing devices, dev->bdev is NULL */
1368 spage->mirror_num = mirror_index + 1;
1369 sblock->page_count++;
1370 spage->page = alloc_page(GFP_NOFS);
1371 if (!spage->page)
1372 goto leave_nomem;
1373
1374 scrub_get_recover(recover);
1375 spage->recover = recover;
1376 }
1377 scrub_put_recover(fs_info, recover);
1378 length -= sublen;
1379 logical += sublen;
1380 page_index++;
1381 }
1382
1383 return 0;
1384}
1385
1386static void scrub_bio_wait_endio(struct bio *bio)
1387{
1388 complete(bio->bi_private);
1389}
1390
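/*
 * Submit a RAID56 parity-recover request for the stripe containing @spage
 * and wait synchronously for its completion.
 */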
1391static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1392 struct bio *bio,
1393 struct scrub_page *spage)
1394{
1395 DECLARE_COMPLETION_ONSTACK(done);
1396 int ret;
1397 int mirror_num;
1398
1399 bio->bi_iter.bi_sector = spage->logical >> 9;
1400 bio->bi_private = &done;
1401 bio->bi_end_io = scrub_bio_wait_endio;
1402
1403 mirror_num = spage->sblock->pagev[0]->mirror_num;
1404 ret = raid56_parity_recover(fs_info, bio, spage->recover->bbio,
1405 spage->recover->map_length,
1406 mirror_num, 0);
1407 if (ret)
1408 return ret;
1409
1410 wait_for_completion_io(&done);
1411 return blk_status_to_errno(bio->bi_status);
1412}
1413
1414static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
1415 struct scrub_block *sblock)
1416{
1417 struct scrub_page *first_page = sblock->pagev[0];
1418 struct bio *bio;
1419 int page_num;
1420
1421 /* All pages in sblock belong to the same stripe on the same device. */
1422 ASSERT(first_page->dev);
1423 if (!first_page->dev->bdev)
1424 goto out;
1425
1426 bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
1427 bio_set_dev(bio, first_page->dev->bdev);
1428
1429 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1430 struct scrub_page *spage = sblock->pagev[page_num];
1431
1432 WARN_ON(!spage->page);
1433 bio_add_page(bio, spage->page, PAGE_SIZE, 0);
1434 }
1435
1436 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) {
1437 bio_put(bio);
1438 goto out;
1439 }
1440
1441 bio_put(bio);
1442
1443 scrub_recheck_block_checksum(sblock);
1444
1445 return;
1446out:
1447 for (page_num = 0; page_num < sblock->page_count; page_num++)
1448 sblock->pagev[page_num]->io_error = 1;
1449
1450 sblock->no_io_error_seen = 0;
1451}
1452
/*
 * This function checks the on-disk data for checksum errors, header errors
 * and read I/O errors. If any I/O error happens, the exact pages that are
 * errored are marked as bad. The goal is to enable scrub to take the
 * non-errored pages from all the mirrors so that the errored pages in the
 * mirror just handled can be repaired.
 */
1460static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1461 struct scrub_block *sblock,
1462 int retry_failed_mirror)
1463{
1464 int page_num;
1465
1466 sblock->no_io_error_seen = 1;
1467
1468 /* short cut for raid56 */
1469 if (!retry_failed_mirror && scrub_is_page_on_raid56(sblock->pagev[0]))
1470 return scrub_recheck_block_on_raid56(fs_info, sblock);
1471
1472 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1473 struct bio *bio;
1474 struct scrub_page *spage = sblock->pagev[page_num];
1475
1476 if (spage->dev->bdev == NULL) {
1477 spage->io_error = 1;
1478 sblock->no_io_error_seen = 0;
1479 continue;
1480 }
1481
1482 WARN_ON(!spage->page);
1483 bio = btrfs_io_bio_alloc(1);
1484 bio_set_dev(bio, spage->dev->bdev);
1485
1486 bio_add_page(bio, spage->page, fs_info->sectorsize, 0);
1487 bio->bi_iter.bi_sector = spage->physical >> 9;
1488 bio->bi_opf = REQ_OP_READ;
1489
1490 if (btrfsic_submit_bio_wait(bio)) {
1491 spage->io_error = 1;
1492 sblock->no_io_error_seen = 0;
1493 }
1494
1495 bio_put(bio);
1496 }
1497
1498 if (sblock->no_io_error_seen)
1499 scrub_recheck_block_checksum(sblock);
1500}
1501
1502static inline int scrub_check_fsid(u8 fsid[],
1503 struct scrub_page *spage)
1504{
1505 struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1506 int ret;
1507
1508 ret = memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1509 return !ret;
1510}
1511
1512static void scrub_recheck_block_checksum(struct scrub_block *sblock)
1513{
1514 sblock->header_error = 0;
1515 sblock->checksum_error = 0;
1516 sblock->generation_error = 0;
1517
1518 if (sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA)
1519 scrub_checksum_data(sblock);
1520 else
1521 scrub_checksum_tree_block(sblock);
1522}
1523
1524static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1525 struct scrub_block *sblock_good)
1526{
1527 int page_num;
1528 int ret = 0;
1529
1530 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1531 int ret_sub;
1532
1533 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1534 sblock_good,
1535 page_num, 1);
1536 if (ret_sub)
1537 ret = ret_sub;
1538 }
1539
1540 return ret;
1541}
1542
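/*
 * Rewrite the sector at @page_num of @sblock_bad with the corresponding
 * content of @sblock_good. With @force_write the sector is written
 * unconditionally, otherwise only if the bad block actually showed header,
 * checksum or I/O errors.
 */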
1543static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1544 struct scrub_block *sblock_good,
1545 int page_num, int force_write)
1546{
1547 struct scrub_page *spage_bad = sblock_bad->pagev[page_num];
1548 struct scrub_page *spage_good = sblock_good->pagev[page_num];
1549 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1550 const u32 sectorsize = fs_info->sectorsize;
1551
1552 BUG_ON(spage_bad->page == NULL);
1553 BUG_ON(spage_good->page == NULL);
1554 if (force_write || sblock_bad->header_error ||
1555 sblock_bad->checksum_error || spage_bad->io_error) {
1556 struct bio *bio;
1557 int ret;
1558
1559 if (!spage_bad->dev->bdev) {
1560 btrfs_warn_rl(fs_info,
1561 "scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
1562 return -EIO;
1563 }
1564
1565 bio = btrfs_io_bio_alloc(1);
1566 bio_set_dev(bio, spage_bad->dev->bdev);
1567 bio->bi_iter.bi_sector = spage_bad->physical >> 9;
1568 bio->bi_opf = REQ_OP_WRITE;
1569
1570 ret = bio_add_page(bio, spage_good->page, sectorsize, 0);
1571 if (ret != sectorsize) {
1572 bio_put(bio);
1573 return -EIO;
1574 }
1575
1576 if (btrfsic_submit_bio_wait(bio)) {
1577 btrfs_dev_stat_inc_and_print(spage_bad->dev,
1578 BTRFS_DEV_STAT_WRITE_ERRS);
1579 atomic64_inc(&fs_info->dev_replace.num_write_errors);
1580 bio_put(bio);
1581 return -EIO;
1582 }
1583 bio_put(bio);
1584 }
1585
1586 return 0;
1587}
1588
1589static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1590{
1591 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1592 int page_num;
1593
	/*
	 * This block is used for the parity check on the source device, so
	 * the data need not be written to the destination device.
	 */
1598 if (sblock->sparity)
1599 return;
1600
1601 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1602 int ret;
1603
1604 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1605 if (ret)
1606 atomic64_inc(&fs_info->dev_replace.num_write_errors);
1607 }
1608}
1609
1610static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1611 int page_num)
1612{
1613 struct scrub_page *spage = sblock->pagev[page_num];
1614
1615 BUG_ON(spage->page == NULL);
1616 if (spage->io_error)
1617 clear_page(page_address(spage->page));
1618
1619 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1620}
1621
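/*
 * On zoned devices all writes must be sequential. If the write pointer of
 * the dev-replace target lags behind @physical, zero out the gap first so
 * that the next write lands exactly at @physical.
 */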
1622static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
1623{
1624 int ret = 0;
1625 u64 length;
1626
1627 if (!btrfs_is_zoned(sctx->fs_info))
1628 return 0;
1629
1630 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
1631 return 0;
1632
1633 if (sctx->write_pointer < physical) {
1634 length = physical - sctx->write_pointer;
1635
1636 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
1637 sctx->write_pointer, length);
1638 if (!ret)
1639 sctx->write_pointer = physical;
1640 }
1641 return ret;
1642}
1643
1644static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1645 struct scrub_page *spage)
1646{
1647 struct scrub_bio *sbio;
1648 int ret;
1649 const u32 sectorsize = sctx->fs_info->sectorsize;
1650
1651 mutex_lock(&sctx->wr_lock);
1652again:
1653 if (!sctx->wr_curr_bio) {
1654 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1655 GFP_KERNEL);
1656 if (!sctx->wr_curr_bio) {
1657 mutex_unlock(&sctx->wr_lock);
1658 return -ENOMEM;
1659 }
1660 sctx->wr_curr_bio->sctx = sctx;
1661 sctx->wr_curr_bio->page_count = 0;
1662 }
1663 sbio = sctx->wr_curr_bio;
1664 if (sbio->page_count == 0) {
1665 struct bio *bio;
1666
1667 ret = fill_writer_pointer_gap(sctx,
1668 spage->physical_for_dev_replace);
1669 if (ret) {
1670 mutex_unlock(&sctx->wr_lock);
1671 return ret;
1672 }
1673
1674 sbio->physical = spage->physical_for_dev_replace;
1675 sbio->logical = spage->logical;
1676 sbio->dev = sctx->wr_tgtdev;
1677 bio = sbio->bio;
1678 if (!bio) {
1679 bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1680 sbio->bio = bio;
1681 }
1682
1683 bio->bi_private = sbio;
1684 bio->bi_end_io = scrub_wr_bio_end_io;
1685 bio_set_dev(bio, sbio->dev->bdev);
1686 bio->bi_iter.bi_sector = sbio->physical >> 9;
1687 bio->bi_opf = REQ_OP_WRITE;
1688 sbio->status = 0;
1689 } else if (sbio->physical + sbio->page_count * sectorsize !=
1690 spage->physical_for_dev_replace ||
1691 sbio->logical + sbio->page_count * sectorsize !=
1692 spage->logical) {
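		/*
		 * The page is not contiguous with the bio built so far:
		 * submit that bio and retry with a fresh one.
		 */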
1693 scrub_wr_submit(sctx);
1694 goto again;
1695 }
1696
1697 ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0);
1698 if (ret != sectorsize) {
1699 if (sbio->page_count < 1) {
1700 bio_put(sbio->bio);
1701 sbio->bio = NULL;
1702 mutex_unlock(&sctx->wr_lock);
1703 return -EIO;
1704 }
1705 scrub_wr_submit(sctx);
1706 goto again;
1707 }
1708
1709 sbio->pagev[sbio->page_count] = spage;
1710 scrub_page_get(spage);
1711 sbio->page_count++;
1712 if (sbio->page_count == sctx->pages_per_wr_bio)
1713 scrub_wr_submit(sctx);
1714 mutex_unlock(&sctx->wr_lock);
1715
1716 return 0;
1717}
1718
1719static void scrub_wr_submit(struct scrub_ctx *sctx)
1720{
1721 struct scrub_bio *sbio;
1722
1723 if (!sctx->wr_curr_bio)
1724 return;
1725
1726 sbio = sctx->wr_curr_bio;
1727 sctx->wr_curr_bio = NULL;
1728 WARN_ON(!sbio->bio->bi_bdev);
1729 scrub_pending_bio_inc(sctx);
	/*
	 * Process all writes in a single worker thread. Then the block layer
	 * orders the requests before sending them to the driver, which
	 * doubled the write performance on spinning disks when measured
	 * with Linux 3.5.
	 */
1734 btrfsic_submit_bio(sbio->bio);
1735
1736 if (btrfs_is_zoned(sctx->fs_info))
1737 sctx->write_pointer = sbio->physical + sbio->page_count *
1738 sctx->fs_info->sectorsize;
1739}
1740
1741static void scrub_wr_bio_end_io(struct bio *bio)
1742{
1743 struct scrub_bio *sbio = bio->bi_private;
1744 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1745
1746 sbio->status = bio->bi_status;
1747 sbio->bio = bio;
1748
1749 btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
1750 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1751}
1752
1753static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1754{
1755 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1756 struct scrub_ctx *sctx = sbio->sctx;
1757 int i;
1758
1759 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1760 if (sbio->status) {
1761 struct btrfs_dev_replace *dev_replace =
1762 &sbio->sctx->fs_info->dev_replace;
1763
1764 for (i = 0; i < sbio->page_count; i++) {
1765 struct scrub_page *spage = sbio->pagev[i];
1766
1767 spage->io_error = 1;
1768 atomic64_inc(&dev_replace->num_write_errors);
1769 }
1770 }
1771
1772 for (i = 0; i < sbio->page_count; i++)
1773 scrub_page_put(sbio->pagev[i]);
1774
1775 bio_put(sbio->bio);
1776 kfree(sbio);
1777 scrub_pending_bio_dec(sctx);
1778}
1779
1780static int scrub_checksum(struct scrub_block *sblock)
1781{
1782 u64 flags;
1783 int ret;
1784
	/*
	 * No need to initialize these stats currently, because the callers
	 * only use the return value instead of these stats.
	 *
	 * TODO: always use the stats.
	 */
1793 sblock->header_error = 0;
1794 sblock->generation_error = 0;
1795 sblock->checksum_error = 0;
1796
1797 WARN_ON(sblock->page_count < 1);
1798 flags = sblock->pagev[0]->flags;
1799 ret = 0;
1800 if (flags & BTRFS_EXTENT_FLAG_DATA)
1801 ret = scrub_checksum_data(sblock);
1802 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1803 ret = scrub_checksum_tree_block(sblock);
1804 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1805 (void)scrub_checksum_super(sblock);
1806 else
1807 WARN_ON(1);
1808 if (ret)
1809 scrub_handle_errored_block(sblock);
1810
1811 return ret;
1812}
1813
1814static int scrub_checksum_data(struct scrub_block *sblock)
1815{
1816 struct scrub_ctx *sctx = sblock->sctx;
1817 struct btrfs_fs_info *fs_info = sctx->fs_info;
1818 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1819 u8 csum[BTRFS_CSUM_SIZE];
1820 struct scrub_page *spage;
1821 char *kaddr;
1822
1823 BUG_ON(sblock->page_count < 1);
1824 spage = sblock->pagev[0];
1825 if (!spage->have_csum)
1826 return 0;
1827
1828 kaddr = page_address(spage->page);
1829
1830 shash->tfm = fs_info->csum_shash;
1831 crypto_shash_init(shash);
1832
1833 /*
1834 * In scrub_pages() and scrub_pages_for_parity() we ensure each spage
1835 * only contains one sector of data.
1836 */
1837 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
1838
1839 if (memcmp(csum, spage->csum, fs_info->csum_size))
1840 sblock->checksum_error = 1;
1841 return sblock->checksum_error;
1842}
1843
1844static int scrub_checksum_tree_block(struct scrub_block *sblock)
1845{
1846 struct scrub_ctx *sctx = sblock->sctx;
1847 struct btrfs_header *h;
1848 struct btrfs_fs_info *fs_info = sctx->fs_info;
1849 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1850 u8 calculated_csum[BTRFS_CSUM_SIZE];
1851 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1852 /*
1853 * This is done in sectorsize steps even for metadata as there's a
1854 * constraint for nodesize to be aligned to sectorsize. This will need
1855 * to change so we don't misuse data and metadata units like that.
1856 */
1857 const u32 sectorsize = sctx->fs_info->sectorsize;
1858 const int num_sectors = fs_info->nodesize >> fs_info->sectorsize_bits;
1859 int i;
1860 struct scrub_page *spage;
1861 char *kaddr;
1862
1863 BUG_ON(sblock->page_count < 1);
1864
1865 /* Each member in pagev is just one block, not a full page */
1866 ASSERT(sblock->page_count == num_sectors);
1867
1868 spage = sblock->pagev[0];
1869 kaddr = page_address(spage->page);
1870 h = (struct btrfs_header *)kaddr;
1871 memcpy(on_disk_csum, h->csum, sctx->fs_info->csum_size);
1872
1873 /*
1874 * we don't use the getter functions here, as we
1875 * a) don't have an extent buffer and
1876 * b) the page is already kmapped
1877 */
1878 if (spage->logical != btrfs_stack_header_bytenr(h))
1879 sblock->header_error = 1;
1880
1881 if (spage->generation != btrfs_stack_header_generation(h)) {
1882 sblock->header_error = 1;
1883 sblock->generation_error = 1;
1884 }
1885
1886 if (!scrub_check_fsid(h->fsid, spage))
1887 sblock->header_error = 1;
1888
1889 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1890 BTRFS_UUID_SIZE))
1891 sblock->header_error = 1;
1892
1893 shash->tfm = fs_info->csum_shash;
1894 crypto_shash_init(shash);
1895 crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
1896 sectorsize - BTRFS_CSUM_SIZE);
1897
1898 for (i = 1; i < num_sectors; i++) {
1899 kaddr = page_address(sblock->pagev[i]->page);
1900 crypto_shash_update(shash, kaddr, sectorsize);
1901 }
1902
1903 crypto_shash_final(shash, calculated_csum);
1904 if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
1905 sblock->checksum_error = 1;
1906
1907 return sblock->header_error || sblock->checksum_error;
1908}
1909
1910static int scrub_checksum_super(struct scrub_block *sblock)
1911{
1912 struct btrfs_super_block *s;
1913 struct scrub_ctx *sctx = sblock->sctx;
1914 struct btrfs_fs_info *fs_info = sctx->fs_info;
1915 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
1916 u8 calculated_csum[BTRFS_CSUM_SIZE];
1917 struct scrub_page *spage;
1918 char *kaddr;
1919 int fail_gen = 0;
1920 int fail_cor = 0;
1921
1922 BUG_ON(sblock->page_count < 1);
1923 spage = sblock->pagev[0];
1924 kaddr = page_address(spage->page);
1925 s = (struct btrfs_super_block *)kaddr;
1926
1927 if (spage->logical != btrfs_super_bytenr(s))
1928 ++fail_cor;
1929
1930 if (spage->generation != btrfs_super_generation(s))
1931 ++fail_gen;
1932
1933 if (!scrub_check_fsid(s->fsid, spage))
1934 ++fail_cor;
1935
1936 shash->tfm = fs_info->csum_shash;
1937 crypto_shash_init(shash);
1938 crypto_shash_digest(shash, kaddr + BTRFS_CSUM_SIZE,
1939 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, calculated_csum);
1940
1941 if (memcmp(calculated_csum, s->csum, sctx->fs_info->csum_size))
1942 ++fail_cor;
1943
1944 if (fail_cor + fail_gen) {
		/*
		 * If we find an error in a super block, we just report it.
		 * Super blocks get rewritten with the next transaction
		 * commit anyway.
		 */
1950 spin_lock(&sctx->stat_lock);
1951 ++sctx->stat.super_errors;
1952 spin_unlock(&sctx->stat_lock);
1953 if (fail_cor)
1954 btrfs_dev_stat_inc_and_print(spage->dev,
1955 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1956 else
1957 btrfs_dev_stat_inc_and_print(spage->dev,
1958 BTRFS_DEV_STAT_GENERATION_ERRS);
1959 }
1960
1961 return fail_cor + fail_gen;
1962}
1963
1964static void scrub_block_get(struct scrub_block *sblock)
1965{
1966 refcount_inc(&sblock->refs);
1967}
1968
1969static void scrub_block_put(struct scrub_block *sblock)
1970{
1971 if (refcount_dec_and_test(&sblock->refs)) {
1972 int i;
1973
1974 if (sblock->sparity)
1975 scrub_parity_put(sblock->sparity);
1976
1977 for (i = 0; i < sblock->page_count; i++)
1978 scrub_page_put(sblock->pagev[i]);
1979 kfree(sblock);
1980 }
1981}
1982
1983static void scrub_page_get(struct scrub_page *spage)
1984{
1985 atomic_inc(&spage->refs);
1986}
1987
1988static void scrub_page_put(struct scrub_page *spage)
1989{
1990 if (atomic_dec_and_test(&spage->refs)) {
1991 if (spage->page)
1992 __free_page(spage->page);
1993 kfree(spage);
1994 }
1995}
1996
/*
 * Throttling of IO submission, bandwidth-limit based. The timeslice is 1
 * second. The limit can be set via
 * /sys/fs/btrfs/<UUID>/devinfo/<devid>/scrub_speed_max.
 */
2001static void scrub_throttle(struct scrub_ctx *sctx)
2002{
2003 const int time_slice = 1000;
2004 struct scrub_bio *sbio;
2005 struct btrfs_device *device;
2006 s64 delta;
2007 ktime_t now;
2008 u32 div;
2009 u64 bwlimit;
2010
2011 sbio = sctx->bios[sctx->curr];
2012 device = sbio->dev;
2013 bwlimit = READ_ONCE(device->scrub_speed_max);
2014 if (bwlimit == 0)
2015 return;
2016
2017 /*
	 * The slice is divided into intervals in which the IO is submitted.
	 * The number of intervals is derived from bwlimit and capped at 64.
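	 *
	 * E.g. a bwlimit of 64MiB/s gives div = 4: up to bwlimit / div =
	 * 16MiB may be submitted within each 1000ms / div = 250ms interval.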
2020 */
2021 div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
2022 div = min_t(u32, 64, div);
2023
2024 /* Start new epoch, set deadline */
2025 now = ktime_get();
2026 if (sctx->throttle_deadline == 0) {
2027 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
2028 sctx->throttle_sent = 0;
2029 }
2030
	/* Still within the time slice? */
2032 if (ktime_before(now, sctx->throttle_deadline)) {
2033 /* If current bio is within the limit, send it */
2034 sctx->throttle_sent += sbio->bio->bi_iter.bi_size;
2035 if (sctx->throttle_sent <= div_u64(bwlimit, div))
2036 return;
2037
2038 /* We're over the limit, sleep until the rest of the slice */
2039 delta = ktime_ms_delta(sctx->throttle_deadline, now);
2040 } else {
2041 /* New request after deadline, start new epoch */
2042 delta = 0;
2043 }
2044
2045 if (delta) {
2046 long timeout;
2047
2048 timeout = div_u64(delta * HZ, 1000);
2049 schedule_timeout_interruptible(timeout);
2050 }
2051
2052 /* Next call will start the deadline period */
2053 sctx->throttle_deadline = 0;
2054}
2055
2056static void scrub_submit(struct scrub_ctx *sctx)
2057{
2058 struct scrub_bio *sbio;
2059
2060 if (sctx->curr == -1)
2061 return;
2062
2063 scrub_throttle(sctx);
2064
2065 sbio = sctx->bios[sctx->curr];
2066 sctx->curr = -1;
2067 scrub_pending_bio_inc(sctx);
2068 btrfsic_submit_bio(sbio->bio);
2069}
2070
2071static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2072 struct scrub_page *spage)
2073{
2074 struct scrub_block *sblock = spage->sblock;
2075 struct scrub_bio *sbio;
2076 const u32 sectorsize = sctx->fs_info->sectorsize;
2077 int ret;
2078
2079again:
	/* Grab a fresh bio or wait for one to become available. */
2083 while (sctx->curr == -1) {
2084 spin_lock(&sctx->list_lock);
2085 sctx->curr = sctx->first_free;
2086 if (sctx->curr != -1) {
2087 sctx->first_free = sctx->bios[sctx->curr]->next_free;
2088 sctx->bios[sctx->curr]->next_free = -1;
2089 sctx->bios[sctx->curr]->page_count = 0;
2090 spin_unlock(&sctx->list_lock);
2091 } else {
2092 spin_unlock(&sctx->list_lock);
2093 wait_event(sctx->list_wait, sctx->first_free != -1);
2094 }
2095 }
2096 sbio = sctx->bios[sctx->curr];
2097 if (sbio->page_count == 0) {
2098 struct bio *bio;
2099
2100 sbio->physical = spage->physical;
2101 sbio->logical = spage->logical;
2102 sbio->dev = spage->dev;
2103 bio = sbio->bio;
2104 if (!bio) {
2105 bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
2106 sbio->bio = bio;
2107 }
2108
2109 bio->bi_private = sbio;
2110 bio->bi_end_io = scrub_bio_end_io;
2111 bio_set_dev(bio, sbio->dev->bdev);
2112 bio->bi_iter.bi_sector = sbio->physical >> 9;
2113 bio->bi_opf = REQ_OP_READ;
2114 sbio->status = 0;
2115 } else if (sbio->physical + sbio->page_count * sectorsize !=
2116 spage->physical ||
2117 sbio->logical + sbio->page_count * sectorsize !=
2118 spage->logical ||
2119 sbio->dev != spage->dev) {
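		/*
		 * The page is not contiguous with the current bio or belongs
		 * to a different device: submit the bio and retry.
		 */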
2120 scrub_submit(sctx);
2121 goto again;
2122 }
2123
2124 sbio->pagev[sbio->page_count] = spage;
2125 ret = bio_add_page(sbio->bio, spage->page, sectorsize, 0);
2126 if (ret != sectorsize) {
2127 if (sbio->page_count < 1) {
2128 bio_put(sbio->bio);
2129 sbio->bio = NULL;
2130 return -EIO;
2131 }
2132 scrub_submit(sctx);
2133 goto again;
2134 }
2135
2136 scrub_block_get(sblock); /* one for the page added to the bio */
2137 atomic_inc(&sblock->outstanding_pages);
2138 sbio->page_count++;
2139 if (sbio->page_count == sctx->pages_per_rd_bio)
2140 scrub_submit(sctx);
2141
2142 return 0;
2143}
2144
2145static void scrub_missing_raid56_end_io(struct bio *bio)
2146{
2147 struct scrub_block *sblock = bio->bi_private;
2148 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2149
2150 if (bio->bi_status)
2151 sblock->no_io_error_seen = 0;
2152
2153 bio_put(bio);
2154
2155 btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
2156}
2157
2158static void scrub_missing_raid56_worker(struct btrfs_work *work)
2159{
2160 struct scrub_block *sblock = container_of(work, struct scrub_block, work);
2161 struct scrub_ctx *sctx = sblock->sctx;
2162 struct btrfs_fs_info *fs_info = sctx->fs_info;
2163 u64 logical;
2164 struct btrfs_device *dev;
2165
2166 logical = sblock->pagev[0]->logical;
2167 dev = sblock->pagev[0]->dev;
2168
2169 if (sblock->no_io_error_seen)
2170 scrub_recheck_block_checksum(sblock);
2171
2172 if (!sblock->no_io_error_seen) {
2173 spin_lock(&sctx->stat_lock);
2174 sctx->stat.read_errors++;
2175 spin_unlock(&sctx->stat_lock);
2176 btrfs_err_rl_in_rcu(fs_info,
2177 "IO error rebuilding logical %llu for dev %s",
2178 logical, rcu_str_deref(dev->name));
2179 } else if (sblock->header_error || sblock->checksum_error) {
2180 spin_lock(&sctx->stat_lock);
2181 sctx->stat.uncorrectable_errors++;
2182 spin_unlock(&sctx->stat_lock);
2183 btrfs_err_rl_in_rcu(fs_info,
2184 "failed to rebuild valid logical %llu for dev %s",
2185 logical, rcu_str_deref(dev->name));
2186 } else {
2187 scrub_write_block_to_dev_replace(sblock);
2188 }
2189
2190 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2191 mutex_lock(&sctx->wr_lock);
2192 scrub_wr_submit(sctx);
2193 mutex_unlock(&sctx->wr_lock);
2194 }
2195
2196 scrub_block_put(sblock);
2197 scrub_pending_bio_dec(sctx);
2198}
2199
2200static void scrub_missing_raid56_pages(struct scrub_block *sblock)
2201{
2202 struct scrub_ctx *sctx = sblock->sctx;
2203 struct btrfs_fs_info *fs_info = sctx->fs_info;
2204 u64 length = sblock->page_count * PAGE_SIZE;
2205 u64 logical = sblock->pagev[0]->logical;
2206 struct btrfs_bio *bbio = NULL;
2207 struct bio *bio;
2208 struct btrfs_raid_bio *rbio;
2209 int ret;
2210 int i;
2211
2212 btrfs_bio_counter_inc_blocked(fs_info);
2213 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
2214 &length, &bbio);
2215 if (ret || !bbio || !bbio->raid_map)
2216 goto bbio_out;
2217
2218 if (WARN_ON(!sctx->is_dev_replace ||
2219 !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2220 /*
2221 * We shouldn't be scrubbing a missing device. Even for dev
2222 * replace, we should only get here for RAID 5/6. We either
2223 * managed to mount something with no mirrors remaining or
2224 * there's a bug in scrub_remap_extent()/btrfs_map_block().
2225 */
2226 goto bbio_out;
2227 }
2228
2229 bio = btrfs_io_bio_alloc(0);
2230 bio->bi_iter.bi_sector = logical >> 9;
2231 bio->bi_private = sblock;
2232 bio->bi_end_io = scrub_missing_raid56_end_io;
2233
2234 rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
2235 if (!rbio)
2236 goto rbio_out;
2237
2238 for (i = 0; i < sblock->page_count; i++) {
2239 struct scrub_page *spage = sblock->pagev[i];
2240
2241 raid56_add_scrub_pages(rbio, spage->page, spage->logical);
2242 }
2243
2244 btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);
2245 scrub_block_get(sblock);
2246 scrub_pending_bio_inc(sctx);
2247 raid56_submit_missing_rbio(rbio);
2248 return;
2249
2250rbio_out:
2251 bio_put(bio);
2252bbio_out:
2253 btrfs_bio_counter_dec(fs_info);
2254 btrfs_put_bbio(bbio);
2255 spin_lock(&sctx->stat_lock);
2256 sctx->stat.malloc_errors++;
2257 spin_unlock(&sctx->stat_lock);
2258}
2259
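/*
 * Create a scrub_block covering [logical, logical + len), split into
 * sector-sized pages, and queue every page for reading. Blocks on a
 * missing device take the RAID56 rebuild path instead.
 */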
2260static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u32 len,
2261 u64 physical, struct btrfs_device *dev, u64 flags,
2262 u64 gen, int mirror_num, u8 *csum,
2263 u64 physical_for_dev_replace)
2264{
2265 struct scrub_block *sblock;
2266 const u32 sectorsize = sctx->fs_info->sectorsize;
2267 int index;
2268
2269 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2270 if (!sblock) {
2271 spin_lock(&sctx->stat_lock);
2272 sctx->stat.malloc_errors++;
2273 spin_unlock(&sctx->stat_lock);
2274 return -ENOMEM;
2275 }
2276
	/*
	 * One ref inside this function, plus one for each page added to
	 * a bio later on.
	 */
2279 refcount_set(&sblock->refs, 1);
2280 sblock->sctx = sctx;
2281 sblock->no_io_error_seen = 1;
2282
2283 for (index = 0; len > 0; index++) {
2284 struct scrub_page *spage;
		/*
		 * Here we will allocate one page for one sector to scrub.
		 * This is fine if PAGE_SIZE == sectorsize, but costs more
		 * memory in the PAGE_SIZE > sectorsize case.
		 */
2290 u32 l = min(sectorsize, len);
2291
2292 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2293 if (!spage) {
2294leave_nomem:
2295 spin_lock(&sctx->stat_lock);
2296 sctx->stat.malloc_errors++;
2297 spin_unlock(&sctx->stat_lock);
2298 scrub_block_put(sblock);
2299 return -ENOMEM;
2300 }
2301 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2302 scrub_page_get(spage);
2303 sblock->pagev[index] = spage;
2304 spage->sblock = sblock;
2305 spage->dev = dev;
2306 spage->flags = flags;
2307 spage->generation = gen;
2308 spage->logical = logical;
2309 spage->physical = physical;
2310 spage->physical_for_dev_replace = physical_for_dev_replace;
2311 spage->mirror_num = mirror_num;
2312 if (csum) {
2313 spage->have_csum = 1;
2314 memcpy(spage->csum, csum, sctx->fs_info->csum_size);
2315 } else {
2316 spage->have_csum = 0;
2317 }
2318 sblock->page_count++;
2319 spage->page = alloc_page(GFP_KERNEL);
2320 if (!spage->page)
2321 goto leave_nomem;
2322 len -= l;
2323 logical += l;
2324 physical += l;
2325 physical_for_dev_replace += l;
2326 }
2327
2328 WARN_ON(sblock->page_count == 0);
2329 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2330 /*
2331 * This case should only be hit for RAID 5/6 device replace. See
2332 * the comment in scrub_missing_raid56_pages() for details.
2333 */
2334 scrub_missing_raid56_pages(sblock);
2335 } else {
2336 for (index = 0; index < sblock->page_count; index++) {
2337 struct scrub_page *spage = sblock->pagev[index];
2338 int ret;
2339
2340 ret = scrub_add_page_to_rd_bio(sctx, spage);
2341 if (ret) {
2342 scrub_block_put(sblock);
2343 return ret;
2344 }
2345 }
2346
2347 if (flags & BTRFS_EXTENT_FLAG_SUPER)
2348 scrub_submit(sctx);
2349 }
2350
2351 /* last one frees, either here or in bio completion for last page */
2352 scrub_block_put(sblock);
2353 return 0;
2354}
2355
2356static void scrub_bio_end_io(struct bio *bio)
2357{
2358 struct scrub_bio *sbio = bio->bi_private;
2359 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2360
2361 sbio->status = bio->bi_status;
2362 sbio->bio = bio;
2363
2364 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2365}
2366
2367static void scrub_bio_end_io_worker(struct btrfs_work *work)
2368{
2369 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2370 struct scrub_ctx *sctx = sbio->sctx;
2371 int i;
2372
2373 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2374 if (sbio->status) {
2375 for (i = 0; i < sbio->page_count; i++) {
2376 struct scrub_page *spage = sbio->pagev[i];
2377
2378 spage->io_error = 1;
2379 spage->sblock->no_io_error_seen = 0;
2380 }
2381 }
2382
2383 /* now complete the scrub_block items that have all pages completed */
2384 for (i = 0; i < sbio->page_count; i++) {
2385 struct scrub_page *spage = sbio->pagev[i];
2386 struct scrub_block *sblock = spage->sblock;
2387
2388 if (atomic_dec_and_test(&sblock->outstanding_pages))
2389 scrub_block_complete(sblock);
2390 scrub_block_put(sblock);
2391 }
2392
2393 bio_put(sbio->bio);
2394 sbio->bio = NULL;
2395 spin_lock(&sctx->list_lock);
2396 sbio->next_free = sctx->first_free;
2397 sctx->first_free = sbio->index;
2398 spin_unlock(&sctx->list_lock);
2399
2400 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2401 mutex_lock(&sctx->wr_lock);
2402 scrub_wr_submit(sctx);
2403 mutex_unlock(&sctx->wr_lock);
2404 }
2405
2406 scrub_pending_bio_dec(sctx);
2407}
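
/*
 * A sketch of the sbio recycling done above: sctx->first_free is the
 * head of a singly linked free list threaded through sbio->next_free
 * by array index, so returning a completed sbio is a push under
 * sctx->list_lock:
 *
 *   sbio->next_free = sctx->first_free;
 *   sctx->first_free = sbio->index;
 *
 * The submission side is expected to pop from the same head under the
 * same lock whenever it needs a free sbio slot.
 */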
2408
2409static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2410 unsigned long *bitmap,
2411 u64 start, u32 len)
2412{
2413 u64 offset;
2414 u32 nsectors;
2415 u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
2416
2417 if (len >= sparity->stripe_len) {
2418 bitmap_set(bitmap, 0, sparity->nsectors);
2419 return;
2420 }
2421
2422 start -= sparity->logic_start;
2423 start = div64_u64_rem(start, sparity->stripe_len, &offset);
2424 offset = offset >> sectorsize_bits;
2425 nsectors = len >> sectorsize_bits;
2426
2427 if (offset + nsectors <= sparity->nsectors) {
2428 bitmap_set(bitmap, offset, nsectors);
2429 return;
2430 }
2431
2432 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2433 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2434}
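
/*
 * Worked example for __scrub_mark_bitmap(), assuming stripe_len = 64K
 * and sectorsize = 4K (sectorsize_bits = 12), so nsectors = 16.
 * Marking a range that starts 56K into the stripe with len = 16K gives:
 *
 *   offset   = 56K >> 12 = 14
 *   nsectors = 16K >> 12 = 4
 *
 * Since 14 + 4 > 16 the range wraps around the stripe: bits 14-15 are
 * set first, then bits 0-1, matching the two bitmap_set() calls at the
 * end of the function.
 */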
2435
2436static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2437 u64 start, u32 len)
2438{
2439 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2440}
2441
2442static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2443 u64 start, u32 len)
2444{
2445 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2446}
2447
2448static void scrub_block_complete(struct scrub_block *sblock)
2449{
2450 int corrupted = 0;
2451
2452 if (!sblock->no_io_error_seen) {
2453 corrupted = 1;
2454 scrub_handle_errored_block(sblock);
2455 } else {
2456 /*
2457		 * If the block has a checksum error, the write to the
2458		 * dev-replace target happens via the repair mechanism;
2459		 * otherwise write it to the target here.
2460 */
2461 corrupted = scrub_checksum(sblock);
2462 if (!corrupted && sblock->sctx->is_dev_replace)
2463 scrub_write_block_to_dev_replace(sblock);
2464 }
2465
2466 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2467 u64 start = sblock->pagev[0]->logical;
2468 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2469 sblock->sctx->fs_info->sectorsize;
2470
2471 ASSERT(end - start <= U32_MAX);
2472 scrub_parity_mark_sectors_error(sblock->sparity,
2473 start, end - start);
2474 }
2475}
2476
2477static void drop_csum_range(struct scrub_ctx *sctx, struct btrfs_ordered_sum *sum)
2478{
2479 sctx->stat.csum_discards += sum->len >> sctx->fs_info->sectorsize_bits;
2480 list_del(&sum->list);
2481 kfree(sum);
2482}
2483
2484/*
2485 * Find the desired csum for range [logical, logical + sectorsize), and store
2486 * the csum into @csum.
2487 *
2488 * The search source is sctx->csum_list, a pre-populated list storing
2489 * csum ranges ordered by bytenr. We're responsible for cleaning up any
2490 * range that is before @logical.
2491 *
2492 * Return 0 if there is no csum for the range.
2493 * Return 1 if there is a csum for the range, copied into @csum.
2494 */
2495static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2496{
2497 bool found = false;
2498
2499 while (!list_empty(&sctx->csum_list)) {
2500 struct btrfs_ordered_sum *sum = NULL;
2501 unsigned long index;
2502 unsigned long num_sectors;
2503
2504 sum = list_first_entry(&sctx->csum_list,
2505 struct btrfs_ordered_sum, list);
2506 /* The current csum range is beyond our range, no csum found */
2507 if (sum->bytenr > logical)
2508 break;
2509
2510 /*
2511		 * The current sum is before our bytenr. Since scrub is always
2512		 * done in bytenr order, this csum will never be used again;
2513		 * clean it up so that later calls won't bother with the range,
2514		 * and continue searching from the next range.
2515 */
2516 if (sum->bytenr + sum->len <= logical) {
2517 drop_csum_range(sctx, sum);
2518 continue;
2519 }
2520
2521 /* Now the csum range covers our bytenr, copy the csum */
2522 found = true;
2523 index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
2524 num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
2525
2526 memcpy(csum, sum->sums + index * sctx->fs_info->csum_size,
2527 sctx->fs_info->csum_size);
2528
2529 /* Cleanup the range if we're at the end of the csum range */
2530 if (index == num_sectors - 1)
2531 drop_csum_range(sctx, sum);
2532 break;
2533 }
2534 if (!found)
2535 return 0;
2536 return 1;
2537}
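
/*
 * Worked example for scrub_find_csum(), assuming sectorsize = 4K and a
 * csum_list holding two ranges, [1M, 1M + 128K) and [2M, 2M + 64K):
 *
 * - A lookup at logical = 512K sees sum->bytenr (1M) > logical and
 *   returns 0 (no csum).
 * - A lookup at logical = 2M first sees 1M + 128K <= 2M, drops the now
 *   stale first range, then copies the csum at index 0 of the second
 *   range and returns 1. The second range is kept, since index 0 is
 *   not its last sector.
 */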
2538
2539/* scrub extent tries to collect up to 64 kB for each bio */
2540static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2541 u64 logical, u32 len,
2542 u64 physical, struct btrfs_device *dev, u64 flags,
2543 u64 gen, int mirror_num, u64 physical_for_dev_replace)
2544{
2545 int ret;
2546 u8 csum[BTRFS_CSUM_SIZE];
2547 u32 blocksize;
2548
2549 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2550 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2551 blocksize = map->stripe_len;
2552 else
2553 blocksize = sctx->fs_info->sectorsize;
2554 spin_lock(&sctx->stat_lock);
2555 sctx->stat.data_extents_scrubbed++;
2556 sctx->stat.data_bytes_scrubbed += len;
2557 spin_unlock(&sctx->stat_lock);
2558 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2559 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2560 blocksize = map->stripe_len;
2561 else
2562 blocksize = sctx->fs_info->nodesize;
2563 spin_lock(&sctx->stat_lock);
2564 sctx->stat.tree_extents_scrubbed++;
2565 sctx->stat.tree_bytes_scrubbed += len;
2566 spin_unlock(&sctx->stat_lock);
2567 } else {
2568 blocksize = sctx->fs_info->sectorsize;
2569 WARN_ON(1);
2570 }
2571
2572 while (len) {
2573 u32 l = min(len, blocksize);
2574 int have_csum = 0;
2575
2576 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2577 /* push csums to sbio */
2578 have_csum = scrub_find_csum(sctx, logical, csum);
2579 if (have_csum == 0)
2580 ++sctx->stat.no_csum;
2581 }
2582 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2583 mirror_num, have_csum ? csum : NULL,
2584 physical_for_dev_replace);
2585 if (ret)
2586 return ret;
2587 len -= l;
2588 logical += l;
2589 physical += l;
2590 physical_for_dev_replace += l;
2591 }
2592 return 0;
2593}
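
/*
 * Worked example for scrub_extent(), assuming sectorsize = 4K and a
 * 128K data extent: on a non-RAID56 profile the blocksize is the
 * sector size, so the loop issues 32 scrub_pages() calls of 4K each,
 * looking up a csum per sector. On RAID56 the blocksize is
 * map->stripe_len instead (64K by default), so the same extent is
 * submitted as two 64K calls.
 */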
2594
2595static int scrub_pages_for_parity(struct scrub_parity *sparity,
2596 u64 logical, u32 len,
2597 u64 physical, struct btrfs_device *dev,
2598 u64 flags, u64 gen, int mirror_num, u8 *csum)
2599{
2600 struct scrub_ctx *sctx = sparity->sctx;
2601 struct scrub_block *sblock;
2602 const u32 sectorsize = sctx->fs_info->sectorsize;
2603 int index;
2604
2605 ASSERT(IS_ALIGNED(len, sectorsize));
2606
2607 sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
2608 if (!sblock) {
2609 spin_lock(&sctx->stat_lock);
2610 sctx->stat.malloc_errors++;
2611 spin_unlock(&sctx->stat_lock);
2612 return -ENOMEM;
2613 }
2614
2615	/* One ref inside this function, plus one for each page added to
2616	 * a bio later on. */
2617 refcount_set(&sblock->refs, 1);
2618 sblock->sctx = sctx;
2619 sblock->no_io_error_seen = 1;
2620 sblock->sparity = sparity;
2621 scrub_parity_get(sparity);
2622
2623 for (index = 0; len > 0; index++) {
2624 struct scrub_page *spage;
2625
2626 spage = kzalloc(sizeof(*spage), GFP_KERNEL);
2627 if (!spage) {
2628leave_nomem:
2629 spin_lock(&sctx->stat_lock);
2630 sctx->stat.malloc_errors++;
2631 spin_unlock(&sctx->stat_lock);
2632 scrub_block_put(sblock);
2633 return -ENOMEM;
2634 }
2635 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2636 /* For scrub block */
2637 scrub_page_get(spage);
2638 sblock->pagev[index] = spage;
2639 /* For scrub parity */
2640 scrub_page_get(spage);
2641 list_add_tail(&spage->list, &sparity->spages);
2642 spage->sblock = sblock;
2643 spage->dev = dev;
2644 spage->flags = flags;
2645 spage->generation = gen;
2646 spage->logical = logical;
2647 spage->physical = physical;
2648 spage->mirror_num = mirror_num;
2649 if (csum) {
2650 spage->have_csum = 1;
2651 memcpy(spage->csum, csum, sctx->fs_info->csum_size);
2652 } else {
2653 spage->have_csum = 0;
2654 }
2655 sblock->page_count++;
2656 spage->page = alloc_page(GFP_KERNEL);
2657 if (!spage->page)
2658 goto leave_nomem;
2659
2660
2661 /* Iterate over the stripe range in sectorsize steps */
2662 len -= sectorsize;
2663 logical += sectorsize;
2664 physical += sectorsize;
2665 }
2666
2667 WARN_ON(sblock->page_count == 0);
2668 for (index = 0; index < sblock->page_count; index++) {
2669 struct scrub_page *spage = sblock->pagev[index];
2670 int ret;
2671
2672 ret = scrub_add_page_to_rd_bio(sctx, spage);
2673 if (ret) {
2674 scrub_block_put(sblock);
2675 return ret;
2676 }
2677 }
2678
2679 /* last one frees, either here or in bio completion for last page */
2680 scrub_block_put(sblock);
2681 return 0;
2682}
2683
2684static int scrub_extent_for_parity(struct scrub_parity *sparity,
2685 u64 logical, u32 len,
2686 u64 physical, struct btrfs_device *dev,
2687 u64 flags, u64 gen, int mirror_num)
2688{
2689 struct scrub_ctx *sctx = sparity->sctx;
2690 int ret;
2691 u8 csum[BTRFS_CSUM_SIZE];
2692 u32 blocksize;
2693
2694 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
2695 scrub_parity_mark_sectors_error(sparity, logical, len);
2696 return 0;
2697 }
2698
2699 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2700 blocksize = sparity->stripe_len;
2701 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2702 blocksize = sparity->stripe_len;
2703 } else {
2704 blocksize = sctx->fs_info->sectorsize;
2705 WARN_ON(1);
2706 }
2707
2708 while (len) {
2709 u32 l = min(len, blocksize);
2710 int have_csum = 0;
2711
2712 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2713 /* push csums to sbio */
2714 have_csum = scrub_find_csum(sctx, logical, csum);
2715 if (have_csum == 0)
2716 goto skip;
2717 }
2718 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2719 flags, gen, mirror_num,
2720 have_csum ? csum : NULL);
2721 if (ret)
2722 return ret;
2723skip:
2724 len -= l;
2725 logical += l;
2726 physical += l;
2727 }
2728 return 0;
2729}
2730
2731/*
2732 * Given a physical address, this calculates its logical offset.
2733 * If this is a parity stripe, it returns the logical offset of the
2734 * leftmost data stripe instead.
2735 *
2736 * Return 0 if it is a data stripe, 1 if it is a parity stripe.
2737 */
2738static int get_raid56_logic_offset(u64 physical, int num,
2739 struct map_lookup *map, u64 *offset,
2740 u64 *stripe_start)
2741{
2742 int i;
2743 int j = 0;
2744 u64 stripe_nr;
2745 u64 last_offset;
2746 u32 stripe_index;
2747 u32 rot;
2748 const int data_stripes = nr_data_stripes(map);
2749
2750 last_offset = (physical - map->stripes[num].physical) * data_stripes;
2751 if (stripe_start)
2752 *stripe_start = last_offset;
2753
2754 *offset = last_offset;
2755 for (i = 0; i < data_stripes; i++) {
2756 *offset = last_offset + i * map->stripe_len;
2757
2758 stripe_nr = div64_u64(*offset, map->stripe_len);
2759 stripe_nr = div_u64(stripe_nr, data_stripes);
2760
2761 /* Work out the disk rotation on this stripe-set */
2762 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
2763		/* Calculate which stripe this data is located on */
2764 rot += i;
2765 stripe_index = rot % map->num_stripes;
2766 if (stripe_index == num)
2767 return 0;
2768 if (stripe_index < num)
2769 j++;
2770 }
2771 *offset = last_offset + j * map->stripe_len;
2772 return 1;
2773}
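
/*
 * Worked example for get_raid56_logic_offset(), assuming a 3-device
 * RAID5 chunk (num_stripes = 3, data_stripes = 2), stripe_len = 64K,
 * and a physical offset 64K into device num = 0, so last_offset = 128K:
 *
 *   i = 0: *offset = 128K, stripe_nr = 1, rot = 1, stripe_index = 1
 *   i = 1: *offset = 192K, stripe_nr = 1, rot = 2, stripe_index = 2
 *
 * Neither iteration hits stripe_index == num, so this 64K holds the
 * parity of the second stripe-set; j stays 0 and the function returns
 * 1 with *offset = 128K, the logical start of the leftmost data stripe
 * in that set.
 */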
2774
2775static void scrub_free_parity(struct scrub_parity *sparity)
2776{
2777 struct scrub_ctx *sctx = sparity->sctx;
2778 struct scrub_page *curr, *next;
2779 int nbits;
2780
2781 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2782 if (nbits) {
2783 spin_lock(&sctx->stat_lock);
2784 sctx->stat.read_errors += nbits;
2785 sctx->stat.uncorrectable_errors += nbits;
2786 spin_unlock(&sctx->stat_lock);
2787 }
2788
2789 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2790 list_del_init(&curr->list);
2791 scrub_page_put(curr);
2792 }
2793
2794 kfree(sparity);
2795}
2796
2797static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
2798{
2799 struct scrub_parity *sparity = container_of(work, struct scrub_parity,
2800 work);
2801 struct scrub_ctx *sctx = sparity->sctx;
2802
2803 scrub_free_parity(sparity);
2804 scrub_pending_bio_dec(sctx);
2805}
2806
2807static void scrub_parity_bio_endio(struct bio *bio)
2808{
2809 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2810 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2811
2812 if (bio->bi_status)
2813 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2814 sparity->nsectors);
2815
2816 bio_put(bio);
2817
2818 btrfs_init_work(&sparity->work, scrub_parity_bio_endio_worker, NULL,
2819 NULL);
2820 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
2821}
2822
2823static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2824{
2825 struct scrub_ctx *sctx = sparity->sctx;
2826 struct btrfs_fs_info *fs_info = sctx->fs_info;
2827 struct bio *bio;
2828 struct btrfs_raid_bio *rbio;
2829 struct btrfs_bio *bbio = NULL;
2830 u64 length;
2831 int ret;
2832
2833 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2834 sparity->nsectors))
2835 goto out;
2836
2837 length = sparity->logic_end - sparity->logic_start;
2838
2839 btrfs_bio_counter_inc_blocked(fs_info);
2840 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
2841 &length, &bbio);
2842 if (ret || !bbio || !bbio->raid_map)
2843 goto bbio_out;
2844
2845 bio = btrfs_io_bio_alloc(0);
2846 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2847 bio->bi_private = sparity;
2848 bio->bi_end_io = scrub_parity_bio_endio;
2849
2850 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
2851 length, sparity->scrub_dev,
2852 sparity->dbitmap,
2853 sparity->nsectors);
2854 if (!rbio)
2855 goto rbio_out;
2856
2857 scrub_pending_bio_inc(sctx);
2858 raid56_parity_submit_scrub_rbio(rbio);
2859 return;
2860
2861rbio_out:
2862 bio_put(bio);
2863bbio_out:
2864 btrfs_bio_counter_dec(fs_info);
2865 btrfs_put_bbio(bbio);
2866 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2867 sparity->nsectors);
2868 spin_lock(&sctx->stat_lock);
2869 sctx->stat.malloc_errors++;
2870 spin_unlock(&sctx->stat_lock);
2871out:
2872 scrub_free_parity(sparity);
2873}
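
/*
 * Illustration of the bitmap_andnot() gate in
 * scrub_parity_check_and_repair(), with hypothetical 4-sector bitmaps:
 * dbitmap = 0b0111 and ebitmap = 0b0101 leave only bit 1 set, so the
 * scrub rbio re-checks parity for just that sector. Had ebitmap
 * covered all of dbitmap, the result would be empty and the whole rbio
 * would be skipped via the "goto out" path.
 */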
2874
2875static inline int scrub_calc_parity_bitmap_len(int nsectors)
2876{
2877 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * sizeof(long);
2878}
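
/*
 * Worked example for scrub_calc_parity_bitmap_len(), assuming
 * stripe_len = 64K and sectorsize = 4K: nsectors = 16, so on a 64-bit
 * kernel (BITS_PER_LONG = 64) this rounds up to a single long, i.e. 8
 * bytes. scrub_raid56_parity() below allocates 2 * bitmap_len trailing
 * bytes and carves them into dbitmap (data sectors to check) and
 * ebitmap (sectors that saw errors).
 */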
2879
2880static void scrub_parity_get(struct scrub_parity *sparity)
2881{
2882 refcount_inc(&sparity->refs);
2883}
2884
2885static void scrub_parity_put(struct scrub_parity *sparity)
2886{
2887 if (!refcount_dec_and_test(&sparity->refs))
2888 return;
2889
2890 scrub_parity_check_and_repair(sparity);
2891}
2892
2893static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2894 struct map_lookup *map,
2895 struct btrfs_device *sdev,
2896 struct btrfs_path *path,
2897 u64 logic_start,
2898 u64 logic_end)
2899{
2900 struct btrfs_fs_info *fs_info = sctx->fs_info;
2901 struct btrfs_root *root = fs_info->extent_root;
2902 struct btrfs_root *csum_root = fs_info->csum_root;
2903 struct btrfs_extent_item *extent;
2904 struct btrfs_bio *bbio = NULL;
2905 u64 flags;
2906 int ret;
2907 int slot;
2908 struct extent_buffer *l;
2909 struct btrfs_key key;
2910 u64 generation;
2911 u64 extent_logical;
2912 u64 extent_physical;
2913 /* Check the comment in scrub_stripe() for why u32 is enough here */
2914 u32 extent_len;
2915 u64 mapped_length;
2916 struct btrfs_device *extent_dev;
2917 struct scrub_parity *sparity;
2918 int nsectors;
2919 int bitmap_len;
2920 int extent_mirror_num;
2921 int stop_loop = 0;
2922
2923 ASSERT(map->stripe_len <= U32_MAX);
2924 nsectors = map->stripe_len >> fs_info->sectorsize_bits;
2925 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2926 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2927 GFP_NOFS);
2928 if (!sparity) {
2929 spin_lock(&sctx->stat_lock);
2930 sctx->stat.malloc_errors++;
2931 spin_unlock(&sctx->stat_lock);
2932 return -ENOMEM;
2933 }
2934
2935 ASSERT(map->stripe_len <= U32_MAX);
2936 sparity->stripe_len = map->stripe_len;
2937 sparity->nsectors = nsectors;
2938 sparity->sctx = sctx;
2939 sparity->scrub_dev = sdev;
2940 sparity->logic_start = logic_start;
2941 sparity->logic_end = logic_end;
2942 refcount_set(&sparity->refs, 1);
2943 INIT_LIST_HEAD(&sparity->spages);
2944 sparity->dbitmap = sparity->bitmap;
2945 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2946
2947 ret = 0;
2948 while (logic_start < logic_end) {
2949 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2950 key.type = BTRFS_METADATA_ITEM_KEY;
2951 else
2952 key.type = BTRFS_EXTENT_ITEM_KEY;
2953 key.objectid = logic_start;
2954 key.offset = (u64)-1;
2955
2956 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2957 if (ret < 0)
2958 goto out;
2959
2960 if (ret > 0) {
2961 ret = btrfs_previous_extent_item(root, path, 0);
2962 if (ret < 0)
2963 goto out;
2964 if (ret > 0) {
2965 btrfs_release_path(path);
2966 ret = btrfs_search_slot(NULL, root, &key,
2967 path, 0, 0);
2968 if (ret < 0)
2969 goto out;
2970 }
2971 }
2972
2973 stop_loop = 0;
2974 while (1) {
2975 u64 bytes;
2976
2977 l = path->nodes[0];
2978 slot = path->slots[0];
2979 if (slot >= btrfs_header_nritems(l)) {
2980 ret = btrfs_next_leaf(root, path);
2981 if (ret == 0)
2982 continue;
2983 if (ret < 0)
2984 goto out;
2985
2986 stop_loop = 1;
2987 break;
2988 }
2989 btrfs_item_key_to_cpu(l, &key, slot);
2990
2991 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2992 key.type != BTRFS_METADATA_ITEM_KEY)
2993 goto next;
2994
2995 if (key.type == BTRFS_METADATA_ITEM_KEY)
2996 bytes = fs_info->nodesize;
2997 else
2998 bytes = key.offset;
2999
3000 if (key.objectid + bytes <= logic_start)
3001 goto next;
3002
3003 if (key.objectid >= logic_end) {
3004 stop_loop = 1;
3005 break;
3006 }
3007
3008 while (key.objectid >= logic_start + map->stripe_len)
3009 logic_start += map->stripe_len;
3010
3011 extent = btrfs_item_ptr(l, slot,
3012 struct btrfs_extent_item);
3013 flags = btrfs_extent_flags(l, extent);
3014 generation = btrfs_extent_generation(l, extent);
3015
3016 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3017 (key.objectid < logic_start ||
3018 key.objectid + bytes >
3019 logic_start + map->stripe_len)) {
3020 btrfs_err(fs_info,
3021 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3022 key.objectid, logic_start);
3023 spin_lock(&sctx->stat_lock);
3024 sctx->stat.uncorrectable_errors++;
3025 spin_unlock(&sctx->stat_lock);
3026 goto next;
3027 }
3028again:
3029 extent_logical = key.objectid;
3030 ASSERT(bytes <= U32_MAX);
3031 extent_len = bytes;
3032
3033 if (extent_logical < logic_start) {
3034 extent_len -= logic_start - extent_logical;
3035 extent_logical = logic_start;
3036 }
3037
3038 if (extent_logical + extent_len >
3039 logic_start + map->stripe_len)
3040 extent_len = logic_start + map->stripe_len -
3041 extent_logical;
3042
3043 scrub_parity_mark_sectors_data(sparity, extent_logical,
3044 extent_len);
3045
3046 mapped_length = extent_len;
3047 bbio = NULL;
3048 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
3049 extent_logical, &mapped_length, &bbio,
3050 0);
3051 if (!ret) {
3052 if (!bbio || mapped_length < extent_len)
3053 ret = -EIO;
3054 }
3055 if (ret) {
3056 btrfs_put_bbio(bbio);
3057 goto out;
3058 }
3059 extent_physical = bbio->stripes[0].physical;
3060 extent_mirror_num = bbio->mirror_num;
3061 extent_dev = bbio->stripes[0].dev;
3062 btrfs_put_bbio(bbio);
3063
3064 ret = btrfs_lookup_csums_range(csum_root,
3065 extent_logical,
3066 extent_logical + extent_len - 1,
3067 &sctx->csum_list, 1);
3068 if (ret)
3069 goto out;
3070
3071 ret = scrub_extent_for_parity(sparity, extent_logical,
3072 extent_len,
3073 extent_physical,
3074 extent_dev, flags,
3075 generation,
3076 extent_mirror_num);
3077
3078 scrub_free_csums(sctx);
3079
3080 if (ret)
3081 goto out;
3082
3083 if (extent_logical + extent_len <
3084 key.objectid + bytes) {
3085 logic_start += map->stripe_len;
3086
3087 if (logic_start >= logic_end) {
3088 stop_loop = 1;
3089 break;
3090 }
3091
3092 if (logic_start < key.objectid + bytes) {
3093 cond_resched();
3094 goto again;
3095 }
3096 }
3097next:
3098 path->slots[0]++;
3099 }
3100
3101 btrfs_release_path(path);
3102
3103 if (stop_loop)
3104 break;
3105
3106 logic_start += map->stripe_len;
3107 }
3108out:
3109 if (ret < 0) {
3110 ASSERT(logic_end - logic_start <= U32_MAX);
3111 scrub_parity_mark_sectors_error(sparity, logic_start,
3112 logic_end - logic_start);
3113 }
3114 scrub_parity_put(sparity);
3115 scrub_submit(sctx);
3116 mutex_lock(&sctx->wr_lock);
3117 scrub_wr_submit(sctx);
3118 mutex_unlock(&sctx->wr_lock);
3119
3120 btrfs_release_path(path);
3121 return ret < 0 ? ret : 0;
3122}
3123
3124static void sync_replace_for_zoned(struct scrub_ctx *sctx)
3125{
3126 if (!btrfs_is_zoned(sctx->fs_info))
3127 return;
3128
3129 sctx->flush_all_writes = true;
3130 scrub_submit(sctx);
3131 mutex_lock(&sctx->wr_lock);
3132 scrub_wr_submit(sctx);
3133 mutex_unlock(&sctx->wr_lock);
3134
3135 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3136}
3137
3138static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
3139 u64 physical, u64 physical_end)
3140{
3141 struct btrfs_fs_info *fs_info = sctx->fs_info;
3142 int ret = 0;
3143
3144 if (!btrfs_is_zoned(fs_info))
3145 return 0;
3146
3147 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3148
3149 mutex_lock(&sctx->wr_lock);
3150 if (sctx->write_pointer < physical_end) {
3151 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
3152 physical,
3153 sctx->write_pointer);
3154 if (ret)
3155 btrfs_err(fs_info,
3156 "zoned: failed to recover write pointer");
3157 }
3158 mutex_unlock(&sctx->wr_lock);
3159 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
3160
3161 return ret;
3162}
3163
3164static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
3165 struct map_lookup *map,
3166 struct btrfs_device *scrub_dev,
3167 int num, u64 base, u64 length,
3168 struct btrfs_block_group *cache)
3169{
3170 struct btrfs_path *path, *ppath;
3171 struct btrfs_fs_info *fs_info = sctx->fs_info;
3172 struct btrfs_root *root = fs_info->extent_root;
3173 struct btrfs_root *csum_root = fs_info->csum_root;
3174 struct btrfs_extent_item *extent;
3175 struct blk_plug plug;
3176 u64 flags;
3177 int ret;
3178 int slot;
3179 u64 nstripes;
3180 struct extent_buffer *l;
3181 u64 physical;
3182 u64 logical;
3183 u64 logic_end;
3184 u64 physical_end;
3185 u64 generation;
3186 int mirror_num;
3187 struct reada_control *reada1;
3188 struct reada_control *reada2;
3189 struct btrfs_key key;
3190 struct btrfs_key key_end;
3191 u64 increment = map->stripe_len;
3192 u64 offset;
3193 u64 extent_logical;
3194 u64 extent_physical;
3195 /*
3196 * Unlike chunk length, extent length should never go beyond
3197 * BTRFS_MAX_EXTENT_SIZE, thus u32 is enough here.
3198 */
3199 u32 extent_len;
3200 u64 stripe_logical;
3201 u64 stripe_end;
3202 struct btrfs_device *extent_dev;
3203 int extent_mirror_num;
3204 int stop_loop = 0;
3205
3206 physical = map->stripes[num].physical;
3207 offset = 0;
3208 nstripes = div64_u64(length, map->stripe_len);
3209 mirror_num = 1;
3210 increment = map->stripe_len;
3211 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3212 offset = map->stripe_len * num;
3213 increment = map->stripe_len * map->num_stripes;
3214 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3215 int factor = map->num_stripes / map->sub_stripes;
3216 offset = map->stripe_len * (num / map->sub_stripes);
3217 increment = map->stripe_len * factor;
3218 mirror_num = num % map->sub_stripes + 1;
3219 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
3220 mirror_num = num % map->num_stripes + 1;
3221 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3222 mirror_num = num % map->num_stripes + 1;
3223 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3224 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3225 increment = map->stripe_len * nr_data_stripes(map);
3226 }
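
	/*
	 * Worked example for the offset/increment math above, assuming
	 * RAID0 with num_stripes = 4, stripe_len = 64K and num = 1: this
	 * device holds stripes 1, 5, 9, ... of the chunk, so its first
	 * stripe starts at logical offset 64K into the chunk and
	 * consecutive stripes on this device are 256K apart logically.
	 */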
3227
3228 path = btrfs_alloc_path();
3229 if (!path)
3230 return -ENOMEM;
3231
3232 ppath = btrfs_alloc_path();
3233 if (!ppath) {
3234 btrfs_free_path(path);
3235 return -ENOMEM;
3236 }
3237
3238 /*
3239	 * Work on the commit root. The related disk blocks are static as
3240	 * long as COW is applied. This means it is safe to rewrite them
3241	 * to repair disk errors without any race conditions.
3242 */
3243 path->search_commit_root = 1;
3244 path->skip_locking = 1;
3245
3246 ppath->search_commit_root = 1;
3247 ppath->skip_locking = 1;
3248 /*
3249	 * Trigger the readahead for the extent tree and csum tree and wait
3250	 * for completion. During readahead, the scrub is officially paused
3251	 * so as not to hold off transaction commits.
3252 */
3253 logical = base + offset;
3254 physical_end = physical + nstripes * map->stripe_len;
3255 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3256 get_raid56_logic_offset(physical_end, num,
3257 map, &logic_end, NULL);
3258 logic_end += base;
3259 } else {
3260 logic_end = logical + increment * nstripes;
3261 }
3262 wait_event(sctx->list_wait,
3263 atomic_read(&sctx->bios_in_flight) == 0);
3264 scrub_blocked_if_needed(fs_info);
3265
3266 /* FIXME it might be better to start readahead at commit root */
3267 key.objectid = logical;
3268 key.type = BTRFS_EXTENT_ITEM_KEY;
3269 key.offset = (u64)0;
3270 key_end.objectid = logic_end;
3271 key_end.type = BTRFS_METADATA_ITEM_KEY;
3272 key_end.offset = (u64)-1;
3273 reada1 = btrfs_reada_add(root, &key, &key_end);
3274
3275 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3276 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3277 key.type = BTRFS_EXTENT_CSUM_KEY;
3278 key.offset = logical;
3279 key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3280 key_end.type = BTRFS_EXTENT_CSUM_KEY;
3281 key_end.offset = logic_end;
3282 reada2 = btrfs_reada_add(csum_root, &key, &key_end);
3283 } else {
3284 reada2 = NULL;
3285 }
3286
3287 if (!IS_ERR(reada1))
3288 btrfs_reada_wait(reada1);
3289 if (!IS_ERR_OR_NULL(reada2))
3290 btrfs_reada_wait(reada2);
3291
3292
3293 /*
3294	 * Collect all data csums for the stripe to avoid seeking during
3295	 * the scrub. This might currently (crc32) end up being about 1MB.
3296 */
3297 blk_start_plug(&plug);
3298
3299 if (sctx->is_dev_replace &&
3300 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
3301 mutex_lock(&sctx->wr_lock);
3302 sctx->write_pointer = physical;
3303 mutex_unlock(&sctx->wr_lock);
3304 sctx->flush_all_writes = true;
3305 }
3306
3307 /*
3308 * now find all extents for each stripe and scrub them
3309 */
3310 ret = 0;
3311 while (physical < physical_end) {
3312 /*
3313 * canceled?
3314 */
3315 if (atomic_read(&fs_info->scrub_cancel_req) ||
3316 atomic_read(&sctx->cancel_req)) {
3317 ret = -ECANCELED;
3318 goto out;
3319 }
3320 /*
3321 * check to see if we have to pause
3322 */
3323 if (atomic_read(&fs_info->scrub_pause_req)) {
3324 /* push queued extents */
3325 sctx->flush_all_writes = true;
3326 scrub_submit(sctx);
3327 mutex_lock(&sctx->wr_lock);
3328 scrub_wr_submit(sctx);
3329 mutex_unlock(&sctx->wr_lock);
3330 wait_event(sctx->list_wait,
3331 atomic_read(&sctx->bios_in_flight) == 0);
3332 sctx->flush_all_writes = false;
3333 scrub_blocked_if_needed(fs_info);
3334 }
3335
3336 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3337 ret = get_raid56_logic_offset(physical, num, map,
3338 &logical,
3339 &stripe_logical);
3340 logical += base;
3341 if (ret) {
3342				/* It is a parity stripe */
3343 stripe_logical += base;
3344 stripe_end = stripe_logical + increment;
3345 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3346 ppath, stripe_logical,
3347 stripe_end);
3348 if (ret)
3349 goto out;
3350 goto skip;
3351 }
3352 }
3353
3354 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3355 key.type = BTRFS_METADATA_ITEM_KEY;
3356 else
3357 key.type = BTRFS_EXTENT_ITEM_KEY;
3358 key.objectid = logical;
3359 key.offset = (u64)-1;
3360
3361 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3362 if (ret < 0)
3363 goto out;
3364
3365 if (ret > 0) {
3366 ret = btrfs_previous_extent_item(root, path, 0);
3367 if (ret < 0)
3368 goto out;
3369 if (ret > 0) {
3370 /* there's no smaller item, so stick with the
3371 * larger one */
3372 btrfs_release_path(path);
3373 ret = btrfs_search_slot(NULL, root, &key,
3374 path, 0, 0);
3375 if (ret < 0)
3376 goto out;
3377 }
3378 }
3379
3380 stop_loop = 0;
3381 while (1) {
3382 u64 bytes;
3383
3384 l = path->nodes[0];
3385 slot = path->slots[0];
3386 if (slot >= btrfs_header_nritems(l)) {
3387 ret = btrfs_next_leaf(root, path);
3388 if (ret == 0)
3389 continue;
3390 if (ret < 0)
3391 goto out;
3392
3393 stop_loop = 1;
3394 break;
3395 }
3396 btrfs_item_key_to_cpu(l, &key, slot);
3397
3398 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3399 key.type != BTRFS_METADATA_ITEM_KEY)
3400 goto next;
3401
3402 if (key.type == BTRFS_METADATA_ITEM_KEY)
3403 bytes = fs_info->nodesize;
3404 else
3405 bytes = key.offset;
3406
3407 if (key.objectid + bytes <= logical)
3408 goto next;
3409
3410 if (key.objectid >= logical + map->stripe_len) {
3411 /* out of this device extent */
3412 if (key.objectid >= logic_end)
3413 stop_loop = 1;
3414 break;
3415 }
3416
3417 /*
3418 * If our block group was removed in the meanwhile, just
3419 * stop scrubbing since there is no point in continuing.
3420 * Continuing would prevent reusing its device extents
3421 * for new block groups for a long time.
3422 */
3423 spin_lock(&cache->lock);
3424 if (cache->removed) {
3425 spin_unlock(&cache->lock);
3426 ret = 0;
3427 goto out;
3428 }
3429 spin_unlock(&cache->lock);
3430
3431 extent = btrfs_item_ptr(l, slot,
3432 struct btrfs_extent_item);
3433 flags = btrfs_extent_flags(l, extent);
3434 generation = btrfs_extent_generation(l, extent);
3435
3436 if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
3437 (key.objectid < logical ||
3438 key.objectid + bytes >
3439 logical + map->stripe_len)) {
3440 btrfs_err(fs_info,
3441 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
3442 key.objectid, logical);
3443 spin_lock(&sctx->stat_lock);
3444 sctx->stat.uncorrectable_errors++;
3445 spin_unlock(&sctx->stat_lock);
3446 goto next;
3447 }
3448
3449again:
3450 extent_logical = key.objectid;
3451 ASSERT(bytes <= U32_MAX);
3452 extent_len = bytes;
3453
3454 /*
3455 * trim extent to this stripe
3456 */
3457 if (extent_logical < logical) {
3458 extent_len -= logical - extent_logical;
3459 extent_logical = logical;
3460 }
3461 if (extent_logical + extent_len >
3462 logical + map->stripe_len) {
3463 extent_len = logical + map->stripe_len -
3464 extent_logical;
3465 }
3466
3467 extent_physical = extent_logical - logical + physical;
3468 extent_dev = scrub_dev;
3469 extent_mirror_num = mirror_num;
3470 if (sctx->is_dev_replace)
3471 scrub_remap_extent(fs_info, extent_logical,
3472 extent_len, &extent_physical,
3473 &extent_dev,
3474 &extent_mirror_num);
3475
3476 if (flags & BTRFS_EXTENT_FLAG_DATA) {
3477 ret = btrfs_lookup_csums_range(csum_root,
3478 extent_logical,
3479 extent_logical + extent_len - 1,
3480 &sctx->csum_list, 1);
3481 if (ret)
3482 goto out;
3483 }
3484
3485 ret = scrub_extent(sctx, map, extent_logical, extent_len,
3486 extent_physical, extent_dev, flags,
3487 generation, extent_mirror_num,
3488 extent_logical - logical + physical);
3489
3490 scrub_free_csums(sctx);
3491
3492 if (ret)
3493 goto out;
3494
3495 if (sctx->is_dev_replace)
3496 sync_replace_for_zoned(sctx);
3497
3498 if (extent_logical + extent_len <
3499 key.objectid + bytes) {
3500 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3501 /*
3502 * loop until we find next data stripe
3503 * or we have finished all stripes.
3504 */
3505loop:
3506 physical += map->stripe_len;
3507 ret = get_raid56_logic_offset(physical,
3508 num, map, &logical,
3509 &stripe_logical);
3510 logical += base;
3511
3512 if (ret && physical < physical_end) {
3513 stripe_logical += base;
3514 stripe_end = stripe_logical +
3515 increment;
3516 ret = scrub_raid56_parity(sctx,
3517 map, scrub_dev, ppath,
3518 stripe_logical,
3519 stripe_end);
3520 if (ret)
3521 goto out;
3522 goto loop;
3523 }
3524 } else {
3525 physical += map->stripe_len;
3526 logical += increment;
3527 }
3528 if (logical < key.objectid + bytes) {
3529 cond_resched();
3530 goto again;
3531 }
3532
3533 if (physical >= physical_end) {
3534 stop_loop = 1;
3535 break;
3536 }
3537 }
3538next:
3539 path->slots[0]++;
3540 }
3541 btrfs_release_path(path);
3542skip:
3543 logical += increment;
3544 physical += map->stripe_len;
3545 spin_lock(&sctx->stat_lock);
3546 if (stop_loop)
3547 sctx->stat.last_physical = map->stripes[num].physical +
3548 length;
3549 else
3550 sctx->stat.last_physical = physical;
3551 spin_unlock(&sctx->stat_lock);
3552 if (stop_loop)
3553 break;
3554 }
3555out:
3556 /* push queued extents */
3557 scrub_submit(sctx);
3558 mutex_lock(&sctx->wr_lock);
3559 scrub_wr_submit(sctx);
3560 mutex_unlock(&sctx->wr_lock);
3561
3562 blk_finish_plug(&plug);
3563 btrfs_free_path(path);
3564 btrfs_free_path(ppath);
3565
3566 if (sctx->is_dev_replace && ret >= 0) {
3567 int ret2;
3568
3569 ret2 = sync_write_pointer_for_zoned(sctx, base + offset,
3570 map->stripes[num].physical,
3571 physical_end);
3572 if (ret2)
3573 ret = ret2;
3574 }
3575
3576 return ret < 0 ? ret : 0;
3577}
3578
3579static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3580 struct btrfs_device *scrub_dev,
3581 u64 chunk_offset, u64 length,
3582 u64 dev_offset,
3583 struct btrfs_block_group *cache)
3584{
3585 struct btrfs_fs_info *fs_info = sctx->fs_info;
3586 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
3587 struct map_lookup *map;
3588 struct extent_map *em;
3589 int i;
3590 int ret = 0;
3591
3592 read_lock(&map_tree->lock);
3593 em = lookup_extent_mapping(map_tree, chunk_offset, 1);
3594 read_unlock(&map_tree->lock);
3595
3596 if (!em) {
3597 /*
3598 * Might have been an unused block group deleted by the cleaner
3599 * kthread or relocation.
3600 */
3601 spin_lock(&cache->lock);
3602 if (!cache->removed)
3603 ret = -EINVAL;
3604 spin_unlock(&cache->lock);
3605
3606 return ret;
3607 }
3608
3609 map = em->map_lookup;
3610 if (em->start != chunk_offset)
3611 goto out;
3612
3613 if (em->len < length)
3614 goto out;
3615
3616 for (i = 0; i < map->num_stripes; ++i) {
3617 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3618 map->stripes[i].physical == dev_offset) {
3619 ret = scrub_stripe(sctx, map, scrub_dev, i,
3620 chunk_offset, length, cache);
3621 if (ret)
3622 goto out;
3623 }
3624 }
3625out:
3626 free_extent_map(em);
3627
3628 return ret;
3629}
3630
3631static int finish_extent_writes_for_zoned(struct btrfs_root *root,
3632 struct btrfs_block_group *cache)
3633{
3634 struct btrfs_fs_info *fs_info = cache->fs_info;
3635 struct btrfs_trans_handle *trans;
3636
3637 if (!btrfs_is_zoned(fs_info))
3638 return 0;
3639
3640 btrfs_wait_block_group_reservations(cache);
3641 btrfs_wait_nocow_writers(cache);
3642 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
3643
3644 trans = btrfs_join_transaction(root);
3645 if (IS_ERR(trans))
3646 return PTR_ERR(trans);
3647 return btrfs_commit_transaction(trans);
3648}
3649
3650static noinline_for_stack
3651int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3652 struct btrfs_device *scrub_dev, u64 start, u64 end)
3653{
3654 struct btrfs_dev_extent *dev_extent = NULL;
3655 struct btrfs_path *path;
3656 struct btrfs_fs_info *fs_info = sctx->fs_info;
3657 struct btrfs_root *root = fs_info->dev_root;
3658 u64 length;
3659 u64 chunk_offset;
3660 int ret = 0;
3661 int ro_set;
3662 int slot;
3663 struct extent_buffer *l;
3664 struct btrfs_key key;
3665 struct btrfs_key found_key;
3666 struct btrfs_block_group *cache;
3667 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3668
3669 path = btrfs_alloc_path();
3670 if (!path)
3671 return -ENOMEM;
3672
3673 path->reada = READA_FORWARD;
3674 path->search_commit_root = 1;
3675 path->skip_locking = 1;
3676
3677 key.objectid = scrub_dev->devid;
3678 key.offset = 0ull;
3679 key.type = BTRFS_DEV_EXTENT_KEY;
3680
3681 while (1) {
3682 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3683 if (ret < 0)
3684 break;
3685 if (ret > 0) {
3686 if (path->slots[0] >=
3687 btrfs_header_nritems(path->nodes[0])) {
3688 ret = btrfs_next_leaf(root, path);
3689 if (ret < 0)
3690 break;
3691 if (ret > 0) {
3692 ret = 0;
3693 break;
3694 }
3695 } else {
3696 ret = 0;
3697 }
3698 }
3699
3700 l = path->nodes[0];
3701 slot = path->slots[0];
3702
3703 btrfs_item_key_to_cpu(l, &found_key, slot);
3704
3705 if (found_key.objectid != scrub_dev->devid)
3706 break;
3707
3708 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3709 break;
3710
3711 if (found_key.offset >= end)
3712 break;
3713
3714 if (found_key.offset < key.offset)
3715 break;
3716
3717 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3718 length = btrfs_dev_extent_length(l, dev_extent);
3719
3720 if (found_key.offset + length <= start)
3721 goto skip;
3722
3723 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3724
3725 /*
3726 * get a reference on the corresponding block group to prevent
3727 * the chunk from going away while we scrub it
3728 */
3729 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3730
3731		/* Some chunks are removed but not committed to disk yet,
3732		 * continue scrubbing. */
3733 if (!cache)
3734 goto skip;
3735
3736 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
3737 spin_lock(&cache->lock);
3738 if (!cache->to_copy) {
3739 spin_unlock(&cache->lock);
3740 btrfs_put_block_group(cache);
3741 goto skip;
3742 }
3743 spin_unlock(&cache->lock);
3744 }
3745
3746 /*
3747 * Make sure that while we are scrubbing the corresponding block
3748 * group doesn't get its logical address and its device extents
3749 * reused for another block group, which can possibly be of a
3750 * different type and different profile. We do this to prevent
3751 * false error detections and crashes due to bogus attempts to
3752 * repair extents.
3753 */
3754 spin_lock(&cache->lock);
3755 if (cache->removed) {
3756 spin_unlock(&cache->lock);
3757 btrfs_put_block_group(cache);
3758 goto skip;
3759 }
3760 btrfs_freeze_block_group(cache);
3761 spin_unlock(&cache->lock);
3762
3763 /*
3764		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
3765 * to avoid deadlock caused by:
3766 * btrfs_inc_block_group_ro()
3767 * -> btrfs_wait_for_commit()
3768 * -> btrfs_commit_transaction()
3769 * -> btrfs_scrub_pause()
3770 */
3771 scrub_pause_on(fs_info);
3772
3773 /*
3774 * Don't do chunk preallocation for scrub.
3775 *
3776 * This is especially important for SYSTEM bgs, or we can hit
3777 * -EFBIG from btrfs_finish_chunk_alloc() like:
3778 * 1. The only SYSTEM bg is marked RO.
3779 * Since SYSTEM bg is small, that's pretty common.
3780		 * 2. A new SYSTEM bg will be allocated,
3781		 *    since the regular path would allocate a new chunk.
3782		 * 3. The new SYSTEM bg is empty and will get cleaned up.
3783		 *    Before cleanup really happens, it's marked RO again.
3784		 * 4. The empty SYSTEM bg gets scrubbed
3785 * We go back to 2.
3786 *
3787		 * This can easily boost the number of SYSTEM chunks if the
3788		 * cleaner thread can't be triggered fast enough, using up all
3789		 * the space of btrfs_super_block::sys_chunk_array.
3790 *
3791 * While for dev replace, we need to try our best to mark block
3792 * group RO, to prevent race between:
3793 * - Write duplication
3794 * Contains latest data
3795 * - Scrub copy
3796 * Contains data from commit tree
3797 *
3798 * If target block group is not marked RO, nocow writes can
3799 * be overwritten by scrub copy, causing data corruption.
3800 * So for dev-replace, it's not allowed to continue if a block
3801 * group is not RO.
3802 */
3803 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
3804 if (!ret && sctx->is_dev_replace) {
3805 ret = finish_extent_writes_for_zoned(root, cache);
3806 if (ret) {
3807 btrfs_dec_block_group_ro(cache);
3808 scrub_pause_off(fs_info);
3809 btrfs_put_block_group(cache);
3810 break;
3811 }
3812 }
3813
3814 if (ret == 0) {
3815 ro_set = 1;
3816 } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
3817 /*
3818			 * btrfs_inc_block_group_ro() returns -ENOSPC when it
3819			 * fails to create a new chunk for metadata.
3820			 * This is not a problem for scrub, because
3821			 * metadata is always COWed, and our scrub pauses
3822			 * transaction commits.
3823 */
3824 ro_set = 0;
3825 } else if (ret == -ETXTBSY) {
3826 btrfs_warn(fs_info,
3827 "skipping scrub of block group %llu due to active swapfile",
3828 cache->start);
3829 scrub_pause_off(fs_info);
3830 ret = 0;
3831 goto skip_unfreeze;
3832 } else {
3833 btrfs_warn(fs_info,
3834 "failed setting block group ro: %d", ret);
3835 btrfs_unfreeze_block_group(cache);
3836 btrfs_put_block_group(cache);
3837 scrub_pause_off(fs_info);
3838 break;
3839 }
3840
3841 /*
3842	 * Now the target block group is marked RO, wait for nocow writes to
3843 * finish before dev-replace.
3844 * COW is fine, as COW never overwrites extents in commit tree.
3845 */
3846 if (sctx->is_dev_replace) {
3847 btrfs_wait_nocow_writers(cache);
3848 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
3849 cache->length);
3850 }
3851
3852 scrub_pause_off(fs_info);
3853 down_write(&dev_replace->rwsem);
3854 dev_replace->cursor_right = found_key.offset + length;
3855 dev_replace->cursor_left = found_key.offset;
3856 dev_replace->item_needs_writeback = 1;
3857 up_write(&dev_replace->rwsem);
3858
3859 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3860 found_key.offset, cache);
3861
3862 /*
3863		 * Flush and submit all pending read and write bios, and
3864		 * afterwards wait for them.
3865 * Note that in the dev replace case, a read request causes
3866 * write requests that are submitted in the read completion
3867 * worker. Therefore in the current situation, it is required
3868 * that all write requests are flushed, so that all read and
3869 * write requests are really completed when bios_in_flight
3870 * changes to 0.
3871 */
3872 sctx->flush_all_writes = true;
3873 scrub_submit(sctx);
3874 mutex_lock(&sctx->wr_lock);
3875 scrub_wr_submit(sctx);
3876 mutex_unlock(&sctx->wr_lock);
3877
3878 wait_event(sctx->list_wait,
3879 atomic_read(&sctx->bios_in_flight) == 0);
3880
3881 scrub_pause_on(fs_info);
3882
3883 /*
3884		 * This must be done before we decrease @scrub_paused, to
3885		 * make sure we don't block transaction commits while we
3886		 * are waiting for pending workers to finish.
3887 */
3888 wait_event(sctx->list_wait,
3889 atomic_read(&sctx->workers_pending) == 0);
3890 sctx->flush_all_writes = false;
3891
3892 scrub_pause_off(fs_info);
3893
3894 if (sctx->is_dev_replace &&
3895 !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
3896 cache, found_key.offset))
3897 ro_set = 0;
3898
3899 down_write(&dev_replace->rwsem);
3900 dev_replace->cursor_left = dev_replace->cursor_right;
3901 dev_replace->item_needs_writeback = 1;
3902 up_write(&dev_replace->rwsem);
3903
3904 if (ro_set)
3905 btrfs_dec_block_group_ro(cache);
3906
3907 /*
3908 * We might have prevented the cleaner kthread from deleting
3909 * this block group if it was already unused because we raced
3910 * and set it to RO mode first. So add it back to the unused
3911 * list, otherwise it might not ever be deleted unless a manual
3912 * balance is triggered or it becomes used and unused again.
3913 */
3914 spin_lock(&cache->lock);
3915 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3916 cache->used == 0) {
3917 spin_unlock(&cache->lock);
3918 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
3919 btrfs_discard_queue_work(&fs_info->discard_ctl,
3920 cache);
3921 else
3922 btrfs_mark_bg_unused(cache);
3923 } else {
3924 spin_unlock(&cache->lock);
3925 }
3926skip_unfreeze:
3927 btrfs_unfreeze_block_group(cache);
3928 btrfs_put_block_group(cache);
3929 if (ret)
3930 break;
3931 if (sctx->is_dev_replace &&
3932 atomic64_read(&dev_replace->num_write_errors) > 0) {
3933 ret = -EIO;
3934 break;
3935 }
3936 if (sctx->stat.malloc_errors > 0) {
3937 ret = -ENOMEM;
3938 break;
3939 }
3940skip:
3941 key.offset = found_key.offset + length;
3942 btrfs_release_path(path);
3943 }
3944
3945 btrfs_free_path(path);
3946
3947 return ret;
3948}
3949
3950static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3951 struct btrfs_device *scrub_dev)
3952{
3953 int i;
3954 u64 bytenr;
3955 u64 gen;
3956 int ret;
3957 struct btrfs_fs_info *fs_info = sctx->fs_info;
3958
3959 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3960 return -EROFS;
3961
3962	/* Seed devices of a new filesystem have their own generation. */
3963 if (scrub_dev->fs_devices != fs_info->fs_devices)
3964 gen = scrub_dev->generation;
3965 else
3966 gen = fs_info->last_trans_committed;
3967
3968 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3969 bytenr = btrfs_sb_offset(i);
3970 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3971 scrub_dev->commit_total_bytes)
3972 break;
3973 if (!btrfs_check_super_location(scrub_dev, bytenr))
3974 continue;
3975
3976 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3977 scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3978 NULL, bytenr);
3979 if (ret)
3980 return ret;
3981 }
3982 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3983
3984 return 0;
3985}
3986
3987static void scrub_workers_put(struct btrfs_fs_info *fs_info)
3988{
3989 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
3990 &fs_info->scrub_lock)) {
3991 struct btrfs_workqueue *scrub_workers = NULL;
3992 struct btrfs_workqueue *scrub_wr_comp = NULL;
3993 struct btrfs_workqueue *scrub_parity = NULL;
3994
3995 scrub_workers = fs_info->scrub_workers;
3996 scrub_wr_comp = fs_info->scrub_wr_completion_workers;
3997 scrub_parity = fs_info->scrub_parity_workers;
3998
3999 fs_info->scrub_workers = NULL;
4000 fs_info->scrub_wr_completion_workers = NULL;
4001 fs_info->scrub_parity_workers = NULL;
4002 mutex_unlock(&fs_info->scrub_lock);
4003
4004 btrfs_destroy_workqueue(scrub_workers);
4005 btrfs_destroy_workqueue(scrub_wr_comp);
4006 btrfs_destroy_workqueue(scrub_parity);
4007 }
4008}
4009
4010/*
4011 * Get a reference count on fs_info->scrub_workers. Start workers if necessary.
4012 */
4013static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
4014 int is_dev_replace)
4015{
4016 struct btrfs_workqueue *scrub_workers = NULL;
4017 struct btrfs_workqueue *scrub_wr_comp = NULL;
4018 struct btrfs_workqueue *scrub_parity = NULL;
4019 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
4020 int max_active = fs_info->thread_pool_size;
4021 int ret = -ENOMEM;
4022
4023 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
4024 return 0;
4025
4026 scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
4027 is_dev_replace ? 1 : max_active, 4);
4028 if (!scrub_workers)
4029 goto fail_scrub_workers;
4030
4031 scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
4032 max_active, 2);
4033 if (!scrub_wr_comp)
4034 goto fail_scrub_wr_completion_workers;
4035
4036 scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
4037 max_active, 2);
4038 if (!scrub_parity)
4039 goto fail_scrub_parity_workers;
4040
4041 mutex_lock(&fs_info->scrub_lock);
4042 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
4043 ASSERT(fs_info->scrub_workers == NULL &&
4044 fs_info->scrub_wr_completion_workers == NULL &&
4045 fs_info->scrub_parity_workers == NULL);
4046 fs_info->scrub_workers = scrub_workers;
4047 fs_info->scrub_wr_completion_workers = scrub_wr_comp;
4048 fs_info->scrub_parity_workers = scrub_parity;
4049 refcount_set(&fs_info->scrub_workers_refcnt, 1);
4050 mutex_unlock(&fs_info->scrub_lock);
4051 return 0;
4052 }
4053 /* Other thread raced in and created the workers for us */
4054 refcount_inc(&fs_info->scrub_workers_refcnt);
4055 mutex_unlock(&fs_info->scrub_lock);
4056
4057 ret = 0;
4058 btrfs_destroy_workqueue(scrub_parity);
4059fail_scrub_parity_workers:
4060 btrfs_destroy_workqueue(scrub_wr_comp);
4061fail_scrub_wr_completion_workers:
4062 btrfs_destroy_workqueue(scrub_workers);
4063fail_scrub_workers:
4064 return ret;
4065}
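
/*
 * A sketch of the pairing assumed by the refcounting above: every
 * successful scrub_workers_get() must be matched by exactly one
 * scrub_workers_put(), as btrfs_scrub_dev() below does:
 *
 *   ret = scrub_workers_get(fs_info, is_dev_replace);
 *   if (ret)
 *           goto out_free_ctx;
 *   ...
 *   scrub_workers_put(fs_info);
 *
 * The put that drops the refcount to zero tears down the workqueues.
 */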
4066
4067int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
4068 u64 end, struct btrfs_scrub_progress *progress,
4069 int readonly, int is_dev_replace)
4070{
4071 struct scrub_ctx *sctx;
4072 int ret;
4073 struct btrfs_device *dev;
4074 unsigned int nofs_flag;
4075
4076 if (btrfs_fs_closing(fs_info))
4077 return -EAGAIN;
4078
4079 if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
4080 /*
4081		 * In this case scrub is unable to calculate the checksum
4082		 * the way it is implemented. Do not handle this
4083 * situation at all because it won't ever happen.
4084 */
4085 btrfs_err(fs_info,
4086 "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
4087 fs_info->nodesize,
4088 BTRFS_STRIPE_LEN);
4089 return -EINVAL;
4090 }
4091
4092 if (fs_info->nodesize >
4093 PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
4094 fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
4095 /*
4096 * would exhaust the array bounds of pagev member in
4097 * struct scrub_block
4098 */
4099 btrfs_err(fs_info,
4100 "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
4101 fs_info->nodesize,
4102 SCRUB_MAX_PAGES_PER_BLOCK,
4103 fs_info->sectorsize,
4104 SCRUB_MAX_PAGES_PER_BLOCK);
4105 return -EINVAL;
4106 }
4107
4108 /* Allocate outside of device_list_mutex */
4109 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
4110 if (IS_ERR(sctx))
4111 return PTR_ERR(sctx);
4112
4113 ret = scrub_workers_get(fs_info, is_dev_replace);
4114 if (ret)
4115 goto out_free_ctx;
4116
4117 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4118 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
4119 if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
4120 !is_dev_replace)) {
4121 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4122 ret = -ENODEV;
4123 goto out;
4124 }
4125
4126 if (!is_dev_replace && !readonly &&
4127 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
4128 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4129 btrfs_err_in_rcu(fs_info,
4130 "scrub on devid %llu: filesystem on %s is not writable",
4131 devid, rcu_str_deref(dev->name));
4132 ret = -EROFS;
4133 goto out;
4134 }
4135
4136 mutex_lock(&fs_info->scrub_lock);
4137 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4138 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
4139 mutex_unlock(&fs_info->scrub_lock);
4140 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4141 ret = -EIO;
4142 goto out;
4143 }
4144
4145 down_read(&fs_info->dev_replace.rwsem);
4146 if (dev->scrub_ctx ||
4147 (!is_dev_replace &&
4148 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
4149 up_read(&fs_info->dev_replace.rwsem);
4150 mutex_unlock(&fs_info->scrub_lock);
4151 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4152 ret = -EINPROGRESS;
4153 goto out;
4154 }
4155 up_read(&fs_info->dev_replace.rwsem);
4156
4157 sctx->readonly = readonly;
4158 dev->scrub_ctx = sctx;
4159 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4160
4161 /*
4162	 * By checking @scrub_pause_req here, we can avoid a
4163	 * race between transaction commit and scrubbing.
4164 */
4165 __scrub_blocked_if_needed(fs_info);
4166 atomic_inc(&fs_info->scrubs_running);
4167 mutex_unlock(&fs_info->scrub_lock);
4168
4169 /*
4170 * In order to avoid deadlock with reclaim when there is a transaction
4171 * trying to pause scrub, make sure we use GFP_NOFS for all the
4172	 * allocations done at scrub_pages() and scrub_pages_for_parity()
4173 * invoked by our callees. The pausing request is done when the
4174 * transaction commit starts, and it blocks the transaction until scrub
4175 * is paused (done at specific points at scrub_stripe() or right above
4176 * before incrementing fs_info->scrubs_running).
4177 */
4178 nofs_flag = memalloc_nofs_save();
4179 if (!is_dev_replace) {
4180 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
4181 /*
4182		 * By holding the device list mutex, we can't race with the
4183		 * super block writes kicked off by a log tree sync.
4184 */
4185 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4186 ret = scrub_supers(sctx, dev);
4187 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4188 }
4189
4190 if (!ret)
4191 ret = scrub_enumerate_chunks(sctx, dev, start, end);
4192 memalloc_nofs_restore(nofs_flag);
4193
4194 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
4195 atomic_dec(&fs_info->scrubs_running);
4196 wake_up(&fs_info->scrub_pause_wait);
4197
4198 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
4199
4200 if (progress)
4201 memcpy(progress, &sctx->stat, sizeof(*progress));
4202
4203 if (!is_dev_replace)
4204 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
4205 ret ? "not finished" : "finished", devid, ret);
4206
4207 mutex_lock(&fs_info->scrub_lock);
4208 dev->scrub_ctx = NULL;
4209 mutex_unlock(&fs_info->scrub_lock);
4210
4211 scrub_workers_put(fs_info);
4212 scrub_put_ctx(sctx);
4213
4214 return ret;
4215out:
4216 scrub_workers_put(fs_info);
4217out_free_ctx:
4218 scrub_free_ctx(sctx);
4219
4220 return ret;
4221}
4222
4223void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
4224{
4225 mutex_lock(&fs_info->scrub_lock);
4226 atomic_inc(&fs_info->scrub_pause_req);
4227 while (atomic_read(&fs_info->scrubs_paused) !=
4228 atomic_read(&fs_info->scrubs_running)) {
4229 mutex_unlock(&fs_info->scrub_lock);
4230 wait_event(fs_info->scrub_pause_wait,
4231 atomic_read(&fs_info->scrubs_paused) ==
4232 atomic_read(&fs_info->scrubs_running));
4233 mutex_lock(&fs_info->scrub_lock);
4234 }
4235 mutex_unlock(&fs_info->scrub_lock);
4236}
4237
4238void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
4239{
4240 atomic_dec(&fs_info->scrub_pause_req);
4241 wake_up(&fs_info->scrub_pause_wait);
4242}
4243
4244int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
4245{
4246 mutex_lock(&fs_info->scrub_lock);
4247 if (!atomic_read(&fs_info->scrubs_running)) {
4248 mutex_unlock(&fs_info->scrub_lock);
4249 return -ENOTCONN;
4250 }
4251
4252 atomic_inc(&fs_info->scrub_cancel_req);
4253 while (atomic_read(&fs_info->scrubs_running)) {
4254 mutex_unlock(&fs_info->scrub_lock);
4255 wait_event(fs_info->scrub_pause_wait,
4256 atomic_read(&fs_info->scrubs_running) == 0);
4257 mutex_lock(&fs_info->scrub_lock);
4258 }
4259 atomic_dec(&fs_info->scrub_cancel_req);
4260 mutex_unlock(&fs_info->scrub_lock);
4261
4262 return 0;
4263}
4264
4265int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
4266{
4267 struct btrfs_fs_info *fs_info = dev->fs_info;
4268 struct scrub_ctx *sctx;
4269
4270 mutex_lock(&fs_info->scrub_lock);
4271 sctx = dev->scrub_ctx;
4272 if (!sctx) {
4273 mutex_unlock(&fs_info->scrub_lock);
4274 return -ENOTCONN;
4275 }
4276 atomic_inc(&sctx->cancel_req);
4277 while (dev->scrub_ctx) {
4278 mutex_unlock(&fs_info->scrub_lock);
4279 wait_event(fs_info->scrub_pause_wait,
4280 dev->scrub_ctx == NULL);
4281 mutex_lock(&fs_info->scrub_lock);
4282 }
4283 mutex_unlock(&fs_info->scrub_lock);
4284
4285 return 0;
4286}
4287
4288int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
4289 struct btrfs_scrub_progress *progress)
4290{
4291 struct btrfs_device *dev;
4292 struct scrub_ctx *sctx = NULL;
4293
4294 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4295 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
4296 if (dev)
4297 sctx = dev->scrub_ctx;
4298 if (sctx)
4299 memcpy(progress, &sctx->stat, sizeof(*progress));
4300 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
4301
4302 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
4303}
4304
4305static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
4306 u64 extent_logical, u32 extent_len,
4307 u64 *extent_physical,
4308 struct btrfs_device **extent_dev,
4309 int *extent_mirror_num)
4310{
4311 u64 mapped_length;
4312 struct btrfs_bio *bbio = NULL;
4313 int ret;
4314
4315 mapped_length = extent_len;
4316 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
4317 &mapped_length, &bbio, 0);
4318 if (ret || !bbio || mapped_length < extent_len ||
4319 !bbio->stripes[0].dev->bdev) {
4320 btrfs_put_bbio(bbio);
4321 return;
4322 }
4323
4324 *extent_physical = bbio->stripes[0].physical;
4325 *extent_mirror_num = bbio->mirror_num;
4326 *extent_dev = bbio->stripes[0].dev;
4327 btrfs_put_bbio(bbio);
4328}