// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "file-item.h"
#include "btrfs_inode.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

/*
 * A bvec like structure to present a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};

static void rmw_rbio_work(struct work_struct *work);
static void rmw_rbio_work_locked(struct work_struct *work);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check);
static void scrub_rbio_work_locked(struct work_struct *work);

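/* Free the arrays hanging off the rbio, but not the rbio itself. */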
static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
	bitmap_free(rbio->error_bitmap);
	kfree(rbio->stripe_pages);
	kfree(rbio->bio_sectors);
	kfree(rbio->stripe_sectors);
	kfree(rbio->finish_pointers);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	free_raid_bio_pointers(rbio);
	kfree(rbio);
}

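/* Queue the rbio's work item on the filesystem's rmw workqueue. */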
static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
	INIT_WORK(&rbio->work, work_func);
	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_sectors array into the stripe_pages array. We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page) {
			/*
			 * Even if the sector is not covered by bio, if it is
			 * a data sector it should still be uptodate as it is
			 * read from disk.
			 */
			if (i < rbio->nr_data * rbio->stripe_nsectors)
				ASSERT(rbio->stripe_sectors[i].uptodate);
			continue;
		}

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->raid_map[0];

	/*
	 * we shift down quite a bit. We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

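/* Return true if every sector of stripe page @page_nr is uptodate. */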
static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
				       unsigned int page_nr)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	ASSERT(page_nr < rbio->nr_pages);

	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page;
	     i++) {
		if (!rbio->stripe_sectors[i].uptodate)
			return false;
	}
	return true;
}

/*
 * Update the stripe_sectors[] array to use correct page and pgoff
 *
 * Should be called every time any page pointer in stripe_pages[] got modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}

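/* Move the page at @page_nr from @src to @dest and mark its sectors uptodate. */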
static void steal_rbio_page(struct btrfs_raid_bio *src,
			    struct btrfs_raid_bio *dest, int page_nr)
{
	const u32 sectorsize = src->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	if (dest->stripe_pages[page_nr])
		__free_page(dest->stripe_pages[page_nr]);
	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
	src->stripe_pages[page_nr] = NULL;

	/* Also update the sector->uptodate bits. */
	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page; i++)
		dest->stripe_sectors[i].uptodate = true;
}

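/* Return true if the page at @page_nr belongs to a data stripe (not P/Q). */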
static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
	const int sector_nr = (page_nr << PAGE_SHIFT) >>
			      rbio->bioc->fs_info->sectorsize_bits;

	/*
	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
	 * we won't have a page which is half data half parity.
	 *
	 * Thus if the first sector of the page belongs to data stripes, then
	 * the full page belongs to data stripes.
	 */
	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_sectors[] which are referring to
 * the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		struct page *p = src->stripe_pages[i];

		/*
		 * We don't need to steal P/Q pages as they will always be
		 * regenerated for RMW or full write anyway.
		 */
		if (!is_data_stripe_page(src, i))
			continue;

		/*
		 * If @src already has RBIO_CACHE_READY_BIT, it should have
		 * all data stripe pages present and uptodate.
		 */
		ASSERT(p);
		ASSERT(full_page_sectors_uptodate(src, i));
		steal_rbio_page(src, dest, i);
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination. The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	/* Also inherit the bitmaps from @victim. */
	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
		  dest->stripe_nsectors);
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache. The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/*
	 * hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/*
		 * if the bio list isn't empty, this rbio is
		 * still involved in an IO. We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache. It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api. It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
		ret = 0;
	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us. We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * For parity scrub we need to read the full stripe from the drive,
	 * then check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    last->operation == BTRFS_RBIO_READ_REBUILD)
		return 0;

	return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}

/*
 * The first stripe in the table for a logical address
 * has the lock. rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet. The rbio is given
 * the lock and 0 is returned. The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner. The rbio is freed and the IO will
 * start automatically along with the existing rbio. 1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list. When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission. If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones. We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now.
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks.
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		free_raid_bio(freeit);
	return ret;
}

static void recover_rbio_work_locked(struct work_struct *work);

/*
 * called as rmw or parity rebuild is completed. If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, recover_rbio_work_locked);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, recover_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_rbio_work_locked);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

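/* End every bio on the singly linked chain starting at @cur with status @err. */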
static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;

	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so there will be no new bio for this
	 * rbio.
	 */
	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:               The raid bio
 * @stripe_nr:          Stripe number, valid range [0, real_stripes)
 * @sector_nr:          Sector number inside the stripe,
 *                      valid range [0, stripe_nsectors)
 * @bio_list_only:      Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock_irq(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock_irq(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock_irq(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}

/*
 * allocation and initial setup for the btrfs_raid_bio. Note that this does
 * not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors =
		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;

	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
	/*
	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
	 * (at most 16) should be no larger than BITS_PER_LONG.
	 */
	ASSERT(stripe_nsectors <= BITS_PER_LONG);

	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);
	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
				     GFP_NOFS);
	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				    GFP_NOFS);
	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				       GFP_NOFS);
	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);

	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
	    !rbio->finish_pointers || !rbio->error_bitmap) {
		free_raid_bio_pointers(rbio);
		kfree(rbio);
		return ERR_PTR(-ENOMEM);
	}

	bio_list_init(&rbio->bio_list);
	init_waitqueue_head(&rbio->io_wait);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	btrfs_get_bioc(bioc);
	rbio->bioc = bioc;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->stripes_pending, 0);

	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);

	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Return the total number of errors found in the vertical stripe of @sector_nr.
 *
 * @faila and @failb will also be updated to the first and second stripe
 * number of the errors.
 */
static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
				     int *faila, int *failb)
{
	int stripe_nr;
	int found_errors = 0;

	if (faila || failb) {
		/*
		 * Both @faila and @failb should be valid pointers if any of
		 * them is specified.
		 */
		ASSERT(faila && failb);
		*faila = -1;
		*failb = -1;
	}

	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;

		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
			found_errors++;
			if (faila) {
				/* Update faila and failb. */
				if (*faila < 0)
					*faila = stripe_nr;
				else if (*failb < 0)
					*failb = stripe_nr;
			}
		}
	}
	return found_errors;
}

/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      enum req_op op)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev) {
		int found_errors;

		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
			rbio->error_bitmap);

		/* Check if we have reached tolerance early. */
		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 NULL, NULL);
		if (found_errors > rbio->bioc->max_errors)
			return -EIO;
		return 0;
	}

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev,
			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
			op, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> 9;
	bio->bi_private = rbio;

	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
	bio_list_add(bio_list, bio);
	return 0;
}

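/* Record the page/pgoff of every sector covered by @bio into rbio->bio_sectors. */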
static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->raid_map[0];

	bio_for_each_segment(bvec, bio, iter) {
		u32 bvec_offset;

		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
		     bvec_offset += sectorsize, offset += sectorsize) {
			int index = offset / sectorsize;
			struct sector_ptr *sector = &rbio->bio_sectors[index];

			sector->page = bvec.bv_page;
			sector->pgoff = bvec.bv_offset + bvec_offset;
			ASSERT(sector->pgoff < PAGE_SIZE);
		}
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list)
		index_one_bio(rbio, bio);

	spin_unlock_irq(&rbio->bio_list_lock);
}

static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
			       struct raid56_bio_trace_info *trace_info)
{
	const struct btrfs_io_context *bioc = rbio->bioc;
	int i;

	ASSERT(bioc);

	/* We rely on bio->bi_bdev to find the stripe number. */
	if (!bio->bi_bdev)
		goto not_found;

	for (i = 0; i < bioc->num_stripes; i++) {
		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
			continue;
		trace_info->stripe_nr = i;
		trace_info->devid = bioc->stripes[i].dev->devid;
		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
				     bioc->stripes[i].physical;
		return;
	}

not_found:
	trace_info->devid = -1;
	trace_info->offset = -1;
	trace_info->stripe_nr = -1;
}

/* Generate PQ for one vertical stripe. */
static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
{
	void **pointers = rbio->finish_pointers;
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct sector_ptr *sector;
	int stripe;
	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;

	/* First collect one sector from each data stripe */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
		pointers[stripe] = kmap_local_page(sector->page) +
				   sector->pgoff;
	}

	/* Then add the parity stripe */
	sector = rbio_pstripe_sector(rbio, sectornr);
	sector->uptodate = 1;
	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;

	if (has_qstripe) {
		/*
		 * RAID6, add the qstripe and call the library function
		 * to fill in our p/q
		 */
		sector = rbio_qstripe_sector(rbio, sectornr);
		sector->uptodate = 1;
		pointers[stripe++] = kmap_local_page(sector->page) +
				     sector->pgoff;

		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
					pointers);
	} else {
		/* raid5 */
		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
	}
	for (stripe = stripe - 1; stripe >= 0; stripe--)
		kunmap_local(pointers[stripe]);
}

static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
				   struct bio_list *bio_list)
{
	struct bio *bio;
	/* The total sector number inside the full stripe. */
	int total_sector_nr;
	int sectornr;
	int stripe;
	int ret;

	ASSERT(bio_list_size(bio_list) == 0);

	/* We should have at least one data sector. */
	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));

	/*
	 * Reset errors, as we may have errors inherited from a degraded
	 * write.
	 */
	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/*
	 * Start assembly. Make bios for everything from the higher layers (the
	 * bio_list in our rbio) and our P/Q. Ignore everything else.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	if (likely(!rbio->bioc->num_tgtdevs))
		return 0;

	/* Make a copy for the replace target device. */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		if (!rbio->bioc->tgtdev_map[stripe]) {
			/*
			 * We can skip the whole stripe completely, note
			 * total_sector_nr will be increased by one anyway.
			 */
			ASSERT(sectornr == 0);
			total_sector_nr += rbio->stripe_nsectors - 1;
			continue;
		}

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector,
					 rbio->bioc->tgtdev_map[stripe],
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	return 0;
error:
	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
	return -EIO;
}

static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->raid_map[0];
	int total_nr_sector = offset >> fs_info->sectorsize_bits;

	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);

	bitmap_set(rbio->error_bitmap, total_nr_sector,
		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

	/*
	 * Special handling for raid56_alloc_missing_rbio() used by
	 * scrub/replace. Unlike call path in raid56_parity_recover(), they
	 * pass an empty bio here. Thus we have to find out the missing device
	 * and mark the stripe error instead.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bool found_missing = false;
		int stripe_nr;

		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
				found_missing = true;
				bitmap_set(rbio->error_bitmap,
					   stripe_nr * rbio->stripe_nsectors,
					   rbio->stripe_nsectors);
			}
		}
		ASSERT(found_missing);
	}
}

/*
 * For subpage case, we can no longer set page Uptodate directly for
 * stripe_pages[], thus we need to locate the sector.
 */
static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
					     struct page *page,
					     unsigned int pgoff)
{
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		if (sector->page == page && sector->pgoff == pgoff)
			return sector;
	}
	return NULL;
}

/*
 * this sets each page in the bio uptodate. It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct sector_ptr *sector;
		int pgoff;

		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
		     pgoff += sectorsize) {
			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
			ASSERT(sector);
			if (sector)
				sector->uptodate = 1;
		}
	}
}

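/*
 * Return the total sector number matching the first bvec of @bio, looking
 * through both stripe_sectors[] and bio_sectors[].
 */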
static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct bio_vec *bv = bio_first_bvec_all(bio);
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector;

		sector = &rbio->stripe_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
		sector = &rbio->bio_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
	}
	ASSERT(i < rbio->nr_sectors);
	return i;
}

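/* Mark every sector covered by the failed @bio in rbio->error_bitmap. */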
static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	u32 bio_size = 0;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	int i;

	bio_for_each_segment_all(bvec, bio, iter_all)
		bio_size += bvec->bv_len;

	/*
	 * Since we can have multiple bios touching the error_bitmap, we cannot
	 * call bitmap_set() without protection.
	 *
	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
	 */
	for (i = total_sector_nr; i < total_sector_nr +
	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
		set_bit(i, rbio->error_bitmap);
}

/* Verify the data sectors at read time. */
static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
				    struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* No data csum for the whole stripe, no need to verify. */
	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return;

	/* P/Q stripes, they have no data csum to verify against. */
	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		int bv_offset;

		for (bv_offset = bvec->bv_offset;
		     bv_offset < bvec->bv_offset + bvec->bv_len;
		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
			u8 csum_buf[BTRFS_CSUM_SIZE];
			u8 *expected_csum = rbio->csum_buf +
					    total_sector_nr * fs_info->csum_size;
			int ret;

			/* No csum for this sector, skip to the next sector. */
			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
				continue;

			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
					bv_offset, csum_buf, expected_csum);
			if (ret < 0)
				set_bit(total_sector_nr, rbio->error_bitmap);
		}
	}
}

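/* Read endio: record errors or mark sectors uptodate, then wake up the waiter. */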
static void raid_wait_read_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status) {
		rbio_update_error_bitmap(rbio, bio);
	} else {
		set_bio_pages_uptodate(rbio, bio);
		verify_bio_data_sectors(rbio, bio);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

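/* Submit all queued read bios, with stripes_pending tracking the completions. */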
static void submit_read_bios(struct btrfs_raid_bio *rbio,
			     struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_read_end_io;

		if (trace_raid56_scrub_read_recover_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}
}

static int rmw_assemble_read_bios(struct btrfs_raid_bio *rbio,
				  struct bio_list *bio_list)
{
	struct bio *bio;
	int total_sector_nr;
	int ret = 0;

	ASSERT(bio_list_size(bio_list) == 0);

	/*
	 * Build a list of bios to read all sectors (including data and P/Q).
	 *
	 * This behavior is to compensate for the later csum verification and
	 * recovery.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, bio_list, sector,
					 stripe, sectornr, REQ_OP_READ);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
	return ret;
}

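/* Allocate pages for the data stripes only, leaving P/Q untouched. */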
static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list. When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct work_struct work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						       plug_list);
	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						       plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

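/* Unplug callback: sort the plugged rbios, merge what we can and start the rest. */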
static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	list_sort(NULL, &plug->rbio_list, plug_cmp);

	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* We have a full stripe, queue it down. */
			start_async_work(cur, rmw_rbio_work);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				free_raid_bio(cur);
				continue;
			}
			start_async_work(last, rmw_rbio_work);
		}
		last = cur;
	}
	if (last)
		start_async_work(last, rmw_rbio_work);
	kfree(plug);
}

/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const u64 full_stripe_start = rbio->bioc->raid_map[0];
	const u32 orig_len = orig_bio->bi_iter.bi_size;
	const u32 sectorsize = fs_info->sectorsize;
	u64 cur_logical;

	ASSERT(orig_logical >= full_stripe_start &&
	       orig_logical + orig_len <= full_stripe_start +
	       rbio->nr_data * BTRFS_STRIPE_LEN);

	bio_list_add(&rbio->bio_list, orig_bio);
	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;

	/* Update the dbitmap. */
	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
	     cur_logical += sectorsize) {
		int bit = ((u32)(cur_logical - full_stripe_start) >>
			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;

		set_bit(bit, &rbio->dbitmap);
	}
}

/*
 * our main entry point for writes from the rest of the FS.
 */
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret = 0;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		ret = PTR_ERR(rbio);
		goto fail;
	}
	rbio->operation = BTRFS_RBIO_WRITE;
	rbio_add_bio(rbio, bio);

	/*
	 * Don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio))
		goto queue_rbio;

	cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		return;
	}
queue_rbio:
	/*
	 * Either we don't have any existing plug, or we're doing a full stripe,
	 * so we can queue the rmw work now.
	 */
	start_async_work(rbio, rmw_rbio_work);

	return;

fail:
	bio->bi_status = errno_to_blk_status(ret);
	bio_endio(bio);
}

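/*
 * Verify one data sector against its csum. Returns 0 if it matches or if
 * there is no csum to check against, <0 on a csum mismatch.
 */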
1712static int verify_one_sector(struct btrfs_raid_bio *rbio,
1713 int stripe_nr, int sector_nr)
1714{
1715 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1716 struct sector_ptr *sector;
1717 u8 csum_buf[BTRFS_CSUM_SIZE];
1718 u8 *csum_expected;
1719 int ret;
1720
1721 if (!rbio->csum_bitmap || !rbio->csum_buf)
1722 return 0;
1723
1724 /* No way to verify P/Q as they are not covered by data csum. */
1725 if (stripe_nr >= rbio->nr_data)
1726 return 0;
1727 /*
1728 * If we're rebuilding a read, we have to use pages from the
1729 * bio list if possible.
1730 */
1731 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1732 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
1733 sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1734 } else {
1735 sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1736 }
1737
1738 ASSERT(sector->page);
1739
1740 csum_expected = rbio->csum_buf +
1741 (stripe_nr * rbio->stripe_nsectors + sector_nr) *
1742 fs_info->csum_size;
1743 ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
1744 csum_buf, csum_expected);
1745 return ret;
1746}
1747
1748/*
1749 * Recover a vertical stripe specified by @sector_nr.
1750 * @*pointers are the pre-allocated pointers by the caller, so we don't
1751 * need to allocate/free the pointers again and again.
1752 */
1753static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1754 void **pointers, void **unmap_array)
1755{
1756 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1757 struct sector_ptr *sector;
1758 const u32 sectorsize = fs_info->sectorsize;
1759 int found_errors;
1760 int faila;
1761 int failb;
1762 int stripe_nr;
1763 int ret = 0;
1764
1765 /*
1766 * Now we just use bitmap to mark the horizontal stripes in
1767 * which we have data when doing parity scrub.
1768 */
1769 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1770 !test_bit(sector_nr, &rbio->dbitmap))
1771 return 0;
1772
1773 found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1774 &failb);
1775 /*
1776 * No errors in the veritical stripe, skip it. Can happen for recovery
1777 * which only part of a stripe failed csum check.
1778 */
1779 if (!found_errors)
1780 return 0;
1781
1782 if (found_errors > rbio->bioc->max_errors)
1783 return -EIO;
1784
1785 /*
1786 * Setup our array of pointers with sectors from each stripe
1787 *
1788 * NOTE: store a duplicate array of pointers to preserve the
1789 * pointer order.
1790 */
1791 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1792 /*
1793 * If we're rebuilding a read, we have to use pages from the
1794 * bio list if possible.
1795 */
1796 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1797 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
1798 sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1799 } else {
1800 sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1801 }
1802 ASSERT(sector->page);
1803 pointers[stripe_nr] = kmap_local_page(sector->page) +
1804 sector->pgoff;
1805 unmap_array[stripe_nr] = pointers[stripe_nr];
1806 }
1807
1808 /* All raid6 handling here */
1809 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1810 /* Single failure, rebuild from parity raid5 style */
1811 if (failb < 0) {
1812 if (faila == rbio->nr_data)
1813 /*
1814 * Just the P stripe has failed, without
1815 * a bad data or Q stripe.
1816 * We have nothing to do, just skip the
1817 * recovery for this stripe.
1818 */
1819 goto cleanup;
1820 /*
1821 * a single failure in raid6 is rebuilt
1822 * in the pstripe code below
1823 */
1824 goto pstripe;
1825 }
1826
1827 /*
1828 * If the q stripe is failed, do a pstripe reconstruction from
1829 * the xors.
1830 * If both the q stripe and the P stripe are failed, we're
1831 * here due to a crc mismatch and we can't give them the
1832 * data they want.
1833 */
1834 if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
1835 if (rbio->bioc->raid_map[faila] ==
1836 RAID5_P_STRIPE)
1837 /*
1838 * Only P and Q are corrupted.
1839 * We only care about data stripes recovery,
1840 * can skip this vertical stripe.
1841 */
1842 goto cleanup;
1843 /*
1844 * Otherwise we have one bad data stripe and
1845 * a good P stripe. raid5!
1846 */
1847 goto pstripe;
1848 }
1849
1850 if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
1851 raid6_datap_recov(rbio->real_stripes, sectorsize,
1852 faila, pointers);
1853 } else {
1854 raid6_2data_recov(rbio->real_stripes, sectorsize,
1855 faila, failb, pointers);
1856 }
1857 } else {
1858 void *p;
1859
1860 /* Rebuild from P stripe here (raid5 or raid6). */
1861 ASSERT(failb == -1);
1862pstripe:
1863 /* Copy parity block into failed block to start with */
1864 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
1865
1866 /* Rearrange the pointer array */
1867 p = pointers[faila];
1868 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
1869 stripe_nr++)
1870 pointers[stripe_nr] = pointers[stripe_nr + 1];
1871 pointers[rbio->nr_data - 1] = p;
1872
1873 /* Xor in the rest */
1874 run_xor(pointers, rbio->nr_data - 1, sectorsize);
1875
1876 }

	/*
	 * No matter whether this is RMW or recovery, we should have all
	 * failed sectors repaired in the vertical stripe, thus they are now
	 * uptodate.
	 * Especially if we decide to cache the rbio, we need to have at
	 * least all data sectors uptodate.
	 *
	 * If possible, also check whether the repaired sector matches its
	 * data checksum.
	 */
	if (faila >= 0) {
		ret = verify_one_sector(rbio, faila, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, faila, sector_nr);
		sector->uptodate = 1;
	}
	if (failb >= 0) {
		ret = verify_one_sector(rbio, failb, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, failb, sector_nr);
		sector->uptodate = 1;
	}

cleanup:
	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
		kunmap_local(unmap_array[stripe_nr]);
	return ret;
}

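/*
 * Rebuild all failed sectors of the rbio, one vertical stripe (one sector
 * from each stripe) at a time, driven by the error bitmap the caller has
 * set up.
 */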
static int recover_sectors(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sectornr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores a copy of the pointers that does not get
	 * reordered during reconstruction, so that kunmap_local() works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
		if (ret < 0)
			break;
	}

out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static int recover_assemble_read_bios(struct btrfs_raid_bio *rbio,
				      struct bio_list *bio_list)
{
	struct bio *bio;
	int total_sector_nr;
	int ret = 0;

	ASSERT(bio_list_size(bio_list) == 0);
	/*
	 * Read everything that hasn't failed. However this time we will
	 * not trust any cached sector: we may have read out stale data,
	 * while the higher layer is not reading that stale part.
	 *
	 * So here we always re-read everything in the recovery path.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/*
		 * Skip the range which has an error. It can be a range which
		 * is marked error (for csum mismatch), or it can be a missing
		 * device.
		 */
		if (!rbio->bioc->stripes[stripe].dev->bdev ||
		    test_bit(total_sector_nr, rbio->error_bitmap)) {
			/*
			 * Also set the error bit for a missing device, which
			 * may not yet have its error bit set.
			 */
			set_bit(total_sector_nr, rbio->error_bitmap);
			continue;
		}

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret < 0)
			goto error;
	}
	return 0;
error:
	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);

	return -EIO;
}

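/*
 * The core of recovery: read every sector that has not failed, then rebuild
 * the failed ones from the surviving data and parity.
 */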
static int recover_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	/*
	 * Either we're doing recovery for a read failure or a degraded write;
	 * the caller should have set the error bitmap correctly.
	 */
	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
	bio_list_init(&bio_list);

	/* For recovery, we need to read all sectors including P/Q. */
	ret = alloc_rbio_pages(rbio);
	if (ret < 0)
		goto out;

	index_rbio_pages(rbio);

	ret = recover_assemble_read_bios(rbio, &bio_list);
	if (ret < 0)
		goto out;

	submit_read_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	ret = recover_sectors(rbio);

out:
	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return ret;
}

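/*
 * Work item entries for recovery. The plain _work variant still has to take
 * the stripe lock; if lock_stripe_add() returns non-zero, the rbio was
 * merged into or queued behind an existing lock holder, which will run it
 * for us.
 */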
static void recover_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = container_of(work, struct btrfs_raid_bio, work);

	ret = lock_stripe_add(rbio);
	if (ret == 0) {
		ret = recover_rbio(rbio);
		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
	}
}

static void recover_rbio_work_locked(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = container_of(work, struct btrfs_raid_bio, work);

	ret = recover_rbio(rbio);
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
{
	bool found = false;
	int sector_nr;

	/*
	 * This is for RAID6 extra recovery tries, thus the mirror number
	 * should be larger than 2.
	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
	 * RAID5 methods.
	 */
	ASSERT(mirror_num > 2);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;
		int faila;
		int failb;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		/* This vertical stripe doesn't have errors. */
		if (!found_errors)
			continue;

		/*
		 * If we found errors, there should be only one error marked
		 * by the previous set_rbio_range_error().
		 */
		ASSERT(found_errors == 1);
		found = true;

		/* Now select another stripe to mark as error. */
		failb = rbio->real_stripes - (mirror_num - 1);
		if (failb <= faila)
			failb--;

		/* Set the extra bit in the error bitmap. */
		if (failb >= 0)
			set_bit(failb * rbio->stripe_nsectors + sector_nr,
				rbio->error_bitmap);
	}

	/* We should have found at least one vertical stripe with an error. */
	ASSERT(found);
}

/*
 * The main entry point for reads from the higher layers. This is really
 * only called when the normal read path had a failure, so we assume the
 * bio they send down corresponds to a failed part of the drive.
 */
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
			   int mirror_num)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	rbio_add_bio(rbio, bio);

	set_rbio_range_error(rbio, bio);

	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2)
		set_rbio_raid6_extra_error(rbio, mirror_num);

	start_async_work(rbio, recover_rbio_work);
}

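/*
 * Look up and cache the data checksums covering the full stripe, so that
 * the read end io can verify each data sector before it is used for RMW.
 */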
static void fill_data_csums(struct btrfs_raid_bio *rbio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
						       rbio->bioc->raid_map[0]);
	const u64 start = rbio->bioc->raid_map[0];
	const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
			fs_info->sectorsize_bits;
	int ret;

	/* The rbio should not have its csum buffer initialized. */
	ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);

	/*
	 * Skip the csum search if:
	 *
	 * - The rbio doesn't belong to data block groups
	 *   Then we are doing IO for tree blocks, no need to search csums.
	 *
	 * - The rbio belongs to mixed block groups
	 *   This is to avoid deadlock: we're already holding the full
	 *   stripe lock, so if we trigger a metadata read that itself needs
	 *   raid56 recovery, we will deadlock.
	 */
	if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
	    rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
		return;

	rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
				 fs_info->csum_size, GFP_NOFS);
	rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
					  GFP_NOFS);
	if (!rbio->csum_buf || !rbio->csum_bitmap) {
		ret = -ENOMEM;
		goto error;
	}

	ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
					rbio->csum_buf, rbio->csum_bitmap);
	if (ret < 0)
		goto error;
	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
		goto no_csum;
	return;

error:
	/*
	 * We failed to allocate memory or grab the csums, but it's not fatal,
	 * we can still continue. It's better to warn users that RMW is no
	 * longer safe for this particular sub-stripe write though.
	 */
	btrfs_warn_rl(fs_info,
"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
		      rbio->bioc->raid_map[0], ret);
no_csum:
	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;
}

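/*
 * Read in all the data sectors for a sub-stripe write and repair anything
 * that failed, so the RMW cycle below operates on good data.
 */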
static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	/*
	 * Fill the data csums we need for data verification. We need to fill
	 * the csum_bitmap/csum_buf first, as our endio function will try to
	 * verify the data sectors.
	 */
	fill_data_csums(rbio);

	ret = rmw_assemble_read_bios(rbio, &bio_list);
	if (ret < 0)
		goto out;

	submit_read_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/*
	 * We may or may not have any corrupted sectors (including missing
	 * devices and csum mismatches), just let recover_sectors() handle
	 * them all.
	 */
	ret = recover_sectors(rbio);
	return ret;
out:
	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return ret;
}

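/* Completion handler for the synchronous, wait_event() based write path. */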
static void raid_wait_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;

	if (err)
		rbio_update_error_bitmap(rbio, bio);
	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_write_bios(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_write_end_io;

		if (trace_raid56_write_stripe_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_write_stripe(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}
}

/*
 * Determine if we need to read any sector from the disk.
 * Should only be used in the RMW path, to skip reads for a cached rbio.
 */
static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	int i;

	for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		/*
		 * We have a sector which doesn't have a page nor is
		 * uptodate, thus this rbio can not be a cached one, as a
		 * cached rbio must have all its data sectors present and
		 * uptodate.
		 */
		if (!sector->page || !sector->uptodate)
			return true;
	}
	return false;
}

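/*
 * The full read-modify-write cycle of one rbio: make sure every data sector
 * is present (reading from disk and repairing if necessary), regenerate P/Q
 * vertically, then write the full stripe back.
 */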
static int rmw_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list;
	int sectornr;
	int ret = 0;

	/*
	 * Allocate the pages for parity first, as P/Q pages will always be
	 * needed for both full-stripe and sub-stripe writes.
	 */
	ret = alloc_rbio_parity_pages(rbio);
	if (ret < 0)
		return ret;

	/*
	 * For a full stripe write, or when we have every data sector already
	 * cached, we can go to the write path immediately.
	 */
	if (rbio_is_full(rbio) || !need_read_stripe_sectors(rbio))
		goto write;

	/*
	 * Now we're doing a sub-stripe write, and also need all data stripes
	 * to do the full RMW.
	 */
	ret = alloc_rbio_data_pages(rbio);
	if (ret < 0)
		return ret;

	index_rbio_pages(rbio);

	ret = rmw_read_wait_recover(rbio);
	if (ret < 0)
		return ret;

write:
	/*
	 * At this stage we're not allowed to add any new bios to the
	 * bio list any more; anyone else that wants to change this stripe
	 * needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	index_rbio_pages(rbio);

	/*
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon. If they do use it again,
	 * hopefully they will send another full bio.
	 */
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
		generate_pq_vertical(rbio, sectornr);

	bio_list_init(&bio_list);
	ret = rmw_assemble_write_bios(rbio, &bio_list);
	if (ret < 0)
		return ret;

	/* We should have at least one bio assembled. */
	ASSERT(bio_list_size(&bio_list));
	submit_write_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/* We may have more errors than our tolerance during the read. */
	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
	return ret;
}

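/*
 * Work item entries for RMW, mirroring the recovery ones: the plain _work
 * variant takes the stripe lock first, while the _locked variant runs with
 * the lock already held (handed over by unlock_stripe()).
 */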
static void rmw_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = container_of(work, struct btrfs_raid_bio, work);

	ret = lock_stripe_add(rbio);
	if (ret == 0) {
		ret = rmw_rbio(rbio);
		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
	}
}

static void rmw_rbio_work_locked(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = container_of(work, struct btrfs_raid_bio, work);

	ret = rmw_rbio(rbio);
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bioc.
 *
 * Note: We need to make sure all the pages that are added into the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace. That is, those pages just hold metadata or file data
 * with checksums.
 */

struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bioc->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    unsigned int pgoff, u64 logical)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bioc->raid_map[0]);
	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
	       BTRFS_STRIPE_LEN * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
	index = stripe_offset / sectorsize;
	rbio->bio_sectors[index].page = page;
	rbio->bio_sectors[index].pgoff = pgoff;
}

/*
 * We only scrub the parity for which we have correct data on the same
 * horizontal, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int total_sector_nr;

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct page *page;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;

		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;
		if (rbio->stripe_pages[index])
			continue;
		page = alloc_page(GFP_NOFS);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[index] = page;
	}
	index_stripe_sectors(rbio);
	return 0;
}

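/*
 * Regenerate the parity for every sector set in dbitmap, compare it with
 * what is on disk, and write back only the sectors that differ. With
 * @need_check unset the verification is skipped and all dbitmap sectors are
 * written back as-is. For dev-replace, the same sectors are also written to
 * the replace target.
 */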
static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check)
{
	struct btrfs_io_context *bioc = rbio->bioc;
	const u32 sectorsize = bioc->fs_info->sectorsize;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = &rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int sectornr;
	bool has_qstripe;
	struct sector_ptr p_sector = { 0 };
	struct sector_ptr q_sector = { 0 };
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
	}

	/*
	 * The higher layers (scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_sector.page = alloc_page(GFP_NOFS);
	if (!p_sector.page)
		return -ENOMEM;
	p_sector.pgoff = 0;
	p_sector.uptodate = 1;

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_sector.page = alloc_page(GFP_NOFS);
		if (!q_sector.page) {
			__free_page(p_sector.page);
			p_sector.page = NULL;
			return -ENOMEM;
		}
		q_sector.pgoff = 0;
		q_sector.uptodate = 1;
		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
	}

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap_local_page(p_sector.page);

	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;
		void *parity;

		/* First collect one sector from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
			pointers[stripe] = kmap_local_page(sector->page) +
					   sector->pgoff;
		}

		if (has_qstripe) {
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], sectorsize);
			run_xor(pointers + 1, nr_data - 1, sectorsize);
		}

		/* Check the scrubbed parity and repair it */
		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		parity = kmap_local_page(sector->page) + sector->pgoff;
		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
			memcpy(parity, pointers[rbio->scrubp], sectorsize);
		else
			/* Parity is right, no need to write it back */
			bitmap_clear(&rbio->dbitmap, sectornr, 1);
		kunmap_local(parity);

		for (stripe = nr_data - 1; stripe >= 0; stripe--)
			kunmap_local(pointers[stripe]);
	}

	kunmap_local(pointers[nr_data]);
	__free_page(p_sector.page);
	p_sector.page = NULL;
	if (q_sector.page) {
		kunmap_local(pointers[rbio->real_stripes - 1]);
		__free_page(q_sector.page);
		q_sector.page = NULL;
	}

writeback:
	/*
	 * Time to start writing. Make bios for the parity sectors that still
	 * need a writeback (everything left in dbitmap) and ignore
	 * everything else.
	 */
	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 bioc->tgtdev_map[rbio->scrubp],
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

submit_write:
	submit_write_bios(rbio, &bio_list);
	return 0;

cleanup:
	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
	return ret;
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

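/*
 * Recover failed sectors for scrub. The parity stripe being scrubbed can
 * not be trusted, so repair is only possible when any failed parity happens
 * to be the one under scrub.
 */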
static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sector_nr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores a copy of the pointers that does not get
	 * reordered during reconstruction, so that kunmap_local() works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int dfail = 0, failp = -1;
		int faila;
		int failb;
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			goto out;
		}
		if (found_errors == 0)
			continue;

		/* We should have at least one error here. */
		ASSERT(faila >= 0 || failb >= 0);

		if (is_data_stripe(rbio, faila))
			dfail++;
		else if (is_parity_stripe(faila))
			failp = faila;

		if (is_data_stripe(rbio, failb))
			dfail++;
		else if (is_parity_stripe(failb))
			failp = failb;
		/*
		 * Because we can not use the scrubbed parity to repair data,
		 * our repair capability is reduced. (In the case of RAID5,
		 * we can not repair anything.)
		 */
		if (dfail > rbio->bioc->max_errors - 1) {
			ret = -EIO;
			goto out;
		}
		/*
		 * If all data is good and only the parity is bad, just
		 * repair the parity; there is no need to recover the data
		 * stripes.
		 */
		if (dfail == 0)
			continue;

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6. If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair
		 * the data; otherwise we can not repair the data stripe.
		 */
		if (failp != rbio->scrubp) {
			ret = -EIO;
			goto out;
		}

		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
		if (ret < 0)
			goto out;
	}
out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio,
				    struct bio_list *bio_list)
{
	struct bio *bio;
	int total_sector_nr;
	int ret = 0;

	ASSERT(bio_list_size(bio_list) == 0);

	/* Build a list of bios to read all the missing parts. */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/* No data in the vertical stripe, no need to read. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		/*
		 * We want to find all the sectors missing from the rbio and
		 * read them from the disk. If sector_in_rbio() finds a sector
		 * in the bio list we don't need to read it off the stripe.
		 */
		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
		if (sector)
			continue;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		/*
		 * The bio cache may have handed us an uptodate sector. If so,
		 * use it.
		 */
		if (sector->uptodate)
			continue;

		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret)
			goto error;
	}
	return 0;
error:
	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
	return ret;
}

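/*
 * The main scrub flow: read in everything covered by dbitmap, repair any
 * failed sectors, then verify and write back the parity.
 */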
static int scrub_rbio(struct btrfs_raid_bio *rbio)
{
	bool need_check = false;
	struct bio_list bio_list;
	int sector_nr;
	int ret;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	ret = scrub_assemble_read_bios(rbio, &bio_list);
	if (ret < 0)
		goto cleanup;

	submit_read_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/* We may have some failures, recover the failed sectors first. */
	ret = recover_scrub_rbio(rbio);
	if (ret < 0)
		goto cleanup;

	/*
	 * We have every sector properly prepared. Can finish the scrub
	 * and write back the good content.
	 */
	ret = finish_parity_scrub(rbio, need_check);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
	return ret;

cleanup:
	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return ret;
}

static void scrub_rbio_work_locked(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	ret = scrub_rbio(rbio);
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_rbio_work_locked);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	set_rbio_range_error(rbio, bio);

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	start_async_work(rbio, recover_rbio_work);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2012 Fusion-io All rights reserved.
4 * Copyright (C) 2012 Intel Corp. All rights reserved.
5 */
6
7#include <linux/sched.h>
8#include <linux/bio.h>
9#include <linux/slab.h>
10#include <linux/blkdev.h>
11#include <linux/raid/pq.h>
12#include <linux/hash.h>
13#include <linux/list_sort.h>
14#include <linux/raid/xor.h>
15#include <linux/mm.h>
16#include "messages.h"
17#include "ctree.h"
18#include "disk-io.h"
19#include "volumes.h"
20#include "raid56.h"
21#include "async-thread.h"
22#include "file-item.h"
23#include "btrfs_inode.h"
24
25/* set when additional merges to this rbio are not allowed */
26#define RBIO_RMW_LOCKED_BIT 1
27
28/*
29 * set when this rbio is sitting in the hash, but it is just a cache
30 * of past RMW
31 */
32#define RBIO_CACHE_BIT 2
33
34/*
35 * set when it is safe to trust the stripe_pages for caching
36 */
37#define RBIO_CACHE_READY_BIT 3
38
39#define RBIO_CACHE_SIZE 1024
40
41#define BTRFS_STRIPE_HASH_TABLE_BITS 11
42
43/* Used by the raid56 code to lock stripes for read/modify/write */
44struct btrfs_stripe_hash {
45 struct list_head hash_list;
46 spinlock_t lock;
47};
48
49/* Used by the raid56 code to lock stripes for read/modify/write */
50struct btrfs_stripe_hash_table {
51 struct list_head stripe_cache;
52 spinlock_t cache_lock;
53 int cache_size;
54 struct btrfs_stripe_hash table[];
55};
56
57/*
58 * A bvec like structure to present a sector inside a page.
59 *
60 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
61 */
62struct sector_ptr {
63 struct page *page;
64 unsigned int pgoff:24;
65 unsigned int uptodate:8;
66};
67
68static void rmw_rbio_work(struct work_struct *work);
69static void rmw_rbio_work_locked(struct work_struct *work);
70static void index_rbio_pages(struct btrfs_raid_bio *rbio);
71static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
72
73static int finish_parity_scrub(struct btrfs_raid_bio *rbio);
74static void scrub_rbio_work_locked(struct work_struct *work);
75
76static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
77{
78 bitmap_free(rbio->error_bitmap);
79 kfree(rbio->stripe_pages);
80 kfree(rbio->bio_sectors);
81 kfree(rbio->stripe_sectors);
82 kfree(rbio->finish_pointers);
83}
84
85static void free_raid_bio(struct btrfs_raid_bio *rbio)
86{
87 int i;
88
89 if (!refcount_dec_and_test(&rbio->refs))
90 return;
91
92 WARN_ON(!list_empty(&rbio->stripe_cache));
93 WARN_ON(!list_empty(&rbio->hash_list));
94 WARN_ON(!bio_list_empty(&rbio->bio_list));
95
96 for (i = 0; i < rbio->nr_pages; i++) {
97 if (rbio->stripe_pages[i]) {
98 __free_page(rbio->stripe_pages[i]);
99 rbio->stripe_pages[i] = NULL;
100 }
101 }
102
103 btrfs_put_bioc(rbio->bioc);
104 free_raid_bio_pointers(rbio);
105 kfree(rbio);
106}
107
108static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
109{
110 INIT_WORK(&rbio->work, work_func);
111 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
112}
113
114/*
115 * the stripe hash table is used for locking, and to collect
116 * bios in hopes of making a full stripe
117 */
118int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
119{
120 struct btrfs_stripe_hash_table *table;
121 struct btrfs_stripe_hash_table *x;
122 struct btrfs_stripe_hash *cur;
123 struct btrfs_stripe_hash *h;
124 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
125 int i;
126
127 if (info->stripe_hash_table)
128 return 0;
129
130 /*
131 * The table is large, starting with order 4 and can go as high as
132 * order 7 in case lock debugging is turned on.
133 *
134 * Try harder to allocate and fallback to vmalloc to lower the chance
135 * of a failing mount.
136 */
137 table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
138 if (!table)
139 return -ENOMEM;
140
141 spin_lock_init(&table->cache_lock);
142 INIT_LIST_HEAD(&table->stripe_cache);
143
144 h = table->table;
145
146 for (i = 0; i < num_entries; i++) {
147 cur = h + i;
148 INIT_LIST_HEAD(&cur->hash_list);
149 spin_lock_init(&cur->lock);
150 }
151
152 x = cmpxchg(&info->stripe_hash_table, NULL, table);
153 kvfree(x);
154 return 0;
155}
156
157/*
158 * caching an rbio means to copy anything from the
159 * bio_sectors array into the stripe_pages array. We
160 * use the page uptodate bit in the stripe cache array
161 * to indicate if it has valid data
162 *
163 * once the caching is done, we set the cache ready
164 * bit.
165 */
166static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
167{
168 int i;
169 int ret;
170
171 ret = alloc_rbio_pages(rbio);
172 if (ret)
173 return;
174
175 for (i = 0; i < rbio->nr_sectors; i++) {
176 /* Some range not covered by bio (partial write), skip it */
177 if (!rbio->bio_sectors[i].page) {
178 /*
179 * Even if the sector is not covered by bio, if it is
180 * a data sector it should still be uptodate as it is
181 * read from disk.
182 */
183 if (i < rbio->nr_data * rbio->stripe_nsectors)
184 ASSERT(rbio->stripe_sectors[i].uptodate);
185 continue;
186 }
187
188 ASSERT(rbio->stripe_sectors[i].page);
189 memcpy_page(rbio->stripe_sectors[i].page,
190 rbio->stripe_sectors[i].pgoff,
191 rbio->bio_sectors[i].page,
192 rbio->bio_sectors[i].pgoff,
193 rbio->bioc->fs_info->sectorsize);
194 rbio->stripe_sectors[i].uptodate = 1;
195 }
196 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
197}
198
199/*
200 * we hash on the first logical address of the stripe
201 */
202static int rbio_bucket(struct btrfs_raid_bio *rbio)
203{
204 u64 num = rbio->bioc->full_stripe_logical;
205
206 /*
207 * we shift down quite a bit. We're using byte
208 * addressing, and most of the lower bits are zeros.
209 * This tends to upset hash_64, and it consistently
210 * returns just one or two different values.
211 *
212 * shifting off the lower bits fixes things.
213 */
214 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
215}
216
217static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
218 unsigned int page_nr)
219{
220 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
221 const u32 sectors_per_page = PAGE_SIZE / sectorsize;
222 int i;
223
224 ASSERT(page_nr < rbio->nr_pages);
225
226 for (i = sectors_per_page * page_nr;
227 i < sectors_per_page * page_nr + sectors_per_page;
228 i++) {
229 if (!rbio->stripe_sectors[i].uptodate)
230 return false;
231 }
232 return true;
233}
234
235/*
236 * Update the stripe_sectors[] array to use correct page and pgoff
237 *
238 * Should be called every time any page pointer in stripes_pages[] got modified.
239 */
240static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
241{
242 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
243 u32 offset;
244 int i;
245
246 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
247 int page_index = offset >> PAGE_SHIFT;
248
249 ASSERT(page_index < rbio->nr_pages);
250 rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
251 rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
252 }
253}
254
255static void steal_rbio_page(struct btrfs_raid_bio *src,
256 struct btrfs_raid_bio *dest, int page_nr)
257{
258 const u32 sectorsize = src->bioc->fs_info->sectorsize;
259 const u32 sectors_per_page = PAGE_SIZE / sectorsize;
260 int i;
261
262 if (dest->stripe_pages[page_nr])
263 __free_page(dest->stripe_pages[page_nr]);
264 dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
265 src->stripe_pages[page_nr] = NULL;
266
267 /* Also update the sector->uptodate bits. */
268 for (i = sectors_per_page * page_nr;
269 i < sectors_per_page * page_nr + sectors_per_page; i++)
270 dest->stripe_sectors[i].uptodate = true;
271}
272
273static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
274{
275 const int sector_nr = (page_nr << PAGE_SHIFT) >>
276 rbio->bioc->fs_info->sectorsize_bits;
277
278 /*
279 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
280 * we won't have a page which is half data half parity.
281 *
282 * Thus if the first sector of the page belongs to data stripes, then
283 * the full page belongs to data stripes.
284 */
285 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
286}
287
288/*
289 * Stealing an rbio means taking all the uptodate pages from the stripe array
290 * in the source rbio and putting them into the destination rbio.
291 *
292 * This will also update the involved stripe_sectors[] which are referring to
293 * the old pages.
294 */
295static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
296{
297 int i;
298
299 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
300 return;
301
302 for (i = 0; i < dest->nr_pages; i++) {
303 struct page *p = src->stripe_pages[i];
304
305 /*
306 * We don't need to steal P/Q pages as they will always be
307 * regenerated for RMW or full write anyway.
308 */
309 if (!is_data_stripe_page(src, i))
310 continue;
311
312 /*
313 * If @src already has RBIO_CACHE_READY_BIT, it should have
314 * all data stripe pages present and uptodate.
315 */
316 ASSERT(p);
317 ASSERT(full_page_sectors_uptodate(src, i));
318 steal_rbio_page(src, dest, i);
319 }
320 index_stripe_sectors(dest);
321 index_stripe_sectors(src);
322}
323
324/*
325 * merging means we take the bio_list from the victim and
326 * splice it into the destination. The victim should
327 * be discarded afterwards.
328 *
329 * must be called with dest->rbio_list_lock held
330 */
331static void merge_rbio(struct btrfs_raid_bio *dest,
332 struct btrfs_raid_bio *victim)
333{
334 bio_list_merge(&dest->bio_list, &victim->bio_list);
335 dest->bio_list_bytes += victim->bio_list_bytes;
336 /* Also inherit the bitmaps from @victim. */
337 bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
338 dest->stripe_nsectors);
339 bio_list_init(&victim->bio_list);
340}
341
342/*
343 * used to prune items that are in the cache. The caller
344 * must hold the hash table lock.
345 */
346static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
347{
348 int bucket = rbio_bucket(rbio);
349 struct btrfs_stripe_hash_table *table;
350 struct btrfs_stripe_hash *h;
351 int freeit = 0;
352
353 /*
354 * check the bit again under the hash table lock.
355 */
356 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
357 return;
358
359 table = rbio->bioc->fs_info->stripe_hash_table;
360 h = table->table + bucket;
361
362 /* hold the lock for the bucket because we may be
363 * removing it from the hash table
364 */
365 spin_lock(&h->lock);
366
367 /*
368 * hold the lock for the bio list because we need
369 * to make sure the bio list is empty
370 */
371 spin_lock(&rbio->bio_list_lock);
372
373 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
374 list_del_init(&rbio->stripe_cache);
375 table->cache_size -= 1;
376 freeit = 1;
377
378 /* if the bio list isn't empty, this rbio is
379 * still involved in an IO. We take it out
380 * of the cache list, and drop the ref that
381 * was held for the list.
382 *
383 * If the bio_list was empty, we also remove
384 * the rbio from the hash_table, and drop
385 * the corresponding ref
386 */
387 if (bio_list_empty(&rbio->bio_list)) {
388 if (!list_empty(&rbio->hash_list)) {
389 list_del_init(&rbio->hash_list);
390 refcount_dec(&rbio->refs);
391 BUG_ON(!list_empty(&rbio->plug_list));
392 }
393 }
394 }
395
396 spin_unlock(&rbio->bio_list_lock);
397 spin_unlock(&h->lock);
398
399 if (freeit)
400 free_raid_bio(rbio);
401}
402
403/*
404 * prune a given rbio from the cache
405 */
406static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
407{
408 struct btrfs_stripe_hash_table *table;
409
410 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
411 return;
412
413 table = rbio->bioc->fs_info->stripe_hash_table;
414
415 spin_lock(&table->cache_lock);
416 __remove_rbio_from_cache(rbio);
417 spin_unlock(&table->cache_lock);
418}
419
420/*
421 * remove everything in the cache
422 */
423static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
424{
425 struct btrfs_stripe_hash_table *table;
426 struct btrfs_raid_bio *rbio;
427
428 table = info->stripe_hash_table;
429
430 spin_lock(&table->cache_lock);
431 while (!list_empty(&table->stripe_cache)) {
432 rbio = list_entry(table->stripe_cache.next,
433 struct btrfs_raid_bio,
434 stripe_cache);
435 __remove_rbio_from_cache(rbio);
436 }
437 spin_unlock(&table->cache_lock);
438}
439
440/*
441 * remove all cached entries and free the hash table
442 * used by unmount
443 */
444void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
445{
446 if (!info->stripe_hash_table)
447 return;
448 btrfs_clear_rbio_cache(info);
449 kvfree(info->stripe_hash_table);
450 info->stripe_hash_table = NULL;
451}
452
453/*
454 * insert an rbio into the stripe cache. It
455 * must have already been prepared by calling
456 * cache_rbio_pages
457 *
458 * If this rbio was already cached, it gets
459 * moved to the front of the lru.
460 *
461 * If the size of the rbio cache is too big, we
462 * prune an item.
463 */
464static void cache_rbio(struct btrfs_raid_bio *rbio)
465{
466 struct btrfs_stripe_hash_table *table;
467
468 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
469 return;
470
471 table = rbio->bioc->fs_info->stripe_hash_table;
472
473 spin_lock(&table->cache_lock);
474 spin_lock(&rbio->bio_list_lock);
475
476 /* bump our ref if we were not in the list before */
477 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
478 refcount_inc(&rbio->refs);
479
480 if (!list_empty(&rbio->stripe_cache)){
481 list_move(&rbio->stripe_cache, &table->stripe_cache);
482 } else {
483 list_add(&rbio->stripe_cache, &table->stripe_cache);
484 table->cache_size += 1;
485 }
486
487 spin_unlock(&rbio->bio_list_lock);
488
489 if (table->cache_size > RBIO_CACHE_SIZE) {
490 struct btrfs_raid_bio *found;
491
492 found = list_entry(table->stripe_cache.prev,
493 struct btrfs_raid_bio,
494 stripe_cache);
495
496 if (found != rbio)
497 __remove_rbio_from_cache(found);
498 }
499
500 spin_unlock(&table->cache_lock);
501}
502
503/*
504 * helper function to run the xor_blocks api. It is only
505 * able to do MAX_XOR_BLOCKS at a time, so we need to
506 * loop through.
507 */
508static void run_xor(void **pages, int src_cnt, ssize_t len)
509{
510 int src_off = 0;
511 int xor_src_cnt = 0;
512 void *dest = pages[src_cnt];
513
514 while(src_cnt > 0) {
515 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
516 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
517
518 src_cnt -= xor_src_cnt;
519 src_off += xor_src_cnt;
520 }
521}
522
523/*
524 * Returns true if the bio list inside this rbio covers an entire stripe (no
525 * rmw required).
526 */
527static int rbio_is_full(struct btrfs_raid_bio *rbio)
528{
529 unsigned long size = rbio->bio_list_bytes;
530 int ret = 1;
531
532 spin_lock(&rbio->bio_list_lock);
533 if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
534 ret = 0;
535 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
536 spin_unlock(&rbio->bio_list_lock);
537
538 return ret;
539}
540
541/*
542 * returns 1 if it is safe to merge two rbios together.
543 * The merging is safe if the two rbios correspond to
544 * the same stripe and if they are both going in the same
545 * direction (read vs write), and if neither one is
546 * locked for final IO
547 *
548 * The caller is responsible for locking such that
549 * rmw_locked is safe to test
550 */
551static int rbio_can_merge(struct btrfs_raid_bio *last,
552 struct btrfs_raid_bio *cur)
553{
554 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
555 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
556 return 0;
557
558 /*
559 * we can't merge with cached rbios, since the
560 * idea is that when we merge the destination
561 * rbio is going to run our IO for us. We can
562 * steal from cached rbios though, other functions
563 * handle that.
564 */
565 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
566 test_bit(RBIO_CACHE_BIT, &cur->flags))
567 return 0;
568
569 if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
570 return 0;
571
572 /* we can't merge with different operations */
573 if (last->operation != cur->operation)
574 return 0;
575 /*
576 * We've need read the full stripe from the drive.
577 * check and repair the parity and write the new results.
578 *
579 * We're not allowed to add any new bios to the
580 * bio list here, anyone else that wants to
581 * change this stripe needs to do their own rmw.
582 */
583 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
584 return 0;
585
586 if (last->operation == BTRFS_RBIO_READ_REBUILD)
587 return 0;
588
589 return 1;
590}
591
592static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
593 unsigned int stripe_nr,
594 unsigned int sector_nr)
595{
596 ASSERT(stripe_nr < rbio->real_stripes);
597 ASSERT(sector_nr < rbio->stripe_nsectors);
598
599 return stripe_nr * rbio->stripe_nsectors + sector_nr;
600}
601
602/* Return a sector from rbio->stripe_sectors, not from the bio list */
603static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
604 unsigned int stripe_nr,
605 unsigned int sector_nr)
606{
607 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
608 sector_nr)];
609}
610
611/* Grab a sector inside P stripe */
612static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
613 unsigned int sector_nr)
614{
615 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
616}
617
618/* Grab a sector inside Q stripe, return NULL if not RAID6 */
619static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
620 unsigned int sector_nr)
621{
622 if (rbio->nr_data + 1 == rbio->real_stripes)
623 return NULL;
624 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
625}
626
627/*
628 * The first stripe in the table for a logical address
629 * has the lock. rbios are added in one of three ways:
630 *
631 * 1) Nobody has the stripe locked yet. The rbio is given
632 * the lock and 0 is returned. The caller must start the IO
633 * themselves.
634 *
635 * 2) Someone has the stripe locked, but we're able to merge
636 * with the lock owner. The rbio is freed and the IO will
637 * start automatically along with the existing rbio. 1 is returned.
638 *
639 * 3) Someone has the stripe locked, but we're not able to merge.
640 * The rbio is added to the lock owner's plug list, or merged into
641 * an rbio already on the plug list. When the lock owner unlocks,
642 * the next rbio on the list is run and the IO is started automatically.
643 * 1 is returned
644 *
645 * If we return 0, the caller still owns the rbio and must continue with
646 * IO submission. If we return 1, the caller must assume the rbio has
647 * already been freed.
648 */
649static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
650{
651 struct btrfs_stripe_hash *h;
652 struct btrfs_raid_bio *cur;
653 struct btrfs_raid_bio *pending;
654 struct btrfs_raid_bio *freeit = NULL;
655 struct btrfs_raid_bio *cache_drop = NULL;
656 int ret = 0;
657
658 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
659
660 spin_lock(&h->lock);
661 list_for_each_entry(cur, &h->hash_list, hash_list) {
662 if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
663 continue;
664
665 spin_lock(&cur->bio_list_lock);
666
667 /* Can we steal this cached rbio's pages? */
668 if (bio_list_empty(&cur->bio_list) &&
669 list_empty(&cur->plug_list) &&
670 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
671 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
672 list_del_init(&cur->hash_list);
673 refcount_dec(&cur->refs);
674
675 steal_rbio(cur, rbio);
676 cache_drop = cur;
677 spin_unlock(&cur->bio_list_lock);
678
679 goto lockit;
680 }
681
682 /* Can we merge into the lock owner? */
683 if (rbio_can_merge(cur, rbio)) {
684 merge_rbio(cur, rbio);
685 spin_unlock(&cur->bio_list_lock);
686 freeit = rbio;
687 ret = 1;
688 goto out;
689 }
690
691
692 /*
693 * We couldn't merge with the running rbio, see if we can merge
694 * with the pending ones. We don't have to check for rmw_locked
695 * because there is no way they are inside finish_rmw right now
696 */
697 list_for_each_entry(pending, &cur->plug_list, plug_list) {
698 if (rbio_can_merge(pending, rbio)) {
699 merge_rbio(pending, rbio);
700 spin_unlock(&cur->bio_list_lock);
701 freeit = rbio;
702 ret = 1;
703 goto out;
704 }
705 }
706
707 /*
708 * No merging, put us on the tail of the plug list, our rbio
709 * will be started with the currently running rbio unlocks
710 */
711 list_add_tail(&rbio->plug_list, &cur->plug_list);
712 spin_unlock(&cur->bio_list_lock);
713 ret = 1;
714 goto out;
715 }
716lockit:
717 refcount_inc(&rbio->refs);
718 list_add(&rbio->hash_list, &h->hash_list);
719out:
720 spin_unlock(&h->lock);
721 if (cache_drop)
722 remove_rbio_from_cache(cache_drop);
723 if (freeit)
724 free_raid_bio(freeit);
725 return ret;
726}
727
728static void recover_rbio_work_locked(struct work_struct *work);
729
730/*
731 * called as rmw or parity rebuild is completed. If the plug list has more
732 * rbios waiting for this stripe, the next one on the list will be started
733 */
734static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
735{
736 int bucket;
737 struct btrfs_stripe_hash *h;
738 int keep_cache = 0;
739
740 bucket = rbio_bucket(rbio);
741 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
742
743 if (list_empty(&rbio->plug_list))
744 cache_rbio(rbio);
745
746 spin_lock(&h->lock);
747 spin_lock(&rbio->bio_list_lock);
748
749 if (!list_empty(&rbio->hash_list)) {
750 /*
751 * if we're still cached and there is no other IO
752 * to perform, just leave this rbio here for others
753 * to steal from later
754 */
755 if (list_empty(&rbio->plug_list) &&
756 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
757 keep_cache = 1;
758 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
759 BUG_ON(!bio_list_empty(&rbio->bio_list));
760 goto done;
761 }
762
763 list_del_init(&rbio->hash_list);
764 refcount_dec(&rbio->refs);
765
766 /*
767 * we use the plug list to hold all the rbios
768 * waiting for the chance to lock this stripe.
769 * hand the lock over to one of them.
770 */
771 if (!list_empty(&rbio->plug_list)) {
772 struct btrfs_raid_bio *next;
773 struct list_head *head = rbio->plug_list.next;
774
775 next = list_entry(head, struct btrfs_raid_bio,
776 plug_list);
777
778 list_del_init(&rbio->plug_list);
779
780 list_add(&next->hash_list, &h->hash_list);
781 refcount_inc(&next->refs);
782 spin_unlock(&rbio->bio_list_lock);
783 spin_unlock(&h->lock);
784
785 if (next->operation == BTRFS_RBIO_READ_REBUILD) {
786 start_async_work(next, recover_rbio_work_locked);
787 } else if (next->operation == BTRFS_RBIO_WRITE) {
788 steal_rbio(rbio, next);
789 start_async_work(next, rmw_rbio_work_locked);
790 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
791 steal_rbio(rbio, next);
792 start_async_work(next, scrub_rbio_work_locked);
793 }
794
795 goto done_nolock;
796 }
797 }
798done:
799 spin_unlock(&rbio->bio_list_lock);
800 spin_unlock(&h->lock);
801
802done_nolock:
803 if (!keep_cache)
804 remove_rbio_from_cache(rbio);
805}
806
807static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
808{
809 struct bio *next;
810
811 while (cur) {
812 next = cur->bi_next;
813 cur->bi_next = NULL;
814 cur->bi_status = err;
815 bio_endio(cur);
816 cur = next;
817 }
818}
819
820/*
821 * this frees the rbio and runs through all the bios in the
822 * bio_list and calls end_io on them
823 */
824static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
825{
826 struct bio *cur = bio_list_get(&rbio->bio_list);
827 struct bio *extra;
828
829 kfree(rbio->csum_buf);
830 bitmap_free(rbio->csum_bitmap);
831 rbio->csum_buf = NULL;
832 rbio->csum_bitmap = NULL;
833
834 /*
835 * Clear the data bitmap, as the rbio may be cached for later usage.
836 * do this before before unlock_stripe() so there will be no new bio
837 * for this bio.
838 */
839 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
840
841 /*
842 * At this moment, rbio->bio_list is empty, however since rbio does not
843 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
844 * hash list, rbio may be merged with others so that rbio->bio_list
845 * becomes non-empty.
846 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
847 * more and we can call bio_endio() on all queued bios.
848 */
849 unlock_stripe(rbio);
850 extra = bio_list_get(&rbio->bio_list);
851 free_raid_bio(rbio);
852
853 rbio_endio_bio_list(cur, err);
854 if (extra)
855 rbio_endio_bio_list(extra, err);
856}
857
858/*
859 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
860 *
861 * @rbio: The raid bio
862 * @stripe_nr: Stripe number, valid range [0, real_stripe)
863 * @sector_nr: Sector number inside the stripe,
864 * valid range [0, stripe_nsectors)
865 * @bio_list_only: Whether to use sectors inside the bio list only.
866 *
867 * The read/modify/write code wants to reuse the original bio page as much
868 * as possible, and only use stripe_sectors as fallback.
869 */
870static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
871 int stripe_nr, int sector_nr,
872 bool bio_list_only)
873{
874 struct sector_ptr *sector;
875 int index;
876
877 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
878 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
879
880 index = stripe_nr * rbio->stripe_nsectors + sector_nr;
881 ASSERT(index >= 0 && index < rbio->nr_sectors);
882
883 spin_lock(&rbio->bio_list_lock);
884 sector = &rbio->bio_sectors[index];
885 if (sector->page || bio_list_only) {
886 /* Don't return sector without a valid page pointer */
887 if (!sector->page)
888 sector = NULL;
889 spin_unlock(&rbio->bio_list_lock);
890 return sector;
891 }
892 spin_unlock(&rbio->bio_list_lock);
893
894 return &rbio->stripe_sectors[index];
895}
896
897/*
898 * allocation and initial setup for the btrfs_raid_bio. Not
899 * this does not allocate any pages for rbio->pages.
900 */
901static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
902 struct btrfs_io_context *bioc)
903{
904 const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
905 const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
906 const unsigned int num_pages = stripe_npages * real_stripes;
907 const unsigned int stripe_nsectors =
908 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
909 const unsigned int num_sectors = stripe_nsectors * real_stripes;
910 struct btrfs_raid_bio *rbio;
911
912 /* PAGE_SIZE must also be aligned to sectorsize for subpage support */
913 ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
914 /*
915 * Our current stripe len should be fixed to 64k thus stripe_nsectors
916 * (at most 16) should be no larger than BITS_PER_LONG.
917 */
918 ASSERT(stripe_nsectors <= BITS_PER_LONG);
919
920 /*
921 * Real stripes must be between 2 (2 disks RAID5, aka RAID1) and 256
922 * (limited by u8).
923 */
924 ASSERT(real_stripes >= 2);
925 ASSERT(real_stripes <= U8_MAX);
926
927 rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
928 if (!rbio)
929 return ERR_PTR(-ENOMEM);
930 rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
931 GFP_NOFS);
932 rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
933 GFP_NOFS);
934 rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
935 GFP_NOFS);
936 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
937 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
938
939 if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
940 !rbio->finish_pointers || !rbio->error_bitmap) {
941 free_raid_bio_pointers(rbio);
942 kfree(rbio);
943 return ERR_PTR(-ENOMEM);
944 }
945
946 bio_list_init(&rbio->bio_list);
947 init_waitqueue_head(&rbio->io_wait);
948 INIT_LIST_HEAD(&rbio->plug_list);
949 spin_lock_init(&rbio->bio_list_lock);
950 INIT_LIST_HEAD(&rbio->stripe_cache);
951 INIT_LIST_HEAD(&rbio->hash_list);
952 btrfs_get_bioc(bioc);
953 rbio->bioc = bioc;
954 rbio->nr_pages = num_pages;
955 rbio->nr_sectors = num_sectors;
956 rbio->real_stripes = real_stripes;
957 rbio->stripe_npages = stripe_npages;
958 rbio->stripe_nsectors = stripe_nsectors;
959 refcount_set(&rbio->refs, 1);
960 atomic_set(&rbio->stripes_pending, 0);
961
962 ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
963 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
964 ASSERT(rbio->nr_data > 0);
965
966 return rbio;
967}
968
/* Allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, 0);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* Only allocate pages for P/Q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages, 0);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Return the total number of errors found in the vertical stripe of @sector_nr.
 *
 * @faila and @failb will also be updated to the first and second stripe
 * number of the errors.
 */
static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
				     int *faila, int *failb)
{
	int stripe_nr;
	int found_errors = 0;

	if (faila || failb) {
		/*
		 * Both @faila and @failb should be valid pointers if any of
		 * them is specified.
		 */
		ASSERT(faila && failb);
		*faila = -1;
		*failb = -1;
	}

	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;

		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
			found_errors++;
			if (faila) {
				/* Update faila and failb. */
				if (*faila < 0)
					*faila = stripe_nr;
				else if (*failb < 0)
					*failb = stripe_nr;
			}
		}
	}
	return found_errors;
}

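/*
 * Illustrative example: error_bitmap has one bit per sector, laid out
 * stripe by stripe. With real_stripes == 4 and stripe_nsectors == 16,
 * the vertical stripe for sector_nr == 5 is described by bits 5, 21, 37
 * and 53, one from each disk.
 */
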
/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      enum req_op op)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* If the device is missing, just fail this stripe */
	if (!stripe->dev->bdev) {
		int found_errors;

		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
			rbio->error_bitmap);

		/* Check if we have reached tolerance early. */
		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 NULL, NULL);
		if (found_errors > rbio->bioc->max_errors)
			return -EIO;
		return 0;
	}

	/* See if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT;
		last_end += last->bi_iter.bi_size;

		/*
		 * We can't merge these if they are from different devices or
		 * if they are not contiguous.
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* Put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev,
			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
			op, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
	bio->bi_private = rbio;

	__bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
	bio_list_add(bio_list, bio);
	return 0;
}

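/*
 * Example of the merge rule above (illustrative only): with 4K sectors,
 * a sector at disk_start == stripe->physical + 8K merges into the tail
 * bio only if that bio ends exactly at that offset on the same bdev;
 * otherwise a fresh bio sized for a full 64K stripe is allocated.
 */
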
static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->full_stripe_logical;

	bio_for_each_segment(bvec, bio, iter) {
		u32 bvec_offset;

		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
		     bvec_offset += sectorsize, offset += sectorsize) {
			int index = offset / sectorsize;
			struct sector_ptr *sector = &rbio->bio_sectors[index];

			sector->page = bvec.bv_page;
			sector->pgoff = bvec.bv_offset + bvec_offset;
			ASSERT(sector->pgoff < PAGE_SIZE);
		}
	}
}

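/*
 * Illustrative example for index_one_bio(): a bio starting 20K past
 * full_stripe_logical with 4K sectors begins filling bio_sectors[] at
 * index 20K / 4K == 5, advancing one sector_ptr per sectorsize of data.
 */
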
/*
 * Helper function to walk our bio list and populate the bio_sectors array
 * with the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we set up the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;

	spin_lock(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list)
		index_one_bio(rbio, bio);

	spin_unlock(&rbio->bio_list_lock);
}

static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
			       struct raid56_bio_trace_info *trace_info)
{
	const struct btrfs_io_context *bioc = rbio->bioc;
	int i;

	ASSERT(bioc);

	/* We rely on bio->bi_bdev to find the stripe number. */
	if (!bio->bi_bdev)
		goto not_found;

	for (i = 0; i < bioc->num_stripes; i++) {
		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
			continue;
		trace_info->stripe_nr = i;
		trace_info->devid = bioc->stripes[i].dev->devid;
		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
				     bioc->stripes[i].physical;
		return;
	}

not_found:
	trace_info->devid = -1;
	trace_info->offset = -1;
	trace_info->stripe_nr = -1;
}

static inline void bio_list_put(struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
}

static void assert_rbio(struct btrfs_raid_bio *rbio)
{
	if (!IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
	    !IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	/*
	 * At least two stripes (2 disks RAID5), and since real_stripes is a
	 * u8 we won't go beyond 256 disks anyway.
	 */
	ASSERT(rbio->real_stripes >= 2);
	ASSERT(rbio->nr_data > 0);

	/*
	 * This is another check to make sure the number of data stripes is
	 * smaller than the total number of stripes.
	 */
	ASSERT(rbio->nr_data < rbio->real_stripes);
}

/* Generate PQ for one vertical stripe. */
static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
{
	void **pointers = rbio->finish_pointers;
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct sector_ptr *sector;
	int stripe;
	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;

	/* First collect one sector from each data stripe */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
		pointers[stripe] = kmap_local_page(sector->page) +
				   sector->pgoff;
	}

	/* Then add the parity stripe */
	sector = rbio_pstripe_sector(rbio, sectornr);
	sector->uptodate = 1;
	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;

	if (has_qstripe) {
		/*
		 * RAID6, add the qstripe and call the library function
		 * to fill in our p/q
		 */
		sector = rbio_qstripe_sector(rbio, sectornr);
		sector->uptodate = 1;
		pointers[stripe++] = kmap_local_page(sector->page) +
				     sector->pgoff;

		assert_rbio(rbio);
		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
					pointers);
	} else {
		/* raid5 */
		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
	}
	for (stripe = stripe - 1; stripe >= 0; stripe--)
		kunmap_local(pointers[stripe]);
}

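/*
 * The parity math used above, for reference: RAID5 keeps a single parity
 * P = D0 ^ D1 ^ ... ^ D(n-1). RAID6 additionally keeps the syndrome
 * Q = g^0*D0 + g^1*D1 + ... + g^(n-1)*D(n-1) over GF(2^8), which is what
 * raid6_call.gen_syndrome() computes into the P and Q slots at the end of
 * the pointers[] array.
 */
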
static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
				   struct bio_list *bio_list)
{
	/* The total sector number inside the full stripe. */
	int total_sector_nr;
	int sectornr;
	int stripe;
	int ret;

	ASSERT(bio_list_size(bio_list) == 0);

	/* We should have at least one data sector. */
	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));

	/*
	 * Reset errors, as we may have errors inherited from a degraded
	 * write.
	 */
	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/*
	 * Start assembly. Make bios for everything from the higher layers (the
	 * bio_list in our rbio) and our P/Q. Ignore everything else.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	if (likely(!rbio->bioc->replace_nr_stripes))
		return 0;

	/*
	 * Make a copy for the replace target device.
	 *
	 * Thus the source stripe number (in replace_stripe_src) should be valid.
	 */
	ASSERT(rbio->bioc->replace_stripe_src >= 0);

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/*
		 * For RAID56, there is only one device that can be replaced,
		 * and replace_stripe_src indicates the stripe number we
		 * need to copy from.
		 */
		if (stripe != rbio->bioc->replace_stripe_src) {
			/*
			 * We can skip the whole stripe completely, note
			 * total_sector_nr will be increased by one anyway.
			 */
			ASSERT(sectornr == 0);
			total_sector_nr += rbio->stripe_nsectors - 1;
			continue;
		}

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector,
					 rbio->real_stripes,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	return 0;
error:
	bio_list_put(bio_list);
	return -EIO;
}

static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->full_stripe_logical;
	int total_nr_sector = offset >> fs_info->sectorsize_bits;

	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);

	bitmap_set(rbio->error_bitmap, total_nr_sector,
		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

	/*
	 * Special handling for raid56_alloc_missing_rbio() used by
	 * scrub/replace. Unlike the call path in raid56_parity_recover(),
	 * they pass an empty bio here. Thus we have to find out the missing
	 * device and mark the stripe error instead.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bool found_missing = false;
		int stripe_nr;

		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
				found_missing = true;
				bitmap_set(rbio->error_bitmap,
					   stripe_nr * rbio->stripe_nsectors,
					   rbio->stripe_nsectors);
			}
		}
		ASSERT(found_missing);
	}
}

/*
 * For the subpage case, we can no longer set the page Uptodate directly for
 * stripe_pages[], thus we need to locate the sector.
 */
static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
					     struct page *page,
					     unsigned int pgoff)
{
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		if (sector->page == page && sector->pgoff == pgoff)
			return sector;
	}
	return NULL;
}

/*
 * This sets each page in the bio uptodate. It should only be used on private
 * rbio pages, nothing that comes in from the higher layers.
 */
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct sector_ptr *sector;
		int pgoff;

		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
		     pgoff += sectorsize) {
			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
			ASSERT(sector);
			if (sector)
				sector->uptodate = 1;
		}
	}
}

static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct bio_vec *bv = bio_first_bvec_all(bio);
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector;

		sector = &rbio->stripe_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
		sector = &rbio->bio_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
	}
	ASSERT(i < rbio->nr_sectors);
	return i;
}

static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	u32 bio_size = 0;
	struct bio_vec *bvec;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		bio_size += bvec->bv_len;

	/*
	 * Since we can have multiple bios touching the error_bitmap, we cannot
	 * call bitmap_set() without protection.
	 *
	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
	 */
	for (i = total_sector_nr; i < total_sector_nr +
	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
		set_bit(i, rbio->error_bitmap);
}

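/*
 * Illustrative example: a failed 8K bio whose first sector maps to
 * total_sector_nr 10 sets bits 10 and 11 (two 4K sectors) in
 * error_bitmap, each with an atomic set_bit().
 */
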
/* Verify the data sectors at read time. */
static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
				    struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* No data csum for the whole stripe, no need to verify. */
	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return;

	/* P/Q stripes, they have no data csum to verify against. */
	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		int bv_offset;

		for (bv_offset = bvec->bv_offset;
		     bv_offset < bvec->bv_offset + bvec->bv_len;
		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
			u8 csum_buf[BTRFS_CSUM_SIZE];
			u8 *expected_csum = rbio->csum_buf +
					    total_sector_nr * fs_info->csum_size;
			int ret;

			/* No csum for this sector, skip to the next sector. */
			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
				continue;

			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
				bv_offset, csum_buf, expected_csum);
			if (ret < 0)
				set_bit(total_sector_nr, rbio->error_bitmap);
		}
	}
}

static void raid_wait_read_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status) {
		rbio_update_error_bitmap(rbio, bio);
	} else {
		set_bio_pages_uptodate(rbio, bio);
		verify_bio_data_sectors(rbio, bio);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
				      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_read_end_io;

		if (trace_raid56_read_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_read(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}

	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
}

static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, 0);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * We use plugging callbacks to collect full stripes. Any time we get a
 * partial stripe write while plugged we collect it into a list. When the
 * unplug comes down, we sort the list by logical block number and merge
 * everything we can into the same rbios.
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						       plug_list);
	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						       plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	list_sort(NULL, &plug->rbio_list, plug_cmp);

	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* We have a full stripe, queue it down. */
			start_async_work(cur, rmw_rbio_work);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				free_raid_bio(cur);
				continue;
			}
			start_async_work(last, rmw_rbio_work);
		}
		last = cur;
	}
	if (last)
		start_async_work(last, rmw_rbio_work);
	kfree(plug);
}

/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const u64 full_stripe_start = rbio->bioc->full_stripe_logical;
	const u32 orig_len = orig_bio->bi_iter.bi_size;
	const u32 sectorsize = fs_info->sectorsize;
	u64 cur_logical;

	ASSERT(orig_logical >= full_stripe_start &&
	       orig_logical + orig_len <= full_stripe_start +
	       rbio->nr_data * BTRFS_STRIPE_LEN);

	bio_list_add(&rbio->bio_list, orig_bio);
	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;

	/* Update the dbitmap. */
	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
	     cur_logical += sectorsize) {
		int bit = ((u32)(cur_logical - full_stripe_start) >>
			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;

		set_bit(bit, &rbio->dbitmap);
	}
}

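/*
 * Illustrative dbitmap example: with 4K sectors and stripe_nsectors == 16,
 * a write starting 68K past full_stripe_start covers sector 17 of the
 * full stripe, i.e. bit 17 % 16 == 1 of dbitmap (vertical stripe 1).
 */
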
/*
 * Our main entry point for writes from the rest of the FS.
 */
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}
	rbio->operation = BTRFS_RBIO_WRITE;
	rbio_add_bio(rbio, bio);

	/*
	 * Don't plug on full rbios, just get them out the door
	 * as quickly as we can.
	 */
	if (!rbio_is_full(rbio)) {
		cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
		if (cb) {
			plug = container_of(cb, struct btrfs_plug_cb, cb);
			if (!plug->info) {
				plug->info = fs_info;
				INIT_LIST_HEAD(&plug->rbio_list);
			}
			list_add_tail(&rbio->plug_list, &plug->rbio_list);
			return;
		}
	}

	/*
	 * Either we don't have any existing plug, or we're doing a full stripe,
	 * queue the rmw work now.
	 */
	start_async_work(rbio, rmw_rbio_work);
}

static int verify_one_sector(struct btrfs_raid_bio *rbio,
			     int stripe_nr, int sector_nr)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	u8 csum_buf[BTRFS_CSUM_SIZE];
	u8 *csum_expected;
	int ret;

	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return 0;

	/* No way to verify P/Q as they are not covered by data csum. */
	if (stripe_nr >= rbio->nr_data)
		return 0;
	/*
	 * If we're rebuilding a read, we have to use pages from the
	 * bio list if possible.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
	} else {
		sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
	}

	ASSERT(sector->page);

	csum_expected = rbio->csum_buf +
			(stripe_nr * rbio->stripe_nsectors + sector_nr) *
			fs_info->csum_size;
	ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
				      csum_buf, csum_expected);
	return ret;
}

/*
 * Recover a vertical stripe specified by @sector_nr.
 * @*pointers are the pre-allocated pointers by the caller, so we don't
 * need to allocate/free the pointers again and again.
 */
static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
			    void **pointers, void **unmap_array)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	const u32 sectorsize = fs_info->sectorsize;
	int found_errors;
	int faila;
	int failb;
	int stripe_nr;
	int ret = 0;

	/*
	 * Now we just use the bitmap to mark the horizontal stripes in
	 * which we have data when doing parity scrub.
	 */
	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
	    !test_bit(sector_nr, &rbio->dbitmap))
		return 0;

	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
						 &failb);
	/*
	 * No errors in the vertical stripe, skip it. Can happen for recovery
	 * where only part of a stripe failed the csum check.
	 */
	if (!found_errors)
		return 0;

	if (found_errors > rbio->bioc->max_errors)
		return -EIO;

	/*
	 * Set up our array of pointers with sectors from each stripe
	 *
	 * NOTE: store a duplicate array of pointers to preserve the
	 * pointer order.
	 */
	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		/*
		 * If we're rebuilding a read, we have to use pages from the
		 * bio list if possible.
		 */
		if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
		} else {
			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
		}
		ASSERT(sector->page);
		pointers[stripe_nr] = kmap_local_page(sector->page) +
				      sector->pgoff;
		unmap_array[stripe_nr] = pointers[stripe_nr];
	}

	/* All raid6 handling here */
	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
		/* Single failure, rebuild from parity raid5 style */
		if (failb < 0) {
			if (faila == rbio->nr_data)
				/*
				 * Just the P stripe has failed, without
				 * a bad data or Q stripe.
				 * We have nothing to do, just skip the
				 * recovery for this stripe.
				 */
				goto cleanup;
			/*
			 * A single failure in raid6 is rebuilt
			 * in the pstripe code below.
			 */
			goto pstripe;
		}

		/*
		 * If the Q stripe failed, do a pstripe reconstruction from
		 * the xors.
		 * If both the Q stripe and the P stripe failed, we're
		 * here due to a crc mismatch and we can't give them the
		 * data they want.
		 */
		if (failb == rbio->real_stripes - 1) {
			if (faila == rbio->real_stripes - 2)
				/*
				 * Only P and Q are corrupted.
				 * We only care about data stripes recovery,
				 * can skip this vertical stripe.
				 */
				goto cleanup;
			/*
			 * Otherwise we have one bad data stripe and
			 * a good P stripe. raid5!
			 */
			goto pstripe;
		}

		if (failb == rbio->real_stripes - 2) {
			raid6_datap_recov(rbio->real_stripes, sectorsize,
					  faila, pointers);
		} else {
			raid6_2data_recov(rbio->real_stripes, sectorsize,
					  faila, failb, pointers);
		}
	} else {
		void *p;

		/* Rebuild from P stripe here (raid5 or raid6). */
		ASSERT(failb == -1);
pstripe:
		/* Copy parity block into failed block to start with */
		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);

		/* Rearrange the pointer array */
		p = pointers[faila];
		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
		     stripe_nr++)
			pointers[stripe_nr] = pointers[stripe_nr + 1];
		pointers[rbio->nr_data - 1] = p;

		/* Xor in the rest */
		run_xor(pointers, rbio->nr_data - 1, sectorsize);

	}

	/*
	 * No matter if this is a RMW or recovery, we should have all
	 * failed sectors repaired in the vertical stripe, thus they are now
	 * uptodate.
	 * Especially if we determine to cache the rbio, we need to
	 * have at least all data sectors uptodate.
	 *
	 * If possible, also check if the repaired sector matches its data
	 * checksum.
	 */
	if (faila >= 0) {
		ret = verify_one_sector(rbio, faila, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, faila, sector_nr);
		sector->uptodate = 1;
	}
	if (failb >= 0) {
		ret = verify_one_sector(rbio, failb, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, failb, sector_nr);
		sector->uptodate = 1;
	}

cleanup:
	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
		kunmap_local(unmap_array[stripe_nr]);
	return ret;
}

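/*
 * Illustrative walk-through of the pstripe path above: with nr_data == 3
 * and faila == 0, the P copy first lands in D0's mapping, the array is
 * rotated to [D1, D2, P-copy], and run_xor() XORs D1 and D2 into the last
 * slot. Since that slot still aliases D0's page, D0 is rebuilt in place.
 */
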
static int recover_sectors(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sectornr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores copy of pointers that does not get reordered
	 * during reconstruction so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		spin_lock(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
		if (ret < 0)
			break;
	}

out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static void recover_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/*
	 * Either we're doing recovery for a read failure or a degraded write,
	 * the caller should have set the error bitmap correctly.
	 */
	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));

	/* For recovery, we need to read all sectors including P/Q. */
	ret = alloc_rbio_pages(rbio);
	if (ret < 0)
		goto out;

	index_rbio_pages(rbio);

	/*
	 * Read everything that hasn't failed. However this time we will
	 * not trust any cached sector, as it may contain stale data that
	 * the higher layer is not reading.
	 *
	 * So here we always re-read everything in the recovery path.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/*
		 * Skip the range which has error. It can be a range which is
		 * marked error (for csum mismatch), or it can be a missing
		 * device.
		 */
		if (!rbio->bioc->stripes[stripe].dev->bdev ||
		    test_bit(total_sector_nr, rbio->error_bitmap)) {
			/*
			 * Also set the error bit for missing device, which
			 * may not yet have its error bit set.
			 */
			set_bit(total_sector_nr, rbio->error_bitmap);
			continue;
		}

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret < 0) {
			bio_list_put(&bio_list);
			goto out;
		}
	}

	submit_read_wait_bio_list(rbio, &bio_list);
	ret = recover_sectors(rbio);
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void recover_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (!lock_stripe_add(rbio))
		recover_rbio(rbio);
}

static void recover_rbio_work_locked(struct work_struct *work)
{
	recover_rbio(container_of(work, struct btrfs_raid_bio, work));
}

static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
{
	bool found = false;
	int sector_nr;

	/*
	 * This is for RAID6 extra recovery tries, thus the mirror number
	 * should be larger than 2.
	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
	 * RAID5 methods.
	 */
	ASSERT(mirror_num > 2);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;
		int faila;
		int failb;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		/* This vertical stripe doesn't have errors. */
		if (!found_errors)
			continue;

		/*
		 * If we found errors, there should be only one error marked
		 * by previous set_rbio_range_error().
		 */
		ASSERT(found_errors == 1);
		found = true;

		/* Now select another stripe to mark as error. */
		failb = rbio->real_stripes - (mirror_num - 1);
		if (failb <= faila)
			failb--;

		/* Set the extra bit in the error bitmap. */
		if (failb >= 0)
			set_bit(failb * rbio->stripe_nsectors + sector_nr,
				rbio->error_bitmap);
	}

	/* We should have found at least one vertical stripe with an error. */
	ASSERT(found);
}

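/*
 * Illustrative mapping of mirror_num to the extra failed stripe: for a
 * 4-disk RAID6 (real_stripes == 4), mirror_num == 3 starts with
 * failb == 4 - 2 == 2; if the existing error sits at or above that index,
 * failb shifts down by one, so each retry marks a different surviving
 * stripe as bad and forces a different reconstruction.
 */
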
/*
 * The main entry point for reads from the higher layers. This is really
 * only called when the normal read path had a failure, so we assume the
 * bio they send down corresponds to a failed part of the drive.
 */
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
			   int mirror_num)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	rbio_add_bio(rbio, bio);

	set_rbio_range_error(rbio, bio);

	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2)
		set_rbio_raid6_extra_error(rbio, mirror_num);

	start_async_work(rbio, recover_rbio_work);
}

static void fill_data_csums(struct btrfs_raid_bio *rbio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
						       rbio->bioc->full_stripe_logical);
	const u64 start = rbio->bioc->full_stripe_logical;
	const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
			fs_info->sectorsize_bits;
	int ret;

	/* The rbio should not have its csum buffer initialized. */
	ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);

	/*
	 * Skip the csum search if:
	 *
	 * - The rbio doesn't belong to data block groups
	 *   Then we are doing IO for tree blocks, no need to search csums.
	 *
	 * - The rbio belongs to mixed block groups
	 *   This is to avoid deadlock: we're already holding the full
	 *   stripe lock, and if we trigger a metadata read that itself
	 *   needs to do raid56 recovery, we would deadlock.
	 */
	if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
	    rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
		return;

	rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
				 fs_info->csum_size, GFP_NOFS);
	rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
					  GFP_NOFS);
	if (!rbio->csum_buf || !rbio->csum_bitmap) {
		ret = -ENOMEM;
		goto error;
	}

	ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1,
					rbio->csum_buf, rbio->csum_bitmap);
	if (ret < 0)
		goto error;
	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
		goto no_csum;
	return;

error:
	/*
	 * We failed to allocate memory or grab the csum, but it's not fatal,
	 * we can still continue. But better to warn users that RMW is no
	 * longer safe for this particular sub-stripe write.
	 */
	btrfs_warn_rl(fs_info,
"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
		      rbio->bioc->full_stripe_logical, ret);
no_csum:
	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;
}

static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/*
	 * Fill the data csums we need for data verification. We need to fill
	 * the csum_bitmap/csum_buf first, as our endio function will try to
	 * verify the data sectors.
	 */
	fill_data_csums(rbio);

	/*
	 * Build a list of bios to read all sectors (including data and P/Q).
	 *
	 * This is to accommodate the later csum verification and recovery.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 stripe, sectornr, REQ_OP_READ);
		if (ret) {
			bio_list_put(&bio_list);
			return ret;
		}
	}

	/*
	 * We may or may not have any corrupted sectors (including missing
	 * devices and csum mismatches), just let recover_sectors() handle
	 * them all.
	 */
	submit_read_wait_bio_list(rbio, &bio_list);
	return recover_sectors(rbio);
}

static void raid_wait_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;

	if (err)
		rbio_update_error_bitmap(rbio, bio);
	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_write_bios(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_write_end_io;

		if (trace_raid56_write_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_write(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}
}

/*
 * Determine if we need to read any sector from the disk.
 * Should only be used in the RMW path, to skip a cached rbio.
 */
static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	int i;

	for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		/*
		 * We have a sector which doesn't have a page and is not
		 * uptodate, thus this rbio cannot be a cached one, as a
		 * cached one must have all its data sectors present and
		 * uptodate.
		 */
		if (!sector->page || !sector->uptodate)
			return true;
	}
	return false;
}

static void rmw_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list;
	int sectornr;
	int ret = 0;

	/*
	 * Allocate the pages for parity first, as P/Q pages will always be
	 * needed for both full-stripe and sub-stripe writes.
	 */
	ret = alloc_rbio_parity_pages(rbio);
	if (ret < 0)
		goto out;

	/*
	 * Either this is a full stripe write, or we have every data sector
	 * already cached, so we can go to the write path immediately.
	 */
	if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
		/*
		 * Now we're doing a sub-stripe write, and also need all data
		 * stripes to do the full RMW.
		 */
		ret = alloc_rbio_data_pages(rbio);
		if (ret < 0)
			goto out;

		index_rbio_pages(rbio);

		ret = rmw_read_wait_recover(rbio);
		if (ret < 0)
			goto out;
	}

	/*
	 * At this stage we're not allowed to add any new bios to the
	 * bio list any more, anyone else that wants to change this stripe
	 * needs to do their own rmw.
	 */
	spin_lock(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock(&rbio->bio_list_lock);

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	index_rbio_pages(rbio);

	/*
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon. If they do use it again,
	 * hopefully they will send another full bio.
	 */
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
		generate_pq_vertical(rbio, sectornr);

	bio_list_init(&bio_list);
	ret = rmw_assemble_write_bios(rbio, &bio_list);
	if (ret < 0)
		goto out;

	/* We should have at least one bio assembled. */
	ASSERT(bio_list_size(&bio_list));
	submit_write_bios(rbio, &bio_list);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);

	/* We may have more errors than our tolerance during the write. */
	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void rmw_rbio_work(struct work_struct *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	if (lock_stripe_add(rbio) == 0)
		rmw_rbio(rbio);
}

static void rmw_rbio_work_locked(struct work_struct *work)
{
	rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bioc.
 *
 * Note: we need to make sure that all the pages added to the scrub/replace
 * raid bio are correct and do not change during the scrub/replace, i.e.
 * those pages hold only metadata or file data protected by a checksum.
 */

struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
				struct btrfs_io_context *bioc,
				struct btrfs_device *scrub_dev,
				unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bioc->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
	return rbio;
}

/*
 * We only scrub the parity for which we have correct data on the same
 * horizontal stripe, so we don't need to allocate pages for all the
 * stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	int total_sector_nr;

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct page *page;
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;

		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;
		if (rbio->stripe_pages[index])
			continue;
		page = alloc_page(GFP_NOFS);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[index] = page;
	}
	index_stripe_sectors(rbio);
	return 0;
}

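/*
 * Illustrative page index math for the above: with 4K sectors and 4K
 * pages, index == total_sector_nr; with 4K sectors and 64K pages (the
 * subpage case), sixteen consecutive sectors share one page.
 */
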
static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
{
	struct btrfs_io_context *bioc = rbio->bioc;
	const u32 sectorsize = bioc->fs_info->sectorsize;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = &rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int sectornr;
	bool has_qstripe;
	struct sector_ptr p_sector = { 0 };
	struct sector_ptr q_sector = { 0 };
	struct bio_list bio_list;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	/*
	 * If replace is running and our P/Q stripe is being replaced, we need
	 * to duplicate the final write to the replace target.
	 */
	if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
		is_replace = 1;
		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
	}

	/*
	 * The higher layers (scrubber) are unlikely to use this area of the
	 * disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	p_sector.page = alloc_page(GFP_NOFS);
	if (!p_sector.page)
		return -ENOMEM;
	p_sector.pgoff = 0;
	p_sector.uptodate = 1;

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_sector.page = alloc_page(GFP_NOFS);
		if (!q_sector.page) {
			__free_page(p_sector.page);
			p_sector.page = NULL;
			return -ENOMEM;
		}
		q_sector.pgoff = 0;
		q_sector.uptodate = 1;
		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
	}

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap_local_page(p_sector.page);

	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;
		void *parity;

		/* First collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
			pointers[stripe] = kmap_local_page(sector->page) +
					   sector->pgoff;
		}

		if (has_qstripe) {
			assert_rbio(rbio);
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], sectorsize);
			run_xor(pointers + 1, nr_data - 1, sectorsize);
		}

		/* Check the scrubbed parity and repair it */
		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		parity = kmap_local_page(sector->page) + sector->pgoff;
		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
			memcpy(parity, pointers[rbio->scrubp], sectorsize);
		else
			/* Parity is right, no need to write it back */
			bitmap_clear(&rbio->dbitmap, sectornr, 1);
		kunmap_local(parity);

		for (stripe = nr_data - 1; stripe >= 0; stripe--)
			kunmap_local(pointers[stripe]);
	}

	kunmap_local(pointers[nr_data]);
	__free_page(p_sector.page);
	p_sector.page = NULL;
	if (q_sector.page) {
		kunmap_local(pointers[rbio->real_stripes - 1]);
		__free_page(q_sector.page);
		q_sector.page = NULL;
	}

	/*
	 * Time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our P/Q. Ignore
	 * everything else.
	 */
	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	/*
	 * Replace is running and our parity stripe needs to be duplicated to
	 * the target device. Check we have a valid source stripe number.
	 */
	ASSERT(rbio->bioc->replace_stripe_src >= 0);
	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
		struct sector_ptr *sector;

		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
		ret = rbio_add_io_sector(rbio, &bio_list, sector,
					 rbio->real_stripes,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto cleanup;
	}

submit_write:
	submit_write_bios(rbio, &bio_list);
	return 0;

cleanup:
	bio_list_put(&bio_list);
	return ret;
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sector_nr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores copy of pointers that does not get reordered
	 * during reconstruction so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int dfail = 0, failp = -1;
		int faila;
		int failb;
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 &faila, &failb);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			goto out;
		}
		if (found_errors == 0)
			continue;

		/* We should have at least one error here. */
		ASSERT(faila >= 0 || failb >= 0);

		if (is_data_stripe(rbio, faila))
			dfail++;
		else if (is_parity_stripe(faila))
			failp = faila;

		if (is_data_stripe(rbio, failb))
			dfail++;
		else if (is_parity_stripe(failb))
			failp = failb;
		/*
		 * Because we can not use a parity that is being scrubbed to
		 * repair the data, our repair capability is reduced by one.
		 * (In the case of RAID5, we can not repair anything.)
		 */
		if (dfail > rbio->bioc->max_errors - 1) {
			ret = -EIO;
			goto out;
		}
		/*
		 * If all data is good and only the parity is bad, just repair
		 * the parity, no need to recover data stripes.
		 */
		if (dfail == 0)
			continue;

		/*
		 * Here we got one corrupted data stripe and one corrupted
		 * parity on RAID6. If the corrupted parity is the scrubbing
		 * parity, luckily we can use the other one to repair the
		 * data; otherwise we can not repair the data stripe.
		 */
		if (failp != rbio->scrubp) {
			ret = -EIO;
			goto out;
		}

		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
		if (ret < 0)
			goto out;
	}
out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/* Build a list of bios to read all the missing parts. */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		int sectornr = total_sector_nr % rbio->stripe_nsectors;
		int stripe = total_sector_nr / rbio->stripe_nsectors;
		struct sector_ptr *sector;

		/* No data in the vertical stripe, no need to read. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		/*
		 * We want to find all the sectors missing from the rbio and
		 * read them from the disk. If sector_in_rbio() finds a sector
		 * in the bio list we don't need to read it off the stripe.
		 */
		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
		if (sector)
			continue;

		sector = rbio_stripe_sector(rbio, stripe, sectornr);
		/*
		 * The bio cache may have handed us an uptodate sector. If so,
		 * use it.
		 */
		if (sector->uptodate)
			continue;

		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
					 sectornr, REQ_OP_READ);
		if (ret) {
			bio_list_put(&bio_list);
			return ret;
		}
	}

	submit_read_wait_bio_list(rbio, &bio_list);
	return 0;
}

static void scrub_rbio(struct btrfs_raid_bio *rbio)
{
	int sector_nr;
	int ret;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto out;

	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	ret = scrub_assemble_read_bios(rbio);
	if (ret < 0)
		goto out;

	/* We may have some failures, recover the failed sectors first. */
	ret = recover_scrub_rbio(rbio);
	if (ret < 0)
		goto out;

	/*
	 * Now every sector is properly prepared. We can finish the scrub
	 * and write back the good content.
	 */
	ret = finish_parity_scrub(rbio);
	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
		int found_errors;

		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
		if (found_errors > rbio->bioc->max_errors) {
			ret = -EIO;
			break;
		}
	}
out:
	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
}

static void scrub_rbio_work_locked(struct work_struct *work)
{
	scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_rbio_work_locked);
}

/*
 * This is for scrub call sites where we already have correct data contents.
 * This allows us to avoid reading data stripes again.
 *
 * Unfortunately here we have to do page copies, rather than reusing the
 * pages. This is because the rbio has its own page management for its cache.
 */
void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
				    struct page **data_pages, u64 data_logical)
{
	const u64 offset_in_full_stripe = data_logical -
					  rbio->bioc->full_stripe_logical;
	const int page_index = offset_in_full_stripe >> PAGE_SHIFT;
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int ret;

	/*
	 * If we hit ENOMEM temporarily, but it later succeeds at
	 * raid56_parity_submit_scrub_rbio() time, we just do the extra
	 * read, not a big deal.
	 *
	 * If we hit ENOMEM later at raid56_parity_submit_scrub_rbio() time,
	 * the bio will get a proper error number set.
	 */
	ret = alloc_rbio_data_pages(rbio);
	if (ret < 0)
		return;

	/* data_logical must be at stripe boundary and inside the full stripe. */
	ASSERT(IS_ALIGNED(offset_in_full_stripe, BTRFS_STRIPE_LEN));
	ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));

	for (int page_nr = 0; page_nr < (BTRFS_STRIPE_LEN >> PAGE_SHIFT); page_nr++) {
		struct page *dst = rbio->stripe_pages[page_nr + page_index];
		struct page *src = data_pages[page_nr];

		memcpy_page(dst, 0, src, 0, PAGE_SIZE);
		for (int sector_nr = sectors_per_page * (page_index + page_nr);
		     sector_nr < sectors_per_page * (page_index + page_nr + 1);
		     sector_nr++)
			rbio->stripe_sectors[sector_nr].uptodate = true;
	}
}
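
/*
 * Illustrative example for the cache helper above: with 4K pages, caching
 * the 64K stripe that starts 64K into the full stripe gives page_index 16,
 * so data_pages[0..15] are copied into stripe_pages[16..31] and the
 * matching stripe_sectors[] entries are marked uptodate.
 */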