// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "file-item.h"
#include "btrfs_inode.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

/*
 * A bvec like structure to present a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;
	unsigned int pgoff:24;
	unsigned int uptodate:8;
};
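
/*
 * For illustration: with a 4K sectorsize and 64K pages, sector 5 of a
 * stripe page lives at pgoff = 5 * 4096 = 20480.  24 bits of pgoff are
 * plenty for any page size the kernel supports, and uptodate only ever
 * holds 0 or 1.
 */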

static void rmw_rbio_work(struct work_struct *work);
static void rmw_rbio_work_locked(struct work_struct *work);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static int finish_parity_scrub(struct btrfs_raid_bio *rbio);
static void scrub_rbio_work_locked(struct work_struct *work);

static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
{
	bitmap_free(rbio->error_bitmap);
	kfree(rbio->stripe_pages);
	kfree(rbio->bio_sectors);
	kfree(rbio->stripe_sectors);
	kfree(rbio->finish_pointers);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bioc(rbio->bioc);
	free_raid_bio_pointers(rbio);
	kfree(rbio);
}

static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
{
	INIT_WORK(&rbio->work, work_func);
	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_sectors array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_sectors; i++) {
		/* Some range not covered by bio (partial write), skip it */
		if (!rbio->bio_sectors[i].page) {
			/*
			 * Even if the sector is not covered by bio, if it is
			 * a data sector it should still be uptodate as it is
			 * read from disk.
			 */
			if (i < rbio->nr_data * rbio->stripe_nsectors)
				ASSERT(rbio->stripe_sectors[i].uptodate);
			continue;
		}

		ASSERT(rbio->stripe_sectors[i].page);
		memcpy_page(rbio->stripe_sectors[i].page,
			    rbio->stripe_sectors[i].pgoff,
			    rbio->bio_sectors[i].page,
			    rbio->bio_sectors[i].pgoff,
			    rbio->bioc->fs_info->sectorsize);
		rbio->stripe_sectors[i].uptodate = 1;
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bioc->full_stripe_logical;

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
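
/*
 * Example of the bucketing above: a full stripe starting at logical
 * 1GiB (0x40000000) hashes 0x40000000 >> 16 = 0x4000 through hash_64()
 * into one of the 1 << 11 = 2048 buckets of the stripe hash table.
 */
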
217
218static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
219 unsigned int page_nr)
220{
221 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
222 const u32 sectors_per_page = PAGE_SIZE / sectorsize;
223 int i;
224
225 ASSERT(page_nr < rbio->nr_pages);
226
227 for (i = sectors_per_page * page_nr;
228 i < sectors_per_page * page_nr + sectors_per_page;
229 i++) {
230 if (!rbio->stripe_sectors[i].uptodate)
231 return false;
232 }
233 return true;
234}
235
/*
 * Update the stripe_sectors[] array to use correct page and pgoff
 *
 * Should be called every time any page pointer in stripe_pages[] got modified.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset;
	int i;

	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
	}
}
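
/*
 * Mapping example for the helper above, assuming a 4K sectorsize: with
 * 4K pages, sector i maps to stripe_pages[i] at pgoff 0; with 64K pages,
 * sectors 0-15 all map to stripe_pages[0] at pgoff 0, 4096, ..., 61440.
 */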

static void steal_rbio_page(struct btrfs_raid_bio *src,
			    struct btrfs_raid_bio *dest, int page_nr)
{
	const u32 sectorsize = src->bioc->fs_info->sectorsize;
	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
	int i;

	if (dest->stripe_pages[page_nr])
		__free_page(dest->stripe_pages[page_nr]);
	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
	src->stripe_pages[page_nr] = NULL;

	/* Also update the sector->uptodate bits. */
	for (i = sectors_per_page * page_nr;
	     i < sectors_per_page * page_nr + sectors_per_page; i++)
		dest->stripe_sectors[i].uptodate = true;
}

static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
{
	const int sector_nr = (page_nr << PAGE_SHIFT) >>
			      rbio->bioc->fs_info->sectorsize_bits;

	/*
	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
	 * we won't have a page which is half data half parity.
	 *
	 * Thus if the first sector of the page belongs to data stripes, then
	 * the full page belongs to data stripes.
	 */
	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
}

/*
 * Stealing an rbio means taking all the uptodate pages from the stripe array
 * in the source rbio and putting them into the destination rbio.
 *
 * This will also update the involved stripe_sectors[] which are referring to
 * the old pages.
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		struct page *p = src->stripe_pages[i];

		/*
		 * We don't need to steal P/Q pages as they will always be
		 * regenerated for RMW or full write anyway.
		 */
		if (!is_data_stripe_page(src, i))
			continue;

		/*
		 * If @src already has RBIO_CACHE_READY_BIT, it should have
		 * all data stripe pages present and uptodate.
		 */
		ASSERT(p);
		ASSERT(full_page_sectors_uptodate(src, i));
		steal_rbio_page(src, dest, i);
	}
	index_stripe_sectors(dest);
	index_stripe_sectors(src);
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	/* Also inherit the bitmaps from @victim. */
	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
		  dest->stripe_nsectors);
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock(&table->cache_lock);
	__remove_rbio_from_cache(rbio);
	spin_unlock(&table->cache_lock);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock(&table->cache_lock);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock(&table->cache_lock);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->bioc->fs_info->stripe_hash_table;

	spin_lock(&table->cache_lock);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock(&table->cache_lock);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
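
/*
 * Note that xor_blocks() accumulates into @dest, so callers must seed
 * @dest first.  E.g. generate_pq_vertical() below copies the first data
 * block into the P sector and then run_xor()s the remaining nr_data - 1
 * blocks, yielding P = D0 ^ D1 ^ ... ^ Dn-1.
 */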

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock(&rbio->bio_list_lock);
	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
		ret = 0;
	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
	spin_unlock(&rbio->bio_list_lock);

	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * For a parity scrub we need to read the full stripe from the drive,
	 * check and repair the parity, and write the new results back.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD)
		return 0;

	return 1;
}

static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	ASSERT(stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr < rbio->stripe_nsectors);

	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* Return a sector from rbio->stripe_sectors, not from the bio list */
static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
					     unsigned int stripe_nr,
					     unsigned int sector_nr)
{
	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
							      sector_nr)];
}

/* Grab a sector inside P stripe */
static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
}

/* Grab a sector inside Q stripe, return NULL if not RAID6 */
static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
					      unsigned int sector_nr)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
}
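
/*
 * Index math example for the helpers above: for RAID6 with 4 real
 * stripes (2 data + P + Q) and 16 sectors per stripe, sector 5 of the
 * P stripe (stripe_nr 2) is at index 2 * 16 + 5 = 37, the matching Q
 * sector at 3 * 16 + 5 = 53.  For RAID5, nr_data + 1 == real_stripes,
 * so rbio_qstripe_sector() returns NULL.
 */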

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock(&h->lock);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones.  We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock(&h->lock);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		free_raid_bio(freeit);
	return ret;
}
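
/*
 * A minimal caller sketch for the locking contract above (see
 * recover_rbio_work() below for a real user):
 *
 *	if (!lock_stripe_add(rbio)) {
 *		(start the IO, we own the stripe lock)
 *	} else {
 *		(merged or queued on the plug list; the rbio may already
 *		 be freed and must not be touched again)
 *	}
 */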

static void recover_rbio_work_locked(struct work_struct *work);

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock(&h->lock);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock(&h->lock);

			if (next->operation == BTRFS_RBIO_READ_REBUILD) {
				start_async_work(next, recover_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_rbio_work_locked);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_rbio_work_locked);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	kfree(rbio->csum_buf);
	bitmap_free(rbio->csum_bitmap);
	rbio->csum_buf = NULL;
	rbio->csum_bitmap = NULL;

	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so there will be no new bios for
	 * this rbio.
	 */
	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 *
 * @rbio:               The raid bio
 * @stripe_nr:          Stripe number, valid range [0, real_stripes)
 * @sector_nr:          Sector number inside the stripe,
 *                      valid range [0, stripe_nsectors)
 * @bio_list_only:      Whether to use sectors inside the bio list only.
 *
 * The read/modify/write code wants to reuse the original bio page as much
 * as possible, and only use stripe_sectors as fallback.
 */
static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
					 int stripe_nr, int sector_nr,
					 bool bio_list_only)
{
	struct sector_ptr *sector;
	int index;

	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);

	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
	ASSERT(index >= 0 && index < rbio->nr_sectors);

	spin_lock(&rbio->bio_list_lock);
	sector = &rbio->bio_sectors[index];
	if (sector->page || bio_list_only) {
		/* Don't return sector without a valid page pointer */
		if (!sector->page)
			sector = NULL;
		spin_unlock(&rbio->bio_list_lock);
		return sector;
	}
	spin_unlock(&rbio->bio_list_lock);

	return &rbio->stripe_sectors[index];
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that this does
 * not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_io_context *bioc)
{
	const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
	const unsigned int num_pages = stripe_npages * real_stripes;
	const unsigned int stripe_nsectors =
		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
	const unsigned int num_sectors = stripe_nsectors * real_stripes;
	struct btrfs_raid_bio *rbio;

	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
	/*
	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
	 * (at most 16) should be no larger than BITS_PER_LONG.
	 */
	ASSERT(stripe_nsectors <= BITS_PER_LONG);

	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);
	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
				     GFP_NOFS);
	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				    GFP_NOFS);
	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
				       GFP_NOFS);
	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);

	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
	    !rbio->finish_pointers || !rbio->error_bitmap) {
		free_raid_bio_pointers(rbio);
		kfree(rbio);
		return ERR_PTR(-ENOMEM);
	}

	bio_list_init(&rbio->bio_list);
	init_waitqueue_head(&rbio->io_wait);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	btrfs_get_bioc(bioc);
	rbio->bioc = bioc;
	rbio->nr_pages = num_pages;
	rbio->nr_sectors = num_sectors;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->stripe_nsectors = stripe_nsectors;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->stripes_pending, 0);

	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);

	return rbio;
}
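
/*
 * Sizing example for the allocations above, assuming 4K pages and a 4K
 * sectorsize: BTRFS_STRIPE_LEN is 64K, so stripe_npages = stripe_nsectors
 * = 16.  A RAID6 full stripe over 6 devices (no replace running) gives
 * real_stripes = 6, num_pages = num_sectors = 96 and nr_data = 6 - 2 = 4.
 */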

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, 0);
	if (ret < 0)
		return ret;
	/* Mapping all sectors */
	index_stripe_sectors(rbio);
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
				     rbio->stripe_pages + data_pages, 0);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * Return the total number of errors found in the vertical stripe of @sector_nr.
 *
 * @faila and @failb will also be updated to the first and second stripe
 * number of the errors.
 */
static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
				     int *faila, int *failb)
{
	int stripe_nr;
	int found_errors = 0;

	if (faila || failb) {
		/*
		 * Both @faila and @failb should be valid pointers if any of
		 * them is specified.
		 */
		ASSERT(faila && failb);
		*faila = -1;
		*failb = -1;
	}

	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;

		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
			found_errors++;
			if (faila) {
				/* Update faila and failb. */
				if (*faila < 0)
					*faila = stripe_nr;
				else if (*failb < 0)
					*failb = stripe_nr;
			}
		}
	}
	return found_errors;
}
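
/*
 * Example: on a RAID6 rbio whose error bitmap has bits set for stripes
 * 1 and 3 of vertical stripe @sector_nr, the helper above returns 2 with
 * *faila == 1 and *failb == 3.  Callers compare the count against
 * bioc->max_errors (1 tolerated failure for RAID5, 2 for RAID6).
 */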

/*
 * Add a single sector @sector into our list of bios for IO.
 *
 * Return 0 if everything went well.
 * Return <0 for error.
 */
static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
			      struct bio_list *bio_list,
			      struct sector_ptr *sector,
			      unsigned int stripe_nr,
			      unsigned int sector_nr,
			      enum req_op op)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_io_stripe *stripe;
	u64 disk_start;

	/*
	 * Note: here stripe_nr has taken device replace into consideration,
	 * thus it can be larger than rbio->real_stripes.
	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
	 */
	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
	ASSERT(sector->page);

	stripe = &rbio->bioc->stripes[stripe_nr];
	disk_start = stripe->physical + sector_nr * sectorsize;

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev) {
		int found_errors;

		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
			rbio->error_bitmap);

		/* Check if we have reached tolerance early. */
		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
							 NULL, NULL);
		if (found_errors > rbio->bioc->max_errors)
			return -EIO;
		return 0;
	}

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, sector->page, sectorsize,
					   sector->pgoff);
			if (ret == sectorsize)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = bio_alloc(stripe->dev->bdev,
			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
			op, GFP_NOFS);
	bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
	bio->bi_private = rbio;

	__bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
	bio_list_add(bio_list, bio);
	return 0;
}

static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->full_stripe_logical;

	bio_for_each_segment(bvec, bio, iter) {
		u32 bvec_offset;

		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
		     bvec_offset += sectorsize, offset += sectorsize) {
			int index = offset / sectorsize;
			struct sector_ptr *sector = &rbio->bio_sectors[index];

			sector->page = bvec.bv_page;
			sector->pgoff = bvec.bv_offset + bvec_offset;
			ASSERT(sector->pgoff < PAGE_SIZE);
		}
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;

	spin_lock(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list)
		index_one_bio(rbio, bio);

	spin_unlock(&rbio->bio_list_lock);
}

static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
			       struct raid56_bio_trace_info *trace_info)
{
	const struct btrfs_io_context *bioc = rbio->bioc;
	int i;

	ASSERT(bioc);

	/* We rely on bio->bi_bdev to find the stripe number. */
	if (!bio->bi_bdev)
		goto not_found;

	for (i = 0; i < bioc->num_stripes; i++) {
		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
			continue;
		trace_info->stripe_nr = i;
		trace_info->devid = bioc->stripes[i].dev->devid;
		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
				     bioc->stripes[i].physical;
		return;
	}

not_found:
	trace_info->devid = -1;
	trace_info->offset = -1;
	trace_info->stripe_nr = -1;
}

static inline void bio_list_put(struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		bio_put(bio);
}

/* Generate PQ for one vertical stripe. */
static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
{
	void **pointers = rbio->finish_pointers;
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct sector_ptr *sector;
	int stripe;
	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;

	/* First collect one sector from each data stripe */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
		pointers[stripe] = kmap_local_page(sector->page) +
				   sector->pgoff;
	}

	/* Then add the parity stripe */
	sector = rbio_pstripe_sector(rbio, sectornr);
	sector->uptodate = 1;
	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;

	if (has_qstripe) {
		/*
		 * RAID6, add the qstripe and call the library function
		 * to fill in our p/q
		 */
		sector = rbio_qstripe_sector(rbio, sectornr);
		sector->uptodate = 1;
		pointers[stripe++] = kmap_local_page(sector->page) +
				     sector->pgoff;

		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
					pointers);
	} else {
		/* raid5 */
		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
	}
	for (stripe = stripe - 1; stripe >= 0; stripe--)
		kunmap_local(pointers[stripe]);
}
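
/*
 * The parity math above: for RAID5, P = D0 ^ D1 ^ ... ^ Dn-1.  For
 * RAID6, raid6_call.gen_syndrome() additionally computes the
 * Reed-Solomon syndrome Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1 over
 * GF(2^8), which is what allows recovering from two lost stripes.
 */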

static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
				   struct bio_list *bio_list)
{
	/* The total sector number inside the full stripe. */
	int total_sector_nr;
	int sectornr;
	int stripe;
	int ret;

	ASSERT(bio_list_size(bio_list) == 0);

	/* We should have at least one data sector. */
	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));

	/*
	 * Reset errors, as we may have errors inherited from degraded
	 * write.
	 */
	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);

	/*
	 * Start assembly.  Make bios for everything from the higher layers (the
	 * bio_list in our rbio) and our P/Q.  Ignore everything else.
	 */
	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	if (likely(!rbio->bioc->replace_nr_stripes))
		return 0;

	/*
	 * Make a copy for the replace target device.
	 *
	 * Thus the source stripe number (in replace_stripe_src) should be valid.
	 */
	ASSERT(rbio->bioc->replace_stripe_src >= 0);

	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
	     total_sector_nr++) {
		struct sector_ptr *sector;

		stripe = total_sector_nr / rbio->stripe_nsectors;
		sectornr = total_sector_nr % rbio->stripe_nsectors;

		/*
		 * For RAID56, there is only one device that can be replaced,
		 * and replace_stripe_src[0] indicates the stripe number we
		 * need to copy from.
		 */
		if (stripe != rbio->bioc->replace_stripe_src) {
			/*
			 * We can skip the whole stripe completely, note
			 * total_sector_nr will be increased by one anyway.
			 */
			ASSERT(sectornr == 0);
			total_sector_nr += rbio->stripe_nsectors - 1;
			continue;
		}

		/* This vertical stripe has no data, skip it. */
		if (!test_bit(sectornr, &rbio->dbitmap))
			continue;

		if (stripe < rbio->nr_data) {
			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
			if (!sector)
				continue;
		} else {
			sector = rbio_stripe_sector(rbio, stripe, sectornr);
		}

		ret = rbio_add_io_sector(rbio, bio_list, sector,
					 rbio->real_stripes,
					 sectornr, REQ_OP_WRITE);
		if (ret)
			goto error;
	}

	return 0;
error:
	bio_list_put(bio_list);
	return -EIO;
}

static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
		     rbio->bioc->full_stripe_logical;
	int total_nr_sector = offset >> fs_info->sectorsize_bits;

	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);

	bitmap_set(rbio->error_bitmap, total_nr_sector,
		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);

	/*
	 * Special handling for raid56_alloc_missing_rbio() used by
	 * scrub/replace.  Unlike call path in raid56_parity_recover(), they
	 * pass an empty bio here.  Thus we have to find out the missing device
	 * and mark the stripe error instead.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bool found_missing = false;
		int stripe_nr;

		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
				found_missing = true;
				bitmap_set(rbio->error_bitmap,
					   stripe_nr * rbio->stripe_nsectors,
					   rbio->stripe_nsectors);
			}
		}
		ASSERT(found_missing);
	}
}

/*
 * For subpage case, we can no longer set page Up-to-date directly for
 * stripe_pages[], thus we need to locate the sector.
 */
static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
					     struct page *page,
					     unsigned int pgoff)
{
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector = &rbio->stripe_sectors[i];

		if (sector->page == page && sector->pgoff == pgoff)
			return sector;
	}
	return NULL;
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct sector_ptr *sector;
		int pgoff;

		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
		     pgoff += sectorsize) {
			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
			ASSERT(sector);
			if (sector)
				sector->uptodate = 1;
		}
	}
}

static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	struct bio_vec *bv = bio_first_bvec_all(bio);
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		struct sector_ptr *sector;

		sector = &rbio->stripe_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
		sector = &rbio->bio_sectors[i];
		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
			break;
	}
	ASSERT(i < rbio->nr_sectors);
	return i;
}

static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
{
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	u32 bio_size = 0;
	struct bio_vec *bvec;
	int i;

	bio_for_each_bvec_all(bvec, bio, i)
		bio_size += bvec->bv_len;

	/*
	 * Since we can have multiple bios touching the error_bitmap, we cannot
	 * call bitmap_set() without protection.
	 *
	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
	 */
	for (i = total_sector_nr; i < total_sector_nr +
	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
		set_bit(i, rbio->error_bitmap);
}

/* Verify the data sectors at read time. */
static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
				    struct bio *bio)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	int total_sector_nr = get_bio_sector_nr(rbio, bio);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/* No data csum for the whole stripe, no need to verify. */
	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return;

	/* P/Q stripes, they have no data csum to verify against. */
	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		int bv_offset;

		for (bv_offset = bvec->bv_offset;
		     bv_offset < bvec->bv_offset + bvec->bv_len;
		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
			u8 csum_buf[BTRFS_CSUM_SIZE];
			u8 *expected_csum = rbio->csum_buf +
					    total_sector_nr * fs_info->csum_size;
			int ret;

			/* No csum for this sector, skip to the next sector. */
			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
				continue;

			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
				bv_offset, csum_buf, expected_csum);
			if (ret < 0)
				set_bit(total_sector_nr, rbio->error_bitmap);
		}
	}
}

static void raid_wait_read_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status) {
		rbio_update_error_bitmap(rbio, bio);
	} else {
		set_bio_pages_uptodate(rbio, bio);
		verify_bio_data_sectors(rbio, bio);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&rbio->stripes_pending))
		wake_up(&rbio->io_wait);
}

static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
				      struct bio_list *bio_list)
{
	struct bio *bio;

	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
	while ((bio = bio_list_pop(bio_list))) {
		bio->bi_end_io = raid_wait_read_end_io;

		if (trace_raid56_read_enabled()) {
			struct raid56_bio_trace_info trace_info = { 0 };

			bio_get_trace_info(rbio, bio, &trace_info);
			trace_raid56_read(rbio, bio, &trace_info);
		}
		submit_bio(bio);
	}

	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
}

static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
{
	const int data_pages = rbio->nr_data * rbio->stripe_npages;
	int ret;

	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, 0);
	if (ret < 0)
		return ret;

	index_stripe_sectors(rbio);
	return 0;
}

/*
 * We use plugging callbacks to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						       plug_list);
	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						       plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	list_sort(NULL, &plug->rbio_list, plug_cmp);

	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* We have a full stripe, queue it down. */
			start_async_work(cur, rmw_rbio_work);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				free_raid_bio(cur);
				continue;
			}
			start_async_work(last, rmw_rbio_work);
		}
		last = cur;
	}
	if (last)
		start_async_work(last, rmw_rbio_work);
	kfree(plug);
}

/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const u64 full_stripe_start = rbio->bioc->full_stripe_logical;
	const u32 orig_len = orig_bio->bi_iter.bi_size;
	const u32 sectorsize = fs_info->sectorsize;
	u64 cur_logical;

	ASSERT(orig_logical >= full_stripe_start &&
	       orig_logical + orig_len <= full_stripe_start +
	       rbio->nr_data * BTRFS_STRIPE_LEN);

	bio_list_add(&rbio->bio_list, orig_bio);
	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;

	/* Update the dbitmap. */
	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
	     cur_logical += sectorsize) {
		int bit = ((u32)(cur_logical - full_stripe_start) >>
			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;

		set_bit(bit, &rbio->dbitmap);
	}
}
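
/*
 * dbitmap example, assuming a 4K sectorsize and 64K (16 sector) stripes:
 * an 8K write starting 8K into the second data stripe sits at offset
 * 64K + 8K from full_stripe_start, so its first bit is (72K >> 12) % 16
 * = 18 % 16 = 2, and bits 2 and 3 get set.  The bitmap tracks positions
 * inside the vertical stripe, no matter which data stripe is written.
 */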

/*
 * our main entry point for writes from the rest of the FS.
 */
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
{
	struct btrfs_fs_info *fs_info = bioc->fs_info;
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;

	rbio = alloc_rbio(fs_info, bioc);
	if (IS_ERR(rbio)) {
		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
		bio_endio(bio);
		return;
	}
	rbio->operation = BTRFS_RBIO_WRITE;
	rbio_add_bio(rbio, bio);

	/*
	 * Don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (!rbio_is_full(rbio)) {
		cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
		if (cb) {
			plug = container_of(cb, struct btrfs_plug_cb, cb);
			if (!plug->info) {
				plug->info = fs_info;
				INIT_LIST_HEAD(&plug->rbio_list);
			}
			list_add_tail(&rbio->plug_list, &plug->rbio_list);
			return;
		}
	}

	/*
	 * Either we don't have any existing plug, or we're doing a full stripe,
	 * queue the rmw work now.
	 */
	start_async_work(rbio, rmw_rbio_work);
}

static int verify_one_sector(struct btrfs_raid_bio *rbio,
			     int stripe_nr, int sector_nr)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	u8 csum_buf[BTRFS_CSUM_SIZE];
	u8 *csum_expected;
	int ret;

	if (!rbio->csum_bitmap || !rbio->csum_buf)
		return 0;

	/* No way to verify P/Q as they are not covered by data csum. */
	if (stripe_nr >= rbio->nr_data)
		return 0;
	/*
	 * If we're rebuilding a read, we have to use pages from the
	 * bio list if possible.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
	} else {
		sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
	}

	ASSERT(sector->page);

	csum_expected = rbio->csum_buf +
			(stripe_nr * rbio->stripe_nsectors + sector_nr) *
			fs_info->csum_size;
	ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
				      csum_buf, csum_expected);
	return ret;
}

/*
 * Recover a vertical stripe specified by @sector_nr.
 * @*pointers are the pre-allocated pointers by the caller, so we don't
 * need to allocate/free the pointers again and again.
 */
static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
			    void **pointers, void **unmap_array)
{
	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
	struct sector_ptr *sector;
	const u32 sectorsize = fs_info->sectorsize;
	int found_errors;
	int faila;
	int failb;
	int stripe_nr;
	int ret = 0;

	/*
	 * Now we just use bitmap to mark the horizontal stripes in
	 * which we have data when doing parity scrub.
	 */
	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
	    !test_bit(sector_nr, &rbio->dbitmap))
		return 0;

	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
						 &failb);
	/*
	 * No errors in the vertical stripe, skip it.  Can happen for recovery
	 * where only part of a stripe failed csum check.
	 */
	if (!found_errors)
		return 0;

	if (found_errors > rbio->bioc->max_errors)
		return -EIO;

	/*
	 * Setup our array of pointers with sectors from each stripe
	 *
	 * NOTE: store a duplicate array of pointers to preserve the
	 * pointer order.
	 */
	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
		/*
		 * If we're rebuilding a read, we have to use pages from the
		 * bio list if possible.
		 */
		if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
		} else {
			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
		}
		ASSERT(sector->page);
		pointers[stripe_nr] = kmap_local_page(sector->page) +
				      sector->pgoff;
		unmap_array[stripe_nr] = pointers[stripe_nr];
	}

	/* All raid6 handling here */
	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
		/* Single failure, rebuild from parity raid5 style */
		if (failb < 0) {
			if (faila == rbio->nr_data)
				/*
				 * Just the P stripe has failed, without
				 * a bad data or Q stripe.
				 * We have nothing to do, just skip the
				 * recovery for this stripe.
				 */
				goto cleanup;
			/*
			 * a single failure in raid6 is rebuilt
			 * in the pstripe code below
			 */
			goto pstripe;
		}

		/*
		 * If the q stripe is failed, do a pstripe reconstruction from
		 * the xors.
		 * If both the q stripe and the P stripe are failed, we're
		 * here due to a crc mismatch and we can't give them the
		 * data they want.
		 */
		if (failb == rbio->real_stripes - 1) {
			if (faila == rbio->real_stripes - 2)
				/*
				 * Only P and Q are corrupted.
				 * We only care about data stripes recovery,
				 * can skip this vertical stripe.
				 */
				goto cleanup;
			/*
			 * Otherwise we have one bad data stripe and
			 * a good P stripe.  raid5!
			 */
			goto pstripe;
		}

		if (failb == rbio->real_stripes - 2) {
			raid6_datap_recov(rbio->real_stripes, sectorsize,
					  faila, pointers);
		} else {
			raid6_2data_recov(rbio->real_stripes, sectorsize,
					  faila, failb, pointers);
		}
	} else {
		void *p;

		/* Rebuild from P stripe here (raid5 or raid6). */
		ASSERT(failb == -1);
pstripe:
		/* Copy parity block into failed block to start with */
		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);

		/* Rearrange the pointer array */
		p = pointers[faila];
		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
		     stripe_nr++)
			pointers[stripe_nr] = pointers[stripe_nr + 1];
		pointers[rbio->nr_data - 1] = p;

		/* Xor in the rest */
		run_xor(pointers, rbio->nr_data - 1, sectorsize);
	}

	/*
	 * No matter if this is a RMW or recovery, we should have all
	 * failed sectors repaired in the vertical stripe, thus they are now
	 * uptodate.
	 * Especially if we determine to cache the rbio, we need to
	 * have at least all data sectors uptodate.
	 *
	 * If possible, also check if the repaired sector matches its data
	 * checksum.
	 */
	if (faila >= 0) {
		ret = verify_one_sector(rbio, faila, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, faila, sector_nr);
		sector->uptodate = 1;
	}
	if (failb >= 0) {
		ret = verify_one_sector(rbio, failb, sector_nr);
		if (ret < 0)
			goto cleanup;

		sector = rbio_stripe_sector(rbio, failb, sector_nr);
		sector->uptodate = 1;
	}

cleanup:
	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
		kunmap_local(unmap_array[stripe_nr]);
	return ret;
}

static int recover_sectors(struct btrfs_raid_bio *rbio)
{
	void **pointers = NULL;
	void **unmap_array = NULL;
	int sectornr;
	int ret = 0;

	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores copy of pointers that does not get reordered
	 * during reconstruction so that kunmap_local works.
	 */
	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers || !unmap_array) {
		ret = -ENOMEM;
		goto out;
	}

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		spin_lock(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
		if (ret < 0)
			break;
	}

out:
	kfree(pointers);
	kfree(unmap_array);
	return ret;
}

static void recover_rbio(struct btrfs_raid_bio *rbio)
{
	struct bio_list bio_list = BIO_EMPTY_LIST;
	int total_sector_nr;
	int ret = 0;

	/*
	 * Either we're doing recover for a read failure or degraded write,
	 * caller should have set error bitmap correctly.
	 */
	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));

	/* For recovery, we need to read all sectors including P/Q. */
	ret = alloc_rbio_pages(rbio);
	if (ret < 0)
		goto out;

	index_rbio_pages(rbio);

	/*
	 * Read everything that hasn't failed. However, this time we will
	 * not trust any cached sector: the cache may hold stale data for
	 * parts of the stripe the higher layer is not reading, and
	 * rebuilding from stale data would give a wrong result.
	 *
	 * So in the recovery path we always re-read everything.
	 */
1940 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1941 total_sector_nr++) {
1942 int stripe = total_sector_nr / rbio->stripe_nsectors;
1943 int sectornr = total_sector_nr % rbio->stripe_nsectors;
1944 struct sector_ptr *sector;
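		/*
		 * Sectors are indexed linearly across all stripes: e.g.
		 * with stripe_nsectors == 16, total_sector_nr == 35 maps
		 * to stripe 2, sector 3 of that stripe.
		 */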
1945
		/*
		 * Skip any range that has an error. It can be a range
		 * marked as an error (e.g. a csum mismatch), or one on a
		 * missing device.
		 */
1951 if (!rbio->bioc->stripes[stripe].dev->bdev ||
1952 test_bit(total_sector_nr, rbio->error_bitmap)) {
			/*
			 * Also set the error bit for a missing device,
			 * which may not yet have its error bit set.
			 */
1957 set_bit(total_sector_nr, rbio->error_bitmap);
1958 continue;
1959 }
1960
1961 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1962 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
1963 sectornr, REQ_OP_READ);
1964 if (ret < 0) {
1965 bio_list_put(&bio_list);
1966 goto out;
1967 }
1968 }
1969
1970 submit_read_wait_bio_list(rbio, &bio_list);
1971 ret = recover_sectors(rbio);
1972out:
1973 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
1974}
1975
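/*
 * If lock_stripe_add() returns 0 we own the stripe lock and must run the
 * recovery ourselves; otherwise the rbio has been merged into or queued
 * behind the current lock holder, and will be run when the lock is handed
 * over.
 */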
1976static void recover_rbio_work(struct work_struct *work)
1977{
1978 struct btrfs_raid_bio *rbio;
1979
1980 rbio = container_of(work, struct btrfs_raid_bio, work);
1981 if (!lock_stripe_add(rbio))
1982 recover_rbio(rbio);
1983}
1984
1985static void recover_rbio_work_locked(struct work_struct *work)
1986{
1987 recover_rbio(container_of(work, struct btrfs_raid_bio, work));
1988}
1989
1990static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
1991{
1992 bool found = false;
1993 int sector_nr;
1994
	/*
	 * This is for extra RAID6 recovery tries, thus the mirror number
	 * should be larger than 2.
	 * Mirror 1 means reading from data stripes. Mirror 2 means
	 * rebuilding using RAID5 methods.
	 */
2001 ASSERT(mirror_num > 2);
2002 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2003 int found_errors;
2004 int faila;
2005 int failb;
2006
2007 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2008 &faila, &failb);
2009 /* This vertical stripe doesn't have errors. */
2010 if (!found_errors)
2011 continue;
2012
2013 /*
2014 * If we found errors, there should be only one error marked
2015 * by previous set_rbio_range_error().
2016 */
2017 ASSERT(found_errors == 1);
2018 found = true;
2019
2020 /* Now select another stripe to mark as error. */
2021 failb = rbio->real_stripes - (mirror_num - 1);
2022 if (failb <= faila)
2023 failb--;
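		/*
		 * Example (hypothetical 4-stripe RAID6 layout, 2 data +
		 * P + Q): for mirror_num == 3 this selects
		 * failb = 4 - (3 - 1) = 2, i.e. the P stripe, forcing the
		 * rebuild to use Q instead. If the initial error was
		 * already at or past that index, failb steps back one so
		 * we never mark the same stripe twice.
		 */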
2024
2025 /* Set the extra bit in error bitmap. */
2026 if (failb >= 0)
2027 set_bit(failb * rbio->stripe_nsectors + sector_nr,
2028 rbio->error_bitmap);
2029 }
2030
	/* We should have found at least one vertical stripe with an error. */
2032 ASSERT(found);
2033}
2034
2035/*
2036 * the main entry point for reads from the higher layers. This
2037 * is really only called when the normal read path had a failure,
2038 * so we assume the bio they send down corresponds to a failed part
2039 * of the drive.
2040 */
2041void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2042 int mirror_num)
2043{
2044 struct btrfs_fs_info *fs_info = bioc->fs_info;
2045 struct btrfs_raid_bio *rbio;
2046
2047 rbio = alloc_rbio(fs_info, bioc);
2048 if (IS_ERR(rbio)) {
2049 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2050 bio_endio(bio);
2051 return;
2052 }
2053
2054 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2055 rbio_add_bio(rbio, bio);
2056
2057 set_rbio_range_error(rbio, bio);
2058
	/*
	 * Loop retry:
	 * for 'mirror_num == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select an extra stripe to fail on every
	 * retry.
	 */
2064 if (mirror_num > 2)
2065 set_rbio_raid6_extra_error(rbio, mirror_num);
2066
2067 start_async_work(rbio, recover_rbio_work);
2068}
2069
2070static void fill_data_csums(struct btrfs_raid_bio *rbio)
2071{
2072 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
2073 struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
2074 rbio->bioc->full_stripe_logical);
2075 const u64 start = rbio->bioc->full_stripe_logical;
2076 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
2077 fs_info->sectorsize_bits;
2078 int ret;
2079
2080 /* The rbio should not have its csum buffer initialized. */
2081 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);
2082
	/*
	 * Skip the csum search if:
	 *
	 * - The rbio doesn't belong to data block groups
	 *   Then we are doing IO for tree blocks, no need to search csums.
	 *
	 * - The rbio belongs to mixed block groups
	 *   This is to avoid deadlock: we're already holding the full
	 *   stripe lock, so if we trigger a metadata read that itself
	 *   needs raid56 recovery, we would deadlock.
	 */
2094 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
2095 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
2096 return;
2097
2098 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
2099 fs_info->csum_size, GFP_NOFS);
2100 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
2101 GFP_NOFS);
2102 if (!rbio->csum_buf || !rbio->csum_bitmap) {
2103 ret = -ENOMEM;
2104 goto error;
2105 }
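	/*
	 * The buffer is indexed by data sector number within the full
	 * stripe: the csum for sector i of data stripe s lives at
	 * csum_buf + (s * stripe_nsectors + i) * csum_size, with the
	 * matching bit set in csum_bitmap once a csum is found.
	 */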
2106
2107 ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1,
2108 rbio->csum_buf, rbio->csum_bitmap);
2109 if (ret < 0)
2110 goto error;
2111 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
2112 goto no_csum;
2113 return;
2114
2115error:
	/*
	 * We failed to allocate memory or to look up the csums, but it's
	 * not fatal, we can still continue. However, it's better to warn
	 * users that RMW is no longer safe for this particular sub-stripe
	 * write.
	 */
2121 btrfs_warn_rl(fs_info,
2122"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
2123 rbio->bioc->full_stripe_logical, ret);
2124no_csum:
2125 kfree(rbio->csum_buf);
2126 bitmap_free(rbio->csum_bitmap);
2127 rbio->csum_buf = NULL;
2128 rbio->csum_bitmap = NULL;
2129}
2130
2131static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
2132{
2133 struct bio_list bio_list = BIO_EMPTY_LIST;
2134 int total_sector_nr;
2135 int ret = 0;
2136
2137 /*
2138 * Fill the data csums we need for data verification. We need to fill
2139 * the csum_bitmap/csum_buf first, as our endio function will try to
2140 * verify the data sectors.
2141 */
2142 fill_data_csums(rbio);
2143
	/*
	 * Build a list of bios to read all sectors (including data and P/Q).
	 *
	 * Reading everything is needed so that the later csum verification
	 * and recovery have every sector available.
	 */
2149 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2150 total_sector_nr++) {
2151 struct sector_ptr *sector;
2152 int stripe = total_sector_nr / rbio->stripe_nsectors;
2153 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2154
2155 sector = rbio_stripe_sector(rbio, stripe, sectornr);
2156 ret = rbio_add_io_sector(rbio, &bio_list, sector,
2157 stripe, sectornr, REQ_OP_READ);
2158 if (ret) {
2159 bio_list_put(&bio_list);
2160 return ret;
2161 }
2162 }
2163
	/*
	 * We may or may not have corrupted sectors (from missing devices
	 * or csum mismatches); just let recover_sectors() handle them all.
	 */
2168 submit_read_wait_bio_list(rbio, &bio_list);
2169 return recover_sectors(rbio);
2170}
2171
2172static void raid_wait_write_end_io(struct bio *bio)
2173{
2174 struct btrfs_raid_bio *rbio = bio->bi_private;
2175 blk_status_t err = bio->bi_status;
2176
2177 if (err)
2178 rbio_update_error_bitmap(rbio, bio);
2179 bio_put(bio);
2180 if (atomic_dec_and_test(&rbio->stripes_pending))
2181 wake_up(&rbio->io_wait);
2182}
2183
2184static void submit_write_bios(struct btrfs_raid_bio *rbio,
2185 struct bio_list *bio_list)
2186{
2187 struct bio *bio;
2188
2189 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
2190 while ((bio = bio_list_pop(bio_list))) {
2191 bio->bi_end_io = raid_wait_write_end_io;
2192
2193 if (trace_raid56_write_enabled()) {
2194 struct raid56_bio_trace_info trace_info = { 0 };
2195
2196 bio_get_trace_info(rbio, bio, &trace_info);
2197 trace_raid56_write(rbio, bio, &trace_info);
2198 }
2199 submit_bio(bio);
2200 }
2201}
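/*
 * Writes are submitted fire-and-forget here: the caller is expected to
 * wait_event() on rbio->io_wait until stripes_pending drops to zero,
 * which raid_wait_write_end_io() signals as each bio completes.
 */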
2202
/*
 * Determine if we need to read any sector from the disk.
 * Should only be used in the RMW path, to skip reads for a cached rbio.
 */
2207static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
2208{
2209 int i;
2210
2211 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
2212 struct sector_ptr *sector = &rbio->stripe_sectors[i];
2213
		/*
		 * We have a sector which doesn't have a page and is not
		 * uptodate, thus this rbio cannot be a cached one, as a
		 * cached rbio must have all of its data sectors present
		 * and uptodate.
		 */
2219 if (!sector->page || !sector->uptodate)
2220 return true;
2221 }
2222 return false;
2223}
2224
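/*
 * A high-level sketch of the RMW cycle below:
 *
 * 1) Allocate P/Q pages (always needed).
 * 2) For a sub-stripe write without all data cached, read and verify
 *    every sector, recovering any that fail.
 * 3) Lock out further merging, regenerate P/Q vertically and write the
 *    stripe back out.
 */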
2225static void rmw_rbio(struct btrfs_raid_bio *rbio)
2226{
2227 struct bio_list bio_list;
2228 int sectornr;
2229 int ret = 0;
2230
2231 /*
2232 * Allocate the pages for parity first, as P/Q pages will always be
2233 * needed for both full-stripe and sub-stripe writes.
2234 */
2235 ret = alloc_rbio_parity_pages(rbio);
2236 if (ret < 0)
2237 goto out;
2238
	/*
	 * A full stripe write, or one where every data sector is already
	 * cached, can go to the write path immediately.
	 */
2243 if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
		/*
		 * We're doing a sub-stripe write, so we also need all the
		 * data stripes to do the full RMW.
		 */
2248 ret = alloc_rbio_data_pages(rbio);
2249 if (ret < 0)
2250 goto out;
2251
2252 index_rbio_pages(rbio);
2253
2254 ret = rmw_read_wait_recover(rbio);
2255 if (ret < 0)
2256 goto out;
2257 }
2258
2259 /*
2260 * At this stage we're not allowed to add any new bios to the
2261 * bio list any more, anyone else that wants to change this stripe
2262 * needs to do their own rmw.
2263 */
2264 spin_lock(&rbio->bio_list_lock);
2265 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
2266 spin_unlock(&rbio->bio_list_lock);
2267
2268 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2269
2270 index_rbio_pages(rbio);
2271
2272 /*
2273 * We don't cache full rbios because we're assuming
2274 * the higher layers are unlikely to use this area of
2275 * the disk again soon. If they do use it again,
2276 * hopefully they will send another full bio.
2277 */
2278 if (!rbio_is_full(rbio))
2279 cache_rbio_pages(rbio);
2280 else
2281 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2282
2283 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
2284 generate_pq_vertical(rbio, sectornr);
2285
2286 bio_list_init(&bio_list);
2287 ret = rmw_assemble_write_bios(rbio, &bio_list);
2288 if (ret < 0)
2289 goto out;
2290
2291 /* We should have at least one bio assembled. */
2292 ASSERT(bio_list_size(&bio_list));
2293 submit_write_bios(rbio, &bio_list);
2294 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2295
2296 /* We may have more errors than our tolerance during the read. */
2297 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2298 int found_errors;
2299
2300 found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2301 if (found_errors > rbio->bioc->max_errors) {
2302 ret = -EIO;
2303 break;
2304 }
2305 }
2306out:
2307 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2308}
2309
2310static void rmw_rbio_work(struct work_struct *work)
2311{
2312 struct btrfs_raid_bio *rbio;
2313
2314 rbio = container_of(work, struct btrfs_raid_bio, work);
2315 if (lock_stripe_add(rbio) == 0)
2316 rmw_rbio(rbio);
2317}
2318
2319static void rmw_rbio_work_locked(struct work_struct *work)
2320{
2321 rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
2322}
2323
2324/*
2325 * The following code is used to scrub/replace the parity stripe
2326 *
2327 * Caller must have already increased bio_counter for getting @bioc.
2328 *
 * Note: We need to make sure that all the pages added into the
 * scrub/replace raid bio are correct and do not change during the
 * scrub/replace, i.e. those pages hold only metadata or file data
 * protected by a checksum.
2332 */
2333
2334struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
2335 struct btrfs_io_context *bioc,
2336 struct btrfs_device *scrub_dev,
2337 unsigned long *dbitmap, int stripe_nsectors)
2338{
2339 struct btrfs_fs_info *fs_info = bioc->fs_info;
2340 struct btrfs_raid_bio *rbio;
2341 int i;
2342
2343 rbio = alloc_rbio(fs_info, bioc);
2344 if (IS_ERR(rbio))
2345 return NULL;
2346 bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler and make the scrub rbio similar to the other types.
	 */
2351 ASSERT(!bio->bi_iter.bi_size);
2352 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2353
2354 /*
2355 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
2356 * to the end position, so this search can start from the first parity
2357 * stripe.
2358 */
2359 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2360 if (bioc->stripes[i].dev == scrub_dev) {
2361 rbio->scrubp = i;
2362 break;
2363 }
2364 }
2365 ASSERT(i < rbio->real_stripes);
2366
2367 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
2368 return rbio;
2369}
2370
/*
 * We only scrub the parity covered by correct data in the same vertical
 * stripe, so we don't need to allocate pages for all the stripes.
 */
2375static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2376{
2377 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2378 int total_sector_nr;
2379
2380 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2381 total_sector_nr++) {
2382 struct page *page;
2383 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2384 int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
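		/*
		 * E.g. with 4K sectors and 4K pages, sector 35 of the rbio
		 * lives in page 35; with 4K sectors and 64K pages it would
		 * be page 2 (35 * 4096 >> 16).
		 */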
2385
2386 if (!test_bit(sectornr, &rbio->dbitmap))
2387 continue;
2388 if (rbio->stripe_pages[index])
2389 continue;
2390 page = alloc_page(GFP_NOFS);
2391 if (!page)
2392 return -ENOMEM;
2393 rbio->stripe_pages[index] = page;
2394 }
2395 index_stripe_sectors(rbio);
2396 return 0;
2397}
2398
2399static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
2400{
2401 struct btrfs_io_context *bioc = rbio->bioc;
2402 const u32 sectorsize = bioc->fs_info->sectorsize;
2403 void **pointers = rbio->finish_pointers;
2404 unsigned long *pbitmap = &rbio->finish_pbitmap;
2405 int nr_data = rbio->nr_data;
2406 int stripe;
2407 int sectornr;
2408 bool has_qstripe;
2409 struct sector_ptr p_sector = { 0 };
2410 struct sector_ptr q_sector = { 0 };
2411 struct bio_list bio_list;
2412 int is_replace = 0;
2413 int ret;
2414
2415 bio_list_init(&bio_list);
2416
2417 if (rbio->real_stripes - rbio->nr_data == 1)
2418 has_qstripe = false;
2419 else if (rbio->real_stripes - rbio->nr_data == 2)
2420 has_qstripe = true;
2421 else
2422 BUG();
2423
	/*
	 * If replace is running and our P/Q stripe is being replaced, we
	 * need to duplicate the final write to the replace target.
	 */
2428 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
2429 is_replace = 1;
2430 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
2431 }
2432
	/*
	 * The higher layers (the scrubber) are unlikely to use this area
	 * of the disk again soon, so don't cache it.
	 */
2438 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2439
2440 p_sector.page = alloc_page(GFP_NOFS);
2441 if (!p_sector.page)
2442 return -ENOMEM;
2443 p_sector.pgoff = 0;
2444 p_sector.uptodate = 1;
2445
2446 if (has_qstripe) {
2447 /* RAID6, allocate and map temp space for the Q stripe */
2448 q_sector.page = alloc_page(GFP_NOFS);
2449 if (!q_sector.page) {
2450 __free_page(p_sector.page);
2451 p_sector.page = NULL;
2452 return -ENOMEM;
2453 }
2454 q_sector.pgoff = 0;
2455 q_sector.uptodate = 1;
2456 pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
2457 }
2458
2459 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2460
2461 /* Map the parity stripe just once */
2462 pointers[nr_data] = kmap_local_page(p_sector.page);
2463
2464 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2465 struct sector_ptr *sector;
2466 void *parity;
2467
2468 /* first collect one page from each data stripe */
2469 for (stripe = 0; stripe < nr_data; stripe++) {
2470 sector = sector_in_rbio(rbio, stripe, sectornr, 0);
2471 pointers[stripe] = kmap_local_page(sector->page) +
2472 sector->pgoff;
2473 }
2474
2475 if (has_qstripe) {
2476 /* RAID6, call the library function to fill in our P/Q */
2477 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
2478 pointers);
2479 } else {
2480 /* raid5 */
2481 memcpy(pointers[nr_data], pointers[0], sectorsize);
2482 run_xor(pointers + 1, nr_data - 1, sectorsize);
2483 }
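		/*
		 * E.g. for RAID5 with nr_data == 3 this computes
		 * P = D0 ^ D1 ^ D2: D0 is first copied into the P buffer,
		 * then D1 and D2 are xored in by run_xor().
		 */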
2484
2485 /* Check scrubbing parity and repair it */
2486 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2487 parity = kmap_local_page(sector->page) + sector->pgoff;
2488 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
2489 memcpy(parity, pointers[rbio->scrubp], sectorsize);
2490 else
			/* Parity is correct, no need to write it back. */
2492 bitmap_clear(&rbio->dbitmap, sectornr, 1);
2493 kunmap_local(parity);
2494
2495 for (stripe = nr_data - 1; stripe >= 0; stripe--)
2496 kunmap_local(pointers[stripe]);
2497 }
2498
2499 kunmap_local(pointers[nr_data]);
2500 __free_page(p_sector.page);
2501 p_sector.page = NULL;
2502 if (q_sector.page) {
2503 kunmap_local(pointers[rbio->real_stripes - 1]);
2504 __free_page(q_sector.page);
2505 q_sector.page = NULL;
2506 }
2507
	/*
	 * Time to start writing. Make bios for the parity sectors that
	 * were repaired (marked in dbitmap) and ignore everything else.
	 */
2513 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2514 struct sector_ptr *sector;
2515
2516 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2517 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
2518 sectornr, REQ_OP_WRITE);
2519 if (ret)
2520 goto cleanup;
2521 }
2522
2523 if (!is_replace)
2524 goto submit_write;
2525
2526 /*
2527 * Replace is running and our parity stripe needs to be duplicated to
2528 * the target device. Check we have a valid source stripe number.
2529 */
2530 ASSERT(rbio->bioc->replace_stripe_src >= 0);
2531 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
2532 struct sector_ptr *sector;
2533
2534 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2535 ret = rbio_add_io_sector(rbio, &bio_list, sector,
2536 rbio->real_stripes,
2537 sectornr, REQ_OP_WRITE);
2538 if (ret)
2539 goto cleanup;
2540 }
2541
2542submit_write:
2543 submit_write_bios(rbio, &bio_list);
2544 return 0;
2545
2546cleanup:
2547 bio_list_put(&bio_list);
2548 return ret;
2549}
2550
2551static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2552{
2553 if (stripe >= 0 && stripe < rbio->nr_data)
2554 return 1;
2555 return 0;
2556}
2557
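/*
 * Stripe indices at or beyond nr_data are parity: e.g. in a hypothetical
 * 4-stripe RAID6 layout (nr_data == 2), stripes 0-1 are data, stripe 2
 * is P and stripe 3 is Q.
 */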
2558static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
2559{
2560 void **pointers = NULL;
2561 void **unmap_array = NULL;
2562 int sector_nr;
2563 int ret = 0;
2564
	/*
	 * @pointers array stores the pointer for each sector.
	 *
	 * @unmap_array stores a copy of the pointers that does not get
	 * reordered during reconstruction, so that kunmap_local() works.
	 */
2571 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2572 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2573 if (!pointers || !unmap_array) {
2574 ret = -ENOMEM;
2575 goto out;
2576 }
2577
2578 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2579 int dfail = 0, failp = -1;
2580 int faila;
2581 int failb;
2582 int found_errors;
2583
2584 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2585 &faila, &failb);
2586 if (found_errors > rbio->bioc->max_errors) {
2587 ret = -EIO;
2588 goto out;
2589 }
2590 if (found_errors == 0)
2591 continue;
2592
2593 /* We should have at least one error here. */
2594 ASSERT(faila >= 0 || failb >= 0);
2595
2596 if (is_data_stripe(rbio, faila))
2597 dfail++;
2598 else if (is_parity_stripe(faila))
2599 failp = faila;
2600
2601 if (is_data_stripe(rbio, failb))
2602 dfail++;
2603 else if (is_parity_stripe(failb))
2604 failp = failb;
		/*
		 * Because we cannot use the parity being scrubbed to
		 * repair the data, our repair capability is reduced (in
		 * the case of RAID5, we cannot repair anything).
		 */
2610 if (dfail > rbio->bioc->max_errors - 1) {
2611 ret = -EIO;
2612 goto out;
2613 }
		/*
		 * If all the data is good, then only the parity is
		 * corrupted; just repair the parity, no need to recover
		 * the data stripes.
		 */
2618 if (dfail == 0)
2619 continue;
2620
		/*
		 * At this point we have one corrupted data stripe and one
		 * corrupted parity on RAID6. If the corrupted parity is
		 * the one being scrubbed, we can luckily use the other
		 * parity to repair the data; otherwise we cannot repair
		 * the data stripe.
		 */
2627 if (failp != rbio->scrubp) {
2628 ret = -EIO;
2629 goto out;
2630 }
2631
2632 ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
2633 if (ret < 0)
2634 goto out;
2635 }
2636out:
2637 kfree(pointers);
2638 kfree(unmap_array);
2639 return ret;
2640}
2641
2642static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
2643{
2644 struct bio_list bio_list = BIO_EMPTY_LIST;
2645 int total_sector_nr;
2646 int ret = 0;
2647
2648 /* Build a list of bios to read all the missing parts. */
2649 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2650 total_sector_nr++) {
2651 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2652 int stripe = total_sector_nr / rbio->stripe_nsectors;
2653 struct sector_ptr *sector;
2654
2655 /* No data in the vertical stripe, no need to read. */
2656 if (!test_bit(sectornr, &rbio->dbitmap))
2657 continue;
2658
2659 /*
2660 * We want to find all the sectors missing from the rbio and
2661 * read them from the disk. If sector_in_rbio() finds a sector
2662 * in the bio list we don't need to read it off the stripe.
2663 */
2664 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
2665 if (sector)
2666 continue;
2667
2668 sector = rbio_stripe_sector(rbio, stripe, sectornr);
2669 /*
2670 * The bio cache may have handed us an uptodate sector. If so,
2671 * use it.
2672 */
2673 if (sector->uptodate)
2674 continue;
2675
2676 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
2677 sectornr, REQ_OP_READ);
2678 if (ret) {
2679 bio_list_put(&bio_list);
2680 return ret;
2681 }
2682 }
2683
2684 submit_read_wait_bio_list(rbio, &bio_list);
2685 return 0;
2686}
2687
2688static void scrub_rbio(struct btrfs_raid_bio *rbio)
2689{
2690 int sector_nr;
2691 int ret;
2692
2693 ret = alloc_rbio_essential_pages(rbio);
2694 if (ret)
2695 goto out;
2696
2697 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2698
2699 ret = scrub_assemble_read_bios(rbio);
2700 if (ret < 0)
2701 goto out;
2702
2703 /* We may have some failures, recover the failed sectors first. */
2704 ret = recover_scrub_rbio(rbio);
2705 if (ret < 0)
2706 goto out;
2707
	/*
	 * Every sector is now properly prepared. We can finish the scrub
	 * and write back the good content.
	 */
2712 ret = finish_parity_scrub(rbio);
2713 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2714 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2715 int found_errors;
2716
2717 found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
2718 if (found_errors > rbio->bioc->max_errors) {
2719 ret = -EIO;
2720 break;
2721 }
2722 }
2723out:
2724 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2725}
2726
2727static void scrub_rbio_work_locked(struct work_struct *work)
2728{
2729 scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
2730}
2731
2732void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2733{
2734 if (!lock_stripe_add(rbio))
2735 start_async_work(rbio, scrub_rbio_work_locked);
2736}
2737
2738/*
2739 * This is for scrub call sites where we already have correct data contents.
2740 * This allows us to avoid reading data stripes again.
2741 *
 * Unfortunately here we have to copy the pages rather than reuse them.
 * This is because the rbio has its own page management for its cache.
2744 */
2745void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
2746 struct page **data_pages, u64 data_logical)
2747{
2748 const u64 offset_in_full_stripe = data_logical -
2749 rbio->bioc->full_stripe_logical;
2750 const int page_index = offset_in_full_stripe >> PAGE_SHIFT;
2751 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2752 const u32 sectors_per_page = PAGE_SIZE / sectorsize;
2753 int ret;
2754
	/*
	 * If we hit ENOMEM temporarily, but it later succeeds at
	 * raid56_parity_submit_scrub_rbio() time, we just do an extra
	 * read, which is not a big deal.
	 *
	 * If we hit ENOMEM again at raid56_parity_submit_scrub_rbio()
	 * time, the bio will get a proper error number set.
	 */
2763 ret = alloc_rbio_data_pages(rbio);
2764 if (ret < 0)
2765 return;
2766
2767 /* data_logical must be at stripe boundary and inside the full stripe. */
2768 ASSERT(IS_ALIGNED(offset_in_full_stripe, BTRFS_STRIPE_LEN));
2769 ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));
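	/*
	 * E.g. with 64K stripes and 4K pages, caching the second data
	 * stripe (offset_in_full_stripe == 64K) starts at page_index 16
	 * and copies BTRFS_STRIPE_LEN >> PAGE_SHIFT == 16 pages.
	 */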
2770
	for (int page_nr = 0; page_nr < (BTRFS_STRIPE_LEN >> PAGE_SHIFT); page_nr++) {
		struct page *dst = rbio->stripe_pages[page_nr + page_index];
		struct page *src = data_pages[page_nr];

		memcpy_page(dst, 0, src, 0, PAGE_SIZE);
		/* Mark all sectors of the page we just copied as uptodate. */
		for (int sector_nr = sectors_per_page * (page_index + page_nr);
		     sector_nr < sectors_per_page * (page_index + page_nr + 1);
		     sector_nr++)
			rbio->stripe_sectors[sector_nr].uptodate = true;
	}
2781}
1/*
2 * Copyright (C) 2012 Fusion-io All rights reserved.
3 * Copyright (C) 2012 Intel Corp. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 021110-1307, USA.
18 */
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <linux/bio.h>
22#include <linux/slab.h>
23#include <linux/buffer_head.h>
24#include <linux/blkdev.h>
25#include <linux/random.h>
26#include <linux/iocontext.h>
27#include <linux/capability.h>
28#include <linux/ratelimit.h>
29#include <linux/kthread.h>
30#include <linux/raid/pq.h>
31#include <linux/hash.h>
32#include <linux/list_sort.h>
33#include <linux/raid/xor.h>
34#include <linux/vmalloc.h>
35#include <asm/div64.h>
36#include "ctree.h"
37#include "extent_map.h"
38#include "disk-io.h"
39#include "transaction.h"
40#include "print-tree.h"
41#include "volumes.h"
42#include "raid56.h"
43#include "async-thread.h"
44#include "check-integrity.h"
45#include "rcu-string.h"
46
47/* set when additional merges to this rbio are not allowed */
48#define RBIO_RMW_LOCKED_BIT 1
49
50/*
51 * set when this rbio is sitting in the hash, but it is just a cache
52 * of past RMW
53 */
54#define RBIO_CACHE_BIT 2
55
56/*
57 * set when it is safe to trust the stripe_pages for caching
58 */
59#define RBIO_CACHE_READY_BIT 3
60
61#define RBIO_CACHE_SIZE 1024
62
63enum btrfs_rbio_ops {
64 BTRFS_RBIO_WRITE,
65 BTRFS_RBIO_READ_REBUILD,
66 BTRFS_RBIO_PARITY_SCRUB,
67 BTRFS_RBIO_REBUILD_MISSING,
68};
69
70struct btrfs_raid_bio {
71 struct btrfs_fs_info *fs_info;
72 struct btrfs_bio *bbio;
73
74 /* while we're doing rmw on a stripe
75 * we put it into a hash table so we can
76 * lock the stripe and merge more rbios
77 * into it.
78 */
79 struct list_head hash_list;
80
81 /*
82 * LRU list for the stripe cache
83 */
84 struct list_head stripe_cache;
85
86 /*
87 * for scheduling work in the helper threads
88 */
89 struct btrfs_work work;
90
91 /*
92 * bio list and bio_list_lock are used
93 * to add more bios into the stripe
94 * in hopes of avoiding the full rmw
95 */
96 struct bio_list bio_list;
97 spinlock_t bio_list_lock;
98
99 /* also protected by the bio_list_lock, the
100 * plug list is used by the plugging code
101 * to collect partial bios while plugged. The
102 * stripe locking code also uses it to hand off
103 * the stripe lock to the next pending IO
104 */
105 struct list_head plug_list;
106
107 /*
108 * flags that tell us if it is safe to
109 * merge with this bio
110 */
111 unsigned long flags;
112
113 /* size of each individual stripe on disk */
114 int stripe_len;
115
116 /* number of data stripes (no p/q) */
117 int nr_data;
118
119 int real_stripes;
120
121 int stripe_npages;
122 /*
123 * set if we're doing a parity rebuild
124 * for a read from higher up, which is handled
125 * differently from a parity rebuild as part of
126 * rmw
127 */
128 enum btrfs_rbio_ops operation;
129
130 /* first bad stripe */
131 int faila;
132
133 /* second bad stripe (for raid6 use) */
134 int failb;
135
136 int scrubp;
137 /*
138 * number of pages needed to represent the full
139 * stripe
140 */
141 int nr_pages;
142
143 /*
144 * size of all the bios in the bio_list. This
145 * helps us decide if the rbio maps to a full
146 * stripe or not
147 */
148 int bio_list_bytes;
149
150 int generic_bio_cnt;
151
152 atomic_t refs;
153
154 atomic_t stripes_pending;
155
156 atomic_t error;
157 /*
158 * these are two arrays of pointers. We allocate the
159 * rbio big enough to hold them both and setup their
160 * locations when the rbio is allocated
161 */
162
163 /* pointers to pages that we allocated for
164 * reading/writing stripes directly from the disk (including P/Q)
165 */
166 struct page **stripe_pages;
167
168 /*
169 * pointers to the pages in the bio_list. Stored
170 * here for faster lookup
171 */
172 struct page **bio_pages;
173
174 /*
175 * bitmap to record which horizontal stripe has data
176 */
177 unsigned long *dbitmap;
178};
179
180static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
181static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
182static void rmw_work(struct btrfs_work *work);
183static void read_rebuild_work(struct btrfs_work *work);
184static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
185static void async_read_rebuild(struct btrfs_raid_bio *rbio);
186static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
187static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
188static void __free_raid_bio(struct btrfs_raid_bio *rbio);
189static void index_rbio_pages(struct btrfs_raid_bio *rbio);
190static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
191
192static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
193 int need_check);
194static void async_scrub_parity(struct btrfs_raid_bio *rbio);
195
196/*
197 * the stripe hash table is used for locking, and to collect
198 * bios in hopes of making a full stripe
199 */
200int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
201{
202 struct btrfs_stripe_hash_table *table;
203 struct btrfs_stripe_hash_table *x;
204 struct btrfs_stripe_hash *cur;
205 struct btrfs_stripe_hash *h;
206 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
207 int i;
208 int table_size;
209
210 if (info->stripe_hash_table)
211 return 0;
212
213 /*
214 * The table is large, starting with order 4 and can go as high as
215 * order 7 in case lock debugging is turned on.
216 *
217 * Try harder to allocate and fallback to vmalloc to lower the chance
218 * of a failing mount.
219 */
220 table_size = sizeof(*table) + sizeof(*h) * num_entries;
221 table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
222 if (!table) {
223 table = vzalloc(table_size);
224 if (!table)
225 return -ENOMEM;
226 }
227
228 spin_lock_init(&table->cache_lock);
229 INIT_LIST_HEAD(&table->stripe_cache);
230
231 h = table->table;
232
233 for (i = 0; i < num_entries; i++) {
234 cur = h + i;
235 INIT_LIST_HEAD(&cur->hash_list);
236 spin_lock_init(&cur->lock);
237 init_waitqueue_head(&cur->wait);
238 }
239
240 x = cmpxchg(&info->stripe_hash_table, NULL, table);
241 if (x)
242 kvfree(x);
243 return 0;
244}
245
246/*
247 * caching an rbio means to copy anything from the
248 * bio_pages array into the stripe_pages array. We
249 * use the page uptodate bit in the stripe cache array
250 * to indicate if it has valid data
251 *
252 * once the caching is done, we set the cache ready
253 * bit.
254 */
255static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
256{
257 int i;
258 char *s;
259 char *d;
260 int ret;
261
262 ret = alloc_rbio_pages(rbio);
263 if (ret)
264 return;
265
266 for (i = 0; i < rbio->nr_pages; i++) {
267 if (!rbio->bio_pages[i])
268 continue;
269
270 s = kmap(rbio->bio_pages[i]);
271 d = kmap(rbio->stripe_pages[i]);
272
273 memcpy(d, s, PAGE_SIZE);
274
275 kunmap(rbio->bio_pages[i]);
276 kunmap(rbio->stripe_pages[i]);
277 SetPageUptodate(rbio->stripe_pages[i]);
278 }
279 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
280}
281
282/*
283 * we hash on the first logical address of the stripe
284 */
285static int rbio_bucket(struct btrfs_raid_bio *rbio)
286{
287 u64 num = rbio->bbio->raid_map[0];
288
289 /*
290 * we shift down quite a bit. We're using byte
291 * addressing, and most of the lower bits are zeros.
292 * This tends to upset hash_64, and it consistently
293 * returns just one or two different values.
294 *
295 * shifting off the lower bits fixes things.
296 */
297 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
298}
299
300/*
301 * stealing an rbio means taking all the uptodate pages from the stripe
302 * array in the source rbio and putting them into the destination rbio
303 */
304static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
305{
306 int i;
307 struct page *s;
308 struct page *d;
309
310 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
311 return;
312
313 for (i = 0; i < dest->nr_pages; i++) {
314 s = src->stripe_pages[i];
315 if (!s || !PageUptodate(s)) {
316 continue;
317 }
318
319 d = dest->stripe_pages[i];
320 if (d)
321 __free_page(d);
322
323 dest->stripe_pages[i] = s;
324 src->stripe_pages[i] = NULL;
325 }
326}
327
328/*
329 * merging means we take the bio_list from the victim and
330 * splice it into the destination. The victim should
331 * be discarded afterwards.
332 *
333 * must be called with dest->rbio_list_lock held
334 */
335static void merge_rbio(struct btrfs_raid_bio *dest,
336 struct btrfs_raid_bio *victim)
337{
338 bio_list_merge(&dest->bio_list, &victim->bio_list);
339 dest->bio_list_bytes += victim->bio_list_bytes;
340 dest->generic_bio_cnt += victim->generic_bio_cnt;
341 bio_list_init(&victim->bio_list);
342}
343
344/*
345 * used to prune items that are in the cache. The caller
346 * must hold the hash table lock.
347 */
348static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
349{
350 int bucket = rbio_bucket(rbio);
351 struct btrfs_stripe_hash_table *table;
352 struct btrfs_stripe_hash *h;
353 int freeit = 0;
354
355 /*
356 * check the bit again under the hash table lock.
357 */
358 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
359 return;
360
361 table = rbio->fs_info->stripe_hash_table;
362 h = table->table + bucket;
363
364 /* hold the lock for the bucket because we may be
365 * removing it from the hash table
366 */
367 spin_lock(&h->lock);
368
369 /*
370 * hold the lock for the bio list because we need
371 * to make sure the bio list is empty
372 */
373 spin_lock(&rbio->bio_list_lock);
374
375 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
376 list_del_init(&rbio->stripe_cache);
377 table->cache_size -= 1;
378 freeit = 1;
379
380 /* if the bio list isn't empty, this rbio is
381 * still involved in an IO. We take it out
382 * of the cache list, and drop the ref that
383 * was held for the list.
384 *
385 * If the bio_list was empty, we also remove
386 * the rbio from the hash_table, and drop
387 * the corresponding ref
388 */
389 if (bio_list_empty(&rbio->bio_list)) {
390 if (!list_empty(&rbio->hash_list)) {
391 list_del_init(&rbio->hash_list);
392 atomic_dec(&rbio->refs);
393 BUG_ON(!list_empty(&rbio->plug_list));
394 }
395 }
396 }
397
398 spin_unlock(&rbio->bio_list_lock);
399 spin_unlock(&h->lock);
400
401 if (freeit)
402 __free_raid_bio(rbio);
403}
404
405/*
406 * prune a given rbio from the cache
407 */
408static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
409{
410 struct btrfs_stripe_hash_table *table;
411 unsigned long flags;
412
413 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
414 return;
415
416 table = rbio->fs_info->stripe_hash_table;
417
418 spin_lock_irqsave(&table->cache_lock, flags);
419 __remove_rbio_from_cache(rbio);
420 spin_unlock_irqrestore(&table->cache_lock, flags);
421}
422
423/*
424 * remove everything in the cache
425 */
426static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
427{
428 struct btrfs_stripe_hash_table *table;
429 unsigned long flags;
430 struct btrfs_raid_bio *rbio;
431
432 table = info->stripe_hash_table;
433
434 spin_lock_irqsave(&table->cache_lock, flags);
435 while (!list_empty(&table->stripe_cache)) {
436 rbio = list_entry(table->stripe_cache.next,
437 struct btrfs_raid_bio,
438 stripe_cache);
439 __remove_rbio_from_cache(rbio);
440 }
441 spin_unlock_irqrestore(&table->cache_lock, flags);
442}
443
444/*
445 * remove all cached entries and free the hash table
446 * used by unmount
447 */
448void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
449{
450 if (!info->stripe_hash_table)
451 return;
452 btrfs_clear_rbio_cache(info);
453 kvfree(info->stripe_hash_table);
454 info->stripe_hash_table = NULL;
455}
456
457/*
458 * insert an rbio into the stripe cache. It
459 * must have already been prepared by calling
460 * cache_rbio_pages
461 *
462 * If this rbio was already cached, it gets
463 * moved to the front of the lru.
464 *
465 * If the size of the rbio cache is too big, we
466 * prune an item.
467 */
468static void cache_rbio(struct btrfs_raid_bio *rbio)
469{
470 struct btrfs_stripe_hash_table *table;
471 unsigned long flags;
472
473 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
474 return;
475
476 table = rbio->fs_info->stripe_hash_table;
477
478 spin_lock_irqsave(&table->cache_lock, flags);
479 spin_lock(&rbio->bio_list_lock);
480
481 /* bump our ref if we were not in the list before */
482 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
483 atomic_inc(&rbio->refs);
484
485 if (!list_empty(&rbio->stripe_cache)){
486 list_move(&rbio->stripe_cache, &table->stripe_cache);
487 } else {
488 list_add(&rbio->stripe_cache, &table->stripe_cache);
489 table->cache_size += 1;
490 }
491
492 spin_unlock(&rbio->bio_list_lock);
493
494 if (table->cache_size > RBIO_CACHE_SIZE) {
495 struct btrfs_raid_bio *found;
496
497 found = list_entry(table->stripe_cache.prev,
498 struct btrfs_raid_bio,
499 stripe_cache);
500
501 if (found != rbio)
502 __remove_rbio_from_cache(found);
503 }
504
505 spin_unlock_irqrestore(&table->cache_lock, flags);
506}
507
508/*
509 * helper function to run the xor_blocks api. It is only
510 * able to do MAX_XOR_BLOCKS at a time, so we need to
511 * loop through.
512 */
513static void run_xor(void **pages, int src_cnt, ssize_t len)
514{
515 int src_off = 0;
516 int xor_src_cnt = 0;
517 void *dest = pages[src_cnt];
518
519 while(src_cnt > 0) {
520 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
521 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
522
523 src_cnt -= xor_src_cnt;
524 src_off += xor_src_cnt;
525 }
526}
527
528/*
529 * returns true if the bio list inside this rbio
530 * covers an entire stripe (no rmw required).
531 * Must be called with the bio list lock held, or
532 * at a time when you know it is impossible to add
533 * new bios into the list
534 */
535static int __rbio_is_full(struct btrfs_raid_bio *rbio)
536{
537 unsigned long size = rbio->bio_list_bytes;
538 int ret = 1;
539
540 if (size != rbio->nr_data * rbio->stripe_len)
541 ret = 0;
542
543 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
544 return ret;
545}
546
547static int rbio_is_full(struct btrfs_raid_bio *rbio)
548{
549 unsigned long flags;
550 int ret;
551
552 spin_lock_irqsave(&rbio->bio_list_lock, flags);
553 ret = __rbio_is_full(rbio);
554 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
555 return ret;
556}
557
558/*
559 * returns 1 if it is safe to merge two rbios together.
560 * The merging is safe if the two rbios correspond to
561 * the same stripe and if they are both going in the same
562 * direction (read vs write), and if neither one is
563 * locked for final IO
564 *
565 * The caller is responsible for locking such that
566 * rmw_locked is safe to test
567 */
568static int rbio_can_merge(struct btrfs_raid_bio *last,
569 struct btrfs_raid_bio *cur)
570{
571 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
572 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
573 return 0;
574
575 /*
576 * we can't merge with cached rbios, since the
577 * idea is that when we merge the destination
578 * rbio is going to run our IO for us. We can
579 * steal from cached rbio's though, other functions
580 * handle that.
581 */
582 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
583 test_bit(RBIO_CACHE_BIT, &cur->flags))
584 return 0;
585
586 if (last->bbio->raid_map[0] !=
587 cur->bbio->raid_map[0])
588 return 0;
589
590 /* we can't merge with different operations */
591 if (last->operation != cur->operation)
592 return 0;
593 /*
594 * We've need read the full stripe from the drive.
595 * check and repair the parity and write the new results.
596 *
597 * We're not allowed to add any new bios to the
598 * bio list here, anyone else that wants to
599 * change this stripe needs to do their own rmw.
600 */
601 if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
602 cur->operation == BTRFS_RBIO_PARITY_SCRUB)
603 return 0;
604
605 if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
606 cur->operation == BTRFS_RBIO_REBUILD_MISSING)
607 return 0;
608
609 return 1;
610}
611
612static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
613 int index)
614{
615 return stripe * rbio->stripe_npages + index;
616}
617
618/*
619 * these are just the pages from the rbio array, not from anything
620 * the FS sent down to us
621 */
622static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
623 int index)
624{
625 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
626}
627
628/*
629 * helper to index into the pstripe
630 */
631static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
632{
633 return rbio_stripe_page(rbio, rbio->nr_data, index);
634}
635
636/*
637 * helper to index into the qstripe, returns null
638 * if there is no qstripe
639 */
640static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
641{
642 if (rbio->nr_data + 1 == rbio->real_stripes)
643 return NULL;
644 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
645}
646
647/*
648 * The first stripe in the table for a logical address
649 * has the lock. rbios are added in one of three ways:
650 *
651 * 1) Nobody has the stripe locked yet. The rbio is given
652 * the lock and 0 is returned. The caller must start the IO
653 * themselves.
654 *
655 * 2) Someone has the stripe locked, but we're able to merge
656 * with the lock owner. The rbio is freed and the IO will
657 * start automatically along with the existing rbio. 1 is returned.
658 *
659 * 3) Someone has the stripe locked, but we're not able to merge.
660 * The rbio is added to the lock owner's plug list, or merged into
661 * an rbio already on the plug list. When the lock owner unlocks,
662 * the next rbio on the list is run and the IO is started automatically.
663 * 1 is returned
664 *
665 * If we return 0, the caller still owns the rbio and must continue with
666 * IO submission. If we return 1, the caller must assume the rbio has
667 * already been freed.
668 */
669static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
670{
671 int bucket = rbio_bucket(rbio);
672 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
673 struct btrfs_raid_bio *cur;
674 struct btrfs_raid_bio *pending;
675 unsigned long flags;
676 DEFINE_WAIT(wait);
677 struct btrfs_raid_bio *freeit = NULL;
678 struct btrfs_raid_bio *cache_drop = NULL;
679 int ret = 0;
680 int walk = 0;
681
682 spin_lock_irqsave(&h->lock, flags);
683 list_for_each_entry(cur, &h->hash_list, hash_list) {
684 walk++;
685 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
686 spin_lock(&cur->bio_list_lock);
687
688 /* can we steal this cached rbio's pages? */
689 if (bio_list_empty(&cur->bio_list) &&
690 list_empty(&cur->plug_list) &&
691 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
692 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
693 list_del_init(&cur->hash_list);
694 atomic_dec(&cur->refs);
695
696 steal_rbio(cur, rbio);
697 cache_drop = cur;
698 spin_unlock(&cur->bio_list_lock);
699
700 goto lockit;
701 }
702
703 /* can we merge into the lock owner? */
704 if (rbio_can_merge(cur, rbio)) {
705 merge_rbio(cur, rbio);
706 spin_unlock(&cur->bio_list_lock);
707 freeit = rbio;
708 ret = 1;
709 goto out;
710 }
711
712
713 /*
714 * we couldn't merge with the running
715 * rbio, see if we can merge with the
716 * pending ones. We don't have to
717 * check for rmw_locked because there
718 * is no way they are inside finish_rmw
719 * right now
720 */
721 list_for_each_entry(pending, &cur->plug_list,
722 plug_list) {
723 if (rbio_can_merge(pending, rbio)) {
724 merge_rbio(pending, rbio);
725 spin_unlock(&cur->bio_list_lock);
726 freeit = rbio;
727 ret = 1;
728 goto out;
729 }
730 }
731
732 /* no merging, put us on the tail of the plug list,
733 * our rbio will be started with the currently
734 * running rbio unlocks
735 */
736 list_add_tail(&rbio->plug_list, &cur->plug_list);
737 spin_unlock(&cur->bio_list_lock);
738 ret = 1;
739 goto out;
740 }
741 }
742lockit:
743 atomic_inc(&rbio->refs);
744 list_add(&rbio->hash_list, &h->hash_list);
745out:
746 spin_unlock_irqrestore(&h->lock, flags);
747 if (cache_drop)
748 remove_rbio_from_cache(cache_drop);
749 if (freeit)
750 __free_raid_bio(freeit);
751 return ret;
752}
753
754/*
755 * called as rmw or parity rebuild is completed. If the plug list has more
756 * rbios waiting for this stripe, the next one on the list will be started
757 */
758static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
759{
760 int bucket;
761 struct btrfs_stripe_hash *h;
762 unsigned long flags;
763 int keep_cache = 0;
764
765 bucket = rbio_bucket(rbio);
766 h = rbio->fs_info->stripe_hash_table->table + bucket;
767
768 if (list_empty(&rbio->plug_list))
769 cache_rbio(rbio);
770
771 spin_lock_irqsave(&h->lock, flags);
772 spin_lock(&rbio->bio_list_lock);
773
774 if (!list_empty(&rbio->hash_list)) {
775 /*
776 * if we're still cached and there is no other IO
777 * to perform, just leave this rbio here for others
778 * to steal from later
779 */
780 if (list_empty(&rbio->plug_list) &&
781 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
782 keep_cache = 1;
783 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
784 BUG_ON(!bio_list_empty(&rbio->bio_list));
785 goto done;
786 }
787
788 list_del_init(&rbio->hash_list);
789 atomic_dec(&rbio->refs);
790
791 /*
792 * we use the plug list to hold all the rbios
793 * waiting for the chance to lock this stripe.
794 * hand the lock over to one of them.
795 */
796 if (!list_empty(&rbio->plug_list)) {
797 struct btrfs_raid_bio *next;
798 struct list_head *head = rbio->plug_list.next;
799
800 next = list_entry(head, struct btrfs_raid_bio,
801 plug_list);
802
803 list_del_init(&rbio->plug_list);
804
805 list_add(&next->hash_list, &h->hash_list);
806 atomic_inc(&next->refs);
807 spin_unlock(&rbio->bio_list_lock);
808 spin_unlock_irqrestore(&h->lock, flags);
809
810 if (next->operation == BTRFS_RBIO_READ_REBUILD)
811 async_read_rebuild(next);
812 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
813 steal_rbio(rbio, next);
814 async_read_rebuild(next);
815 } else if (next->operation == BTRFS_RBIO_WRITE) {
816 steal_rbio(rbio, next);
817 async_rmw_stripe(next);
818 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
819 steal_rbio(rbio, next);
820 async_scrub_parity(next);
821 }
822
823 goto done_nolock;
824 /*
825 * The barrier for this waitqueue_active is not needed,
826 * we're protected by h->lock and can't miss a wakeup.
827 */
828 } else if (waitqueue_active(&h->wait)) {
829 spin_unlock(&rbio->bio_list_lock);
830 spin_unlock_irqrestore(&h->lock, flags);
831 wake_up(&h->wait);
832 goto done_nolock;
833 }
834 }
835done:
836 spin_unlock(&rbio->bio_list_lock);
837 spin_unlock_irqrestore(&h->lock, flags);
838
839done_nolock:
840 if (!keep_cache)
841 remove_rbio_from_cache(rbio);
842}
843
844static void __free_raid_bio(struct btrfs_raid_bio *rbio)
845{
846 int i;
847
848 WARN_ON(atomic_read(&rbio->refs) < 0);
849 if (!atomic_dec_and_test(&rbio->refs))
850 return;
851
852 WARN_ON(!list_empty(&rbio->stripe_cache));
853 WARN_ON(!list_empty(&rbio->hash_list));
854 WARN_ON(!bio_list_empty(&rbio->bio_list));
855
856 for (i = 0; i < rbio->nr_pages; i++) {
857 if (rbio->stripe_pages[i]) {
858 __free_page(rbio->stripe_pages[i]);
859 rbio->stripe_pages[i] = NULL;
860 }
861 }
862
863 btrfs_put_bbio(rbio->bbio);
864 kfree(rbio);
865}
866
867static void free_raid_bio(struct btrfs_raid_bio *rbio)
868{
869 unlock_stripe(rbio);
870 __free_raid_bio(rbio);
871}
872
873/*
874 * this frees the rbio and runs through all the bios in the
875 * bio_list and calls end_io on them
876 */
877static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
878{
879 struct bio *cur = bio_list_get(&rbio->bio_list);
880 struct bio *next;
881
882 if (rbio->generic_bio_cnt)
883 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
884
885 free_raid_bio(rbio);
886
887 while (cur) {
888 next = cur->bi_next;
889 cur->bi_next = NULL;
890 cur->bi_error = err;
891 bio_endio(cur);
892 cur = next;
893 }
894}
895
896/*
897 * end io function used by finish_rmw. When we finally
898 * get here, we've written a full stripe
899 */
900static void raid_write_end_io(struct bio *bio)
901{
902 struct btrfs_raid_bio *rbio = bio->bi_private;
903 int err = bio->bi_error;
904 int max_errors;
905
906 if (err)
907 fail_bio_stripe(rbio, bio);
908
909 bio_put(bio);
910
911 if (!atomic_dec_and_test(&rbio->stripes_pending))
912 return;
913
914 err = 0;
915
916 /* OK, we have read all the stripes we need to. */
917 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
918 0 : rbio->bbio->max_errors;
919 if (atomic_read(&rbio->error) > max_errors)
920 err = -EIO;
921
922 rbio_orig_end_io(rbio, err);
923}
924
925/*
926 * the read/modify/write code wants to use the original bio for
927 * any pages it included, and then use the rbio for everything
928 * else. This function decides if a given index (stripe number)
929 * and page number in that stripe fall inside the original bio
930 * or the rbio.
931 *
932 * if you set bio_list_only, you'll get a NULL back for any ranges
933 * that are outside the bio_list
934 *
935 * This doesn't take any refs on anything, you get a bare page pointer
936 * and the caller must bump refs as required.
937 *
938 * You must call index_rbio_pages once before you can trust
939 * the answers from this function.
940 */
941static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
942 int index, int pagenr, int bio_list_only)
943{
944 int chunk_page;
945 struct page *p = NULL;
946
947 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
948
949 spin_lock_irq(&rbio->bio_list_lock);
950 p = rbio->bio_pages[chunk_page];
951 spin_unlock_irq(&rbio->bio_list_lock);
952
953 if (p || bio_list_only)
954 return p;
955
956 return rbio->stripe_pages[chunk_page];
957}
958
959/*
960 * number of pages we need for the entire stripe across all the
961 * drives
962 */
963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
964{
965 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
966}
967
968/*
969 * allocation and initial setup for the btrfs_raid_bio. Not
970 * this does not allocate any pages for rbio->pages.
971 */
972static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
973 struct btrfs_bio *bbio, u64 stripe_len)
974{
975 struct btrfs_raid_bio *rbio;
976 int nr_data = 0;
977 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
978 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
979 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
980 void *p;
981
982 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
983 DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
984 sizeof(long), GFP_NOFS);
985 if (!rbio)
986 return ERR_PTR(-ENOMEM);
987
988 bio_list_init(&rbio->bio_list);
989 INIT_LIST_HEAD(&rbio->plug_list);
990 spin_lock_init(&rbio->bio_list_lock);
991 INIT_LIST_HEAD(&rbio->stripe_cache);
992 INIT_LIST_HEAD(&rbio->hash_list);
993 rbio->bbio = bbio;
994 rbio->fs_info = root->fs_info;
995 rbio->stripe_len = stripe_len;
996 rbio->nr_pages = num_pages;
997 rbio->real_stripes = real_stripes;
998 rbio->stripe_npages = stripe_npages;
999 rbio->faila = -1;
1000 rbio->failb = -1;
1001 atomic_set(&rbio->refs, 1);
1002 atomic_set(&rbio->error, 0);
1003 atomic_set(&rbio->stripes_pending, 0);
1004
1005 /*
1006 * the stripe_pages and bio_pages array point to the extra
1007 * memory we allocated past the end of the rbio
1008 */
1009 p = rbio + 1;
1010 rbio->stripe_pages = p;
1011 rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1012 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
1013
1014 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1015 nr_data = real_stripes - 1;
1016 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1017 nr_data = real_stripes - 2;
1018 else
1019 BUG();
1020
1021 rbio->nr_data = nr_data;
1022 return rbio;
1023}
1024
1025/* allocate pages for all the stripes in the bio, including parity */
1026static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1027{
1028 int i;
1029 struct page *page;
1030
1031 for (i = 0; i < rbio->nr_pages; i++) {
1032 if (rbio->stripe_pages[i])
1033 continue;
1034 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1035 if (!page)
1036 return -ENOMEM;
1037 rbio->stripe_pages[i] = page;
1038 }
1039 return 0;
1040}
1041
1042/* only allocate pages for p/q stripes */
1043static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1044{
1045 int i;
1046 struct page *page;
1047
1048 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1049
1050 for (; i < rbio->nr_pages; i++) {
1051 if (rbio->stripe_pages[i])
1052 continue;
1053 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1054 if (!page)
1055 return -ENOMEM;
1056 rbio->stripe_pages[i] = page;
1057 }
1058 return 0;
1059}

/*
 * add a single page from a specific stripe into our list of bios for IO.
 * This will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_error &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
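
/*
 * Worked example of the merge test above (illustrative numbers, assuming
 * 4K pages): if the tail bio starts at sector 2048 (byte 1048576) and
 * already holds 8K of data, last_end is 1056768.  A new page for the
 * same bdev whose disk_start is 1056768 extends that bio; any gap, a
 * different device, or an already-errored bio forces a fresh bio instead.
 */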

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we set up the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	struct page *p;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		for (i = 0; i < bio->bi_vcnt; i++) {
			p = bio->bi_io_vec[i].bv_page;
			rbio->bio_pages[page_index + i] = p;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
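
/*
 * Index math above, with illustrative numbers: raid_map[0] is the logical
 * address where the full stripe starts, so a bio whose logical start is
 * raid_map[0] + 20K lands at page_index 5 with 4K pages.  bio_pages is
 * indexed across the data stripes in logical order, which is what lets
 * page_in_rbio() prefer pages from the bio list over stripe_pages.
 */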

/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}
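
	/*
	 * For reference, the parity math just performed (standard RAID5/6
	 * definitions, nothing btrfs-specific): with data blocks D0..Dn-1,
	 *
	 *   P = D0 ^ D1 ^ ... ^ Dn-1
	 *   Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1   (GF(2^8), g = {02})
	 *
	 * raid6_call.gen_syndrome() computes P and Q in one pass; the raid5
	 * branch gets P alone by copying D0 into the parity page and then
	 * xoring in the remaining data pages.
	 */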

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
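
/*
 * The two lookups above differ only in address space: find_bio_stripe()
 * matches against stripe->physical (and the bdev) because its bios have
 * already been mapped to devices, while find_logical_bio_stripe() matches
 * against raid_map[] because bios from the higher layers still carry
 * logical addresses.  Both treat each stripe as a byte range
 * [start, start + stripe_len).
 */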

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	int i;
	struct page *p;

	for (i = 0; i < bio->bi_vcnt; i++) {
		p = bio->bi_io_vec[i].bv_page;
		SetPageUptodate(p);
	}
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			rmw_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
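
/*
 * A sketch of the full RMW pipeline driven by raid56_rmw_stripe(): read
 * every data page not already supplied by the bio list, then from
 * raid_rmw_end_io() either recompute parity and write (finish_rmw) or
 * rebuild any failed stripes first (__raid56_parity_recover), whose
 * completion path feeds back into finish_rmw for write rbios.  Errors at
 * any stage end the original bios with -EIO.
 */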

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging callbacks to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}
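
/*
 * Plug lifecycle, for orientation: raid56_parity_write() calls
 * blk_check_plugged(), so while the submitting task holds a blk_plug,
 * partial-stripe rbios queue up on plug->rbio_list.  When the plug is
 * released, btrfs_raid_unplug() runs (via a worker if we were unplugged
 * from schedule()) and run_plug() sorts and merges the backlog, ideally
 * turning several partial writes into full-stripe writes that skip the
 * read half of RMW entirely.
 */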

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}

/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use the dbitmap to mark the horizontal stripes
		 * in which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* set up our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe has failed, do a pstripe
			 * reconstruction from the xors.
			 * If both the q stripe and the P stripe have failed,
			 * we're here due to a crc mismatch and we can't give
			 * them the data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

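			/*
			 * Worked example of the rotation below (illustrative,
			 * nr_data == 4, faila == 1): after the memcpy the
			 * failed page holds a copy of P, and pointers is
			 * [D0, P', D2, D3].  The shift turns that into
			 * [D0, D2, D3, P'], so run_xor() can xor the three
			 * surviving data pages into P' in place, leaving
			 * D1 = P ^ D0 ^ D2 ^ D3 in the failed page.
			 */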
			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity.  endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish an rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Note: We need to make sure that all the pages added to the scrub/replace
 * raid bio are correct and will not be changed during the scrub/replace;
 * that is, those pages only hold metadata or file data with checksums.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* For now we only support the case where sectorsize equals PAGE_SIZE */
	ASSERT(root->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}

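/*
 * A note on the fields set up above: scrubp is the stripe index of the
 * device being scrubbed (data, P or Q), and dbitmap has one bit per
 * sector of the stripe marking which horizontal stripes actually carry
 * data worth checking.  Everything later in the scrub path (page
 * allocation, parity regeneration, writeback) iterates only over the
 * bits set in dbitmap.
 */
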
2242/* Used for both parity scrub and missing. */
2243void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2244 u64 logical)
2245{
2246 int stripe_offset;
2247 int index;
2248
2249 ASSERT(logical >= rbio->bbio->raid_map[0]);
2250 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2251 rbio->stripe_len * rbio->nr_data);
2252 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2253 index = stripe_offset >> PAGE_SHIFT;
2254 rbio->bio_pages[index] = page;
2255}
2256
2257/*
2258 * We just scrub the parity that we have correct data on the same horizontal,
2259 * so we needn't allocate all pages for all the stripes.
2260 */
2261static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2262{
2263 int i;
2264 int bit;
2265 int index;
2266 struct page *page;
2267
2268 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2269 for (i = 0; i < rbio->real_stripes; i++) {
2270 index = i * rbio->stripe_npages + bit;
2271 if (rbio->stripe_pages[index])
2272 continue;
2273
2274 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2275 if (!page)
2276 return -ENOMEM;
2277 rbio->stripe_pages[index] = page;
2278 }
2279 }
2280 return 0;
2281}
2282
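/*
 * Regenerate parity for the horizontal stripes marked in dbitmap and
 * compare the result against the parity on disk, rewriting (and, for
 * dev-replace, copying to the target device) only the pages that
 * mismatch.  With need_check == 0 the regenerate/compare step is skipped
 * and the marked pages are written back as-is, which the callers use
 * once a reconstruction has already produced correct pages.
 */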
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check the scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* The parity is right, no need to write it back */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		submit_bio(WRITE, bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We cannot use the parity being scrubbed to repair data,
		 * so our repair capability is reduced by one (in the case
		 * of RAID5, we cannot repair anything).
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is bad,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6.  If the corrupted parity
		 * is the one being scrubbed, luckily we can use the other
		 * parity to repair the data; otherwise we cannot repair
		 * the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
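
/*
 * Summary of the checks above: dfail counts failed *data* stripes and
 * failp remembers a failed *parity* stripe.  Anything beyond
 * max_errors - 1 failed data stripes is unrecoverable here because the
 * parity being scrubbed cannot serve as a reconstruction source; with no
 * failed data the parity is simply rewritten, and with failed data we
 * can only proceed when the failed parity is the scrubbed one, leaving
 * the other parity intact to rebuild from.
 */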

/*
 * end io for the read phase of the scrub/replace cycle.  All the bios here
 * are physical stripe bios we've read from the disk so we can recalculate
 * the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(READ, bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}