1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * raid1.c : Multiple Devices driver for Linux
4 *
5 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
6 *
7 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
8 *
9 * RAID-1 management functions.
10 *
11 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
12 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
14 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
15 *
16 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
17 * bitmapped intelligence in resync:
18 *
19 * - bitmap marked during normal i/o
20 * - bitmap used to skip nondirty blocks during sync
21 *
22 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
23 * - persistent bitmap code
24 */
25
26#include <linux/slab.h>
27#include <linux/delay.h>
28#include <linux/blkdev.h>
29#include <linux/module.h>
30#include <linux/seq_file.h>
31#include <linux/ratelimit.h>
32
33#include <trace/events/block.h>
34
35#include "md.h"
36#include "raid1.h"
37#include "md-bitmap.h"
38
39#define UNSUPPORTED_MDDEV_FLAGS \
40 ((1L << MD_HAS_JOURNAL) | \
41 (1L << MD_JOURNAL_CLEAN) | \
42 (1L << MD_HAS_PPL) | \
43 (1L << MD_HAS_MULTIPLE_PPLS))
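/*
 * These flags describe journal/PPL state that has no meaning for a mirrored
 * array; they are presumably cleared (via mddev_clear_unsupported_flags())
 * when an array using such features is converted to RAID1.
 */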
44
45static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
46static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
47
48#define raid1_log(md, fmt, args...) \
49 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
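/*
 * Note: raid1_log() only emits a blktrace message on the array's request
 * queue (when one exists); the messages appear in blktrace/blkparse output
 * with the "raid1 " prefix.
 */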
50
51#include "raid1-10.c"
52
53static int check_and_add_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
54{
55 struct wb_info *wi, *temp_wi;
56 unsigned long flags;
57 int ret = 0;
58 struct mddev *mddev = rdev->mddev;
59
60 wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO);
61
62 spin_lock_irqsave(&rdev->wb_list_lock, flags);
63 list_for_each_entry(temp_wi, &rdev->wb_list, list) {
64 /* collision happened */
65 if (hi > temp_wi->lo && lo < temp_wi->hi) {
66 ret = -EBUSY;
67 break;
68 }
69 }
70
71 if (!ret) {
72 wi->lo = lo;
73 wi->hi = hi;
74 list_add(&wi->list, &rdev->wb_list);
75 } else
76 mempool_free(wi, mddev->wb_info_pool);
77 spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
78
79 return ret;
80}
81
82static void remove_wb(struct md_rdev *rdev, sector_t lo, sector_t hi)
83{
84 struct wb_info *wi;
85 unsigned long flags;
86 int found = 0;
87 struct mddev *mddev = rdev->mddev;
88
89 spin_lock_irqsave(&rdev->wb_list_lock, flags);
90 list_for_each_entry(wi, &rdev->wb_list, list)
91 if (hi == wi->hi && lo == wi->lo) {
92 list_del(&wi->list);
93 mempool_free(wi, mddev->wb_info_pool);
94 found = 1;
95 break;
96 }
97
98 if (!found)
99 WARN(1, "The write behind IO is not recorded\n");
100 spin_unlock_irqrestore(&rdev->wb_list_lock, flags);
101 wake_up(&rdev->wb_io_wait);
102}
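/*
 * A short note on the write-behind interval tracking above, assuming the
 * wb_info/wb_list machinery declared in md.h: the write path calls
 * check_and_add_wb() (see raid1_write_request() below, which waits on
 * rdev->wb_io_wait until the interval no longer collides), and the matching
 * remove_wb() runs from raid1_end_write_request() once the behind write to
 * that rdev completes. This serialises overlapping write-behind I/O to the
 * same sectors of a single device.
 */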
103
104/*
105 * for resync bio, r1bio pointer can be retrieved from the per-bio
106 * 'struct resync_pages'.
107 */
108static inline struct r1bio *get_resync_r1bio(struct bio *bio)
109{
110 return get_resync_pages(bio)->raid_bio;
111}
112
113static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
114{
115 struct pool_info *pi = data;
116 int size = offsetof(struct r1bio, bios[pi->raid_disks]);
117
118 /* allocate a r1bio with room for raid_disks entries in the bios array */
119 return kzalloc(size, gfp_flags);
120}
121
122#define RESYNC_DEPTH 32
123#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
124#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
125#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
126#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
127#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
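/*
 * Worked example, assuming the 64KiB RESYNC_BLOCK_SIZE defined in
 * raid1-10.c: RESYNC_SECTORS = 65536 >> 9 = 128 sectors per block,
 * RESYNC_WINDOW = 64KiB * 32 = 2MiB (4096 sectors), and
 * CLUSTER_RESYNC_WINDOW = 16 * 2MiB = 32MiB (65536 sectors).
 */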
128
129static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
130{
131 struct pool_info *pi = data;
132 struct r1bio *r1_bio;
133 struct bio *bio;
134 int need_pages;
135 int j;
136 struct resync_pages *rps;
137
138 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
139 if (!r1_bio)
140 return NULL;
141
142 rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
143 gfp_flags);
144 if (!rps)
145 goto out_free_r1bio;
146
147 /*
148 * Allocate bios : 1 for reading, n-1 for writing
149 */
150 for (j = pi->raid_disks ; j-- ; ) {
151 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
152 if (!bio)
153 goto out_free_bio;
154 r1_bio->bios[j] = bio;
155 }
156 /*
157 * Allocate RESYNC_PAGES data pages and attach them to
158 * the first bio.
159 * If this is a user-requested check/repair, allocate
160 * RESYNC_PAGES for each bio.
161 */
162 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
163 need_pages = pi->raid_disks;
164 else
165 need_pages = 1;
166 for (j = 0; j < pi->raid_disks; j++) {
167 struct resync_pages *rp = &rps[j];
168
169 bio = r1_bio->bios[j];
170
171 if (j < need_pages) {
172 if (resync_alloc_pages(rp, gfp_flags))
173 goto out_free_pages;
174 } else {
175 memcpy(rp, &rps[0], sizeof(*rp));
176 resync_get_all_pages(rp);
177 }
178
179 rp->raid_bio = r1_bio;
180 bio->bi_private = rp;
181 }
182
183 r1_bio->master_bio = NULL;
184
185 return r1_bio;
186
187out_free_pages:
188 while (--j >= 0)
189 resync_free_pages(&rps[j]);
190
191out_free_bio:
192 while (++j < pi->raid_disks)
193 bio_put(r1_bio->bios[j]);
194 kfree(rps);
195
196out_free_r1bio:
197 rbio_pool_free(r1_bio, data);
198 return NULL;
199}
200
201static void r1buf_pool_free(void *__r1_bio, void *data)
202{
203 struct pool_info *pi = data;
204 int i;
205 struct r1bio *r1bio = __r1_bio;
206 struct resync_pages *rp = NULL;
207
208 for (i = pi->raid_disks; i--; ) {
209 rp = get_resync_pages(r1bio->bios[i]);
210 resync_free_pages(rp);
211 bio_put(r1bio->bios[i]);
212 }
213
214 /* resync pages array stored in the 1st bio's .bi_private */
215 kfree(rp);
216
217 rbio_pool_free(r1bio, data);
218}
219
220static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
221{
222 int i;
223
224 for (i = 0; i < conf->raid_disks * 2; i++) {
225 struct bio **bio = r1_bio->bios + i;
226 if (!BIO_SPECIAL(*bio))
227 bio_put(*bio);
228 *bio = NULL;
229 }
230}
231
232static void free_r1bio(struct r1bio *r1_bio)
233{
234 struct r1conf *conf = r1_bio->mddev->private;
235
236 put_all_bios(conf, r1_bio);
237 mempool_free(r1_bio, &conf->r1bio_pool);
238}
239
240static void put_buf(struct r1bio *r1_bio)
241{
242 struct r1conf *conf = r1_bio->mddev->private;
243 sector_t sect = r1_bio->sector;
244 int i;
245
246 for (i = 0; i < conf->raid_disks * 2; i++) {
247 struct bio *bio = r1_bio->bios[i];
248 if (bio->bi_end_io)
249 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
250 }
251
252 mempool_free(r1_bio, &conf->r1buf_pool);
253
254 lower_barrier(conf, sect);
255}
256
257static void reschedule_retry(struct r1bio *r1_bio)
258{
259 unsigned long flags;
260 struct mddev *mddev = r1_bio->mddev;
261 struct r1conf *conf = mddev->private;
262 int idx;
263
264 idx = sector_to_idx(r1_bio->sector);
265 spin_lock_irqsave(&conf->device_lock, flags);
266 list_add(&r1_bio->retry_list, &conf->retry_list);
267 atomic_inc(&conf->nr_queued[idx]);
268 spin_unlock_irqrestore(&conf->device_lock, flags);
269
270 wake_up(&conf->wait_barrier);
271 md_wakeup_thread(mddev->thread);
272}
273
274/*
275 * raid_end_bio_io() is called when we have finished servicing a mirrored
276 * operation and are ready to return a success/failure code to the buffer
277 * cache layer.
278 */
279static void call_bio_endio(struct r1bio *r1_bio)
280{
281 struct bio *bio = r1_bio->master_bio;
282 struct r1conf *conf = r1_bio->mddev->private;
283
284 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
285 bio->bi_status = BLK_STS_IOERR;
286
287 bio_endio(bio);
288 /*
289 * Wake up any possible resync thread that waits for the device
290 * to go idle.
291 */
292 allow_barrier(conf, r1_bio->sector);
293}
294
295static void raid_end_bio_io(struct r1bio *r1_bio)
296{
297 struct bio *bio = r1_bio->master_bio;
298
299 /* if nobody has done the final endio yet, do it now */
300 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
301 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
302 (bio_data_dir(bio) == WRITE) ? "write" : "read",
303 (unsigned long long) bio->bi_iter.bi_sector,
304 (unsigned long long) bio_end_sector(bio) - 1);
305
306 call_bio_endio(r1_bio);
307 }
308 free_r1bio(r1_bio);
309}
310
311/*
312 * Update disk head position estimator based on IRQ completion info.
313 */
314static inline void update_head_pos(int disk, struct r1bio *r1_bio)
315{
316 struct r1conf *conf = r1_bio->mddev->private;
317
318 conf->mirrors[disk].head_position =
319 r1_bio->sector + (r1_bio->sectors);
320}
321
322/*
323 * Find the disk number which triggered given bio
324 */
325static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
326{
327 int mirror;
328 struct r1conf *conf = r1_bio->mddev->private;
329 int raid_disks = conf->raid_disks;
330
331 for (mirror = 0; mirror < raid_disks * 2; mirror++)
332 if (r1_bio->bios[mirror] == bio)
333 break;
334
335 BUG_ON(mirror == raid_disks * 2);
336 update_head_pos(mirror, r1_bio);
337
338 return mirror;
339}
340
341static void raid1_end_read_request(struct bio *bio)
342{
343 int uptodate = !bio->bi_status;
344 struct r1bio *r1_bio = bio->bi_private;
345 struct r1conf *conf = r1_bio->mddev->private;
346 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
347
348 /*
349 * this branch is our 'one mirror IO has finished' event handler:
350 */
351 update_head_pos(r1_bio->read_disk, r1_bio);
352
353 if (uptodate)
354 set_bit(R1BIO_Uptodate, &r1_bio->state);
355 else if (test_bit(FailFast, &rdev->flags) &&
356 test_bit(R1BIO_FailFast, &r1_bio->state))
357 /* This was a fail-fast read so we definitely
358 * want to retry */
359 ;
360 else {
361 /* If all other devices have failed, we want to return
362 * the error upwards rather than fail the last device.
363 * Here we redefine "uptodate" to mean "Don't want to retry"
364 */
365 unsigned long flags;
366 spin_lock_irqsave(&conf->device_lock, flags);
367 if (r1_bio->mddev->degraded == conf->raid_disks ||
368 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
369 test_bit(In_sync, &rdev->flags)))
370 uptodate = 1;
371 spin_unlock_irqrestore(&conf->device_lock, flags);
372 }
373
374 if (uptodate) {
375 raid_end_bio_io(r1_bio);
376 rdev_dec_pending(rdev, conf->mddev);
377 } else {
378 /*
379 * oops, read error:
380 */
381 char b[BDEVNAME_SIZE];
382 pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
383 mdname(conf->mddev),
384 bdevname(rdev->bdev, b),
385 (unsigned long long)r1_bio->sector);
386 set_bit(R1BIO_ReadError, &r1_bio->state);
387 reschedule_retry(r1_bio);
388 /* don't drop the reference on read_disk yet */
389 }
390}
391
392static void close_write(struct r1bio *r1_bio)
393{
394 /* it really is the end of this request */
395 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
396 bio_free_pages(r1_bio->behind_master_bio);
397 bio_put(r1_bio->behind_master_bio);
398 r1_bio->behind_master_bio = NULL;
399 }
400 /* clear the bitmap if all writes complete successfully */
401 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
402 r1_bio->sectors,
403 !test_bit(R1BIO_Degraded, &r1_bio->state),
404 test_bit(R1BIO_BehindIO, &r1_bio->state));
405 md_write_end(r1_bio->mddev);
406}
407
408static void r1_bio_write_done(struct r1bio *r1_bio)
409{
410 if (!atomic_dec_and_test(&r1_bio->remaining))
411 return;
412
413 if (test_bit(R1BIO_WriteError, &r1_bio->state))
414 reschedule_retry(r1_bio);
415 else {
416 close_write(r1_bio);
417 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
418 reschedule_retry(r1_bio);
419 else
420 raid_end_bio_io(r1_bio);
421 }
422}
423
424static void raid1_end_write_request(struct bio *bio)
425{
426 struct r1bio *r1_bio = bio->bi_private;
427 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
428 struct r1conf *conf = r1_bio->mddev->private;
429 struct bio *to_put = NULL;
430 int mirror = find_bio_disk(r1_bio, bio);
431 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
432 bool discard_error;
433
434 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
435
436 /*
437 * 'one mirror IO has finished' event handler:
438 */
439 if (bio->bi_status && !discard_error) {
440 set_bit(WriteErrorSeen, &rdev->flags);
441 if (!test_and_set_bit(WantReplacement, &rdev->flags))
442 set_bit(MD_RECOVERY_NEEDED, &
443 conf->mddev->recovery);
444
445 if (test_bit(FailFast, &rdev->flags) &&
446 (bio->bi_opf & MD_FAILFAST) &&
447 /* We never try FailFast to WriteMostly devices */
448 !test_bit(WriteMostly, &rdev->flags)) {
449 md_error(r1_bio->mddev, rdev);
450 }
451
452 /*
 * When the device is faulty, it is not necessary to
 * handle the write error.
 * For failfast, this is the only remaining device, so
 * we need to retry the write without FailFast.
457 */
458 if (!test_bit(Faulty, &rdev->flags))
459 set_bit(R1BIO_WriteError, &r1_bio->state);
460 else {
461 /* Finished with this branch */
462 r1_bio->bios[mirror] = NULL;
463 to_put = bio;
464 }
465 } else {
466 /*
467 * Set R1BIO_Uptodate in our master bio, so that we
 * will return a good error code to the higher
469 * levels even if IO on some other mirrored buffer
470 * fails.
471 *
472 * The 'master' represents the composite IO operation
473 * to user-side. So if something waits for IO, then it
474 * will wait for the 'master' bio.
475 */
476 sector_t first_bad;
477 int bad_sectors;
478
479 r1_bio->bios[mirror] = NULL;
480 to_put = bio;
481 /*
482 * Do not set R1BIO_Uptodate if the current device is
483 * rebuilding or Faulty. This is because we cannot use
 * such a device for properly reading the data back (we could
 * potentially use it if the current write fell entirely
 * before rdev->recovery_offset, but for simplicity we don't
 * check this here).
488 */
489 if (test_bit(In_sync, &rdev->flags) &&
490 !test_bit(Faulty, &rdev->flags))
491 set_bit(R1BIO_Uptodate, &r1_bio->state);
492
493 /* Maybe we can clear some bad blocks. */
494 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
495 &first_bad, &bad_sectors) && !discard_error) {
496 r1_bio->bios[mirror] = IO_MADE_GOOD;
497 set_bit(R1BIO_MadeGood, &r1_bio->state);
498 }
499 }
500
501 if (behind) {
502 if (test_bit(WBCollisionCheck, &rdev->flags)) {
503 sector_t lo = r1_bio->sector;
504 sector_t hi = r1_bio->sector + r1_bio->sectors;
505
506 remove_wb(rdev, lo, hi);
507 }
508 if (test_bit(WriteMostly, &rdev->flags))
509 atomic_dec(&r1_bio->behind_remaining);
510
511 /*
512 * In behind mode, we ACK the master bio once the I/O
513 * has safely reached all non-writemostly
514 * disks. Setting the Returned bit ensures that this
515 * gets done only once -- we don't ever want to return
 * -EIO here; instead we'll wait for all writes to complete.
517 */
518 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
519 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
520 /* Maybe we can return now */
521 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
522 struct bio *mbio = r1_bio->master_bio;
523 pr_debug("raid1: behind end write sectors"
524 " %llu-%llu\n",
525 (unsigned long long) mbio->bi_iter.bi_sector,
526 (unsigned long long) bio_end_sector(mbio) - 1);
527 call_bio_endio(r1_bio);
528 }
529 }
530 }
531 if (r1_bio->bios[mirror] == NULL)
532 rdev_dec_pending(rdev, conf->mddev);
533
534 /*
535 * Let's see if all mirrored write operations have finished
536 * already.
537 */
538 r1_bio_write_done(r1_bio);
539
540 if (to_put)
541 bio_put(to_put);
542}
543
544static sector_t align_to_barrier_unit_end(sector_t start_sector,
545 sector_t sectors)
546{
547 sector_t len;
548
549 WARN_ON(sectors == 0);
550 /*
551 * len is the number of sectors from start_sector to end of the
552 * barrier unit which start_sector belongs to.
553 */
554 len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
555 start_sector;
556
557 if (len > sectors)
558 len = sectors;
559
560 return len;
561}
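/*
 * Worked example, assuming the 1 << 17 sector (64MiB) barrier unit from
 * BARRIER_UNIT_SECTOR_SIZE in raid1.h: for start_sector = 131000 and
 * sectors = 500, round_up(131001, 131072) = 131072, so len = 72 and the
 * request is clipped to 72 sectors; a request starting exactly on a
 * barrier-unit boundary may keep up to the full 131072 sectors.
 */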
562
563/*
564 * This routine returns the disk from which the requested read should
565 * be done. There is a per-array 'next expected sequential IO' sector
566 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts; both the normal and the resync IO
569 * completion handlers update this position correctly. If there is no
570 * perfect sequential match then we pick the disk whose head is closest.
571 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because the head position is tracked per mirror, not per device.
574 *
575 * The rdev for the device selected will have nr_pending incremented.
576 */
577static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
578{
579 const sector_t this_sector = r1_bio->sector;
580 int sectors;
581 int best_good_sectors;
582 int best_disk, best_dist_disk, best_pending_disk;
583 int has_nonrot_disk;
584 int disk;
585 sector_t best_dist;
586 unsigned int min_pending;
587 struct md_rdev *rdev;
588 int choose_first;
589 int choose_next_idle;
590
591 rcu_read_lock();
592 /*
593 * Check if we can balance. We can balance on the whole
594 * device if no resync is going on, or below the resync window.
595 * We take the first readable disk when above the resync window.
596 */
597 retry:
598 sectors = r1_bio->sectors;
599 best_disk = -1;
600 best_dist_disk = -1;
601 best_dist = MaxSector;
602 best_pending_disk = -1;
603 min_pending = UINT_MAX;
604 best_good_sectors = 0;
605 has_nonrot_disk = 0;
606 choose_next_idle = 0;
607 clear_bit(R1BIO_FailFast, &r1_bio->state);
608
609 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
610 (mddev_is_clustered(conf->mddev) &&
611 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
612 this_sector + sectors)))
613 choose_first = 1;
614 else
615 choose_first = 0;
616
617 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
618 sector_t dist;
619 sector_t first_bad;
620 int bad_sectors;
621 unsigned int pending;
622 bool nonrot;
623
624 rdev = rcu_dereference(conf->mirrors[disk].rdev);
625 if (r1_bio->bios[disk] == IO_BLOCKED
626 || rdev == NULL
627 || test_bit(Faulty, &rdev->flags))
628 continue;
629 if (!test_bit(In_sync, &rdev->flags) &&
630 rdev->recovery_offset < this_sector + sectors)
631 continue;
632 if (test_bit(WriteMostly, &rdev->flags)) {
633 /* Don't balance among write-mostly, just
634 * use the first as a last resort */
635 if (best_dist_disk < 0) {
636 if (is_badblock(rdev, this_sector, sectors,
637 &first_bad, &bad_sectors)) {
638 if (first_bad <= this_sector)
639 /* Cannot use this */
640 continue;
641 best_good_sectors = first_bad - this_sector;
642 } else
643 best_good_sectors = sectors;
644 best_dist_disk = disk;
645 best_pending_disk = disk;
646 }
647 continue;
648 }
649 /* This is a reasonable device to use. It might
650 * even be best.
651 */
652 if (is_badblock(rdev, this_sector, sectors,
653 &first_bad, &bad_sectors)) {
654 if (best_dist < MaxSector)
655 /* already have a better device */
656 continue;
657 if (first_bad <= this_sector) {
658 /* cannot read here. If this is the 'primary'
659 * device, then we must not read beyond
 * bad_sectors from another device.
661 */
662 bad_sectors -= (this_sector - first_bad);
663 if (choose_first && sectors > bad_sectors)
664 sectors = bad_sectors;
665 if (best_good_sectors > sectors)
666 best_good_sectors = sectors;
667
668 } else {
669 sector_t good_sectors = first_bad - this_sector;
670 if (good_sectors > best_good_sectors) {
671 best_good_sectors = good_sectors;
672 best_disk = disk;
673 }
674 if (choose_first)
675 break;
676 }
677 continue;
678 } else {
679 if ((sectors > best_good_sectors) && (best_disk >= 0))
680 best_disk = -1;
681 best_good_sectors = sectors;
682 }
683
684 if (best_disk >= 0)
685 /* At least two disks to choose from so failfast is OK */
686 set_bit(R1BIO_FailFast, &r1_bio->state);
687
688 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
689 has_nonrot_disk |= nonrot;
690 pending = atomic_read(&rdev->nr_pending);
691 dist = abs(this_sector - conf->mirrors[disk].head_position);
692 if (choose_first) {
693 best_disk = disk;
694 break;
695 }
696 /* Don't change to another disk for sequential reads */
697 if (conf->mirrors[disk].next_seq_sect == this_sector
698 || dist == 0) {
699 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
700 struct raid1_info *mirror = &conf->mirrors[disk];
701
702 best_disk = disk;
703 /*
 * If the buffered sequential IO size exceeds the optimal
 * iosize, check whether there is an idle disk. If yes, choose
 * the idle disk. read_balance could already have chosen an
 * idle disk before noticing that this is sequential IO on
 * this disk. That doesn't matter: this disk will go idle and
 * be used again once the first disk's IO size exceeds the
 * optimal iosize. This way the first disk's iosize is at least
 * the optimal iosize. The second disk's iosize might be small,
 * but that is not a big deal since by the time the second disk
 * starts IO, the first disk is likely still busy.
715 */
716 if (nonrot && opt_iosize > 0 &&
717 mirror->seq_start != MaxSector &&
718 mirror->next_seq_sect > opt_iosize &&
719 mirror->next_seq_sect - opt_iosize >=
720 mirror->seq_start) {
721 choose_next_idle = 1;
722 continue;
723 }
724 break;
725 }
726
727 if (choose_next_idle)
728 continue;
729
730 if (min_pending > pending) {
731 min_pending = pending;
732 best_pending_disk = disk;
733 }
734
735 if (dist < best_dist) {
736 best_dist = dist;
737 best_dist_disk = disk;
738 }
739 }
740
741 /*
742 * If all disks are rotational, choose the closest disk. If any disk is
 * non-rotational, choose the disk with the fewest pending requests, even
 * if that disk is rotational; this may or may not be optimal for arrays
 * with mixed rotational/non-rotational disks depending on the workload.
746 */
747 if (best_disk == -1) {
748 if (has_nonrot_disk || min_pending == 0)
749 best_disk = best_pending_disk;
750 else
751 best_disk = best_dist_disk;
752 }
753
754 if (best_disk >= 0) {
755 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
756 if (!rdev)
757 goto retry;
758 atomic_inc(&rdev->nr_pending);
759 sectors = best_good_sectors;
760
761 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
762 conf->mirrors[best_disk].seq_start = this_sector;
763
764 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
765 }
766 rcu_read_unlock();
767 *max_sectors = sectors;
768
769 return best_disk;
770}
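/*
 * Illustrative example of the policy above: with two healthy rotational
 * disks, a read that starts exactly where a disk's previous request ended
 * matches next_seq_sect and sticks to that disk; a read far from both
 * heads falls through to the fallback below the loop, which picks either
 * the least-loaded disk (if any disk is non-rotational or some disk is
 * idle) or the disk whose recorded head position is closest.
 */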
771
772static int raid1_congested(struct mddev *mddev, int bits)
773{
774 struct r1conf *conf = mddev->private;
775 int i, ret = 0;
776
777 if ((bits & (1 << WB_async_congested)) &&
778 conf->pending_count >= max_queued_requests)
779 return 1;
780
781 rcu_read_lock();
782 for (i = 0; i < conf->raid_disks * 2; i++) {
783 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
784 if (rdev && !test_bit(Faulty, &rdev->flags)) {
785 struct request_queue *q = bdev_get_queue(rdev->bdev);
786
787 BUG_ON(!q);
788
789 /* Note the '|| 1' - when read_balance prefers
790 * non-congested targets, it can be removed
791 */
792 if ((bits & (1 << WB_async_congested)) || 1)
793 ret |= bdi_congested(q->backing_dev_info, bits);
794 else
795 ret &= bdi_congested(q->backing_dev_info, bits);
796 }
797 }
798 rcu_read_unlock();
799 return ret;
800}
801
802static void flush_bio_list(struct r1conf *conf, struct bio *bio)
803{
804 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
805 md_bitmap_unplug(conf->mddev->bitmap);
806 wake_up(&conf->wait_barrier);
807
808 while (bio) { /* submit pending writes */
809 struct bio *next = bio->bi_next;
810 struct md_rdev *rdev = (void *)bio->bi_disk;
811 bio->bi_next = NULL;
812 bio_set_dev(bio, rdev->bdev);
813 if (test_bit(Faulty, &rdev->flags)) {
814 bio_io_error(bio);
815 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
816 !blk_queue_discard(bio->bi_disk->queue)))
817 /* Just ignore it */
818 bio_endio(bio);
819 else
820 generic_make_request(bio);
821 bio = next;
822 }
823}
824
825static void flush_pending_writes(struct r1conf *conf)
826{
827 /* Any writes that have been queued but are awaiting
828 * bitmap updates get flushed here.
829 */
830 spin_lock_irq(&conf->device_lock);
831
832 if (conf->pending_bio_list.head) {
833 struct blk_plug plug;
834 struct bio *bio;
835
836 bio = bio_list_get(&conf->pending_bio_list);
837 conf->pending_count = 0;
838 spin_unlock_irq(&conf->device_lock);
839
840 /*
841 * As this is called in a wait_event() loop (see freeze_array),
842 * current->state might be TASK_UNINTERRUPTIBLE which will
843 * cause a warning when we prepare to wait again. As it is
844 * rare that this path is taken, it is perfectly safe to force
845 * us to go around the wait_event() loop again, so the warning
846 * is a false-positive. Silence the warning by resetting
847 * thread state
848 */
849 __set_current_state(TASK_RUNNING);
850 blk_start_plug(&plug);
851 flush_bio_list(conf, bio);
852 blk_finish_plug(&plug);
853 } else
854 spin_unlock_irq(&conf->device_lock);
855}
856
857/* Barriers....
858 * Sometimes we need to suspend IO while we do something else,
859 * either some resync/recovery, or reconfigure the array.
860 * To do this we raise a 'barrier'.
861 * The 'barrier' is a counter that can be raised multiple times
862 * to count how many activities are happening which preclude
863 * normal IO.
864 * We can only raise the barrier if there is no pending IO.
865 * i.e. if nr_pending == 0.
866 * We choose only to raise the barrier if no-one is waiting for the
867 * barrier to go down. This means that as soon as an IO request
868 * is ready, no other operations which require a barrier will start
869 * until the IO request has had a chance.
870 *
871 * So: regular IO calls 'wait_barrier'. When that returns there
 * is no background IO happening; it must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier. Once that returns
 * there is no normal IO happening. It must arrange to call
876 * lower_barrier when the particular background IO completes.
877 *
878 * If resync/recovery is interrupted, returns -EINTR;
879 * Otherwise, returns 0.
880 */
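/*
 * A minimal usage sketch (illustrative only): background resync brackets
 * each barrier unit with raise_barrier()/lower_barrier() (the latter via
 * put_buf() above), while regular I/O brackets each request with
 * wait_barrier()/allow_barrier() (the latter via call_bio_endio() above):
 *
 *	if (raise_barrier(conf, sector_nr))
 *		goto bail;	// interrupted by MD_RECOVERY_INTR
 *	...submit resync bios; put_buf() ends with lower_barrier()...
 *
 *	wait_barrier(conf, bio->bi_iter.bi_sector);
 *	...submit mirrored writes; the completion path calls allow_barrier()...
 */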
881static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
882{
883 int idx = sector_to_idx(sector_nr);
884
885 spin_lock_irq(&conf->resync_lock);
886
887 /* Wait until no block IO is waiting */
888 wait_event_lock_irq(conf->wait_barrier,
889 !atomic_read(&conf->nr_waiting[idx]),
890 conf->resync_lock);
891
892 /* block any new IO from starting */
893 atomic_inc(&conf->barrier[idx]);
894 /*
895 * In raise_barrier() we firstly increase conf->barrier[idx] then
896 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
897 * increase conf->nr_pending[idx] then check conf->barrier[idx].
898 * A memory barrier here to make sure conf->nr_pending[idx] won't
899 * be fetched before conf->barrier[idx] is increased. Otherwise
900 * there will be a race between raise_barrier() and _wait_barrier().
901 */
902 smp_mb__after_atomic();
903
904 /* For these conditions we must wait:
905 * A: while the array is in frozen state
906 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
907 * existing in corresponding I/O barrier bucket.
 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the maximum
 * resync count allowed on the current I/O barrier bucket is reached.
910 */
911 wait_event_lock_irq(conf->wait_barrier,
912 (!conf->array_frozen &&
913 !atomic_read(&conf->nr_pending[idx]) &&
914 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
915 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
916 conf->resync_lock);
917
918 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
919 atomic_dec(&conf->barrier[idx]);
920 spin_unlock_irq(&conf->resync_lock);
921 wake_up(&conf->wait_barrier);
922 return -EINTR;
923 }
924
925 atomic_inc(&conf->nr_sync_pending);
926 spin_unlock_irq(&conf->resync_lock);
927
928 return 0;
929}
930
931static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
932{
933 int idx = sector_to_idx(sector_nr);
934
935 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
936
937 atomic_dec(&conf->barrier[idx]);
938 atomic_dec(&conf->nr_sync_pending);
939 wake_up(&conf->wait_barrier);
940}
941
942static void _wait_barrier(struct r1conf *conf, int idx)
943{
944 /*
945 * We need to increase conf->nr_pending[idx] very early here,
946 * then raise_barrier() can be blocked when it waits for
947 * conf->nr_pending[idx] to be 0. Then we can avoid holding
948 * conf->resync_lock when there is no barrier raised in same
949 * barrier unit bucket. Also if the array is frozen, I/O
950 * should be blocked until array is unfrozen.
951 */
952 atomic_inc(&conf->nr_pending[idx]);
953 /*
954 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
955 * check conf->barrier[idx]. In raise_barrier() we firstly increase
956 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
957 * barrier is necessary here to make sure conf->barrier[idx] won't be
958 * fetched before conf->nr_pending[idx] is increased. Otherwise there
959 * will be a race between _wait_barrier() and raise_barrier().
960 */
961 smp_mb__after_atomic();
962
963 /*
 * Don't worry about checking two atomic_t variables at the same
 * time here. If, while we check conf->barrier[idx], the array is
 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
 * 0, it is safe to return and let the I/O continue. Because the
 * array is frozen, all I/O returned here will eventually complete
 * or be queued, so no race will happen. See the code comment in
 * freeze_array().
971 */
972 if (!READ_ONCE(conf->array_frozen) &&
973 !atomic_read(&conf->barrier[idx]))
974 return;
975
976 /*
977 * After holding conf->resync_lock, conf->nr_pending[idx]
 * should be decreased before waiting for the barrier to drop.
 * Otherwise, we may encounter a race condition because
 * raise_barrier() might be waiting for conf->nr_pending[idx]
 * to be 0 at the same time.
982 */
983 spin_lock_irq(&conf->resync_lock);
984 atomic_inc(&conf->nr_waiting[idx]);
985 atomic_dec(&conf->nr_pending[idx]);
986 /*
987 * In case freeze_array() is waiting for
988 * get_unqueued_pending() == extra
989 */
990 wake_up(&conf->wait_barrier);
991 /* Wait for the barrier in same barrier unit bucket to drop. */
992 wait_event_lock_irq(conf->wait_barrier,
993 !conf->array_frozen &&
994 !atomic_read(&conf->barrier[idx]),
995 conf->resync_lock);
996 atomic_inc(&conf->nr_pending[idx]);
997 atomic_dec(&conf->nr_waiting[idx]);
998 spin_unlock_irq(&conf->resync_lock);
999}
1000
1001static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
1002{
1003 int idx = sector_to_idx(sector_nr);
1004
1005 /*
1006 * Very similar to _wait_barrier(). The difference is, for read
1007 * I/O we don't need wait for sync I/O, but if the whole array
1008 * is frozen, the read I/O still has to wait until the array is
1009 * unfrozen. Since there is no ordering requirement with
1010 * conf->barrier[idx] here, memory barrier is unnecessary as well.
1011 */
1012 atomic_inc(&conf->nr_pending[idx]);
1013
1014 if (!READ_ONCE(conf->array_frozen))
1015 return;
1016
1017 spin_lock_irq(&conf->resync_lock);
1018 atomic_inc(&conf->nr_waiting[idx]);
1019 atomic_dec(&conf->nr_pending[idx]);
1020 /*
1021 * In case freeze_array() is waiting for
1022 * get_unqueued_pending() == extra
1023 */
1024 wake_up(&conf->wait_barrier);
1025 /* Wait for array to be unfrozen */
1026 wait_event_lock_irq(conf->wait_barrier,
1027 !conf->array_frozen,
1028 conf->resync_lock);
1029 atomic_inc(&conf->nr_pending[idx]);
1030 atomic_dec(&conf->nr_waiting[idx]);
1031 spin_unlock_irq(&conf->resync_lock);
1032}
1033
1034static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1035{
1036 int idx = sector_to_idx(sector_nr);
1037
1038 _wait_barrier(conf, idx);
1039}
1040
1041static void _allow_barrier(struct r1conf *conf, int idx)
1042{
1043 atomic_dec(&conf->nr_pending[idx]);
1044 wake_up(&conf->wait_barrier);
1045}
1046
1047static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1048{
1049 int idx = sector_to_idx(sector_nr);
1050
1051 _allow_barrier(conf, idx);
1052}
1053
1054/* conf->resync_lock should be held */
1055static int get_unqueued_pending(struct r1conf *conf)
1056{
1057 int idx, ret;
1058
1059 ret = atomic_read(&conf->nr_sync_pending);
1060 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1061 ret += atomic_read(&conf->nr_pending[idx]) -
1062 atomic_read(&conf->nr_queued[idx]);
1063
1064 return ret;
1065}
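/*
 * get_unqueued_pending() therefore counts the I/Os that are still in
 * flight and have not been parked on conf->retry_list: every queued r1bio
 * bumped nr_queued[] in reschedule_retry(), so pending minus queued (plus
 * the sync count) is what freeze_array() below must wait to drain down to
 * 'extra'.
 */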
1066
1067static void freeze_array(struct r1conf *conf, int extra)
1068{
1069 /* Stop sync I/O and normal I/O and wait for everything to
1070 * go quiet.
1071 * This is called in two situations:
1072 * 1) management command handlers (reshape, remove disk, quiesce).
1073 * 2) one normal I/O request failed.
1074
1075 * After array_frozen is set to 1, new sync IO will be blocked at
 * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
 * or wait_read_barrier(). The in-flight I/Os will either complete or be
 * queued. When everything goes quiet, there are only queued I/Os left.
 *
 * Every in-flight I/O contributes to conf->nr_pending[idx], where idx is
 * the barrier bucket index which this I/O request hits. When all sync and
 * normal I/O are queued, the sum of all conf->nr_pending[] will match the
 * sum of all conf->nr_queued[]. But a normal I/O failure is an exception:
 * in handle_read_error(), we may call freeze_array() before trying to
 * fix the read error. In this case, the failed read I/O is not queued,
 * so get_unqueued_pending() == 1.
 *
 * Therefore, before this function returns, we need to wait until
 * get_unqueued_pending(conf) becomes equal to extra. For the
 * normal I/O context extra is 1; in all other situations extra is 0.
1091 */
1092 spin_lock_irq(&conf->resync_lock);
1093 conf->array_frozen = 1;
1094 raid1_log(conf->mddev, "wait freeze");
1095 wait_event_lock_irq_cmd(
1096 conf->wait_barrier,
1097 get_unqueued_pending(conf) == extra,
1098 conf->resync_lock,
1099 flush_pending_writes(conf));
1100 spin_unlock_irq(&conf->resync_lock);
1101}
1102static void unfreeze_array(struct r1conf *conf)
1103{
1104 /* reverse the effect of the freeze */
1105 spin_lock_irq(&conf->resync_lock);
1106 conf->array_frozen = 0;
1107 spin_unlock_irq(&conf->resync_lock);
1108 wake_up(&conf->wait_barrier);
1109}
1110
1111static void alloc_behind_master_bio(struct r1bio *r1_bio,
1112 struct bio *bio)
1113{
1114 int size = bio->bi_iter.bi_size;
1115 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1116 int i = 0;
1117 struct bio *behind_bio = NULL;
1118
1119 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1120 if (!behind_bio)
1121 return;
1122
1123 /* discard op, we don't support writezero/writesame yet */
1124 if (!bio_has_data(bio)) {
1125 behind_bio->bi_iter.bi_size = size;
1126 goto skip_copy;
1127 }
1128
1129 behind_bio->bi_write_hint = bio->bi_write_hint;
1130
1131 while (i < vcnt && size) {
1132 struct page *page;
1133 int len = min_t(int, PAGE_SIZE, size);
1134
1135 page = alloc_page(GFP_NOIO);
1136 if (unlikely(!page))
1137 goto free_pages;
1138
1139 bio_add_page(behind_bio, page, len, 0);
1140
1141 size -= len;
1142 i++;
1143 }
1144
1145 bio_copy_data(behind_bio, bio);
1146skip_copy:
1147 r1_bio->behind_master_bio = behind_bio;
1148 set_bit(R1BIO_BehindIO, &r1_bio->state);
1149
1150 return;
1151
1152free_pages:
1153 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1154 bio->bi_iter.bi_size);
1155 bio_free_pages(behind_bio);
1156 bio_put(behind_bio);
1157}
1158
1159struct raid1_plug_cb {
1160 struct blk_plug_cb cb;
1161 struct bio_list pending;
1162 int pending_cnt;
1163};
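/*
 * raid1_unplug() below drains the per-plug 'pending' list: if the unplug
 * happens from a context that cannot issue I/O directly (from_schedule, or
 * a non-empty current->bio_list), the bios are handed to pending_bio_list
 * for raid1d/flush_pending_writes(); otherwise they are written out
 * immediately via flush_bio_list().
 */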
1164
1165static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1166{
1167 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1168 cb);
1169 struct mddev *mddev = plug->cb.data;
1170 struct r1conf *conf = mddev->private;
1171 struct bio *bio;
1172
1173 if (from_schedule || current->bio_list) {
1174 spin_lock_irq(&conf->device_lock);
1175 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1176 conf->pending_count += plug->pending_cnt;
1177 spin_unlock_irq(&conf->device_lock);
1178 wake_up(&conf->wait_barrier);
1179 md_wakeup_thread(mddev->thread);
1180 kfree(plug);
1181 return;
1182 }
1183
1184 /* we aren't scheduling, so we can do the write-out directly. */
1185 bio = bio_list_get(&plug->pending);
1186 flush_bio_list(conf, bio);
1187 kfree(plug);
1188}
1189
1190static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1191{
1192 r1_bio->master_bio = bio;
1193 r1_bio->sectors = bio_sectors(bio);
1194 r1_bio->state = 0;
1195 r1_bio->mddev = mddev;
1196 r1_bio->sector = bio->bi_iter.bi_sector;
1197}
1198
1199static inline struct r1bio *
1200alloc_r1bio(struct mddev *mddev, struct bio *bio)
1201{
1202 struct r1conf *conf = mddev->private;
1203 struct r1bio *r1_bio;
1204
1205 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1206 /* Ensure no bio records IO_BLOCKED */
1207 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1208 init_r1bio(r1_bio, mddev, bio);
1209 return r1_bio;
1210}
1211
1212static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1213 int max_read_sectors, struct r1bio *r1_bio)
1214{
1215 struct r1conf *conf = mddev->private;
1216 struct raid1_info *mirror;
1217 struct bio *read_bio;
1218 struct bitmap *bitmap = mddev->bitmap;
1219 const int op = bio_op(bio);
1220 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1221 int max_sectors;
1222 int rdisk;
1223 bool print_msg = !!r1_bio;
1224 char b[BDEVNAME_SIZE];
1225
1226 /*
1227 * If r1_bio is set, we are blocking the raid1d thread
1228 * so there is a tiny risk of deadlock. So ask for
1229 * emergency memory if needed.
1230 */
1231 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1232
1233 if (print_msg) {
1234 /* Need to get the block device name carefully */
1235 struct md_rdev *rdev;
1236 rcu_read_lock();
1237 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1238 if (rdev)
1239 bdevname(rdev->bdev, b);
1240 else
1241 strcpy(b, "???");
1242 rcu_read_unlock();
1243 }
1244
1245 /*
1246 * Still need barrier for READ in case that whole
1247 * array is frozen.
1248 */
1249 wait_read_barrier(conf, bio->bi_iter.bi_sector);
1250
1251 if (!r1_bio)
1252 r1_bio = alloc_r1bio(mddev, bio);
1253 else
1254 init_r1bio(r1_bio, mddev, bio);
1255 r1_bio->sectors = max_read_sectors;
1256
1257 /*
1258 * make_request() can abort the operation when read-ahead is being
1259 * used and no empty request is available.
1260 */
1261 rdisk = read_balance(conf, r1_bio, &max_sectors);
1262
1263 if (rdisk < 0) {
1264 /* couldn't find anywhere to read from */
1265 if (print_msg) {
1266 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1267 mdname(mddev),
1268 b,
1269 (unsigned long long)r1_bio->sector);
1270 }
1271 raid_end_bio_io(r1_bio);
1272 return;
1273 }
1274 mirror = conf->mirrors + rdisk;
1275
1276 if (print_msg)
1277 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1278 mdname(mddev),
1279 (unsigned long long)r1_bio->sector,
1280 bdevname(mirror->rdev->bdev, b));
1281
1282 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1283 bitmap) {
1284 /*
1285 * Reading from a write-mostly device must take care not to
1286 * over-take any writes that are 'behind'
1287 */
1288 raid1_log(mddev, "wait behind writes");
1289 wait_event(bitmap->behind_wait,
1290 atomic_read(&bitmap->behind_writes) == 0);
1291 }
1292
1293 if (max_sectors < bio_sectors(bio)) {
1294 struct bio *split = bio_split(bio, max_sectors,
1295 gfp, &conf->bio_split);
1296 bio_chain(split, bio);
1297 generic_make_request(bio);
1298 bio = split;
1299 r1_bio->master_bio = bio;
1300 r1_bio->sectors = max_sectors;
1301 }
1302
1303 r1_bio->read_disk = rdisk;
1304
1305 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1306
1307 r1_bio->bios[rdisk] = read_bio;
1308
1309 read_bio->bi_iter.bi_sector = r1_bio->sector +
1310 mirror->rdev->data_offset;
1311 bio_set_dev(read_bio, mirror->rdev->bdev);
1312 read_bio->bi_end_io = raid1_end_read_request;
1313 bio_set_op_attrs(read_bio, op, do_sync);
1314 if (test_bit(FailFast, &mirror->rdev->flags) &&
1315 test_bit(R1BIO_FailFast, &r1_bio->state))
1316 read_bio->bi_opf |= MD_FAILFAST;
1317 read_bio->bi_private = r1_bio;
1318
1319 if (mddev->gendisk)
1320 trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
1321 disk_devt(mddev->gendisk), r1_bio->sector);
1322
1323 generic_make_request(read_bio);
1324}
1325
1326static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1327 int max_write_sectors)
1328{
1329 struct r1conf *conf = mddev->private;
1330 struct r1bio *r1_bio;
1331 int i, disks;
1332 struct bitmap *bitmap = mddev->bitmap;
1333 unsigned long flags;
1334 struct md_rdev *blocked_rdev;
1335 struct blk_plug_cb *cb;
1336 struct raid1_plug_cb *plug = NULL;
1337 int first_clone;
1338 int max_sectors;
1339
1340 if (mddev_is_clustered(mddev) &&
1341 md_cluster_ops->area_resyncing(mddev, WRITE,
1342 bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1343
1344 DEFINE_WAIT(w);
1345 for (;;) {
1346 prepare_to_wait(&conf->wait_barrier,
1347 &w, TASK_IDLE);
1348 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1349 bio->bi_iter.bi_sector,
1350 bio_end_sector(bio)))
1351 break;
1352 schedule();
1353 }
1354 finish_wait(&conf->wait_barrier, &w);
1355 }
1356
1357 /*
1358 * Register the new request and wait if the reconstruction
1359 * thread has put up a bar for new requests.
1360 * Continue immediately if no resync is active currently.
1361 */
1362 wait_barrier(conf, bio->bi_iter.bi_sector);
1363
1364 r1_bio = alloc_r1bio(mddev, bio);
1365 r1_bio->sectors = max_write_sectors;
1366
1367 if (conf->pending_count >= max_queued_requests) {
1368 md_wakeup_thread(mddev->thread);
1369 raid1_log(mddev, "wait queued");
1370 wait_event(conf->wait_barrier,
1371 conf->pending_count < max_queued_requests);
1372 }
1373 /* first select target devices under rcu_lock and
1374 * inc refcount on their rdev. Record them by setting
1375 * bios[x] to bio
1376 * If there are known/acknowledged bad blocks on any device on
1377 * which we have seen a write error, we want to avoid writing those
1378 * blocks.
1379 * This potentially requires several writes to write around
 * the bad blocks. Each set of writes gets its own r1bio
1381 * with a set of bios attached.
1382 */
1383
1384 disks = conf->raid_disks * 2;
1385 retry_write:
1386 blocked_rdev = NULL;
1387 rcu_read_lock();
1388 max_sectors = r1_bio->sectors;
1389 for (i = 0; i < disks; i++) {
1390 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1391 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1392 atomic_inc(&rdev->nr_pending);
1393 blocked_rdev = rdev;
1394 break;
1395 }
1396 r1_bio->bios[i] = NULL;
1397 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1398 if (i < conf->raid_disks)
1399 set_bit(R1BIO_Degraded, &r1_bio->state);
1400 continue;
1401 }
1402
1403 atomic_inc(&rdev->nr_pending);
1404 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1405 sector_t first_bad;
1406 int bad_sectors;
1407 int is_bad;
1408
1409 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1410 &first_bad, &bad_sectors);
1411 if (is_bad < 0) {
1412 /* mustn't write here until the bad block is
1413 * acknowledged*/
1414 set_bit(BlockedBadBlocks, &rdev->flags);
1415 blocked_rdev = rdev;
1416 break;
1417 }
1418 if (is_bad && first_bad <= r1_bio->sector) {
1419 /* Cannot write here at all */
1420 bad_sectors -= (r1_bio->sector - first_bad);
1421 if (bad_sectors < max_sectors)
1422 /* mustn't write more than bad_sectors
1423 * to other devices yet
1424 */
1425 max_sectors = bad_sectors;
1426 rdev_dec_pending(rdev, mddev);
1427 /* We don't set R1BIO_Degraded as that
1428 * only applies if the disk is
1429 * missing, so it might be re-added,
1430 * and we want to know to recover this
1431 * chunk.
1432 * In this case the device is here,
1433 * and the fact that this chunk is not
1434 * in-sync is recorded in the bad
1435 * block log
1436 */
1437 continue;
1438 }
1439 if (is_bad) {
1440 int good_sectors = first_bad - r1_bio->sector;
1441 if (good_sectors < max_sectors)
1442 max_sectors = good_sectors;
1443 }
1444 }
1445 r1_bio->bios[i] = bio;
1446 }
1447 rcu_read_unlock();
1448
1449 if (unlikely(blocked_rdev)) {
1450 /* Wait for this device to become unblocked */
1451 int j;
1452
1453 for (j = 0; j < i; j++)
1454 if (r1_bio->bios[j])
1455 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1456 r1_bio->state = 0;
1457 allow_barrier(conf, bio->bi_iter.bi_sector);
1458 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1459 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1460 wait_barrier(conf, bio->bi_iter.bi_sector);
1461 goto retry_write;
1462 }
1463
1464 if (max_sectors < bio_sectors(bio)) {
1465 struct bio *split = bio_split(bio, max_sectors,
1466 GFP_NOIO, &conf->bio_split);
1467 bio_chain(split, bio);
1468 generic_make_request(bio);
1469 bio = split;
1470 r1_bio->master_bio = bio;
1471 r1_bio->sectors = max_sectors;
1472 }
1473
1474 atomic_set(&r1_bio->remaining, 1);
1475 atomic_set(&r1_bio->behind_remaining, 0);
1476
1477 first_clone = 1;
1478
1479 for (i = 0; i < disks; i++) {
1480 struct bio *mbio = NULL;
1481 if (!r1_bio->bios[i])
1482 continue;
1483
1484 if (first_clone) {
 /* Should we do behind I/O? Not if there are too many
 * outstanding, or we cannot allocate memory, or a reader
 * on a WriteMostly device is waiting for behind writes
 * to flush */
1489 if (bitmap &&
1490 (atomic_read(&bitmap->behind_writes)
1491 < mddev->bitmap_info.max_write_behind) &&
1492 !waitqueue_active(&bitmap->behind_wait)) {
1493 alloc_behind_master_bio(r1_bio, bio);
1494 }
1495
1496 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1497 test_bit(R1BIO_BehindIO, &r1_bio->state));
1498 first_clone = 0;
1499 }
1500
1501 if (r1_bio->behind_master_bio)
1502 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1503 GFP_NOIO, &mddev->bio_set);
1504 else
1505 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1506
1507 if (r1_bio->behind_master_bio) {
1508 struct md_rdev *rdev = conf->mirrors[i].rdev;
1509
1510 if (test_bit(WBCollisionCheck, &rdev->flags)) {
1511 sector_t lo = r1_bio->sector;
1512 sector_t hi = r1_bio->sector + r1_bio->sectors;
1513
1514 wait_event(rdev->wb_io_wait,
1515 check_and_add_wb(rdev, lo, hi) == 0);
1516 }
1517 if (test_bit(WriteMostly, &rdev->flags))
1518 atomic_inc(&r1_bio->behind_remaining);
1519 }
1520
1521 r1_bio->bios[i] = mbio;
1522
1523 mbio->bi_iter.bi_sector = (r1_bio->sector +
1524 conf->mirrors[i].rdev->data_offset);
1525 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1526 mbio->bi_end_io = raid1_end_write_request;
1527 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1528 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1529 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1530 conf->raid_disks - mddev->degraded > 1)
1531 mbio->bi_opf |= MD_FAILFAST;
1532 mbio->bi_private = r1_bio;
1533
1534 atomic_inc(&r1_bio->remaining);
1535
1536 if (mddev->gendisk)
1537 trace_block_bio_remap(mbio->bi_disk->queue,
1538 mbio, disk_devt(mddev->gendisk),
1539 r1_bio->sector);
1540 /* flush_pending_writes() needs access to the rdev so...*/
1541 mbio->bi_disk = (void *)conf->mirrors[i].rdev;
1542
1543 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1544 if (cb)
1545 plug = container_of(cb, struct raid1_plug_cb, cb);
1546 else
1547 plug = NULL;
1548 if (plug) {
1549 bio_list_add(&plug->pending, mbio);
1550 plug->pending_cnt++;
1551 } else {
1552 spin_lock_irqsave(&conf->device_lock, flags);
1553 bio_list_add(&conf->pending_bio_list, mbio);
1554 conf->pending_count++;
1555 spin_unlock_irqrestore(&conf->device_lock, flags);
1556 md_wakeup_thread(mddev->thread);
1557 }
1558 }
1559
1560 r1_bio_write_done(r1_bio);
1561
1562 /* In case raid1d snuck in to freeze_array */
1563 wake_up(&conf->wait_barrier);
1564}
1565
1566static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1567{
1568 sector_t sectors;
1569
1570 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1571 md_flush_request(mddev, bio);
1572 return true;
1573 }
1574
1575 /*
1576 * There is a limit to the maximum size, but
1577 * the read/write handler might find a lower limit
1578 * due to bad blocks. To avoid multiple splits,
1579 * we pass the maximum number of sectors down
1580 * and let the lower level perform the split.
1581 */
1582 sectors = align_to_barrier_unit_end(
1583 bio->bi_iter.bi_sector, bio_sectors(bio));
1584
1585 if (bio_data_dir(bio) == READ)
1586 raid1_read_request(mddev, bio, sectors, NULL);
1587 else {
1588 if (!md_write_start(mddev,bio))
1589 return false;
1590 raid1_write_request(mddev, bio, sectors);
1591 }
1592 return true;
1593}
1594
1595static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1596{
1597 struct r1conf *conf = mddev->private;
1598 int i;
1599
1600 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1601 conf->raid_disks - mddev->degraded);
1602 rcu_read_lock();
1603 for (i = 0; i < conf->raid_disks; i++) {
1604 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1605 seq_printf(seq, "%s",
1606 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1607 }
1608 rcu_read_unlock();
1609 seq_printf(seq, "]");
1610}
1611
1612static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1613{
1614 char b[BDEVNAME_SIZE];
1615 struct r1conf *conf = mddev->private;
1616 unsigned long flags;
1617
1618 /*
 * If it is not operational, then we have already marked it as dead;
 * else if it is the last working disk and "fail_last_dev == false",
 * ignore the error and let the next level up know;
 * else mark the drive as failed.
1623 */
1624 spin_lock_irqsave(&conf->device_lock, flags);
1625 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1626 && (conf->raid_disks - mddev->degraded) == 1) {
1627 /*
1628 * Don't fail the drive, act as though we were just a
1629 * normal single drive.
1630 * However don't try a recovery from this drive as
1631 * it is very likely to fail.
1632 */
1633 conf->recovery_disabled = mddev->recovery_disabled;
1634 spin_unlock_irqrestore(&conf->device_lock, flags);
1635 return;
1636 }
1637 set_bit(Blocked, &rdev->flags);
1638 if (test_and_clear_bit(In_sync, &rdev->flags))
1639 mddev->degraded++;
1640 set_bit(Faulty, &rdev->flags);
1641 spin_unlock_irqrestore(&conf->device_lock, flags);
1642 /*
1643 * if recovery is running, make sure it aborts.
1644 */
1645 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1646 set_mask_bits(&mddev->sb_flags, 0,
1647 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1648 pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1649 "md/raid1:%s: Operation continuing on %d devices.\n",
1650 mdname(mddev), bdevname(rdev->bdev, b),
1651 mdname(mddev), conf->raid_disks - mddev->degraded);
1652}
1653
1654static void print_conf(struct r1conf *conf)
1655{
1656 int i;
1657
1658 pr_debug("RAID1 conf printout:\n");
1659 if (!conf) {
1660 pr_debug("(!conf)\n");
1661 return;
1662 }
1663 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1664 conf->raid_disks);
1665
1666 rcu_read_lock();
1667 for (i = 0; i < conf->raid_disks; i++) {
1668 char b[BDEVNAME_SIZE];
1669 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1670 if (rdev)
1671 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1672 i, !test_bit(In_sync, &rdev->flags),
1673 !test_bit(Faulty, &rdev->flags),
1674 bdevname(rdev->bdev,b));
1675 }
1676 rcu_read_unlock();
1677}
1678
1679static void close_sync(struct r1conf *conf)
1680{
1681 int idx;
1682
1683 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1684 _wait_barrier(conf, idx);
1685 _allow_barrier(conf, idx);
1686 }
1687
1688 mempool_exit(&conf->r1buf_pool);
1689}
1690
1691static int raid1_spare_active(struct mddev *mddev)
1692{
1693 int i;
1694 struct r1conf *conf = mddev->private;
1695 int count = 0;
1696 unsigned long flags;
1697
1698 /*
1699 * Find all failed disks within the RAID1 configuration
1700 * and mark them readable.
1701 * Called under mddev lock, so rcu protection not needed.
1702 * device_lock used to avoid races with raid1_end_read_request
1703 * which expects 'In_sync' flags and ->degraded to be consistent.
1704 */
1705 spin_lock_irqsave(&conf->device_lock, flags);
1706 for (i = 0; i < conf->raid_disks; i++) {
1707 struct md_rdev *rdev = conf->mirrors[i].rdev;
1708 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1709 if (repl
1710 && !test_bit(Candidate, &repl->flags)
1711 && repl->recovery_offset == MaxSector
1712 && !test_bit(Faulty, &repl->flags)
1713 && !test_and_set_bit(In_sync, &repl->flags)) {
1714 /* replacement has just become active */
1715 if (!rdev ||
1716 !test_and_clear_bit(In_sync, &rdev->flags))
1717 count++;
1718 if (rdev) {
1719 /* Replaced device not technically
1720 * faulty, but we need to be sure
1721 * it gets removed and never re-added
1722 */
1723 set_bit(Faulty, &rdev->flags);
1724 sysfs_notify_dirent_safe(
1725 rdev->sysfs_state);
1726 }
1727 }
1728 if (rdev
1729 && rdev->recovery_offset == MaxSector
1730 && !test_bit(Faulty, &rdev->flags)
1731 && !test_and_set_bit(In_sync, &rdev->flags)) {
1732 count++;
1733 sysfs_notify_dirent_safe(rdev->sysfs_state);
1734 }
1735 }
1736 mddev->degraded -= count;
1737 spin_unlock_irqrestore(&conf->device_lock, flags);
1738
1739 print_conf(conf);
1740 return count;
1741}
1742
1743static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1744{
1745 struct r1conf *conf = mddev->private;
1746 int err = -EEXIST;
1747 int mirror = 0;
1748 struct raid1_info *p;
1749 int first = 0;
1750 int last = conf->raid_disks - 1;
1751
1752 if (mddev->recovery_disabled == conf->recovery_disabled)
1753 return -EBUSY;
1754
1755 if (md_integrity_add_rdev(rdev, mddev))
1756 return -ENXIO;
1757
1758 if (rdev->raid_disk >= 0)
1759 first = last = rdev->raid_disk;
1760
1761 /*
1762 * find the disk ... but prefer rdev->saved_raid_disk
1763 * if possible.
1764 */
1765 if (rdev->saved_raid_disk >= 0 &&
1766 rdev->saved_raid_disk >= first &&
1767 rdev->saved_raid_disk < conf->raid_disks &&
1768 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1769 first = last = rdev->saved_raid_disk;
1770
1771 for (mirror = first; mirror <= last; mirror++) {
1772 p = conf->mirrors + mirror;
1773 if (!p->rdev) {
1774 if (mddev->gendisk)
1775 disk_stack_limits(mddev->gendisk, rdev->bdev,
1776 rdev->data_offset << 9);
1777
1778 p->head_position = 0;
1779 rdev->raid_disk = mirror;
1780 err = 0;
1781 /* As all devices are equivalent, we don't need a full recovery
 * if this device was recently a member of the array
1783 */
1784 if (rdev->saved_raid_disk < 0)
1785 conf->fullsync = 1;
1786 rcu_assign_pointer(p->rdev, rdev);
1787 break;
1788 }
1789 if (test_bit(WantReplacement, &p->rdev->flags) &&
1790 p[conf->raid_disks].rdev == NULL) {
1791 /* Add this device as a replacement */
1792 clear_bit(In_sync, &rdev->flags);
1793 set_bit(Replacement, &rdev->flags);
1794 rdev->raid_disk = mirror;
1795 err = 0;
1796 conf->fullsync = 1;
1797 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1798 break;
1799 }
1800 }
1801 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1802 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1803 print_conf(conf);
1804 return err;
1805}
1806
1807static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1808{
1809 struct r1conf *conf = mddev->private;
1810 int err = 0;
1811 int number = rdev->raid_disk;
1812 struct raid1_info *p = conf->mirrors + number;
1813
1814 if (rdev != p->rdev)
1815 p = conf->mirrors + conf->raid_disks + number;
1816
1817 print_conf(conf);
1818 if (rdev == p->rdev) {
1819 if (test_bit(In_sync, &rdev->flags) ||
1820 atomic_read(&rdev->nr_pending)) {
1821 err = -EBUSY;
1822 goto abort;
1823 }
1824 /* Only remove non-faulty devices if recovery
1825 * is not possible.
1826 */
1827 if (!test_bit(Faulty, &rdev->flags) &&
1828 mddev->recovery_disabled != conf->recovery_disabled &&
1829 mddev->degraded < conf->raid_disks) {
1830 err = -EBUSY;
1831 goto abort;
1832 }
1833 p->rdev = NULL;
1834 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1835 synchronize_rcu();
1836 if (atomic_read(&rdev->nr_pending)) {
1837 /* lost the race, try later */
1838 err = -EBUSY;
1839 p->rdev = rdev;
1840 goto abort;
1841 }
1842 }
1843 if (conf->mirrors[conf->raid_disks + number].rdev) {
1844 /* We just removed a device that is being replaced.
1845 * Move down the replacement. We drain all IO before
1846 * doing this to avoid confusion.
1847 */
1848 struct md_rdev *repl =
1849 conf->mirrors[conf->raid_disks + number].rdev;
1850 freeze_array(conf, 0);
1851 if (atomic_read(&repl->nr_pending)) {
1852 /* It means that some queued IO of retry_list
1853				/* Some queued IO on retry_list still holds a
1854				 * reference to repl, so we must not clear the
1855				 * replacement pointer yet: doing so could cause
1856				 * a NULL rdev dereference in sync_request_write
1857				 * and handle_write_finished.
1858 err = -EBUSY;
1859 unfreeze_array(conf);
1860 goto abort;
1861 }
1862 clear_bit(Replacement, &repl->flags);
1863 p->rdev = repl;
1864 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1865 unfreeze_array(conf);
1866 }
1867
1868 clear_bit(WantReplacement, &rdev->flags);
1869 err = md_integrity_register(mddev);
1870 }
1871abort:
1872
1873 print_conf(conf);
1874 return err;
1875}
1876
1877static void end_sync_read(struct bio *bio)
1878{
1879 struct r1bio *r1_bio = get_resync_r1bio(bio);
1880
1881 update_head_pos(r1_bio->read_disk, r1_bio);
1882
1883 /*
1884 * we have read a block, now it needs to be re-written,
1885 * or re-read if the read failed.
1886 * We don't do much here, just schedule handling by raid1d
1887 */
1888 if (!bio->bi_status)
1889 set_bit(R1BIO_Uptodate, &r1_bio->state);
1890
1891 if (atomic_dec_and_test(&r1_bio->remaining))
1892 reschedule_retry(r1_bio);
1893}
1894
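/*
 * A resync write is being skipped or has failed: walk the affected
 * range and tell the bitmap that the sync did not complete, so the
 * corresponding bits stay dirty and the blocks are retried later.
 */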
1895static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1896{
1897 sector_t sync_blocks = 0;
1898 sector_t s = r1_bio->sector;
1899 long sectors_to_go = r1_bio->sectors;
1900
1901 /* make sure these bits don't get cleared. */
1902 do {
1903 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1904 s += sync_blocks;
1905 sectors_to_go -= sync_blocks;
1906 } while (sectors_to_go > 0);
1907}
1908
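/*
 * Drop one reference on a resync r1_bio.  When the last writer is
 * done, either hand the r1_bio back to raid1d for bad-block
 * bookkeeping (MadeGood/WriteError) or free the buffer and report the
 * completed stretch of the sync to md.
 */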
1909static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1910{
1911 if (atomic_dec_and_test(&r1_bio->remaining)) {
1912 struct mddev *mddev = r1_bio->mddev;
1913 int s = r1_bio->sectors;
1914
1915 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1916 test_bit(R1BIO_WriteError, &r1_bio->state))
1917 reschedule_retry(r1_bio);
1918 else {
1919 put_buf(r1_bio);
1920 md_done_sync(mddev, s, uptodate);
1921 }
1922 }
1923}
1924
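/*
 * Completion handler for resync/recovery writes: on error, mark the
 * device as having seen a write error and flag the r1_bio so raid1d
 * can record a bad block; on success, note when a previously bad
 * block has now been written correctly (MadeGood).
 */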
1925static void end_sync_write(struct bio *bio)
1926{
1927 int uptodate = !bio->bi_status;
1928 struct r1bio *r1_bio = get_resync_r1bio(bio);
1929 struct mddev *mddev = r1_bio->mddev;
1930 struct r1conf *conf = mddev->private;
1931 sector_t first_bad;
1932 int bad_sectors;
1933 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1934
1935 if (!uptodate) {
1936 abort_sync_write(mddev, r1_bio);
1937 set_bit(WriteErrorSeen, &rdev->flags);
1938 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1939 set_bit(MD_RECOVERY_NEEDED, &
1940 mddev->recovery);
1941 set_bit(R1BIO_WriteError, &r1_bio->state);
1942 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1943 &first_bad, &bad_sectors) &&
1944 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1945 r1_bio->sector,
1946 r1_bio->sectors,
1947 &first_bad, &bad_sectors)
1948 )
1949 set_bit(R1BIO_MadeGood, &r1_bio->state);
1950
1951 put_sync_write_buf(r1_bio, uptodate);
1952}
1953
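/*
 * Synchronously read or write up to one page on 'rdev'.  Returns 1 on
 * success.  On failure the error is recorded: write errors mark the
 * device as wanting a replacement, and the failing range is added to
 * the bad-block list (or the device is failed if that is impossible).
 */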
1954static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1955 int sectors, struct page *page, int rw)
1956{
1957 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1958 /* success */
1959 return 1;
1960 if (rw == WRITE) {
1961 set_bit(WriteErrorSeen, &rdev->flags);
1962 if (!test_and_set_bit(WantReplacement,
1963 &rdev->flags))
1964 set_bit(MD_RECOVERY_NEEDED, &
1965 rdev->mddev->recovery);
1966 }
1967 /* need to record an error - either for the block or the device */
1968 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1969 md_error(rdev->mddev, rdev);
1970 return 0;
1971}
1972
1973static int fix_sync_read_error(struct r1bio *r1_bio)
1974{
1975 /* Try some synchronous reads of other devices to get
1976 * good data, much like with normal read errors. Only
1977 * read into the pages we already have so we don't
1978 * need to re-issue the read request.
1979 * We don't need to freeze the array, because being in an
1980 * active sync request, there is no normal IO, and
1981 * no overlapping syncs.
1982 * We don't need to check is_badblock() again as we
1983 * made sure that anything with a bad block in range
1984 * will have bi_end_io clear.
1985 */
1986 struct mddev *mddev = r1_bio->mddev;
1987 struct r1conf *conf = mddev->private;
1988 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1989 struct page **pages = get_resync_pages(bio)->pages;
1990 sector_t sect = r1_bio->sector;
1991 int sectors = r1_bio->sectors;
1992 int idx = 0;
1993 struct md_rdev *rdev;
1994
1995 rdev = conf->mirrors[r1_bio->read_disk].rdev;
1996 if (test_bit(FailFast, &rdev->flags)) {
1997 /* Don't try recovering from here - just fail it
1998 * ... unless it is the last working device of course */
1999 md_error(mddev, rdev);
2000 if (test_bit(Faulty, &rdev->flags))
2001 /* Don't try to read from here, but make sure
2002			 * put_buf does its thing
2003 */
2004 bio->bi_end_io = end_sync_write;
2005 }
2006
2007	while (sectors) {
2008 int s = sectors;
2009 int d = r1_bio->read_disk;
2010 int success = 0;
2011 int start;
2012
2013 if (s > (PAGE_SIZE>>9))
2014 s = PAGE_SIZE >> 9;
2015 do {
2016 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2017				/* No rcu protection needed here; devices
2018 * can only be removed when no resync is
2019 * active, and resync is currently active
2020 */
2021 rdev = conf->mirrors[d].rdev;
2022 if (sync_page_io(rdev, sect, s<<9,
2023 pages[idx],
2024 REQ_OP_READ, 0, false)) {
2025 success = 1;
2026 break;
2027 }
2028 }
2029 d++;
2030 if (d == conf->raid_disks * 2)
2031 d = 0;
2032 } while (!success && d != r1_bio->read_disk);
2033
2034 if (!success) {
2035 char b[BDEVNAME_SIZE];
2036 int abort = 0;
2037 /* Cannot read from anywhere, this block is lost.
2038 * Record a bad block on each device. If that doesn't
2039 * work just disable and interrupt the recovery.
2040 * Don't fail devices as that won't really help.
2041 */
2042 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2043 mdname(mddev), bio_devname(bio, b),
2044 (unsigned long long)r1_bio->sector);
2045 for (d = 0; d < conf->raid_disks * 2; d++) {
2046 rdev = conf->mirrors[d].rdev;
2047 if (!rdev || test_bit(Faulty, &rdev->flags))
2048 continue;
2049 if (!rdev_set_badblocks(rdev, sect, s, 0))
2050 abort = 1;
2051 }
2052 if (abort) {
2053 conf->recovery_disabled =
2054 mddev->recovery_disabled;
2055 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2056 md_done_sync(mddev, r1_bio->sectors, 0);
2057 put_buf(r1_bio);
2058 return 0;
2059 }
2060 /* Try next page */
2061 sectors -= s;
2062 sect += s;
2063 idx++;
2064 continue;
2065 }
2066
2067 start = d;
2068 /* write it back and re-read */
2069 while (d != r1_bio->read_disk) {
2070 if (d == 0)
2071 d = conf->raid_disks * 2;
2072 d--;
2073 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2074 continue;
2075 rdev = conf->mirrors[d].rdev;
2076 if (r1_sync_page_io(rdev, sect, s,
2077 pages[idx],
2078 WRITE) == 0) {
2079 r1_bio->bios[d]->bi_end_io = NULL;
2080 rdev_dec_pending(rdev, mddev);
2081 }
2082 }
2083 d = start;
2084 while (d != r1_bio->read_disk) {
2085 if (d == 0)
2086 d = conf->raid_disks * 2;
2087 d--;
2088 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2089 continue;
2090 rdev = conf->mirrors[d].rdev;
2091 if (r1_sync_page_io(rdev, sect, s,
2092 pages[idx],
2093 READ) != 0)
2094 atomic_add(s, &rdev->corrected_errors);
2095 }
2096 sectors -= s;
2097 sect += s;
2098		idx++;
2099 }
2100 set_bit(R1BIO_Uptodate, &r1_bio->state);
2101 bio->bi_status = 0;
2102 return 1;
2103}
2104
2105static void process_checks(struct r1bio *r1_bio)
2106{
2107 /* We have read all readable devices. If we haven't
2108 * got the block, then there is no hope left.
2109 * If we have, then we want to do a comparison
2110 * and skip the write if everything is the same.
2111 * If any blocks failed to read, then we need to
2112	 * attempt an over-write.
2113 */
2114 struct mddev *mddev = r1_bio->mddev;
2115 struct r1conf *conf = mddev->private;
2116 int primary;
2117 int i;
2118 int vcnt;
2119
2120 /* Fix variable parts of all bios */
2121 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2122 for (i = 0; i < conf->raid_disks * 2; i++) {
2123 blk_status_t status;
2124 struct bio *b = r1_bio->bios[i];
2125 struct resync_pages *rp = get_resync_pages(b);
2126 if (b->bi_end_io != end_sync_read)
2127 continue;
2128 /* fixup the bio for reuse, but preserve errno */
2129 status = b->bi_status;
2130 bio_reset(b);
2131 b->bi_status = status;
2132 b->bi_iter.bi_sector = r1_bio->sector +
2133 conf->mirrors[i].rdev->data_offset;
2134 bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2135 b->bi_end_io = end_sync_read;
2136 rp->raid_bio = r1_bio;
2137 b->bi_private = rp;
2138
2139 /* initialize bvec table again */
2140 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2141 }
2142 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2143 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2144 !r1_bio->bios[primary]->bi_status) {
2145 r1_bio->bios[primary]->bi_end_io = NULL;
2146 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2147 break;
2148 }
2149 r1_bio->read_disk = primary;
2150 for (i = 0; i < conf->raid_disks * 2; i++) {
2151 int j = 0;
2152 struct bio *pbio = r1_bio->bios[primary];
2153 struct bio *sbio = r1_bio->bios[i];
2154 blk_status_t status = sbio->bi_status;
2155 struct page **ppages = get_resync_pages(pbio)->pages;
2156 struct page **spages = get_resync_pages(sbio)->pages;
2157 struct bio_vec *bi;
2158 int page_len[RESYNC_PAGES] = { 0 };
2159 struct bvec_iter_all iter_all;
2160
2161 if (sbio->bi_end_io != end_sync_read)
2162 continue;
2163 /* Now we can 'fixup' the error value */
2164 sbio->bi_status = 0;
2165
2166 bio_for_each_segment_all(bi, sbio, iter_all)
2167 page_len[j++] = bi->bv_len;
2168
2169 if (!status) {
2170 for (j = vcnt; j-- ; ) {
2171 if (memcmp(page_address(ppages[j]),
2172 page_address(spages[j]),
2173 page_len[j]))
2174 break;
2175 }
2176 } else
2177 j = 0;
2178 if (j >= 0)
2179 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2180 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2181 && !status)) {
2182 /* No need to write to this device. */
2183 sbio->bi_end_io = NULL;
2184 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2185 continue;
2186 }
2187
2188 bio_copy_data(sbio, pbio);
2189 }
2190}
2191
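/*
 * Called once the reads for a resync/recovery r1_bio have completed:
 * repair any read error, compare the copies for a user-requested
 * check/repair, then issue whatever writes are needed to bring the
 * remaining devices back into sync.
 */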
2192static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2193{
2194 struct r1conf *conf = mddev->private;
2195 int i;
2196 int disks = conf->raid_disks * 2;
2197 struct bio *wbio;
2198
2199 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2200 /* ouch - failed to read all of that. */
2201 if (!fix_sync_read_error(r1_bio))
2202 return;
2203
2204 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2205 process_checks(r1_bio);
2206
2207 /*
2208 * schedule writes
2209 */
2210 atomic_set(&r1_bio->remaining, 1);
2211 for (i = 0; i < disks ; i++) {
2212 wbio = r1_bio->bios[i];
2213 if (wbio->bi_end_io == NULL ||
2214 (wbio->bi_end_io == end_sync_read &&
2215 (i == r1_bio->read_disk ||
2216 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2217 continue;
2218 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2219 abort_sync_write(mddev, r1_bio);
2220 continue;
2221 }
2222
2223 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2224 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2225 wbio->bi_opf |= MD_FAILFAST;
2226
2227 wbio->bi_end_io = end_sync_write;
2228 atomic_inc(&r1_bio->remaining);
2229 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2230
2231 generic_make_request(wbio);
2232 }
2233
2234 put_sync_write_buf(r1_bio, 1);
2235}
2236
2237/*
2238 * This is a kernel thread which:
2239 *
2240 * 1. Retries failed read operations on working mirrors.
2241 * 2. Updates the raid superblock when problems are encountered.
2242 * 3. Performs writes following reads for array synchronising.
2243 */
2244
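/*
 * Try to repair a read error on 'read_disk': for each page-sized chunk
 * of the range, read good data from another usable mirror, write it
 * back to the other devices (including the one that failed) and
 * re-read to verify.  If no mirror can supply the data, the chunk is
 * recorded as a bad block (or the device is failed when that is not
 * possible).
 */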
2245static void fix_read_error(struct r1conf *conf, int read_disk,
2246 sector_t sect, int sectors)
2247{
2248 struct mddev *mddev = conf->mddev;
2249	while (sectors) {
2250 int s = sectors;
2251 int d = read_disk;
2252 int success = 0;
2253 int start;
2254 struct md_rdev *rdev;
2255
2256 if (s > (PAGE_SIZE>>9))
2257 s = PAGE_SIZE >> 9;
2258
2259 do {
2260 sector_t first_bad;
2261 int bad_sectors;
2262
2263 rcu_read_lock();
2264 rdev = rcu_dereference(conf->mirrors[d].rdev);
2265 if (rdev &&
2266 (test_bit(In_sync, &rdev->flags) ||
2267 (!test_bit(Faulty, &rdev->flags) &&
2268 rdev->recovery_offset >= sect + s)) &&
2269 is_badblock(rdev, sect, s,
2270 &first_bad, &bad_sectors) == 0) {
2271 atomic_inc(&rdev->nr_pending);
2272 rcu_read_unlock();
2273 if (sync_page_io(rdev, sect, s<<9,
2274 conf->tmppage, REQ_OP_READ, 0, false))
2275 success = 1;
2276 rdev_dec_pending(rdev, mddev);
2277 if (success)
2278 break;
2279 } else
2280 rcu_read_unlock();
2281 d++;
2282 if (d == conf->raid_disks * 2)
2283 d = 0;
2284 } while (!success && d != read_disk);
2285
2286 if (!success) {
2287 /* Cannot read from anywhere - mark it bad */
2288 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2289 if (!rdev_set_badblocks(rdev, sect, s, 0))
2290 md_error(mddev, rdev);
2291 break;
2292 }
2293 /* write it back and re-read */
2294 start = d;
2295 while (d != read_disk) {
2296			if (d == 0)
2297 d = conf->raid_disks * 2;
2298 d--;
2299 rcu_read_lock();
2300 rdev = rcu_dereference(conf->mirrors[d].rdev);
2301 if (rdev &&
2302 !test_bit(Faulty, &rdev->flags)) {
2303 atomic_inc(&rdev->nr_pending);
2304 rcu_read_unlock();
2305 r1_sync_page_io(rdev, sect, s,
2306 conf->tmppage, WRITE);
2307 rdev_dec_pending(rdev, mddev);
2308 } else
2309 rcu_read_unlock();
2310 }
2311 d = start;
2312 while (d != read_disk) {
2313 char b[BDEVNAME_SIZE];
2314			if (d == 0)
2315 d = conf->raid_disks * 2;
2316 d--;
2317 rcu_read_lock();
2318 rdev = rcu_dereference(conf->mirrors[d].rdev);
2319 if (rdev &&
2320 !test_bit(Faulty, &rdev->flags)) {
2321 atomic_inc(&rdev->nr_pending);
2322 rcu_read_unlock();
2323 if (r1_sync_page_io(rdev, sect, s,
2324 conf->tmppage, READ)) {
2325 atomic_add(s, &rdev->corrected_errors);
2326 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2327 mdname(mddev), s,
2328 (unsigned long long)(sect +
2329 rdev->data_offset),
2330 bdevname(rdev->bdev, b));
2331 }
2332 rdev_dec_pending(rdev, mddev);
2333 } else
2334 rcu_read_unlock();
2335 }
2336 sectors -= s;
2337 sect += s;
2338 }
2339}
2340
2341static int narrow_write_error(struct r1bio *r1_bio, int i)
2342{
2343 struct mddev *mddev = r1_bio->mddev;
2344 struct r1conf *conf = mddev->private;
2345 struct md_rdev *rdev = conf->mirrors[i].rdev;
2346
2347 /* bio has the data to be written to device 'i' where
2348 * we just recently had a write error.
2349 * We repeatedly clone the bio and trim down to one block,
2350 * then try the write. Where the write fails we record
2351 * a bad block.
2352 * It is conceivable that the bio doesn't exactly align with
2353 * blocks. We must handle this somehow.
2354 *
2355 * We currently own a reference on the rdev.
2356 */
2357
2358 int block_sectors;
2359 sector_t sector;
2360 int sectors;
2361 int sect_to_write = r1_bio->sectors;
2362 int ok = 1;
2363
2364 if (rdev->badblocks.shift < 0)
2365 return 0;
2366
2367 block_sectors = roundup(1 << rdev->badblocks.shift,
2368 bdev_logical_block_size(rdev->bdev) >> 9);
2369 sector = r1_bio->sector;
2370 sectors = ((sector + block_sectors)
2371 & ~(sector_t)(block_sectors - 1))
2372 - sector;
2373
2374 while (sect_to_write) {
2375 struct bio *wbio;
2376 if (sectors > sect_to_write)
2377 sectors = sect_to_write;
2378 /* Write at 'sector' for 'sectors'*/
2379
2380 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2381 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2382 GFP_NOIO,
2383 &mddev->bio_set);
2384 } else {
2385 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2386 &mddev->bio_set);
2387 }
2388
2389 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2390 wbio->bi_iter.bi_sector = r1_bio->sector;
2391 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2392
2393 bio_trim(wbio, sector - r1_bio->sector, sectors);
2394 wbio->bi_iter.bi_sector += rdev->data_offset;
2395 bio_set_dev(wbio, rdev->bdev);
2396
2397 if (submit_bio_wait(wbio) < 0)
2398 /* failure! */
2399 ok = rdev_set_badblocks(rdev, sector,
2400 sectors, 0)
2401 && ok;
2402
2403 bio_put(wbio);
2404 sect_to_write -= sectors;
2405 sector += sectors;
2406 sectors = block_sectors;
2407 }
2408 return ok;
2409}
2410
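/*
 * Deferred completion for a resync write that was flagged MadeGood or
 * WriteError: clear bad blocks that have been successfully rewritten,
 * record new bad blocks (or fail the device) where a write failed,
 * then release the buffer and report the sync chunk as done.
 */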
2411static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2412{
2413 int m;
2414 int s = r1_bio->sectors;
2415 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2416 struct md_rdev *rdev = conf->mirrors[m].rdev;
2417 struct bio *bio = r1_bio->bios[m];
2418 if (bio->bi_end_io == NULL)
2419 continue;
2420 if (!bio->bi_status &&
2421 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2422 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2423 }
2424 if (bio->bi_status &&
2425 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2426 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2427 md_error(conf->mddev, rdev);
2428 }
2429 }
2430 put_buf(r1_bio);
2431 md_done_sync(conf->mddev, s, 1);
2432}
2433
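/*
 * Deferred completion for a normal write: clear bad blocks behind
 * IO_MADE_GOOD entries, narrow down and record precise bad blocks
 * where a write failed, and, if any device saw an error, requeue the
 * bio so it completes only after the superblock has been updated.
 */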
2434static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2435{
2436 int m, idx;
2437 bool fail = false;
2438
2439 for (m = 0; m < conf->raid_disks * 2 ; m++)
2440 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2441 struct md_rdev *rdev = conf->mirrors[m].rdev;
2442 rdev_clear_badblocks(rdev,
2443 r1_bio->sector,
2444 r1_bio->sectors, 0);
2445 rdev_dec_pending(rdev, conf->mddev);
2446 } else if (r1_bio->bios[m] != NULL) {
2447 /* This drive got a write error. We need to
2448 * narrow down and record precise write
2449 * errors.
2450 */
2451 fail = true;
2452 if (!narrow_write_error(r1_bio, m)) {
2453 md_error(conf->mddev,
2454 conf->mirrors[m].rdev);
2455 /* an I/O failed, we can't clear the bitmap */
2456 set_bit(R1BIO_Degraded, &r1_bio->state);
2457 }
2458 rdev_dec_pending(conf->mirrors[m].rdev,
2459 conf->mddev);
2460 }
2461 if (fail) {
2462 spin_lock_irq(&conf->device_lock);
2463 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2464 idx = sector_to_idx(r1_bio->sector);
2465 atomic_inc(&conf->nr_queued[idx]);
2466 spin_unlock_irq(&conf->device_lock);
2467 /*
2468 * In case freeze_array() is waiting for condition
2469 * get_unqueued_pending() == extra to be true.
2470 */
2471 wake_up(&conf->wait_barrier);
2472 md_wakeup_thread(conf->mddev->thread);
2473 } else {
2474 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2475 close_write(r1_bio);
2476 raid_end_bio_io(r1_bio);
2477 }
2478}
2479
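/*
 * A normal read failed.  If the array is writable, the failing region
 * is repaired via fix_read_error() while the array is frozen; a
 * FailFast device is failed immediately instead.  On a read-only array
 * the slot is simply marked IO_BLOCKED.  The original request is then
 * re-issued so it can be served by another mirror.
 */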
2480static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2481{
2482 struct mddev *mddev = conf->mddev;
2483 struct bio *bio;
2484 struct md_rdev *rdev;
2485
2486 clear_bit(R1BIO_ReadError, &r1_bio->state);
2487	/* we got a read error. Maybe the drive is bad, or maybe just
2488	 * this block, in which case we can fix it.
2489	 * We freeze all other IO, and try reading the block from
2490	 * other devices. When we find one, we re-write
2491	 * and check whether that fixes the read error.
2492 * This is all done synchronously while the array is
2493 * frozen
2494 */
2495
2496 bio = r1_bio->bios[r1_bio->read_disk];
2497 bio_put(bio);
2498 r1_bio->bios[r1_bio->read_disk] = NULL;
2499
2500 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2501 if (mddev->ro == 0
2502 && !test_bit(FailFast, &rdev->flags)) {
2503 freeze_array(conf, 1);
2504 fix_read_error(conf, r1_bio->read_disk,
2505 r1_bio->sector, r1_bio->sectors);
2506 unfreeze_array(conf);
2507 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2508 md_error(mddev, rdev);
2509 } else {
2510 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2511 }
2512
2513 rdev_dec_pending(rdev, conf->mddev);
2514 allow_barrier(conf, r1_bio->sector);
2515 bio = r1_bio->master_bio;
2516
2517 /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2518 r1_bio->state = 0;
2519 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2520}
2521
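/*
 * The raid1 management thread: flushes pending writes, completes
 * deferred bios once the superblock is stable, and dispatches each
 * retried r1_bio to the appropriate sync/write/read error handler.
 */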
2522static void raid1d(struct md_thread *thread)
2523{
2524 struct mddev *mddev = thread->mddev;
2525 struct r1bio *r1_bio;
2526 unsigned long flags;
2527 struct r1conf *conf = mddev->private;
2528 struct list_head *head = &conf->retry_list;
2529 struct blk_plug plug;
2530 int idx;
2531
2532 md_check_recovery(mddev);
2533
2534 if (!list_empty_careful(&conf->bio_end_io_list) &&
2535 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2536 LIST_HEAD(tmp);
2537 spin_lock_irqsave(&conf->device_lock, flags);
2538 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2539 list_splice_init(&conf->bio_end_io_list, &tmp);
2540 spin_unlock_irqrestore(&conf->device_lock, flags);
2541 while (!list_empty(&tmp)) {
2542 r1_bio = list_first_entry(&tmp, struct r1bio,
2543 retry_list);
2544 list_del(&r1_bio->retry_list);
2545 idx = sector_to_idx(r1_bio->sector);
2546 atomic_dec(&conf->nr_queued[idx]);
2547 if (mddev->degraded)
2548 set_bit(R1BIO_Degraded, &r1_bio->state);
2549 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2550 close_write(r1_bio);
2551 raid_end_bio_io(r1_bio);
2552 }
2553 }
2554
2555 blk_start_plug(&plug);
2556 for (;;) {
2557
2558 flush_pending_writes(conf);
2559
2560 spin_lock_irqsave(&conf->device_lock, flags);
2561 if (list_empty(head)) {
2562 spin_unlock_irqrestore(&conf->device_lock, flags);
2563 break;
2564 }
2565 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2566 list_del(head->prev);
2567 idx = sector_to_idx(r1_bio->sector);
2568 atomic_dec(&conf->nr_queued[idx]);
2569 spin_unlock_irqrestore(&conf->device_lock, flags);
2570
2571 mddev = r1_bio->mddev;
2572 conf = mddev->private;
2573 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2574 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2575 test_bit(R1BIO_WriteError, &r1_bio->state))
2576 handle_sync_write_finished(conf, r1_bio);
2577 else
2578 sync_request_write(mddev, r1_bio);
2579 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2580 test_bit(R1BIO_WriteError, &r1_bio->state))
2581 handle_write_finished(conf, r1_bio);
2582 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2583 handle_read_error(conf, r1_bio);
2584 else
2585 WARN_ON_ONCE(1);
2586
2587 cond_resched();
2588 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2589 md_check_recovery(mddev);
2590 }
2591 blk_finish_plug(&plug);
2592}
2593
2594static int init_resync(struct r1conf *conf)
2595{
2596 int buffs;
2597
2598 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2599 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2600
2601 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2602 r1buf_pool_free, conf->poolinfo);
2603}
2604
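/*
 * Take a resync r1_bio from the pool and reset its pre-allocated bios
 * for reuse, preserving the resync_pages pointer stashed in
 * ->bi_private across the bio_reset().
 */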
2605static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2606{
2607 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2608 struct resync_pages *rps;
2609 struct bio *bio;
2610 int i;
2611
2612 for (i = conf->poolinfo->raid_disks; i--; ) {
2613 bio = r1bio->bios[i];
2614 rps = bio->bi_private;
2615 bio_reset(bio);
2616 bio->bi_private = rps;
2617 }
2618 r1bio->master_bio = NULL;
2619 return r1bio;
2620}
2621
2622/*
2623 * perform a "sync" on one "block"
2624 *
2625 * We need to make sure that no normal I/O request - particularly write
2626 * requests - conflict with active sync requests.
2627 *
2628 * This is achieved by tracking pending requests and a 'barrier' concept
2629 * that can be installed to exclude normal IO requests.
2630 */
2631
2632static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2633 int *skipped)
2634{
2635 struct r1conf *conf = mddev->private;
2636 struct r1bio *r1_bio;
2637 struct bio *bio;
2638 sector_t max_sector, nr_sectors;
2639 int disk = -1;
2640 int i;
2641 int wonly = -1;
2642 int write_targets = 0, read_targets = 0;
2643 sector_t sync_blocks;
2644 int still_degraded = 0;
2645 int good_sectors = RESYNC_SECTORS;
2646 int min_bad = 0; /* number of sectors that are bad in all devices */
2647 int idx = sector_to_idx(sector_nr);
2648 int page_idx = 0;
2649
2650 if (!mempool_initialized(&conf->r1buf_pool))
2651 if (init_resync(conf))
2652 return 0;
2653
2654 max_sector = mddev->dev_sectors;
2655 if (sector_nr >= max_sector) {
2656 /* If we aborted, we need to abort the
2657 * sync on the 'current' bitmap chunk (there will
2658		 * only be one in raid1 resync).
2659		 * We can find the current address in mddev->curr_resync
2660 */
2661 if (mddev->curr_resync < max_sector) /* aborted */
2662 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2663 &sync_blocks, 1);
2664 else /* completed sync */
2665 conf->fullsync = 0;
2666
2667 md_bitmap_close_sync(mddev->bitmap);
2668 close_sync(conf);
2669
2670 if (mddev_is_clustered(mddev)) {
2671 conf->cluster_sync_low = 0;
2672 conf->cluster_sync_high = 0;
2673 }
2674 return 0;
2675 }
2676
2677 if (mddev->bitmap == NULL &&
2678 mddev->recovery_cp == MaxSector &&
2679 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2680 conf->fullsync == 0) {
2681 *skipped = 1;
2682 return max_sector - sector_nr;
2683 }
2684	/* before building a request, check if we can skip these blocks...
2685	 * This call to md_bitmap_start_sync() doesn't actually record anything
2686 */
2687 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2688 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2689 /* We can skip this block, and probably several more */
2690 *skipped = 1;
2691 return sync_blocks;
2692 }
2693
2694 /*
2695 * If there is non-resync activity waiting for a turn, then let it
2696	 * through before starting on this new sync request.
2697 */
2698 if (atomic_read(&conf->nr_waiting[idx]))
2699 schedule_timeout_uninterruptible(1);
2700
2701 /* we are incrementing sector_nr below. To be safe, we check against
2702 * sector_nr + two times RESYNC_SECTORS
2703 */
2704
2705 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2706 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2707
2708
2709 if (raise_barrier(conf, sector_nr))
2710 return 0;
2711
2712 r1_bio = raid1_alloc_init_r1buf(conf);
2713
2714 rcu_read_lock();
2715 /*
2716 * If we get a correctably read error during resync or recovery,
2717 * we might want to read from a different device. So we
2718 * flag all drives that could conceivably be read from for READ,
2719 * and any others (which will be non-In_sync devices) for WRITE.
2720 * If a read fails, we try reading from something else for which READ
2721 * is OK.
2722 */
2723
2724 r1_bio->mddev = mddev;
2725 r1_bio->sector = sector_nr;
2726 r1_bio->state = 0;
2727 set_bit(R1BIO_IsSync, &r1_bio->state);
2728 /* make sure good_sectors won't go across barrier unit boundary */
2729 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2730
2731 for (i = 0; i < conf->raid_disks * 2; i++) {
2732 struct md_rdev *rdev;
2733 bio = r1_bio->bios[i];
2734
2735 rdev = rcu_dereference(conf->mirrors[i].rdev);
2736 if (rdev == NULL ||
2737 test_bit(Faulty, &rdev->flags)) {
2738 if (i < conf->raid_disks)
2739 still_degraded = 1;
2740 } else if (!test_bit(In_sync, &rdev->flags)) {
2741 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2742 bio->bi_end_io = end_sync_write;
2743			write_targets++;
2744 } else {
2745 /* may need to read from here */
2746 sector_t first_bad = MaxSector;
2747 int bad_sectors;
2748
2749 if (is_badblock(rdev, sector_nr, good_sectors,
2750 &first_bad, &bad_sectors)) {
2751 if (first_bad > sector_nr)
2752 good_sectors = first_bad - sector_nr;
2753 else {
2754 bad_sectors -= (sector_nr - first_bad);
2755 if (min_bad == 0 ||
2756 min_bad > bad_sectors)
2757 min_bad = bad_sectors;
2758 }
2759 }
2760 if (sector_nr < first_bad) {
2761 if (test_bit(WriteMostly, &rdev->flags)) {
2762 if (wonly < 0)
2763 wonly = i;
2764 } else {
2765 if (disk < 0)
2766 disk = i;
2767 }
2768 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2769 bio->bi_end_io = end_sync_read;
2770 read_targets++;
2771 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2772 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2773 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2774 /*
2775 * The device is suitable for reading (InSync),
2776 * but has bad block(s) here. Let's try to correct them,
2777 * if we are doing resync or repair. Otherwise, leave
2778 * this device alone for this sync request.
2779 */
2780 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2781 bio->bi_end_io = end_sync_write;
2782 write_targets++;
2783 }
2784 }
2785 if (bio->bi_end_io) {
2786 atomic_inc(&rdev->nr_pending);
2787 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2788 bio_set_dev(bio, rdev->bdev);
2789 if (test_bit(FailFast, &rdev->flags))
2790 bio->bi_opf |= MD_FAILFAST;
2791 }
2792 }
2793 rcu_read_unlock();
2794 if (disk < 0)
2795 disk = wonly;
2796 r1_bio->read_disk = disk;
2797
2798 if (read_targets == 0 && min_bad > 0) {
2799 /* These sectors are bad on all InSync devices, so we
2800 * need to mark them bad on all write targets
2801 */
2802 int ok = 1;
2803 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2804 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2805 struct md_rdev *rdev = conf->mirrors[i].rdev;
2806 ok = rdev_set_badblocks(rdev, sector_nr,
2807 min_bad, 0
2808 ) && ok;
2809 }
2810 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2811 *skipped = 1;
2812 put_buf(r1_bio);
2813
2814 if (!ok) {
2815 /* Cannot record the badblocks, so need to
2816 * abort the resync.
2817 * If there are multiple read targets, could just
2818 * fail the really bad ones ???
2819 */
2820 conf->recovery_disabled = mddev->recovery_disabled;
2821 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2822 return 0;
2823 } else
2824 return min_bad;
2825
2826 }
2827 if (min_bad > 0 && min_bad < good_sectors) {
2828 /* only resync enough to reach the next bad->good
2829 * transition */
2830 good_sectors = min_bad;
2831 }
2832
2833 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2834 /* extra read targets are also write targets */
2835 write_targets += read_targets-1;
2836
2837 if (write_targets == 0 || read_targets == 0) {
2838 /* There is nowhere to write, so all non-sync
2839 * drives must be failed - so we are finished
2840 */
2841 sector_t rv;
2842 if (min_bad > 0)
2843 max_sector = sector_nr + min_bad;
2844 rv = max_sector - sector_nr;
2845 *skipped = 1;
2846 put_buf(r1_bio);
2847 return rv;
2848 }
2849
2850 if (max_sector > mddev->resync_max)
2851 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2852 if (max_sector > sector_nr + good_sectors)
2853 max_sector = sector_nr + good_sectors;
2854 nr_sectors = 0;
2855 sync_blocks = 0;
2856 do {
2857 struct page *page;
2858 int len = PAGE_SIZE;
2859 if (sector_nr + (len>>9) > max_sector)
2860 len = (max_sector - sector_nr) << 9;
2861 if (len == 0)
2862 break;
2863 if (sync_blocks == 0) {
2864 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2865 &sync_blocks, still_degraded) &&
2866 !conf->fullsync &&
2867 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2868 break;
2869 if ((len >> 9) > sync_blocks)
2870 len = sync_blocks<<9;
2871 }
2872
2873 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2874 struct resync_pages *rp;
2875
2876 bio = r1_bio->bios[i];
2877 rp = get_resync_pages(bio);
2878 if (bio->bi_end_io) {
2879 page = resync_fetch_page(rp, page_idx);
2880
2881 /*
2882 * won't fail because the vec table is big
2883 * enough to hold all these pages
2884 */
2885 bio_add_page(bio, page, len, 0);
2886 }
2887 }
2888 nr_sectors += len>>9;
2889 sector_nr += len>>9;
2890 sync_blocks -= (len>>9);
2891 } while (++page_idx < RESYNC_PAGES);
2892
2893 r1_bio->sectors = nr_sectors;
2894
2895 if (mddev_is_clustered(mddev) &&
2896 conf->cluster_sync_high < sector_nr + nr_sectors) {
2897 conf->cluster_sync_low = mddev->curr_resync_completed;
2898 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2899 /* Send resync message */
2900 md_cluster_ops->resync_info_update(mddev,
2901 conf->cluster_sync_low,
2902 conf->cluster_sync_high);
2903 }
2904
2905 /* For a user-requested sync, we read all readable devices and do a
2906 * compare
2907 */
2908 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2909 atomic_set(&r1_bio->remaining, read_targets);
2910 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2911 bio = r1_bio->bios[i];
2912 if (bio->bi_end_io == end_sync_read) {
2913 read_targets--;
2914 md_sync_acct_bio(bio, nr_sectors);
2915 if (read_targets == 1)
2916 bio->bi_opf &= ~MD_FAILFAST;
2917 generic_make_request(bio);
2918 }
2919 }
2920 } else {
2921 atomic_set(&r1_bio->remaining, 1);
2922 bio = r1_bio->bios[r1_bio->read_disk];
2923 md_sync_acct_bio(bio, nr_sectors);
2924 if (read_targets == 1)
2925 bio->bi_opf &= ~MD_FAILFAST;
2926 generic_make_request(bio);
2927 }
2928 return nr_sectors;
2929}
2930
2931static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2932{
2933 if (sectors)
2934 return sectors;
2935
2936 return mddev->dev_sectors;
2937}
2938
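/*
 * Allocate and initialise the r1conf for this array: the per-bucket
 * barrier counters, the mirrors array (replacement devices live in the
 * upper half), the r1bio mempool and the raid1d thread.  Returns an
 * ERR_PTR and frees any partial allocation on failure.
 */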
2939static struct r1conf *setup_conf(struct mddev *mddev)
2940{
2941 struct r1conf *conf;
2942 int i;
2943 struct raid1_info *disk;
2944 struct md_rdev *rdev;
2945 int err = -ENOMEM;
2946
2947 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2948 if (!conf)
2949 goto abort;
2950
2951 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2952 sizeof(atomic_t), GFP_KERNEL);
2953 if (!conf->nr_pending)
2954 goto abort;
2955
2956 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2957 sizeof(atomic_t), GFP_KERNEL);
2958 if (!conf->nr_waiting)
2959 goto abort;
2960
2961 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2962 sizeof(atomic_t), GFP_KERNEL);
2963 if (!conf->nr_queued)
2964 goto abort;
2965
2966 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2967 sizeof(atomic_t), GFP_KERNEL);
2968 if (!conf->barrier)
2969 goto abort;
2970
2971 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2972 mddev->raid_disks, 2),
2973 GFP_KERNEL);
2974 if (!conf->mirrors)
2975 goto abort;
2976
2977 conf->tmppage = alloc_page(GFP_KERNEL);
2978 if (!conf->tmppage)
2979 goto abort;
2980
2981 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2982 if (!conf->poolinfo)
2983 goto abort;
2984 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2985 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2986 rbio_pool_free, conf->poolinfo);
2987 if (err)
2988 goto abort;
2989
2990 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2991 if (err)
2992 goto abort;
2993
2994 conf->poolinfo->mddev = mddev;
2995
2996 err = -EINVAL;
2997 spin_lock_init(&conf->device_lock);
2998 rdev_for_each(rdev, mddev) {
2999 int disk_idx = rdev->raid_disk;
3000 if (disk_idx >= mddev->raid_disks
3001 || disk_idx < 0)
3002 continue;
3003 if (test_bit(Replacement, &rdev->flags))
3004 disk = conf->mirrors + mddev->raid_disks + disk_idx;
3005 else
3006 disk = conf->mirrors + disk_idx;
3007
3008 if (disk->rdev)
3009 goto abort;
3010 disk->rdev = rdev;
3011 disk->head_position = 0;
3012 disk->seq_start = MaxSector;
3013 }
3014 conf->raid_disks = mddev->raid_disks;
3015 conf->mddev = mddev;
3016 INIT_LIST_HEAD(&conf->retry_list);
3017 INIT_LIST_HEAD(&conf->bio_end_io_list);
3018
3019 spin_lock_init(&conf->resync_lock);
3020 init_waitqueue_head(&conf->wait_barrier);
3021
3022 bio_list_init(&conf->pending_bio_list);
3023 conf->pending_count = 0;
3024 conf->recovery_disabled = mddev->recovery_disabled - 1;
3025
3026 err = -EIO;
3027 for (i = 0; i < conf->raid_disks * 2; i++) {
3028
3029 disk = conf->mirrors + i;
3030
3031 if (i < conf->raid_disks &&
3032 disk[conf->raid_disks].rdev) {
3033 /* This slot has a replacement. */
3034 if (!disk->rdev) {
3035 /* No original, just make the replacement
3036 * a recovering spare
3037 */
3038 disk->rdev =
3039 disk[conf->raid_disks].rdev;
3040 disk[conf->raid_disks].rdev = NULL;
3041 } else if (!test_bit(In_sync, &disk->rdev->flags))
3042 /* Original is not in_sync - bad */
3043 goto abort;
3044 }
3045
3046 if (!disk->rdev ||
3047 !test_bit(In_sync, &disk->rdev->flags)) {
3048 disk->head_position = 0;
3049 if (disk->rdev &&
3050 (disk->rdev->saved_raid_disk < 0))
3051 conf->fullsync = 1;
3052 }
3053 }
3054
3055 err = -ENOMEM;
3056 conf->thread = md_register_thread(raid1d, mddev, "raid1");
3057 if (!conf->thread)
3058 goto abort;
3059
3060 return conf;
3061
3062 abort:
3063 if (conf) {
3064 mempool_exit(&conf->r1bio_pool);
3065 kfree(conf->mirrors);
3066 safe_put_page(conf->tmppage);
3067 kfree(conf->poolinfo);
3068 kfree(conf->nr_pending);
3069 kfree(conf->nr_waiting);
3070 kfree(conf->nr_queued);
3071 kfree(conf->barrier);
3072 bioset_exit(&conf->bio_split);
3073 kfree(conf);
3074 }
3075 return ERR_PTR(err);
3076}
3077
3078static void raid1_free(struct mddev *mddev, void *priv);
3079static int raid1_run(struct mddev *mddev)
3080{
3081 struct r1conf *conf;
3082 int i;
3083 struct md_rdev *rdev;
3084 int ret;
3085 bool discard_supported = false;
3086
3087 if (mddev->level != 1) {
3088 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3089 mdname(mddev), mddev->level);
3090 return -EIO;
3091 }
3092 if (mddev->reshape_position != MaxSector) {
3093 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3094 mdname(mddev));
3095 return -EIO;
3096 }
3097 if (mddev_init_writes_pending(mddev) < 0)
3098 return -ENOMEM;
3099 /*
3100 * copy the already verified devices into our private RAID1
3101 * bookkeeping area. [whatever we allocate in run(),
3102 * should be freed in raid1_free()]
3103 */
3104 if (mddev->private == NULL)
3105 conf = setup_conf(mddev);
3106 else
3107 conf = mddev->private;
3108
3109 if (IS_ERR(conf))
3110 return PTR_ERR(conf);
3111
3112 if (mddev->queue) {
3113 blk_queue_max_write_same_sectors(mddev->queue, 0);
3114 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3115 }
3116
3117 rdev_for_each(rdev, mddev) {
3118 if (!mddev->gendisk)
3119 continue;
3120 disk_stack_limits(mddev->gendisk, rdev->bdev,
3121 rdev->data_offset << 9);
3122 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3123 discard_supported = true;
3124 }
3125
3126 mddev->degraded = 0;
3127 for (i = 0; i < conf->raid_disks; i++)
3128 if (conf->mirrors[i].rdev == NULL ||
3129 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3130 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3131 mddev->degraded++;
3132 /*
3133	 * RAID1 needs at least one active disk.
3134 */
3135 if (conf->raid_disks - mddev->degraded < 1) {
3136 ret = -EINVAL;
3137 goto abort;
3138 }
3139
3140 if (conf->raid_disks - mddev->degraded == 1)
3141 mddev->recovery_cp = MaxSector;
3142
3143 if (mddev->recovery_cp != MaxSector)
3144 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3145 mdname(mddev));
3146 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3147 mdname(mddev), mddev->raid_disks - mddev->degraded,
3148 mddev->raid_disks);
3149
3150 /*
3151 * Ok, everything is just fine now
3152 */
3153 mddev->thread = conf->thread;
3154 conf->thread = NULL;
3155 mddev->private = conf;
3156 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3157
3158 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3159
3160 if (mddev->queue) {
3161 if (discard_supported)
3162 blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3163 mddev->queue);
3164 else
3165 blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3166 mddev->queue);
3167 }
3168
3169 ret = md_integrity_register(mddev);
3170 if (ret) {
3171 md_unregister_thread(&mddev->thread);
3172 goto abort;
3173 }
3174 return 0;
3175
3176abort:
3177 raid1_free(mddev, conf);
3178 return ret;
3179}
3180
3181static void raid1_free(struct mddev *mddev, void *priv)
3182{
3183 struct r1conf *conf = priv;
3184
3185 mempool_exit(&conf->r1bio_pool);
3186 kfree(conf->mirrors);
3187 safe_put_page(conf->tmppage);
3188 kfree(conf->poolinfo);
3189 kfree(conf->nr_pending);
3190 kfree(conf->nr_waiting);
3191 kfree(conf->nr_queued);
3192 kfree(conf->barrier);
3193 bioset_exit(&conf->bio_split);
3194 kfree(conf);
3195}
3196
3197static int raid1_resize(struct mddev *mddev, sector_t sectors)
3198{
3199 /* no resync is happening, and there is enough space
3200 * on all devices, so we can resize.
3201 * We need to make sure resync covers any new space.
3202 * If the array is shrinking we should possibly wait until
3203 * any io in the removed space completes, but it hardly seems
3204 * worth it.
3205 */
3206 sector_t newsize = raid1_size(mddev, sectors, 0);
3207 if (mddev->external_size &&
3208 mddev->array_sectors > newsize)
3209 return -EINVAL;
3210 if (mddev->bitmap) {
3211 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3212 if (ret)
3213 return ret;
3214 }
3215 md_set_array_sectors(mddev, newsize);
3216 if (sectors > mddev->dev_sectors &&
3217 mddev->recovery_cp > mddev->dev_sectors) {
3218 mddev->recovery_cp = mddev->dev_sectors;
3219 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3220 }
3221 mddev->dev_sectors = sectors;
3222 mddev->resync_max_sectors = sectors;
3223 return 0;
3224}
3225
3226static int raid1_reshape(struct mddev *mddev)
3227{
3228 /* We need to:
3229 * 1/ resize the r1bio_pool
3230 * 2/ resize conf->mirrors
3231 *
3232 * We allocate a new r1bio_pool if we can.
3233 * Then raise a device barrier and wait until all IO stops.
3234 * Then resize conf->mirrors and swap in the new r1bio pool.
3235 *
3236 * At the same time, we "pack" the devices so that all the missing
3237 * devices have the higher raid_disk numbers.
3238 */
3239 mempool_t newpool, oldpool;
3240 struct pool_info *newpoolinfo;
3241 struct raid1_info *newmirrors;
3242 struct r1conf *conf = mddev->private;
3243 int cnt, raid_disks;
3244 unsigned long flags;
3245 int d, d2;
3246 int ret;
3247
3248 memset(&newpool, 0, sizeof(newpool));
3249 memset(&oldpool, 0, sizeof(oldpool));
3250
3251 /* Cannot change chunk_size, layout, or level */
3252 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3253 mddev->layout != mddev->new_layout ||
3254 mddev->level != mddev->new_level) {
3255 mddev->new_chunk_sectors = mddev->chunk_sectors;
3256 mddev->new_layout = mddev->layout;
3257 mddev->new_level = mddev->level;
3258 return -EINVAL;
3259 }
3260
3261 if (!mddev_is_clustered(mddev))
3262 md_allow_write(mddev);
3263
3264 raid_disks = mddev->raid_disks + mddev->delta_disks;
3265
3266 if (raid_disks < conf->raid_disks) {
3267		cnt = 0;
3268		for (d = 0; d < conf->raid_disks; d++)
3269 if (conf->mirrors[d].rdev)
3270 cnt++;
3271 if (cnt > raid_disks)
3272 return -EBUSY;
3273 }
3274
3275 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3276 if (!newpoolinfo)
3277 return -ENOMEM;
3278 newpoolinfo->mddev = mddev;
3279 newpoolinfo->raid_disks = raid_disks * 2;
3280
3281 ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3282 rbio_pool_free, newpoolinfo);
3283 if (ret) {
3284 kfree(newpoolinfo);
3285 return ret;
3286 }
3287 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3288 raid_disks, 2),
3289 GFP_KERNEL);
3290 if (!newmirrors) {
3291 kfree(newpoolinfo);
3292 mempool_exit(&newpool);
3293 return -ENOMEM;
3294 }
3295
3296 freeze_array(conf, 0);
3297
3298 /* ok, everything is stopped */
3299 oldpool = conf->r1bio_pool;
3300 conf->r1bio_pool = newpool;
3301
3302 for (d = d2 = 0; d < conf->raid_disks; d++) {
3303 struct md_rdev *rdev = conf->mirrors[d].rdev;
3304 if (rdev && rdev->raid_disk != d2) {
3305 sysfs_unlink_rdev(mddev, rdev);
3306 rdev->raid_disk = d2;
3307 sysfs_unlink_rdev(mddev, rdev);
3308 if (sysfs_link_rdev(mddev, rdev))
3309 pr_warn("md/raid1:%s: cannot register rd%d\n",
3310 mdname(mddev), rdev->raid_disk);
3311 }
3312 if (rdev)
3313 newmirrors[d2++].rdev = rdev;
3314 }
3315 kfree(conf->mirrors);
3316 conf->mirrors = newmirrors;
3317 kfree(conf->poolinfo);
3318 conf->poolinfo = newpoolinfo;
3319
3320 spin_lock_irqsave(&conf->device_lock, flags);
3321 mddev->degraded += (raid_disks - conf->raid_disks);
3322 spin_unlock_irqrestore(&conf->device_lock, flags);
3323 conf->raid_disks = mddev->raid_disks = raid_disks;
3324 mddev->delta_disks = 0;
3325
3326 unfreeze_array(conf);
3327
3328 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3329 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3330 md_wakeup_thread(mddev->thread);
3331
3332 mempool_exit(&oldpool);
3333 return 0;
3334}
3335
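/*
 * md calls this to quiesce the array: freezing waits for all pending
 * IO to drain and holds off new IO until the array is unfrozen.
 */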
3336static void raid1_quiesce(struct mddev *mddev, int quiesce)
3337{
3338 struct r1conf *conf = mddev->private;
3339
3340 if (quiesce)
3341 freeze_array(conf, 0);
3342 else
3343 unfreeze_array(conf);
3344}
3345
3346static void *raid1_takeover(struct mddev *mddev)
3347{
3348 /* raid1 can take over:
3349 * raid5 with 2 devices, any layout or chunk size
3350 */
3351 if (mddev->level == 5 && mddev->raid_disks == 2) {
3352 struct r1conf *conf;
3353 mddev->new_level = 1;
3354 mddev->new_layout = 0;
3355 mddev->new_chunk_sectors = 0;
3356 conf = setup_conf(mddev);
3357 if (!IS_ERR(conf)) {
3358 /* Array must appear to be quiesced */
3359 conf->array_frozen = 1;
3360 mddev_clear_unsupported_flags(mddev,
3361 UNSUPPORTED_MDDEV_FLAGS);
3362 }
3363 return conf;
3364 }
3365 return ERR_PTR(-EINVAL);
3366}
3367
3368static struct md_personality raid1_personality =
3369{
3370 .name = "raid1",
3371 .level = 1,
3372 .owner = THIS_MODULE,
3373 .make_request = raid1_make_request,
3374 .run = raid1_run,
3375 .free = raid1_free,
3376 .status = raid1_status,
3377 .error_handler = raid1_error,
3378 .hot_add_disk = raid1_add_disk,
3379 .hot_remove_disk= raid1_remove_disk,
3380 .spare_active = raid1_spare_active,
3381 .sync_request = raid1_sync_request,
3382 .resize = raid1_resize,
3383 .size = raid1_size,
3384 .check_reshape = raid1_reshape,
3385 .quiesce = raid1_quiesce,
3386 .takeover = raid1_takeover,
3387 .congested = raid1_congested,
3388};
3389
3390static int __init raid_init(void)
3391{
3392 return register_md_personality(&raid1_personality);
3393}
3394
3395static void raid_exit(void)
3396{
3397 unregister_md_personality(&raid1_personality);
3398}
3399
3400module_init(raid_init);
3401module_exit(raid_exit);
3402MODULE_LICENSE("GPL");
3403MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3404MODULE_ALIAS("md-personality-3"); /* RAID1 */
3405MODULE_ALIAS("md-raid1");
3406MODULE_ALIAS("md-level-1");
3407
3408module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
1/*
2 * raid1.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5 *
6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7 *
8 * RAID-1 management functions.
9 *
10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11 *
12 * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk>
13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14 *
15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16 * bitmapped intelligence in resync:
17 *
18 * - bitmap marked during normal i/o
19 * - bitmap used to skip nondirty blocks during sync
20 *
21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22 * - persistent bitmap code
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
27 * any later version.
28 *
29 * You should have received a copy of the GNU General Public License
30 * (for example /usr/src/linux/COPYING); if not, write to the Free
31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/slab.h>
35#include <linux/delay.h>
36#include <linux/blkdev.h>
37#include <linux/module.h>
38#include <linux/seq_file.h>
39#include <linux/ratelimit.h>
40
41#include <trace/events/block.h>
42
43#include "md.h"
44#include "raid1.h"
45#include "md-bitmap.h"
46
47#define UNSUPPORTED_MDDEV_FLAGS \
48 ((1L << MD_HAS_JOURNAL) | \
49 (1L << MD_JOURNAL_CLEAN) | \
50 (1L << MD_HAS_PPL) | \
51 (1L << MD_HAS_MULTIPLE_PPLS))
52
53/*
54 * Number of guaranteed r1bios in case of extreme VM load:
55 */
56#define NR_RAID1_BIOS 256
57
58/* when we get a read error on a read-only array, we redirect to another
59 * device without failing the first device, or trying to over-write to
60 * correct the read error. To keep track of bad blocks on a per-bio
61 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
62 */
63#define IO_BLOCKED ((struct bio *)1)
64/* When we successfully write to a known bad-block, we need to remove the
65 * bad-block marking which must be done from process context. So we record
66 * the success by setting devs[n].bio to IO_MADE_GOOD
67 */
68#define IO_MADE_GOOD ((struct bio *)2)
69
70#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
71
72/* When there are this many requests queue to be written by
73 * the raid1 thread, we become 'congested' to provide back-pressure
74 * for writeback.
75 */
76static int max_queued_requests = 1024;
77
78static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
79static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
80
81#define raid1_log(md, fmt, args...) \
82 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
83
84#include "raid1-10.c"
85
86/*
87 * for resync bio, r1bio pointer can be retrieved from the per-bio
88 * 'struct resync_pages'.
89 */
90static inline struct r1bio *get_resync_r1bio(struct bio *bio)
91{
92 return get_resync_pages(bio)->raid_bio;
93}
94
95static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
96{
97 struct pool_info *pi = data;
98 int size = offsetof(struct r1bio, bios[pi->raid_disks]);
99
100 /* allocate a r1bio with room for raid_disks entries in the bios array */
101 return kzalloc(size, gfp_flags);
102}
103
104static void r1bio_pool_free(void *r1_bio, void *data)
105{
106 kfree(r1_bio);
107}
108
109#define RESYNC_DEPTH 32
110#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
111#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
112#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
113#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
114#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
115
116static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
117{
118 struct pool_info *pi = data;
119 struct r1bio *r1_bio;
120 struct bio *bio;
121 int need_pages;
122 int j;
123 struct resync_pages *rps;
124
125 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
126 if (!r1_bio)
127 return NULL;
128
129 rps = kmalloc(sizeof(struct resync_pages) * pi->raid_disks,
130 gfp_flags);
131 if (!rps)
132 goto out_free_r1bio;
133
134 /*
135 * Allocate bios : 1 for reading, n-1 for writing
136 */
137 for (j = pi->raid_disks ; j-- ; ) {
138 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
139 if (!bio)
140 goto out_free_bio;
141 r1_bio->bios[j] = bio;
142 }
143 /*
144 * Allocate RESYNC_PAGES data pages and attach them to
145 * the first bio.
146 * If this is a user-requested check/repair, allocate
147 * RESYNC_PAGES for each bio.
148 */
149 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
150 need_pages = pi->raid_disks;
151 else
152 need_pages = 1;
153 for (j = 0; j < pi->raid_disks; j++) {
154 struct resync_pages *rp = &rps[j];
155
156 bio = r1_bio->bios[j];
157
158 if (j < need_pages) {
159 if (resync_alloc_pages(rp, gfp_flags))
160 goto out_free_pages;
161 } else {
162 memcpy(rp, &rps[0], sizeof(*rp));
163 resync_get_all_pages(rp);
164 }
165
166 rp->raid_bio = r1_bio;
167 bio->bi_private = rp;
168 }
169
170 r1_bio->master_bio = NULL;
171
172 return r1_bio;
173
174out_free_pages:
175 while (--j >= 0)
176 resync_free_pages(&rps[j]);
177
178out_free_bio:
179 while (++j < pi->raid_disks)
180 bio_put(r1_bio->bios[j]);
181 kfree(rps);
182
183out_free_r1bio:
184 r1bio_pool_free(r1_bio, data);
185 return NULL;
186}
187
188static void r1buf_pool_free(void *__r1_bio, void *data)
189{
190 struct pool_info *pi = data;
191 int i;
192 struct r1bio *r1bio = __r1_bio;
193 struct resync_pages *rp = NULL;
194
195 for (i = pi->raid_disks; i--; ) {
196 rp = get_resync_pages(r1bio->bios[i]);
197 resync_free_pages(rp);
198 bio_put(r1bio->bios[i]);
199 }
200
201 /* resync pages array stored in the 1st bio's .bi_private */
202 kfree(rp);
203
204 r1bio_pool_free(r1bio, data);
205}
206
207static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
208{
209 int i;
210
211 for (i = 0; i < conf->raid_disks * 2; i++) {
212 struct bio **bio = r1_bio->bios + i;
213 if (!BIO_SPECIAL(*bio))
214 bio_put(*bio);
215 *bio = NULL;
216 }
217}
218
219static void free_r1bio(struct r1bio *r1_bio)
220{
221 struct r1conf *conf = r1_bio->mddev->private;
222
223 put_all_bios(conf, r1_bio);
224 mempool_free(r1_bio, conf->r1bio_pool);
225}
226
227static void put_buf(struct r1bio *r1_bio)
228{
229 struct r1conf *conf = r1_bio->mddev->private;
230 sector_t sect = r1_bio->sector;
231 int i;
232
233 for (i = 0; i < conf->raid_disks * 2; i++) {
234 struct bio *bio = r1_bio->bios[i];
235 if (bio->bi_end_io)
236 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
237 }
238
239 mempool_free(r1_bio, conf->r1buf_pool);
240
241 lower_barrier(conf, sect);
242}
243
244static void reschedule_retry(struct r1bio *r1_bio)
245{
246 unsigned long flags;
247 struct mddev *mddev = r1_bio->mddev;
248 struct r1conf *conf = mddev->private;
249 int idx;
250
251 idx = sector_to_idx(r1_bio->sector);
252 spin_lock_irqsave(&conf->device_lock, flags);
253 list_add(&r1_bio->retry_list, &conf->retry_list);
254 atomic_inc(&conf->nr_queued[idx]);
255 spin_unlock_irqrestore(&conf->device_lock, flags);
256
257 wake_up(&conf->wait_barrier);
258 md_wakeup_thread(mddev->thread);
259}
260
261/*
262 * raid_end_bio_io() is called when we have finished servicing a mirrored
263 * operation and are ready to return a success/failure code to the buffer
264 * cache layer.
265 */
266static void call_bio_endio(struct r1bio *r1_bio)
267{
268 struct bio *bio = r1_bio->master_bio;
269 struct r1conf *conf = r1_bio->mddev->private;
270
271 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
272 bio->bi_status = BLK_STS_IOERR;
273
274 bio_endio(bio);
275 /*
276 * Wake up any possible resync thread that waits for the device
277 * to go idle.
278 */
279 allow_barrier(conf, r1_bio->sector);
280}
281
282static void raid_end_bio_io(struct r1bio *r1_bio)
283{
284 struct bio *bio = r1_bio->master_bio;
285
286 /* if nobody has done the final endio yet, do it now */
287 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
288 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
289 (bio_data_dir(bio) == WRITE) ? "write" : "read",
290 (unsigned long long) bio->bi_iter.bi_sector,
291 (unsigned long long) bio_end_sector(bio) - 1);
292
293 call_bio_endio(r1_bio);
294 }
295 free_r1bio(r1_bio);
296}
297
298/*
299 * Update disk head position estimator based on IRQ completion info.
300 */
301static inline void update_head_pos(int disk, struct r1bio *r1_bio)
302{
303 struct r1conf *conf = r1_bio->mddev->private;
304
305 conf->mirrors[disk].head_position =
306 r1_bio->sector + (r1_bio->sectors);
307}
308
309/*
310 * Find the disk number which triggered given bio
311 */
312static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
313{
314 int mirror;
315 struct r1conf *conf = r1_bio->mddev->private;
316 int raid_disks = conf->raid_disks;
317
318 for (mirror = 0; mirror < raid_disks * 2; mirror++)
319 if (r1_bio->bios[mirror] == bio)
320 break;
321
322 BUG_ON(mirror == raid_disks * 2);
323 update_head_pos(mirror, r1_bio);
324
325 return mirror;
326}
327
328static void raid1_end_read_request(struct bio *bio)
329{
330 int uptodate = !bio->bi_status;
331 struct r1bio *r1_bio = bio->bi_private;
332 struct r1conf *conf = r1_bio->mddev->private;
333 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
334
335 /*
336 * this branch is our 'one mirror IO has finished' event handler:
337 */
338 update_head_pos(r1_bio->read_disk, r1_bio);
339
340 if (uptodate)
341 set_bit(R1BIO_Uptodate, &r1_bio->state);
342 else if (test_bit(FailFast, &rdev->flags) &&
343 test_bit(R1BIO_FailFast, &r1_bio->state))
344 /* This was a fail-fast read so we definitely
345 * want to retry */
346 ;
347 else {
348 /* If all other devices have failed, we want to return
349 * the error upwards rather than fail the last device.
350 * Here we redefine "uptodate" to mean "Don't want to retry"
351 */
352 unsigned long flags;
353 spin_lock_irqsave(&conf->device_lock, flags);
354 if (r1_bio->mddev->degraded == conf->raid_disks ||
355 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
356 test_bit(In_sync, &rdev->flags)))
357 uptodate = 1;
358 spin_unlock_irqrestore(&conf->device_lock, flags);
359 }
360
361 if (uptodate) {
362 raid_end_bio_io(r1_bio);
363 rdev_dec_pending(rdev, conf->mddev);
364 } else {
365 /*
366 * oops, read error:
367 */
368 char b[BDEVNAME_SIZE];
369 pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
370 mdname(conf->mddev),
371 bdevname(rdev->bdev, b),
372 (unsigned long long)r1_bio->sector);
373 set_bit(R1BIO_ReadError, &r1_bio->state);
374 reschedule_retry(r1_bio);
375 /* don't drop the reference on read_disk yet */
376 }
377}
378
379static void close_write(struct r1bio *r1_bio)
380{
381 /* it really is the end of this request */
382 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
383 bio_free_pages(r1_bio->behind_master_bio);
384 bio_put(r1_bio->behind_master_bio);
385 r1_bio->behind_master_bio = NULL;
386 }
387 /* clear the bitmap if all writes complete successfully */
388 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
389 r1_bio->sectors,
390 !test_bit(R1BIO_Degraded, &r1_bio->state),
391 test_bit(R1BIO_BehindIO, &r1_bio->state));
392 md_write_end(r1_bio->mddev);
393}
394
395static void r1_bio_write_done(struct r1bio *r1_bio)
396{
397 if (!atomic_dec_and_test(&r1_bio->remaining))
398 return;
399
400 if (test_bit(R1BIO_WriteError, &r1_bio->state))
401 reschedule_retry(r1_bio);
402 else {
403 close_write(r1_bio);
404 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
405 reschedule_retry(r1_bio);
406 else
407 raid_end_bio_io(r1_bio);
408 }
409}
410
411static void raid1_end_write_request(struct bio *bio)
412{
413 struct r1bio *r1_bio = bio->bi_private;
414 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
415 struct r1conf *conf = r1_bio->mddev->private;
416 struct bio *to_put = NULL;
417 int mirror = find_bio_disk(r1_bio, bio);
418 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
419 bool discard_error;
420
421 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
422
423 /*
424 * 'one mirror IO has finished' event handler:
425 */
426 if (bio->bi_status && !discard_error) {
427 set_bit(WriteErrorSeen, &rdev->flags);
428 if (!test_and_set_bit(WantReplacement, &rdev->flags))
429 set_bit(MD_RECOVERY_NEEDED, &
430 conf->mddev->recovery);
431
432 if (test_bit(FailFast, &rdev->flags) &&
433 (bio->bi_opf & MD_FAILFAST) &&
434 /* We never try FailFast to WriteMostly devices */
435 !test_bit(WriteMostly, &rdev->flags)) {
436 md_error(r1_bio->mddev, rdev);
437 if (!test_bit(Faulty, &rdev->flags))
438 /* This is the only remaining device,
439 * We need to retry the write without
440 * FailFast
441 */
442 set_bit(R1BIO_WriteError, &r1_bio->state);
443 else {
444 /* Finished with this branch */
445 r1_bio->bios[mirror] = NULL;
446 to_put = bio;
447 }
448 } else
449 set_bit(R1BIO_WriteError, &r1_bio->state);
450 } else {
451 /*
452 * Set R1BIO_Uptodate in our master bio, so that we
453 * will return a good error code to the higher
454 * levels even if IO on some other mirrored buffer
455 * fails.
456 *
457 * The 'master' represents the composite IO operation
458 * to user-side. So if something waits for IO, then it
459 * will wait for the 'master' bio.
460 */
461 sector_t first_bad;
462 int bad_sectors;
463
464 r1_bio->bios[mirror] = NULL;
465 to_put = bio;
466 /*
467 * Do not set R1BIO_Uptodate if the current device is
468 * rebuilding or Faulty. This is because we cannot use
469 * such a device for properly reading the data back (we could
470 * potentially use it, if the current write fell before
471 * rdev->recovery_offset, but for simplicity we don't
472 * check this here).
473 */
474 if (test_bit(In_sync, &rdev->flags) &&
475 !test_bit(Faulty, &rdev->flags))
476 set_bit(R1BIO_Uptodate, &r1_bio->state);
477
478 /* Maybe we can clear some bad blocks. */
479 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
480 &first_bad, &bad_sectors) && !discard_error) {
481 r1_bio->bios[mirror] = IO_MADE_GOOD;
482 set_bit(R1BIO_MadeGood, &r1_bio->state);
483 }
484 }
485
486 if (behind) {
487 if (test_bit(WriteMostly, &rdev->flags))
488 atomic_dec(&r1_bio->behind_remaining);
489
490 /*
491 * In behind mode, we ACK the master bio once the I/O
492 * has safely reached all non-writemostly
493 * disks. Setting the Returned bit ensures that this
494 * gets done only once -- we don't ever want to return
495 * -EIO here; instead we'll wait for the remaining writes to complete.
496 */
497 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
498 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
499 /* Maybe we can return now */
500 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
501 struct bio *mbio = r1_bio->master_bio;
502 pr_debug("raid1: behind end write sectors"
503 " %llu-%llu\n",
504 (unsigned long long) mbio->bi_iter.bi_sector,
505 (unsigned long long) bio_end_sector(mbio) - 1);
506 call_bio_endio(r1_bio);
507 }
508 }
509 }
510 if (r1_bio->bios[mirror] == NULL)
511 rdev_dec_pending(rdev, conf->mddev);
512
513 /*
514 * Let's see if all mirrored write operations have finished
515 * already.
516 */
517 r1_bio_write_done(r1_bio);
518
519 if (to_put)
520 bio_put(to_put);
521}
522
523static sector_t align_to_barrier_unit_end(sector_t start_sector,
524 sector_t sectors)
525{
526 sector_t len;
527
528 WARN_ON(sectors == 0);
529 /*
530 * len is the number of sectors from start_sector to end of the
531 * barrier unit which start_sector belongs to.
532 */
533 len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
534 start_sector;
535
536 if (len > sectors)
537 len = sectors;
538
539 return len;
540}
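/*
 * Worked example (illustrative only; the unit size comes from
 * BARRIER_UNIT_SECTOR_SIZE in raid1.h, typically a 64MB window):
 * a request that starts 8 sectors before a barrier unit boundary
 * with sectors == 1024 gets len == 8, so the caller submits those
 * 8 sectors first and no bio ever spans two barrier buckets.
 */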
541
542/*
543 * This routine returns the disk from which the requested read should
544 * be done. There is a per-array 'next expected sequential IO' sector
545 * number - if this matches on the next IO then we use the last disk.
546 * There is also a per-disk 'last known head position' sector that is
547 * maintained from IRQ contexts; both the normal and the resync IO
548 * completion handlers update this position correctly. If there is no
549 * perfect sequential match then we pick the disk whose head is closest.
550 *
551 * If there are 2 mirrors in the same 2 devices, performance degrades
552 * because position is mirror, not device based.
553 *
554 * The rdev for the device selected will have nr_pending incremented.
555 */
556static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
557{
558 const sector_t this_sector = r1_bio->sector;
559 int sectors;
560 int best_good_sectors;
561 int best_disk, best_dist_disk, best_pending_disk;
562 int has_nonrot_disk;
563 int disk;
564 sector_t best_dist;
565 unsigned int min_pending;
566 struct md_rdev *rdev;
567 int choose_first;
568 int choose_next_idle;
569
570 rcu_read_lock();
571 /*
572 * Check if we can balance. We can balance on the whole
573 * device if no resync is going on, or below the resync window.
574 * We take the first readable disk when above the resync window.
575 */
576 retry:
577 sectors = r1_bio->sectors;
578 best_disk = -1;
579 best_dist_disk = -1;
580 best_dist = MaxSector;
581 best_pending_disk = -1;
582 min_pending = UINT_MAX;
583 best_good_sectors = 0;
584 has_nonrot_disk = 0;
585 choose_next_idle = 0;
586 clear_bit(R1BIO_FailFast, &r1_bio->state);
587
588 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
589 (mddev_is_clustered(conf->mddev) &&
590 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
591 this_sector + sectors)))
592 choose_first = 1;
593 else
594 choose_first = 0;
595
596 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
597 sector_t dist;
598 sector_t first_bad;
599 int bad_sectors;
600 unsigned int pending;
601 bool nonrot;
602
603 rdev = rcu_dereference(conf->mirrors[disk].rdev);
604 if (r1_bio->bios[disk] == IO_BLOCKED
605 || rdev == NULL
606 || test_bit(Faulty, &rdev->flags))
607 continue;
608 if (!test_bit(In_sync, &rdev->flags) &&
609 rdev->recovery_offset < this_sector + sectors)
610 continue;
611 if (test_bit(WriteMostly, &rdev->flags)) {
612 /* Don't balance among write-mostly, just
613 * use the first as a last resort */
614 if (best_dist_disk < 0) {
615 if (is_badblock(rdev, this_sector, sectors,
616 &first_bad, &bad_sectors)) {
617 if (first_bad <= this_sector)
618 /* Cannot use this */
619 continue;
620 best_good_sectors = first_bad - this_sector;
621 } else
622 best_good_sectors = sectors;
623 best_dist_disk = disk;
624 best_pending_disk = disk;
625 }
626 continue;
627 }
628 /* This is a reasonable device to use. It might
629 * even be best.
630 */
631 if (is_badblock(rdev, this_sector, sectors,
632 &first_bad, &bad_sectors)) {
633 if (best_dist < MaxSector)
634 /* already have a better device */
635 continue;
636 if (first_bad <= this_sector) {
637 /* cannot read here. If this is the 'primary'
638 * device, then we must not read beyond
639 * bad_sectors from another device..
640 */
641 bad_sectors -= (this_sector - first_bad);
642 if (choose_first && sectors > bad_sectors)
643 sectors = bad_sectors;
644 if (best_good_sectors > sectors)
645 best_good_sectors = sectors;
646
647 } else {
648 sector_t good_sectors = first_bad - this_sector;
649 if (good_sectors > best_good_sectors) {
650 best_good_sectors = good_sectors;
651 best_disk = disk;
652 }
653 if (choose_first)
654 break;
655 }
656 continue;
657 } else {
658 if ((sectors > best_good_sectors) && (best_disk >= 0))
659 best_disk = -1;
660 best_good_sectors = sectors;
661 }
662
663 if (best_disk >= 0)
664 /* At least two disks to choose from so failfast is OK */
665 set_bit(R1BIO_FailFast, &r1_bio->state);
666
667 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
668 has_nonrot_disk |= nonrot;
669 pending = atomic_read(&rdev->nr_pending);
670 dist = abs(this_sector - conf->mirrors[disk].head_position);
671 if (choose_first) {
672 best_disk = disk;
673 break;
674 }
675 /* Don't change to another disk for sequential reads */
676 if (conf->mirrors[disk].next_seq_sect == this_sector
677 || dist == 0) {
678 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
679 struct raid1_info *mirror = &conf->mirrors[disk];
680
681 best_disk = disk;
682 /*
683 * If buffered sequential IO size exceeds optimal
684 * iosize, check if there is an idle disk. If yes, choose
685 * the idle disk. read_balance could already have chosen an
686 * idle disk before noticing this is a sequential IO on
687 * this disk. This doesn't matter because that disk
688 * will go idle; next time it will be utilized once the
689 * first disk's IO size exceeds the optimal iosize. In
690 * this way, the iosize of the first disk will be at least
691 * the optimal iosize. The iosize of the second disk might be
692 * small, but that is not a big deal since when the second disk
693 * starts IO, the first disk is likely still busy.
694 */
695 if (nonrot && opt_iosize > 0 &&
696 mirror->seq_start != MaxSector &&
697 mirror->next_seq_sect > opt_iosize &&
698 mirror->next_seq_sect - opt_iosize >=
699 mirror->seq_start) {
700 choose_next_idle = 1;
701 continue;
702 }
703 break;
704 }
705
706 if (choose_next_idle)
707 continue;
708
709 if (min_pending > pending) {
710 min_pending = pending;
711 best_pending_disk = disk;
712 }
713
714 if (dist < best_dist) {
715 best_dist = dist;
716 best_dist_disk = disk;
717 }
718 }
719
720 /*
721 * If all disks are rotational, choose the closest disk. If any disk is
722 * non-rotational, choose the disk with the fewest pending requests even if that
723 * disk is rotational, which may or may not be optimal for raids with
724 * mixed rotational/non-rotational disks depending on workload.
725 */
726 if (best_disk == -1) {
727 if (has_nonrot_disk || min_pending == 0)
728 best_disk = best_pending_disk;
729 else
730 best_disk = best_dist_disk;
731 }
732
733 if (best_disk >= 0) {
734 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
735 if (!rdev)
736 goto retry;
737 atomic_inc(&rdev->nr_pending);
738 sectors = best_good_sectors;
739
740 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
741 conf->mirrors[best_disk].seq_start = this_sector;
742
743 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
744 }
745 rcu_read_unlock();
746 *max_sectors = sectors;
747
748 return best_disk;
749}
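/*
 * Illustrative caller pattern (a sketch, not new driver code): the read
 * path consumes both return values of read_balance(), roughly
 *
 *	int max_sectors;
 *	int rdisk = read_balance(conf, r1_bio, &max_sectors);
 *
 *	if (rdisk < 0)
 *		fail the read - no readable device
 *	else
 *		clip the bio to max_sectors and submit to conf->mirrors[rdisk]
 *
 * See raid1_read_request() further down for the real version of this.
 */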
750
751static int raid1_congested(struct mddev *mddev, int bits)
752{
753 struct r1conf *conf = mddev->private;
754 int i, ret = 0;
755
756 if ((bits & (1 << WB_async_congested)) &&
757 conf->pending_count >= max_queued_requests)
758 return 1;
759
760 rcu_read_lock();
761 for (i = 0; i < conf->raid_disks * 2; i++) {
762 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
763 if (rdev && !test_bit(Faulty, &rdev->flags)) {
764 struct request_queue *q = bdev_get_queue(rdev->bdev);
765
766 BUG_ON(!q);
767
768 /* Note the '|| 1' - when read_balance prefers
769 * non-congested targets, it can be removed
770 */
771 if ((bits & (1 << WB_async_congested)) || 1)
772 ret |= bdi_congested(q->backing_dev_info, bits);
773 else
774 ret &= bdi_congested(q->backing_dev_info, bits);
775 }
776 }
777 rcu_read_unlock();
778 return ret;
779}
780
781static void flush_bio_list(struct r1conf *conf, struct bio *bio)
782{
783 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
784 bitmap_unplug(conf->mddev->bitmap);
785 wake_up(&conf->wait_barrier);
786
787 while (bio) { /* submit pending writes */
788 struct bio *next = bio->bi_next;
789 struct md_rdev *rdev = (void *)bio->bi_disk;
790 bio->bi_next = NULL;
791 bio_set_dev(bio, rdev->bdev);
792 if (test_bit(Faulty, &rdev->flags)) {
793 bio_io_error(bio);
794 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
795 !blk_queue_discard(bio->bi_disk->queue)))
796 /* Just ignore it */
797 bio_endio(bio);
798 else
799 generic_make_request(bio);
800 bio = next;
801 }
802}
803
804static void flush_pending_writes(struct r1conf *conf)
805{
806 /* Any writes that have been queued but are awaiting
807 * bitmap updates get flushed here.
808 */
809 spin_lock_irq(&conf->device_lock);
810
811 if (conf->pending_bio_list.head) {
812 struct blk_plug plug;
813 struct bio *bio;
814
815 bio = bio_list_get(&conf->pending_bio_list);
816 conf->pending_count = 0;
817 spin_unlock_irq(&conf->device_lock);
818
819 /*
820 * As this is called in a wait_event() loop (see freeze_array),
821 * current->state might be TASK_UNINTERRUPTIBLE which will
822 * cause a warning when we prepare to wait again. As it is
823 * rare that this path is taken, it is perfectly safe to force
824 * us to go around the wait_event() loop again, so the warning
825 * is a false-positive. Silence the warning by resetting
826 * thread state
827 */
828 __set_current_state(TASK_RUNNING);
829 blk_start_plug(&plug);
830 flush_bio_list(conf, bio);
831 blk_finish_plug(&plug);
832 } else
833 spin_unlock_irq(&conf->device_lock);
834}
835
836/* Barriers....
837 * Sometimes we need to suspend IO while we do something else,
838 * either some resync/recovery, or reconfigure the array.
839 * To do this we raise a 'barrier'.
840 * The 'barrier' is a counter that can be raised multiple times
841 * to count how many activities are happening which preclude
842 * normal IO.
843 * We can only raise the barrier if there is no pending IO.
844 * i.e. if nr_pending == 0.
845 * We choose only to raise the barrier if no-one is waiting for the
846 * barrier to go down. This means that as soon as an IO request
847 * is ready, no other operations which require a barrier will start
848 * until the IO request has had a chance.
849 *
850 * So: regular IO calls 'wait_barrier'. When that returns there
851 * is no background IO happening. It must arrange to call
852 * allow_barrier when it has finished its IO.
853 * Background IO calls must call raise_barrier. Once that returns
854 * there is no normal IO happening. It must arrange to call
855 * lower_barrier when the particular background IO completes.
856 */
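/*
 * Illustrative pairing (a sketch of the contract described above, not
 * code that is called from here):
 *
 *	regular I/O:	wait_barrier(conf, sector);
 *			... submit the bio ...
 *			allow_barrier(conf, sector);
 *
 *	resync/recovery: raise_barrier(conf, sector);
 *			... issue sync I/O within this barrier unit ...
 *			lower_barrier(conf, sector);
 */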
857static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
858{
859 int idx = sector_to_idx(sector_nr);
860
861 spin_lock_irq(&conf->resync_lock);
862
863 /* Wait until no block IO is waiting */
864 wait_event_lock_irq(conf->wait_barrier,
865 !atomic_read(&conf->nr_waiting[idx]),
866 conf->resync_lock);
867
868 /* block any new IO from starting */
869 atomic_inc(&conf->barrier[idx]);
870 /*
871 * In raise_barrier() we firstly increase conf->barrier[idx] then
872 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
873 * increase conf->nr_pending[idx] then check conf->barrier[idx].
874 * A memory barrier here to make sure conf->nr_pending[idx] won't
875 * be fetched before conf->barrier[idx] is increased. Otherwise
876 * there will be a race between raise_barrier() and _wait_barrier().
877 */
878 smp_mb__after_atomic();
879
880 /* For these conditions we must wait:
881 * A: while the array is in frozen state
882 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
883 * exists in the corresponding I/O barrier bucket.
884 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning the current
885 * I/O barrier bucket has reached the maximum allowed resync count.
886 */
887 wait_event_lock_irq(conf->wait_barrier,
888 (!conf->array_frozen &&
889 !atomic_read(&conf->nr_pending[idx]) &&
890 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
891 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
892 conf->resync_lock);
893
894 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
895 atomic_dec(&conf->barrier[idx]);
896 spin_unlock_irq(&conf->resync_lock);
897 wake_up(&conf->wait_barrier);
898 return -EINTR;
899 }
900
901 atomic_inc(&conf->nr_sync_pending);
902 spin_unlock_irq(&conf->resync_lock);
903
904 return 0;
905}
906
907static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
908{
909 int idx = sector_to_idx(sector_nr);
910
911 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
912
913 atomic_dec(&conf->barrier[idx]);
914 atomic_dec(&conf->nr_sync_pending);
915 wake_up(&conf->wait_barrier);
916}
917
918static void _wait_barrier(struct r1conf *conf, int idx)
919{
920 /*
921 * We need to increase conf->nr_pending[idx] very early here,
922 * then raise_barrier() can be blocked when it waits for
923 * conf->nr_pending[idx] to be 0. Then we can avoid holding
924 * conf->resync_lock when there is no barrier raised in same
925 * barrier unit bucket. Also if the array is frozen, I/O
926 * should be blocked until array is unfrozen.
927 */
928 atomic_inc(&conf->nr_pending[idx]);
929 /*
930 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
931 * check conf->barrier[idx]. In raise_barrier() we firstly increase
932 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
933 * barrier is necessary here to make sure conf->barrier[idx] won't be
934 * fetched before conf->nr_pending[idx] is increased. Otherwise there
935 * will be a race between _wait_barrier() and raise_barrier().
936 */
937 smp_mb__after_atomic();
938
939 /*
940 * Don't worry about checking two atomic_t variables at the same time
941 * here. If, while we check conf->barrier[idx], the array is
942 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
943 * 0, it is safe to return and let the I/O continue. Because the
944 * array is frozen, all I/O returned here will eventually complete
945 * or be queued, so no race will happen. See the code comment in
946 * freeze_array().
947 */
948 if (!READ_ONCE(conf->array_frozen) &&
949 !atomic_read(&conf->barrier[idx]))
950 return;
951
952 /*
953 * After holding conf->resync_lock, conf->nr_pending[idx]
954 * should be decreased before waiting for barrier to drop.
955 * Otherwise, we may encounter a race condition because
956 * raise_barrier() might be waiting for conf->nr_pending[idx]
957 * to be 0 at the same time.
958 */
959 spin_lock_irq(&conf->resync_lock);
960 atomic_inc(&conf->nr_waiting[idx]);
961 atomic_dec(&conf->nr_pending[idx]);
962 /*
963 * In case freeze_array() is waiting for
964 * get_unqueued_pending() == extra
965 */
966 wake_up(&conf->wait_barrier);
967 /* Wait for the barrier in same barrier unit bucket to drop. */
968 wait_event_lock_irq(conf->wait_barrier,
969 !conf->array_frozen &&
970 !atomic_read(&conf->barrier[idx]),
971 conf->resync_lock);
972 atomic_inc(&conf->nr_pending[idx]);
973 atomic_dec(&conf->nr_waiting[idx]);
974 spin_unlock_irq(&conf->resync_lock);
975}
976
977static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
978{
979 int idx = sector_to_idx(sector_nr);
980
981 /*
982 * Very similar to _wait_barrier(). The difference is, for read
983 * I/O we don't need to wait for sync I/O, but if the whole array
984 * is frozen, the read I/O still has to wait until the array is
985 * unfrozen. Since there is no ordering requirement with
986 * conf->barrier[idx] here, memory barrier is unnecessary as well.
987 */
988 atomic_inc(&conf->nr_pending[idx]);
989
990 if (!READ_ONCE(conf->array_frozen))
991 return;
992
993 spin_lock_irq(&conf->resync_lock);
994 atomic_inc(&conf->nr_waiting[idx]);
995 atomic_dec(&conf->nr_pending[idx]);
996 /*
997 * In case freeze_array() is waiting for
998 * get_unqueued_pending() == extra
999 */
1000 wake_up(&conf->wait_barrier);
1001 /* Wait for array to be unfrozen */
1002 wait_event_lock_irq(conf->wait_barrier,
1003 !conf->array_frozen,
1004 conf->resync_lock);
1005 atomic_inc(&conf->nr_pending[idx]);
1006 atomic_dec(&conf->nr_waiting[idx]);
1007 spin_unlock_irq(&conf->resync_lock);
1008}
1009
1010static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1011{
1012 int idx = sector_to_idx(sector_nr);
1013
1014 _wait_barrier(conf, idx);
1015}
1016
1017static void _allow_barrier(struct r1conf *conf, int idx)
1018{
1019 atomic_dec(&conf->nr_pending[idx]);
1020 wake_up(&conf->wait_barrier);
1021}
1022
1023static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1024{
1025 int idx = sector_to_idx(sector_nr);
1026
1027 _allow_barrier(conf, idx);
1028}
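/*
 * Bucket accounting sketch (illustrative): the increment in wait_barrier()
 * and the decrement in allow_barrier() must land on the same
 * conf->nr_pending[] slot, so both are keyed by a sector from the same
 * barrier unit, e.g.
 *
 *	wait_barrier(conf, bio->bi_iter.bi_sector);
 *	... I/O in flight ...
 *	allow_barrier(conf, r1_bio->sector);	// same barrier unit
 *
 * This holds because raid1_make_request() never lets a bio cross a
 * barrier unit boundary (see align_to_barrier_unit_end()).
 */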
1029
1030/* conf->resync_lock should be held */
1031static int get_unqueued_pending(struct r1conf *conf)
1032{
1033 int idx, ret;
1034
1035 ret = atomic_read(&conf->nr_sync_pending);
1036 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1037 ret += atomic_read(&conf->nr_pending[idx]) -
1038 atomic_read(&conf->nr_queued[idx]);
1039
1040 return ret;
1041}
1042
1043static void freeze_array(struct r1conf *conf, int extra)
1044{
1045 /* Stop sync I/O and normal I/O and wait for everything to
1046 * go quiet.
1047 * This is called in two situations:
1048 * 1) management command handlers (reshape, remove disk, quiesce).
1049 * 2) one normal I/O request failed.
1050 *
1051 * After array_frozen is set to 1, new sync IO will be blocked at
1052 * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
1053 * or wait_read_barrier(). In-flight I/Os will either complete or be
1054 * queued. When everything goes quiet, only queued I/Os are left.
1055 *
1056 * Every in-flight I/O contributes to conf->nr_pending[idx], where idx is the
1057 * barrier bucket index which this I/O request hits. When all sync and
1058 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
1059 * of all conf->nr_queued[]. But normal I/O failure is an exception,
1060 * in handle_read_error(), we may call freeze_array() before trying to
1061 * fix the read error. In this case, the error read I/O is not queued,
1062 * so get_unqueued_pending() == 1.
1063 *
1064 * Therefore before this function returns, we need to wait until
1065 * get_unqueued_pending(conf) becomes equal to extra. For the
1066 * normal I/O context, extra is 1; in all other situations extra is 0.
1067 */
1068 spin_lock_irq(&conf->resync_lock);
1069 conf->array_frozen = 1;
1070 raid1_log(conf->mddev, "wait freeze");
1071 wait_event_lock_irq_cmd(
1072 conf->wait_barrier,
1073 get_unqueued_pending(conf) == extra,
1074 conf->resync_lock,
1075 flush_pending_writes(conf));
1076 spin_unlock_irq(&conf->resync_lock);
1077}
1078static void unfreeze_array(struct r1conf *conf)
1079{
1080 /* reverse the effect of the freeze */
1081 spin_lock_irq(&conf->resync_lock);
1082 conf->array_frozen = 0;
1083 spin_unlock_irq(&conf->resync_lock);
1084 wake_up(&conf->wait_barrier);
1085}
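/*
 * Illustrative usage sketch (not part of the driver logic itself):
 * management paths quiesce the array around a risky operation, roughly
 *
 *	freeze_array(conf, 0);		wait until only queued I/O remains
 *	... remove disk / reshape / repair ...
 *	unfreeze_array(conf);		let normal and sync I/O resume
 *
 * handle_read_error() is the case described above that passes extra == 1,
 * because the failed read it is handling is itself still counted as pending.
 */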
1086
1087static void alloc_behind_master_bio(struct r1bio *r1_bio,
1088 struct bio *bio)
1089{
1090 int size = bio->bi_iter.bi_size;
1091 unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1092 int i = 0;
1093 struct bio *behind_bio = NULL;
1094
1095 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1096 if (!behind_bio)
1097 return;
1098
1099 /* discard op, we don't support writezero/writesame yet */
1100 if (!bio_has_data(bio)) {
1101 behind_bio->bi_iter.bi_size = size;
1102 goto skip_copy;
1103 }
1104
1105 behind_bio->bi_write_hint = bio->bi_write_hint;
1106
1107 while (i < vcnt && size) {
1108 struct page *page;
1109 int len = min_t(int, PAGE_SIZE, size);
1110
1111 page = alloc_page(GFP_NOIO);
1112 if (unlikely(!page))
1113 goto free_pages;
1114
1115 bio_add_page(behind_bio, page, len, 0);
1116
1117 size -= len;
1118 i++;
1119 }
1120
1121 bio_copy_data(behind_bio, bio);
1122skip_copy:
1123 r1_bio->behind_master_bio = behind_bio;
1124 set_bit(R1BIO_BehindIO, &r1_bio->state);
1125
1126 return;
1127
1128free_pages:
1129 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1130 bio->bi_iter.bi_size);
1131 bio_free_pages(behind_bio);
1132 bio_put(behind_bio);
1133}
1134
1135struct raid1_plug_cb {
1136 struct blk_plug_cb cb;
1137 struct bio_list pending;
1138 int pending_cnt;
1139};
1140
1141static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1142{
1143 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1144 cb);
1145 struct mddev *mddev = plug->cb.data;
1146 struct r1conf *conf = mddev->private;
1147 struct bio *bio;
1148
1149 if (from_schedule || current->bio_list) {
1150 spin_lock_irq(&conf->device_lock);
1151 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1152 conf->pending_count += plug->pending_cnt;
1153 spin_unlock_irq(&conf->device_lock);
1154 wake_up(&conf->wait_barrier);
1155 md_wakeup_thread(mddev->thread);
1156 kfree(plug);
1157 return;
1158 }
1159
1160 /* we aren't scheduling, so we can do the write-out directly. */
1161 bio = bio_list_get(&plug->pending);
1162 flush_bio_list(conf, bio);
1163 kfree(plug);
1164}
1165
1166static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1167{
1168 r1_bio->master_bio = bio;
1169 r1_bio->sectors = bio_sectors(bio);
1170 r1_bio->state = 0;
1171 r1_bio->mddev = mddev;
1172 r1_bio->sector = bio->bi_iter.bi_sector;
1173}
1174
1175static inline struct r1bio *
1176alloc_r1bio(struct mddev *mddev, struct bio *bio)
1177{
1178 struct r1conf *conf = mddev->private;
1179 struct r1bio *r1_bio;
1180
1181 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1182 /* Ensure no bio records IO_BLOCKED */
1183 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1184 init_r1bio(r1_bio, mddev, bio);
1185 return r1_bio;
1186}
1187
1188static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1189 int max_read_sectors, struct r1bio *r1_bio)
1190{
1191 struct r1conf *conf = mddev->private;
1192 struct raid1_info *mirror;
1193 struct bio *read_bio;
1194 struct bitmap *bitmap = mddev->bitmap;
1195 const int op = bio_op(bio);
1196 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1197 int max_sectors;
1198 int rdisk;
1199 bool print_msg = !!r1_bio;
1200 char b[BDEVNAME_SIZE];
1201
1202 /*
1203 * If r1_bio is set, we are blocking the raid1d thread
1204 * so there is a tiny risk of deadlock. So ask for
1205 * emergency memory if needed.
1206 */
1207 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1208
1209 if (print_msg) {
1210 /* Need to get the block device name carefully */
1211 struct md_rdev *rdev;
1212 rcu_read_lock();
1213 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1214 if (rdev)
1215 bdevname(rdev->bdev, b);
1216 else
1217 strcpy(b, "???");
1218 rcu_read_unlock();
1219 }
1220
1221 /*
1222 * Still need barrier for READ in case the whole
1223 * array is frozen.
1224 */
1225 wait_read_barrier(conf, bio->bi_iter.bi_sector);
1226
1227 if (!r1_bio)
1228 r1_bio = alloc_r1bio(mddev, bio);
1229 else
1230 init_r1bio(r1_bio, mddev, bio);
1231 r1_bio->sectors = max_read_sectors;
1232
1233 /*
1234 * make_request() can abort the operation when read-ahead is being
1235 * used and no empty request is available.
1236 */
1237 rdisk = read_balance(conf, r1_bio, &max_sectors);
1238
1239 if (rdisk < 0) {
1240 /* couldn't find anywhere to read from */
1241 if (print_msg) {
1242 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1243 mdname(mddev),
1244 b,
1245 (unsigned long long)r1_bio->sector);
1246 }
1247 raid_end_bio_io(r1_bio);
1248 return;
1249 }
1250 mirror = conf->mirrors + rdisk;
1251
1252 if (print_msg)
1253 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1254 mdname(mddev),
1255 (unsigned long long)r1_bio->sector,
1256 bdevname(mirror->rdev->bdev, b));
1257
1258 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1259 bitmap) {
1260 /*
1261 * Reading from a write-mostly device must take care not to
1262 * over-take any writes that are 'behind'
1263 */
1264 raid1_log(mddev, "wait behind writes");
1265 wait_event(bitmap->behind_wait,
1266 atomic_read(&bitmap->behind_writes) == 0);
1267 }
1268
1269 if (max_sectors < bio_sectors(bio)) {
1270 struct bio *split = bio_split(bio, max_sectors,
1271 gfp, conf->bio_split);
1272 bio_chain(split, bio);
1273 generic_make_request(bio);
1274 bio = split;
1275 r1_bio->master_bio = bio;
1276 r1_bio->sectors = max_sectors;
1277 }
1278
1279 r1_bio->read_disk = rdisk;
1280
1281 read_bio = bio_clone_fast(bio, gfp, mddev->bio_set);
1282
1283 r1_bio->bios[rdisk] = read_bio;
1284
1285 read_bio->bi_iter.bi_sector = r1_bio->sector +
1286 mirror->rdev->data_offset;
1287 bio_set_dev(read_bio, mirror->rdev->bdev);
1288 read_bio->bi_end_io = raid1_end_read_request;
1289 bio_set_op_attrs(read_bio, op, do_sync);
1290 if (test_bit(FailFast, &mirror->rdev->flags) &&
1291 test_bit(R1BIO_FailFast, &r1_bio->state))
1292 read_bio->bi_opf |= MD_FAILFAST;
1293 read_bio->bi_private = r1_bio;
1294
1295 if (mddev->gendisk)
1296 trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
1297 disk_devt(mddev->gendisk), r1_bio->sector);
1298
1299 generic_make_request(read_bio);
1300}
1301
1302static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1303 int max_write_sectors)
1304{
1305 struct r1conf *conf = mddev->private;
1306 struct r1bio *r1_bio;
1307 int i, disks;
1308 struct bitmap *bitmap = mddev->bitmap;
1309 unsigned long flags;
1310 struct md_rdev *blocked_rdev;
1311 struct blk_plug_cb *cb;
1312 struct raid1_plug_cb *plug = NULL;
1313 int first_clone;
1314 int max_sectors;
1315
1316 if (mddev_is_clustered(mddev) &&
1317 md_cluster_ops->area_resyncing(mddev, WRITE,
1318 bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1319
1320 DEFINE_WAIT(w);
1321 for (;;) {
1322 prepare_to_wait(&conf->wait_barrier,
1323 &w, TASK_IDLE);
1324 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1325 bio->bi_iter.bi_sector,
1326 bio_end_sector(bio)))
1327 break;
1328 schedule();
1329 }
1330 finish_wait(&conf->wait_barrier, &w);
1331 }
1332
1333 /*
1334 * Register the new request and wait if the reconstruction
1335 * thread has put up a bar for new requests.
1336 * Continue immediately if no resync is active currently.
1337 */
1338 wait_barrier(conf, bio->bi_iter.bi_sector);
1339
1340 r1_bio = alloc_r1bio(mddev, bio);
1341 r1_bio->sectors = max_write_sectors;
1342
1343 if (conf->pending_count >= max_queued_requests) {
1344 md_wakeup_thread(mddev->thread);
1345 raid1_log(mddev, "wait queued");
1346 wait_event(conf->wait_barrier,
1347 conf->pending_count < max_queued_requests);
1348 }
1349 /* first select target devices under rcu_lock and
1350 * inc refcount on their rdev. Record them by setting
1351 * bios[x] to bio
1352 * If there are known/acknowledged bad blocks on any device on
1353 * which we have seen a write error, we want to avoid writing those
1354 * blocks.
1355 * This potentially requires several writes to write around
1356 * the bad blocks. Each set of writes gets its own r1bio
1357 * with a set of bios attached.
1358 */
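	/*
	 * Worked example (illustrative): for a 100-sector write where one
	 * mirror reports an acknowledged bad range starting 40 sectors in,
	 * max_sectors is narrowed to 40 below; bio_split() then carves off
	 * the first 40 sectors for this r1bio and resubmits the remainder,
	 * which on its next pass simply skips that mirror for the bad range.
	 */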
1359
1360 disks = conf->raid_disks * 2;
1361 retry_write:
1362 blocked_rdev = NULL;
1363 rcu_read_lock();
1364 max_sectors = r1_bio->sectors;
1365 for (i = 0; i < disks; i++) {
1366 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1367 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1368 atomic_inc(&rdev->nr_pending);
1369 blocked_rdev = rdev;
1370 break;
1371 }
1372 r1_bio->bios[i] = NULL;
1373 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1374 if (i < conf->raid_disks)
1375 set_bit(R1BIO_Degraded, &r1_bio->state);
1376 continue;
1377 }
1378
1379 atomic_inc(&rdev->nr_pending);
1380 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1381 sector_t first_bad;
1382 int bad_sectors;
1383 int is_bad;
1384
1385 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1386 &first_bad, &bad_sectors);
1387 if (is_bad < 0) {
1388 /* mustn't write here until the bad block is
1389 * acknowledged */
1390 set_bit(BlockedBadBlocks, &rdev->flags);
1391 blocked_rdev = rdev;
1392 break;
1393 }
1394 if (is_bad && first_bad <= r1_bio->sector) {
1395 /* Cannot write here at all */
1396 bad_sectors -= (r1_bio->sector - first_bad);
1397 if (bad_sectors < max_sectors)
1398 /* mustn't write more than bad_sectors
1399 * to other devices yet
1400 */
1401 max_sectors = bad_sectors;
1402 rdev_dec_pending(rdev, mddev);
1403 /* We don't set R1BIO_Degraded as that
1404 * only applies if the disk is
1405 * missing, so it might be re-added,
1406 * and we want to know to recover this
1407 * chunk.
1408 * In this case the device is here,
1409 * and the fact that this chunk is not
1410 * in-sync is recorded in the bad
1411 * block log
1412 */
1413 continue;
1414 }
1415 if (is_bad) {
1416 int good_sectors = first_bad - r1_bio->sector;
1417 if (good_sectors < max_sectors)
1418 max_sectors = good_sectors;
1419 }
1420 }
1421 r1_bio->bios[i] = bio;
1422 }
1423 rcu_read_unlock();
1424
1425 if (unlikely(blocked_rdev)) {
1426 /* Wait for this device to become unblocked */
1427 int j;
1428
1429 for (j = 0; j < i; j++)
1430 if (r1_bio->bios[j])
1431 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1432 r1_bio->state = 0;
1433 allow_barrier(conf, bio->bi_iter.bi_sector);
1434 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1435 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1436 wait_barrier(conf, bio->bi_iter.bi_sector);
1437 goto retry_write;
1438 }
1439
1440 if (max_sectors < bio_sectors(bio)) {
1441 struct bio *split = bio_split(bio, max_sectors,
1442 GFP_NOIO, conf->bio_split);
1443 bio_chain(split, bio);
1444 generic_make_request(bio);
1445 bio = split;
1446 r1_bio->master_bio = bio;
1447 r1_bio->sectors = max_sectors;
1448 }
1449
1450 atomic_set(&r1_bio->remaining, 1);
1451 atomic_set(&r1_bio->behind_remaining, 0);
1452
1453 first_clone = 1;
1454
1455 for (i = 0; i < disks; i++) {
1456 struct bio *mbio = NULL;
1457 if (!r1_bio->bios[i])
1458 continue;
1459
1460
1461 if (first_clone) {
1462 /* do behind I/O?
1463 * Not if there are too many, or we cannot
1464 * allocate memory, or a reader on a WriteMostly
1465 * device is waiting for behind writes to flush */
1466 if (bitmap &&
1467 (atomic_read(&bitmap->behind_writes)
1468 < mddev->bitmap_info.max_write_behind) &&
1469 !waitqueue_active(&bitmap->behind_wait)) {
1470 alloc_behind_master_bio(r1_bio, bio);
1471 }
1472
1473 bitmap_startwrite(bitmap, r1_bio->sector,
1474 r1_bio->sectors,
1475 test_bit(R1BIO_BehindIO,
1476 &r1_bio->state));
1477 first_clone = 0;
1478 }
1479
1480 if (r1_bio->behind_master_bio)
1481 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1482 GFP_NOIO, mddev->bio_set);
1483 else
1484 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
1485
1486 if (r1_bio->behind_master_bio) {
1487 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1488 atomic_inc(&r1_bio->behind_remaining);
1489 }
1490
1491 r1_bio->bios[i] = mbio;
1492
1493 mbio->bi_iter.bi_sector = (r1_bio->sector +
1494 conf->mirrors[i].rdev->data_offset);
1495 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1496 mbio->bi_end_io = raid1_end_write_request;
1497 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1498 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1499 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1500 conf->raid_disks - mddev->degraded > 1)
1501 mbio->bi_opf |= MD_FAILFAST;
1502 mbio->bi_private = r1_bio;
1503
1504 atomic_inc(&r1_bio->remaining);
1505
1506 if (mddev->gendisk)
1507 trace_block_bio_remap(mbio->bi_disk->queue,
1508 mbio, disk_devt(mddev->gendisk),
1509 r1_bio->sector);
1510 /* flush_pending_writes() needs access to the rdev so...*/
1511 mbio->bi_disk = (void *)conf->mirrors[i].rdev;
1512
1513 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1514 if (cb)
1515 plug = container_of(cb, struct raid1_plug_cb, cb);
1516 else
1517 plug = NULL;
1518 if (plug) {
1519 bio_list_add(&plug->pending, mbio);
1520 plug->pending_cnt++;
1521 } else {
1522 spin_lock_irqsave(&conf->device_lock, flags);
1523 bio_list_add(&conf->pending_bio_list, mbio);
1524 conf->pending_count++;
1525 spin_unlock_irqrestore(&conf->device_lock, flags);
1526 md_wakeup_thread(mddev->thread);
1527 }
1528 }
1529
1530 r1_bio_write_done(r1_bio);
1531
1532 /* In case raid1d snuck in to freeze_array */
1533 wake_up(&conf->wait_barrier);
1534}
1535
1536static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1537{
1538 sector_t sectors;
1539
1540 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1541 md_flush_request(mddev, bio);
1542 return true;
1543 }
1544
1545 /*
1546 * There is a limit to the maximum size, but
1547 * the read/write handler might find a lower limit
1548 * due to bad blocks. To avoid multiple splits,
1549 * we pass the maximum number of sectors down
1550 * and let the lower level perform the split.
1551 */
1552 sectors = align_to_barrier_unit_end(
1553 bio->bi_iter.bi_sector, bio_sectors(bio));
1554
1555 if (bio_data_dir(bio) == READ)
1556 raid1_read_request(mddev, bio, sectors, NULL);
1557 else {
1558 if (!md_write_start(mddev,bio))
1559 return false;
1560 raid1_write_request(mddev, bio, sectors);
1561 }
1562 return true;
1563}
1564
1565static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1566{
1567 struct r1conf *conf = mddev->private;
1568 int i;
1569
1570 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1571 conf->raid_disks - mddev->degraded);
1572 rcu_read_lock();
1573 for (i = 0; i < conf->raid_disks; i++) {
1574 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1575 seq_printf(seq, "%s",
1576 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1577 }
1578 rcu_read_unlock();
1579 seq_printf(seq, "]");
1580}
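/*
 * Example of the produced output (illustrative): for a two-disk array
 * with both members in sync, the /proc/mdstat line contains
 * " [2/2] [UU]"; a degraded array missing its second member shows
 * " [2/1] [U_]".
 */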
1581
1582static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1583{
1584 char b[BDEVNAME_SIZE];
1585 struct r1conf *conf = mddev->private;
1586 unsigned long flags;
1587
1588 /*
1589 * If it is not operational, then we have already marked it as dead
1590 * else if it is the last working disks, ignore the error, let the
1591 * next level up know.
1592 * else mark the drive as failed
1593 */
1594 spin_lock_irqsave(&conf->device_lock, flags);
1595 if (test_bit(In_sync, &rdev->flags)
1596 && (conf->raid_disks - mddev->degraded) == 1) {
1597 /*
1598 * Don't fail the drive, act as though we were just a
1599 * normal single drive.
1600 * However don't try a recovery from this drive as
1601 * it is very likely to fail.
1602 */
1603 conf->recovery_disabled = mddev->recovery_disabled;
1604 spin_unlock_irqrestore(&conf->device_lock, flags);
1605 return;
1606 }
1607 set_bit(Blocked, &rdev->flags);
1608 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1609 mddev->degraded++;
1610 set_bit(Faulty, &rdev->flags);
1611 } else
1612 set_bit(Faulty, &rdev->flags);
1613 spin_unlock_irqrestore(&conf->device_lock, flags);
1614 /*
1615 * if recovery is running, make sure it aborts.
1616 */
1617 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1618 set_mask_bits(&mddev->sb_flags, 0,
1619 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1620 pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1621 "md/raid1:%s: Operation continuing on %d devices.\n",
1622 mdname(mddev), bdevname(rdev->bdev, b),
1623 mdname(mddev), conf->raid_disks - mddev->degraded);
1624}
1625
1626static void print_conf(struct r1conf *conf)
1627{
1628 int i;
1629
1630 pr_debug("RAID1 conf printout:\n");
1631 if (!conf) {
1632 pr_debug("(!conf)\n");
1633 return;
1634 }
1635 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1636 conf->raid_disks);
1637
1638 rcu_read_lock();
1639 for (i = 0; i < conf->raid_disks; i++) {
1640 char b[BDEVNAME_SIZE];
1641 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1642 if (rdev)
1643 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1644 i, !test_bit(In_sync, &rdev->flags),
1645 !test_bit(Faulty, &rdev->flags),
1646 bdevname(rdev->bdev,b));
1647 }
1648 rcu_read_unlock();
1649}
1650
1651static void close_sync(struct r1conf *conf)
1652{
1653 int idx;
1654
1655 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1656 _wait_barrier(conf, idx);
1657 _allow_barrier(conf, idx);
1658 }
1659
1660 mempool_destroy(conf->r1buf_pool);
1661 conf->r1buf_pool = NULL;
1662}
1663
1664static int raid1_spare_active(struct mddev *mddev)
1665{
1666 int i;
1667 struct r1conf *conf = mddev->private;
1668 int count = 0;
1669 unsigned long flags;
1670
1671 /*
1672 * Find all failed disks within the RAID1 configuration
1673 * and mark them readable.
1674 * Called under mddev lock, so rcu protection not needed.
1675 * device_lock used to avoid races with raid1_end_read_request
1676 * which expects 'In_sync' flags and ->degraded to be consistent.
1677 */
1678 spin_lock_irqsave(&conf->device_lock, flags);
1679 for (i = 0; i < conf->raid_disks; i++) {
1680 struct md_rdev *rdev = conf->mirrors[i].rdev;
1681 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1682 if (repl
1683 && !test_bit(Candidate, &repl->flags)
1684 && repl->recovery_offset == MaxSector
1685 && !test_bit(Faulty, &repl->flags)
1686 && !test_and_set_bit(In_sync, &repl->flags)) {
1687 /* replacement has just become active */
1688 if (!rdev ||
1689 !test_and_clear_bit(In_sync, &rdev->flags))
1690 count++;
1691 if (rdev) {
1692 /* Replaced device not technically
1693 * faulty, but we need to be sure
1694 * it gets removed and never re-added
1695 */
1696 set_bit(Faulty, &rdev->flags);
1697 sysfs_notify_dirent_safe(
1698 rdev->sysfs_state);
1699 }
1700 }
1701 if (rdev
1702 && rdev->recovery_offset == MaxSector
1703 && !test_bit(Faulty, &rdev->flags)
1704 && !test_and_set_bit(In_sync, &rdev->flags)) {
1705 count++;
1706 sysfs_notify_dirent_safe(rdev->sysfs_state);
1707 }
1708 }
1709 mddev->degraded -= count;
1710 spin_unlock_irqrestore(&conf->device_lock, flags);
1711
1712 print_conf(conf);
1713 return count;
1714}
1715
1716static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1717{
1718 struct r1conf *conf = mddev->private;
1719 int err = -EEXIST;
1720 int mirror = 0;
1721 struct raid1_info *p;
1722 int first = 0;
1723 int last = conf->raid_disks - 1;
1724
1725 if (mddev->recovery_disabled == conf->recovery_disabled)
1726 return -EBUSY;
1727
1728 if (md_integrity_add_rdev(rdev, mddev))
1729 return -ENXIO;
1730
1731 if (rdev->raid_disk >= 0)
1732 first = last = rdev->raid_disk;
1733
1734 /*
1735 * find the disk ... but prefer rdev->saved_raid_disk
1736 * if possible.
1737 */
1738 if (rdev->saved_raid_disk >= 0 &&
1739 rdev->saved_raid_disk >= first &&
1740 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1741 first = last = rdev->saved_raid_disk;
1742
1743 for (mirror = first; mirror <= last; mirror++) {
1744 p = conf->mirrors+mirror;
1745 if (!p->rdev) {
1746
1747 if (mddev->gendisk)
1748 disk_stack_limits(mddev->gendisk, rdev->bdev,
1749 rdev->data_offset << 9);
1750
1751 p->head_position = 0;
1752 rdev->raid_disk = mirror;
1753 err = 0;
1754 /* As all devices are equivalent, we don't need a full recovery
1755 * if this drive was recently a member of the array
1756 */
1757 if (rdev->saved_raid_disk < 0)
1758 conf->fullsync = 1;
1759 rcu_assign_pointer(p->rdev, rdev);
1760 break;
1761 }
1762 if (test_bit(WantReplacement, &p->rdev->flags) &&
1763 p[conf->raid_disks].rdev == NULL) {
1764 /* Add this device as a replacement */
1765 clear_bit(In_sync, &rdev->flags);
1766 set_bit(Replacement, &rdev->flags);
1767 rdev->raid_disk = mirror;
1768 err = 0;
1769 conf->fullsync = 1;
1770 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1771 break;
1772 }
1773 }
1774 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1775 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1776 print_conf(conf);
1777 return err;
1778}
1779
1780static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1781{
1782 struct r1conf *conf = mddev->private;
1783 int err = 0;
1784 int number = rdev->raid_disk;
1785 struct raid1_info *p = conf->mirrors + number;
1786
1787 if (rdev != p->rdev)
1788 p = conf->mirrors + conf->raid_disks + number;
1789
1790 print_conf(conf);
1791 if (rdev == p->rdev) {
1792 if (test_bit(In_sync, &rdev->flags) ||
1793 atomic_read(&rdev->nr_pending)) {
1794 err = -EBUSY;
1795 goto abort;
1796 }
1797 /* Only remove non-faulty devices if recovery
1798 * is not possible.
1799 */
1800 if (!test_bit(Faulty, &rdev->flags) &&
1801 mddev->recovery_disabled != conf->recovery_disabled &&
1802 mddev->degraded < conf->raid_disks) {
1803 err = -EBUSY;
1804 goto abort;
1805 }
1806 p->rdev = NULL;
1807 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1808 synchronize_rcu();
1809 if (atomic_read(&rdev->nr_pending)) {
1810 /* lost the race, try later */
1811 err = -EBUSY;
1812 p->rdev = rdev;
1813 goto abort;
1814 }
1815 }
1816 if (conf->mirrors[conf->raid_disks + number].rdev) {
1817 /* We just removed a device that is being replaced.
1818 * Move down the replacement. We drain all IO before
1819 * doing this to avoid confusion.
1820 */
1821 struct md_rdev *repl =
1822 conf->mirrors[conf->raid_disks + number].rdev;
1823 freeze_array(conf, 0);
1824 if (atomic_read(&repl->nr_pending)) {
1825 /* This means some queued IO on retry_list still
1826 * holds repl. Thus, we cannot set the replacement
1827 * to NULL; that would risk an rdev NULL pointer
1828 * dereference in sync_request_write and
1829 * handle_write_finished.
1830 */
1831 err = -EBUSY;
1832 unfreeze_array(conf);
1833 goto abort;
1834 }
1835 clear_bit(Replacement, &repl->flags);
1836 p->rdev = repl;
1837 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1838 unfreeze_array(conf);
1839 }
1840
1841 clear_bit(WantReplacement, &rdev->flags);
1842 err = md_integrity_register(mddev);
1843 }
1844abort:
1845
1846 print_conf(conf);
1847 return err;
1848}
1849
1850static void end_sync_read(struct bio *bio)
1851{
1852 struct r1bio *r1_bio = get_resync_r1bio(bio);
1853
1854 update_head_pos(r1_bio->read_disk, r1_bio);
1855
1856 /*
1857 * we have read a block, now it needs to be re-written,
1858 * or re-read if the read failed.
1859 * We don't do much here, just schedule handling by raid1d
1860 */
1861 if (!bio->bi_status)
1862 set_bit(R1BIO_Uptodate, &r1_bio->state);
1863
1864 if (atomic_dec_and_test(&r1_bio->remaining))
1865 reschedule_retry(r1_bio);
1866}
1867
1868static void end_sync_write(struct bio *bio)
1869{
1870 int uptodate = !bio->bi_status;
1871 struct r1bio *r1_bio = get_resync_r1bio(bio);
1872 struct mddev *mddev = r1_bio->mddev;
1873 struct r1conf *conf = mddev->private;
1874 sector_t first_bad;
1875 int bad_sectors;
1876 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1877
1878 if (!uptodate) {
1879 sector_t sync_blocks = 0;
1880 sector_t s = r1_bio->sector;
1881 long sectors_to_go = r1_bio->sectors;
1882 /* make sure these bits don't get cleared. */
1883 do {
1884 bitmap_end_sync(mddev->bitmap, s,
1885 &sync_blocks, 1);
1886 s += sync_blocks;
1887 sectors_to_go -= sync_blocks;
1888 } while (sectors_to_go > 0);
1889 set_bit(WriteErrorSeen, &rdev->flags);
1890 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1891 set_bit(MD_RECOVERY_NEEDED, &
1892 mddev->recovery);
1893 set_bit(R1BIO_WriteError, &r1_bio->state);
1894 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1895 &first_bad, &bad_sectors) &&
1896 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1897 r1_bio->sector,
1898 r1_bio->sectors,
1899 &first_bad, &bad_sectors)
1900 )
1901 set_bit(R1BIO_MadeGood, &r1_bio->state);
1902
1903 if (atomic_dec_and_test(&r1_bio->remaining)) {
1904 int s = r1_bio->sectors;
1905 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1906 test_bit(R1BIO_WriteError, &r1_bio->state))
1907 reschedule_retry(r1_bio);
1908 else {
1909 put_buf(r1_bio);
1910 md_done_sync(mddev, s, uptodate);
1911 }
1912 }
1913}
1914
1915static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1916 int sectors, struct page *page, int rw)
1917{
1918 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1919 /* success */
1920 return 1;
1921 if (rw == WRITE) {
1922 set_bit(WriteErrorSeen, &rdev->flags);
1923 if (!test_and_set_bit(WantReplacement,
1924 &rdev->flags))
1925 set_bit(MD_RECOVERY_NEEDED, &
1926 rdev->mddev->recovery);
1927 }
1928 /* need to record an error - either for the block or the device */
1929 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1930 md_error(rdev->mddev, rdev);
1931 return 0;
1932}
1933
1934static int fix_sync_read_error(struct r1bio *r1_bio)
1935{
1936 /* Try some synchronous reads of other devices to get
1937 * good data, much like with normal read errors. Only
1938 * read into the pages we already have so we don't
1939 * need to re-issue the read request.
1940 * We don't need to freeze the array, because being in an
1941 * active sync request, there is no normal IO, and
1942 * no overlapping syncs.
1943 * We don't need to check is_badblock() again as we
1944 * made sure that anything with a bad block in range
1945 * will have bi_end_io clear.
1946 */
1947 struct mddev *mddev = r1_bio->mddev;
1948 struct r1conf *conf = mddev->private;
1949 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1950 struct page **pages = get_resync_pages(bio)->pages;
1951 sector_t sect = r1_bio->sector;
1952 int sectors = r1_bio->sectors;
1953 int idx = 0;
1954 struct md_rdev *rdev;
1955
1956 rdev = conf->mirrors[r1_bio->read_disk].rdev;
1957 if (test_bit(FailFast, &rdev->flags)) {
1958 /* Don't try recovering from here - just fail it
1959 * ... unless it is the last working device of course */
1960 md_error(mddev, rdev);
1961 if (test_bit(Faulty, &rdev->flags))
1962 /* Don't try to read from here, but make sure
1963 * put_buf does its thing
1964 */
1965 bio->bi_end_io = end_sync_write;
1966 }
1967
1968 while(sectors) {
1969 int s = sectors;
1970 int d = r1_bio->read_disk;
1971 int success = 0;
1972 int start;
1973
1974 if (s > (PAGE_SIZE>>9))
1975 s = PAGE_SIZE >> 9;
1976 do {
1977 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1978 /* No rcu protection needed here; devices
1979 * can only be removed when no resync is
1980 * active, and resync is currently active
1981 */
1982 rdev = conf->mirrors[d].rdev;
1983 if (sync_page_io(rdev, sect, s<<9,
1984 pages[idx],
1985 REQ_OP_READ, 0, false)) {
1986 success = 1;
1987 break;
1988 }
1989 }
1990 d++;
1991 if (d == conf->raid_disks * 2)
1992 d = 0;
1993 } while (!success && d != r1_bio->read_disk);
1994
1995 if (!success) {
1996 char b[BDEVNAME_SIZE];
1997 int abort = 0;
1998 /* Cannot read from anywhere, this block is lost.
1999 * Record a bad block on each device. If that doesn't
2000 * work just disable and interrupt the recovery.
2001 * Don't fail devices as that won't really help.
2002 */
2003 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2004 mdname(mddev), bio_devname(bio, b),
2005 (unsigned long long)r1_bio->sector);
2006 for (d = 0; d < conf->raid_disks * 2; d++) {
2007 rdev = conf->mirrors[d].rdev;
2008 if (!rdev || test_bit(Faulty, &rdev->flags))
2009 continue;
2010 if (!rdev_set_badblocks(rdev, sect, s, 0))
2011 abort = 1;
2012 }
2013 if (abort) {
2014 conf->recovery_disabled =
2015 mddev->recovery_disabled;
2016 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2017 md_done_sync(mddev, r1_bio->sectors, 0);
2018 put_buf(r1_bio);
2019 return 0;
2020 }
2021 /* Try next page */
2022 sectors -= s;
2023 sect += s;
2024 idx++;
2025 continue;
2026 }
2027
2028 start = d;
2029 /* write it back and re-read */
2030 while (d != r1_bio->read_disk) {
2031 if (d == 0)
2032 d = conf->raid_disks * 2;
2033 d--;
2034 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2035 continue;
2036 rdev = conf->mirrors[d].rdev;
2037 if (r1_sync_page_io(rdev, sect, s,
2038 pages[idx],
2039 WRITE) == 0) {
2040 r1_bio->bios[d]->bi_end_io = NULL;
2041 rdev_dec_pending(rdev, mddev);
2042 }
2043 }
2044 d = start;
2045 while (d != r1_bio->read_disk) {
2046 if (d == 0)
2047 d = conf->raid_disks * 2;
2048 d--;
2049 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2050 continue;
2051 rdev = conf->mirrors[d].rdev;
2052 if (r1_sync_page_io(rdev, sect, s,
2053 pages[idx],
2054 READ) != 0)
2055 atomic_add(s, &rdev->corrected_errors);
2056 }
2057 sectors -= s;
2058 sect += s;
2059 idx ++;
2060 }
2061 set_bit(R1BIO_Uptodate, &r1_bio->state);
2062 bio->bi_status = 0;
2063 return 1;
2064}
2065
2066static void process_checks(struct r1bio *r1_bio)
2067{
2068 /* We have read all readable devices. If we haven't
2069 * got the block, then there is no hope left.
2070 * If we have, then we want to do a comparison
2071 * and skip the write if everything is the same.
2072 * If any blocks failed to read, then we need to
2073 * attempt an over-write
2074 */
2075 struct mddev *mddev = r1_bio->mddev;
2076 struct r1conf *conf = mddev->private;
2077 int primary;
2078 int i;
2079 int vcnt;
2080
2081 /* Fix variable parts of all bios */
2082 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2083 for (i = 0; i < conf->raid_disks * 2; i++) {
2084 blk_status_t status;
2085 struct bio *b = r1_bio->bios[i];
2086 struct resync_pages *rp = get_resync_pages(b);
2087 if (b->bi_end_io != end_sync_read)
2088 continue;
2089 /* fixup the bio for reuse, but preserve errno */
2090 status = b->bi_status;
2091 bio_reset(b);
2092 b->bi_status = status;
2093 b->bi_iter.bi_sector = r1_bio->sector +
2094 conf->mirrors[i].rdev->data_offset;
2095 bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2096 b->bi_end_io = end_sync_read;
2097 rp->raid_bio = r1_bio;
2098 b->bi_private = rp;
2099
2100 /* initialize bvec table again */
2101 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2102 }
2103 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2104 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2105 !r1_bio->bios[primary]->bi_status) {
2106 r1_bio->bios[primary]->bi_end_io = NULL;
2107 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2108 break;
2109 }
2110 r1_bio->read_disk = primary;
2111 for (i = 0; i < conf->raid_disks * 2; i++) {
2112 int j;
2113 struct bio *pbio = r1_bio->bios[primary];
2114 struct bio *sbio = r1_bio->bios[i];
2115 blk_status_t status = sbio->bi_status;
2116 struct page **ppages = get_resync_pages(pbio)->pages;
2117 struct page **spages = get_resync_pages(sbio)->pages;
2118 struct bio_vec *bi;
2119 int page_len[RESYNC_PAGES] = { 0 };
2120
2121 if (sbio->bi_end_io != end_sync_read)
2122 continue;
2123 /* Now we can 'fixup' the error value */
2124 sbio->bi_status = 0;
2125
2126 bio_for_each_segment_all(bi, sbio, j)
2127 page_len[j] = bi->bv_len;
2128
2129 if (!status) {
2130 for (j = vcnt; j-- ; ) {
2131 if (memcmp(page_address(ppages[j]),
2132 page_address(spages[j]),
2133 page_len[j]))
2134 break;
2135 }
2136 } else
2137 j = 0;
2138 if (j >= 0)
2139 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2140 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2141 && !status)) {
2142 /* No need to write to this device. */
2143 sbio->bi_end_io = NULL;
2144 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2145 continue;
2146 }
2147
2148 bio_copy_data(sbio, pbio);
2149 }
2150}
2151
2152static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2153{
2154 struct r1conf *conf = mddev->private;
2155 int i;
2156 int disks = conf->raid_disks * 2;
2157 struct bio *wbio;
2158
2159 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2160 /* ouch - failed to read all of that. */
2161 if (!fix_sync_read_error(r1_bio))
2162 return;
2163
2164 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2165 process_checks(r1_bio);
2166
2167 /*
2168 * schedule writes
2169 */
2170 atomic_set(&r1_bio->remaining, 1);
2171 for (i = 0; i < disks ; i++) {
2172 wbio = r1_bio->bios[i];
2173 if (wbio->bi_end_io == NULL ||
2174 (wbio->bi_end_io == end_sync_read &&
2175 (i == r1_bio->read_disk ||
2176 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2177 continue;
2178 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2179 continue;
2180
2181 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2182 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2183 wbio->bi_opf |= MD_FAILFAST;
2184
2185 wbio->bi_end_io = end_sync_write;
2186 atomic_inc(&r1_bio->remaining);
2187 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2188
2189 generic_make_request(wbio);
2190 }
2191
2192 if (atomic_dec_and_test(&r1_bio->remaining)) {
2193 /* if we're here, all write(s) have completed, so clean up */
2194 int s = r1_bio->sectors;
2195 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2196 test_bit(R1BIO_WriteError, &r1_bio->state))
2197 reschedule_retry(r1_bio);
2198 else {
2199 put_buf(r1_bio);
2200 md_done_sync(mddev, s, 1);
2201 }
2202 }
2203}
2204
2205/*
2206 * This is a kernel thread which:
2207 *
2208 * 1. Retries failed read operations on working mirrors.
2209 * 2. Updates the raid superblock when problems are encountered.
2210 * 3. Performs writes following reads for array synchronising.
2211 */
2212
2213static void fix_read_error(struct r1conf *conf, int read_disk,
2214 sector_t sect, int sectors)
2215{
2216 struct mddev *mddev = conf->mddev;
2217 while(sectors) {
2218 int s = sectors;
2219 int d = read_disk;
2220 int success = 0;
2221 int start;
2222 struct md_rdev *rdev;
2223
2224 if (s > (PAGE_SIZE>>9))
2225 s = PAGE_SIZE >> 9;
2226
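/*
 * Try to read this chunk (at most one page) from each mirror in
 * turn, starting with the device that reported the error, until a
 * read succeeds or every device has been tried.
 */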
2227 do {
2228 sector_t first_bad;
2229 int bad_sectors;
2230
2231 rcu_read_lock();
2232 rdev = rcu_dereference(conf->mirrors[d].rdev);
2233 if (rdev &&
2234 (test_bit(In_sync, &rdev->flags) ||
2235 (!test_bit(Faulty, &rdev->flags) &&
2236 rdev->recovery_offset >= sect + s)) &&
2237 is_badblock(rdev, sect, s,
2238 &first_bad, &bad_sectors) == 0) {
2239 atomic_inc(&rdev->nr_pending);
2240 rcu_read_unlock();
2241 if (sync_page_io(rdev, sect, s<<9,
2242 conf->tmppage, REQ_OP_READ, 0, false))
2243 success = 1;
2244 rdev_dec_pending(rdev, mddev);
2245 if (success)
2246 break;
2247 } else
2248 rcu_read_unlock();
2249 d++;
2250 if (d == conf->raid_disks * 2)
2251 d = 0;
2252 } while (!success && d != read_disk);
2253
2254 if (!success) {
2255 /* Cannot read from anywhere - mark it bad */
2256 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2257 if (!rdev_set_badblocks(rdev, sect, s, 0))
2258 md_error(mddev, rdev);
2259 break;
2260 }
2261 /* write it back and re-read */
2262 start = d;
2263 while (d != read_disk) {
2264 if (d==0)
2265 d = conf->raid_disks * 2;
2266 d--;
2267 rcu_read_lock();
2268 rdev = rcu_dereference(conf->mirrors[d].rdev);
2269 if (rdev &&
2270 !test_bit(Faulty, &rdev->flags)) {
2271 atomic_inc(&rdev->nr_pending);
2272 rcu_read_unlock();
2273 r1_sync_page_io(rdev, sect, s,
2274 conf->tmppage, WRITE);
2275 rdev_dec_pending(rdev, mddev);
2276 } else
2277 rcu_read_unlock();
2278 }
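/*
 * Now re-read the chunk from each of those devices to verify the
 * rewritten data is readable, and account the corrected sectors.
 */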
2279 d = start;
2280 while (d != read_disk) {
2281 char b[BDEVNAME_SIZE];
2282 if (d==0)
2283 d = conf->raid_disks * 2;
2284 d--;
2285 rcu_read_lock();
2286 rdev = rcu_dereference(conf->mirrors[d].rdev);
2287 if (rdev &&
2288 !test_bit(Faulty, &rdev->flags)) {
2289 atomic_inc(&rdev->nr_pending);
2290 rcu_read_unlock();
2291 if (r1_sync_page_io(rdev, sect, s,
2292 conf->tmppage, READ)) {
2293 atomic_add(s, &rdev->corrected_errors);
2294 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2295 mdname(mddev), s,
2296 (unsigned long long)(sect +
2297 rdev->data_offset),
2298 bdevname(rdev->bdev, b));
2299 }
2300 rdev_dec_pending(rdev, mddev);
2301 } else
2302 rcu_read_unlock();
2303 }
2304 sectors -= s;
2305 sect += s;
2306 }
2307}
2308
2309static int narrow_write_error(struct r1bio *r1_bio, int i)
2310{
2311 struct mddev *mddev = r1_bio->mddev;
2312 struct r1conf *conf = mddev->private;
2313 struct md_rdev *rdev = conf->mirrors[i].rdev;
2314
2315 /* bio has the data to be written to device 'i' where
2316 * we just recently had a write error.
2317 * We repeatedly clone the bio and trim down to one block,
2318 * then try the write. Where the write fails we record
2319 * a bad block.
2320 * It is conceivable that the bio doesn't exactly align with
2321 * blocks. We must handle this somehow.
2322 *
2323 * We currently own a reference on the rdev.
2324 */
2325
2326 int block_sectors;
2327 sector_t sector;
2328 int sectors;
2329 int sect_to_write = r1_bio->sectors;
2330 int ok = 1;
2331
2332 if (rdev->badblocks.shift < 0)
2333 return 0;
2334
2335 block_sectors = roundup(1 << rdev->badblocks.shift,
2336 bdev_logical_block_size(rdev->bdev) >> 9);
2337 sector = r1_bio->sector;
2338 sectors = ((sector + block_sectors)
2339 & ~(sector_t)(block_sectors - 1))
2340 - sector;
2341
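/*
 * 'sectors' is initially trimmed so that the first write ends on a
 * block_sectors boundary; every following iteration then covers one
 * full badblocks-granularity block.
 */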
2342 while (sect_to_write) {
2343 struct bio *wbio;
2344 if (sectors > sect_to_write)
2345 sectors = sect_to_write;
2346 /* Write at 'sector' for 'sectors' */
2347
2348 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2349 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2350 GFP_NOIO,
2351 mddev->bio_set);
2352 } else {
2353 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2354 mddev->bio_set);
2355 }
2356
2357 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2358 wbio->bi_iter.bi_sector = r1_bio->sector;
2359 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2360
2361 bio_trim(wbio, sector - r1_bio->sector, sectors);
2362 wbio->bi_iter.bi_sector += rdev->data_offset;
2363 bio_set_dev(wbio, rdev->bdev);
2364
2365 if (submit_bio_wait(wbio) < 0)
2366 /* failure! */
2367 ok = rdev_set_badblocks(rdev, sector,
2368 sectors, 0)
2369 && ok;
2370
2371 bio_put(wbio);
2372 sect_to_write -= sectors;
2373 sector += sectors;
2374 sectors = block_sectors;
2375 }
2376 return ok;
2377}
2378
2379static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2380{
2381 int m;
2382 int s = r1_bio->sectors;
2383 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2384 struct md_rdev *rdev = conf->mirrors[m].rdev;
2385 struct bio *bio = r1_bio->bios[m];
2386 if (bio->bi_end_io == NULL)
2387 continue;
2388 if (!bio->bi_status &&
2389 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2390 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2391 }
2392 if (bio->bi_status &&
2393 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2394 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2395 md_error(conf->mddev, rdev);
2396 }
2397 }
2398 put_buf(r1_bio);
2399 md_done_sync(conf->mddev, s, 1);
2400}
2401
2402static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2403{
2404 int m, idx;
2405 bool fail = false;
2406
2407 for (m = 0; m < conf->raid_disks * 2 ; m++)
2408 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2409 struct md_rdev *rdev = conf->mirrors[m].rdev;
2410 rdev_clear_badblocks(rdev,
2411 r1_bio->sector,
2412 r1_bio->sectors, 0);
2413 rdev_dec_pending(rdev, conf->mddev);
2414 } else if (r1_bio->bios[m] != NULL) {
2415 /* This drive got a write error. We need to
2416 * narrow down and record precise write
2417 * errors.
2418 */
2419 fail = true;
2420 if (!narrow_write_error(r1_bio, m)) {
2421 md_error(conf->mddev,
2422 conf->mirrors[m].rdev);
2423 /* an I/O failed, we can't clear the bitmap */
2424 set_bit(R1BIO_Degraded, &r1_bio->state);
2425 }
2426 rdev_dec_pending(conf->mirrors[m].rdev,
2427 conf->mddev);
2428 }
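/*
 * r1_bios that saw a write error are parked on bio_end_io_list so
 * that raid1d only completes them after the superblock update
 * recording the new bad blocks (or failed device) has been written.
 */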
2429 if (fail) {
2430 spin_lock_irq(&conf->device_lock);
2431 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2432 idx = sector_to_idx(r1_bio->sector);
2433 atomic_inc(&conf->nr_queued[idx]);
2434 spin_unlock_irq(&conf->device_lock);
2435 /*
2436 * In case freeze_array() is waiting for condition
2437 * get_unqueued_pending() == extra to be true.
2438 */
2439 wake_up(&conf->wait_barrier);
2440 md_wakeup_thread(conf->mddev->thread);
2441 } else {
2442 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2443 close_write(r1_bio);
2444 raid_end_bio_io(r1_bio);
2445 }
2446}
2447
2448static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2449{
2450 struct mddev *mddev = conf->mddev;
2451 struct bio *bio;
2452 struct md_rdev *rdev;
2453 sector_t bio_sector;
2454
2455 clear_bit(R1BIO_ReadError, &r1_bio->state);
2456 /* we got a read error. Maybe the drive is bad. Maybe just
2457 * the block and we can fix it.
2458 * We freeze all other IO, and try reading the block from
2459 * other devices. When we find one, we re-write the block
2460 * and check whether that fixes the read error.
2461 * This is all done synchronously while the array is
2462 * frozen
2463 */
2464
2465 bio = r1_bio->bios[r1_bio->read_disk];
2466 bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
2467 bio_put(bio);
2468 r1_bio->bios[r1_bio->read_disk] = NULL;
2469
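/*
 * Only attempt in-place correction when the array is writable and
 * the failing device is not marked FailFast; otherwise just block
 * further reads from this device for the retried request.
 */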
2470 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2471 if (mddev->ro == 0
2472 && !test_bit(FailFast, &rdev->flags)) {
2473 freeze_array(conf, 1);
2474 fix_read_error(conf, r1_bio->read_disk,
2475 r1_bio->sector, r1_bio->sectors);
2476 unfreeze_array(conf);
2477 } else {
2478 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2479 }
2480
2481 rdev_dec_pending(rdev, conf->mddev);
2482 allow_barrier(conf, r1_bio->sector);
2483 bio = r1_bio->master_bio;
2484
2485 /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2486 r1_bio->state = 0;
2487 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2488}
2489
2490static void raid1d(struct md_thread *thread)
2491{
2492 struct mddev *mddev = thread->mddev;
2493 struct r1bio *r1_bio;
2494 unsigned long flags;
2495 struct r1conf *conf = mddev->private;
2496 struct list_head *head = &conf->retry_list;
2497 struct blk_plug plug;
2498 int idx;
2499
2500 md_check_recovery(mddev);
2501
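/*
 * First complete any writes that were parked on bio_end_io_list
 * waiting for a superblock update; they can only be returned to the
 * caller once MD_SB_CHANGE_PENDING has cleared.
 */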
2502 if (!list_empty_careful(&conf->bio_end_io_list) &&
2503 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2504 LIST_HEAD(tmp);
2505 spin_lock_irqsave(&conf->device_lock, flags);
2506 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2507 list_splice_init(&conf->bio_end_io_list, &tmp);
2508 spin_unlock_irqrestore(&conf->device_lock, flags);
2509 while (!list_empty(&tmp)) {
2510 r1_bio = list_first_entry(&tmp, struct r1bio,
2511 retry_list);
2512 list_del(&r1_bio->retry_list);
2513 idx = sector_to_idx(r1_bio->sector);
2514 atomic_dec(&conf->nr_queued[idx]);
2515 if (mddev->degraded)
2516 set_bit(R1BIO_Degraded, &r1_bio->state);
2517 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2518 close_write(r1_bio);
2519 raid_end_bio_io(r1_bio);
2520 }
2521 }
2522
2523 blk_start_plug(&plug);
2524 for (;;) {
2525
2526 flush_pending_writes(conf);
2527
2528 spin_lock_irqsave(&conf->device_lock, flags);
2529 if (list_empty(head)) {
2530 spin_unlock_irqrestore(&conf->device_lock, flags);
2531 break;
2532 }
2533 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2534 list_del(head->prev);
2535 idx = sector_to_idx(r1_bio->sector);
2536 atomic_dec(&conf->nr_queued[idx]);
2537 spin_unlock_irqrestore(&conf->device_lock, flags);
2538
2539 mddev = r1_bio->mddev;
2540 conf = mddev->private;
2541 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2542 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2543 test_bit(R1BIO_WriteError, &r1_bio->state))
2544 handle_sync_write_finished(conf, r1_bio);
2545 else
2546 sync_request_write(mddev, r1_bio);
2547 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2548 test_bit(R1BIO_WriteError, &r1_bio->state))
2549 handle_write_finished(conf, r1_bio);
2550 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2551 handle_read_error(conf, r1_bio);
2552 else
2553 WARN_ON_ONCE(1);
2554
2555 cond_resched();
2556 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2557 md_check_recovery(mddev);
2558 }
2559 blk_finish_plug(&plug);
2560}
2561
2562static int init_resync(struct r1conf *conf)
2563{
2564 int buffs;
2565
2566 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2567 BUG_ON(conf->r1buf_pool);
2568 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2569 conf->poolinfo);
2570 if (!conf->r1buf_pool)
2571 return -ENOMEM;
2572 return 0;
2573}
2574
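/*
 * Take a preallocated r1bio from the resync pool and reset each of
 * its bios for reuse, preserving the bi_private pointer that links
 * every bio back to its resync_pages.
 */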
2575static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2576{
2577 struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2578 struct resync_pages *rps;
2579 struct bio *bio;
2580 int i;
2581
2582 for (i = conf->poolinfo->raid_disks; i--; ) {
2583 bio = r1bio->bios[i];
2584 rps = bio->bi_private;
2585 bio_reset(bio);
2586 bio->bi_private = rps;
2587 }
2588 r1bio->master_bio = NULL;
2589 return r1bio;
2590}
2591
2592/*
2593 * perform a "sync" on one "block"
2594 *
2595 * We need to make sure that normal I/O requests - particularly write
2596 * requests - do not conflict with active sync requests.
2597 *
2598 * This is achieved by tracking pending requests and a 'barrier' concept
2599 * that can be installed to exclude normal IO requests.
2600 */
2601
2602static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2603 int *skipped)
2604{
2605 struct r1conf *conf = mddev->private;
2606 struct r1bio *r1_bio;
2607 struct bio *bio;
2608 sector_t max_sector, nr_sectors;
2609 int disk = -1;
2610 int i;
2611 int wonly = -1;
2612 int write_targets = 0, read_targets = 0;
2613 sector_t sync_blocks;
2614 int still_degraded = 0;
2615 int good_sectors = RESYNC_SECTORS;
2616 int min_bad = 0; /* number of sectors that are bad in all devices */
2617 int idx = sector_to_idx(sector_nr);
2618 int page_idx = 0;
2619
2620 if (!conf->r1buf_pool)
2621 if (init_resync(conf))
2622 return 0;
2623
2624 max_sector = mddev->dev_sectors;
2625 if (sector_nr >= max_sector) {
2626 /* If we aborted, we need to abort the
2627 * sync on the 'current' bitmap chunk (there will
2628 * only be one in raid1 resync).
2629 * We can find the current address in mddev->curr_resync.
2630 */
2631 if (mddev->curr_resync < max_sector) /* aborted */
2632 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2633 &sync_blocks, 1);
2634 else /* completed sync */
2635 conf->fullsync = 0;
2636
2637 bitmap_close_sync(mddev->bitmap);
2638 close_sync(conf);
2639
2640 if (mddev_is_clustered(mddev)) {
2641 conf->cluster_sync_low = 0;
2642 conf->cluster_sync_high = 0;
2643 }
2644 return 0;
2645 }
2646
2647 if (mddev->bitmap == NULL &&
2648 mddev->recovery_cp == MaxSector &&
2649 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2650 conf->fullsync == 0) {
2651 *skipped = 1;
2652 return max_sector - sector_nr;
2653 }
2654 /* before building a request, check if we can skip these blocks...
2655 * This call to bitmap_start_sync doesn't actually record anything
2656 */
2657 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2658 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2659 /* We can skip this block, and probably several more */
2660 *skipped = 1;
2661 return sync_blocks;
2662 }
2663
2664 /*
2665 * If there is non-resync activity waiting for a turn, then let it
2666 * through before starting on this new sync request.
2667 */
2668 if (atomic_read(&conf->nr_waiting[idx]))
2669 schedule_timeout_uninterruptible(1);
2670
2671 /* we are incrementing sector_nr below. To be safe, we check against
2672 * sector_nr + two times RESYNC_SECTORS
2673 */
2674
2675 bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2676 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2677
2678
2679 if (raise_barrier(conf, sector_nr))
2680 return 0;
2681
2682 r1_bio = raid1_alloc_init_r1buf(conf);
2683
2684 rcu_read_lock();
2685 /*
2686 * If we get a correctably read error during resync or recovery,
2687 * we might want to read from a different device. So we
2688 * flag all drives that could conceivably be read from for READ,
2689 * and any others (which will be non-In_sync devices) for WRITE.
2690 * If a read fails, we try reading from something else for which READ
2691 * is OK.
2692 */
2693
2694 r1_bio->mddev = mddev;
2695 r1_bio->sector = sector_nr;
2696 r1_bio->state = 0;
2697 set_bit(R1BIO_IsSync, &r1_bio->state);
2698 /* make sure good_sectors won't go across barrier unit boundary */
2699 good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2700
2701 for (i = 0; i < conf->raid_disks * 2; i++) {
2702 struct md_rdev *rdev;
2703 bio = r1_bio->bios[i];
2704
2705 rdev = rcu_dereference(conf->mirrors[i].rdev);
2706 if (rdev == NULL ||
2707 test_bit(Faulty, &rdev->flags)) {
2708 if (i < conf->raid_disks)
2709 still_degraded = 1;
2710 } else if (!test_bit(In_sync, &rdev->flags)) {
2711 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2712 bio->bi_end_io = end_sync_write;
2713 write_targets ++;
2714 } else {
2715 /* may need to read from here */
2716 sector_t first_bad = MaxSector;
2717 int bad_sectors;
2718
2719 if (is_badblock(rdev, sector_nr, good_sectors,
2720 &first_bad, &bad_sectors)) {
2721 if (first_bad > sector_nr)
2722 good_sectors = first_bad - sector_nr;
2723 else {
2724 bad_sectors -= (sector_nr - first_bad);
2725 if (min_bad == 0 ||
2726 min_bad > bad_sectors)
2727 min_bad = bad_sectors;
2728 }
2729 }
2730 if (sector_nr < first_bad) {
2731 if (test_bit(WriteMostly, &rdev->flags)) {
2732 if (wonly < 0)
2733 wonly = i;
2734 } else {
2735 if (disk < 0)
2736 disk = i;
2737 }
2738 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2739 bio->bi_end_io = end_sync_read;
2740 read_targets++;
2741 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2742 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2743 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2744 /*
2745 * The device is suitable for reading (InSync),
2746 * but has bad block(s) here. Let's try to correct them,
2747 * if we are doing resync or repair. Otherwise, leave
2748 * this device alone for this sync request.
2749 */
2750 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2751 bio->bi_end_io = end_sync_write;
2752 write_targets++;
2753 }
2754 }
2755 if (bio->bi_end_io) {
2756 atomic_inc(&rdev->nr_pending);
2757 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2758 bio_set_dev(bio, rdev->bdev);
2759 if (test_bit(FailFast, &rdev->flags))
2760 bio->bi_opf |= MD_FAILFAST;
2761 }
2762 }
2763 rcu_read_unlock();
2764 if (disk < 0)
2765 disk = wonly;
2766 r1_bio->read_disk = disk;
2767
2768 if (read_targets == 0 && min_bad > 0) {
2769 /* These sectors are bad on all InSync devices, so we
2770 * need to mark them bad on all write targets
2771 */
2772 int ok = 1;
2773 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2774 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2775 struct md_rdev *rdev = conf->mirrors[i].rdev;
2776 ok = rdev_set_badblocks(rdev, sector_nr,
2777 min_bad, 0
2778 ) && ok;
2779 }
2780 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2781 *skipped = 1;
2782 put_buf(r1_bio);
2783
2784 if (!ok) {
2785 /* Cannot record the badblocks, so need to
2786 * abort the resync.
2787 * If there are multiple read targets, could just
2788 * fail the really bad ones ???
2789 */
2790 conf->recovery_disabled = mddev->recovery_disabled;
2791 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2792 return 0;
2793 } else
2794 return min_bad;
2795
2796 }
2797 if (min_bad > 0 && min_bad < good_sectors) {
2798 /* only resync enough to reach the next bad->good
2799 * transition */
2800 good_sectors = min_bad;
2801 }
2802
2803 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2804 /* extra read targets are also write targets */
2805 write_targets += read_targets-1;
2806
2807 if (write_targets == 0 || read_targets == 0) {
2808 /* There is nowhere to write, so all non-sync
2809 * drives must be failed - so we are finished
2810 */
2811 sector_t rv;
2812 if (min_bad > 0)
2813 max_sector = sector_nr + min_bad;
2814 rv = max_sector - sector_nr;
2815 *skipped = 1;
2816 put_buf(r1_bio);
2817 return rv;
2818 }
2819
2820 if (max_sector > mddev->resync_max)
2821 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2822 if (max_sector > sector_nr + good_sectors)
2823 max_sector = sector_nr + good_sectors;
2824 nr_sectors = 0;
2825 sync_blocks = 0;
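/*
 * Build the resync I/O a page at a time, stopping at max_sector,
 * when the bitmap says the next blocks need no sync, or after
 * RESYNC_PAGES pages have been added.
 */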
2826 do {
2827 struct page *page;
2828 int len = PAGE_SIZE;
2829 if (sector_nr + (len>>9) > max_sector)
2830 len = (max_sector - sector_nr) << 9;
2831 if (len == 0)
2832 break;
2833 if (sync_blocks == 0) {
2834 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2835 &sync_blocks, still_degraded) &&
2836 !conf->fullsync &&
2837 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2838 break;
2839 if ((len >> 9) > sync_blocks)
2840 len = sync_blocks<<9;
2841 }
2842
2843 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2844 struct resync_pages *rp;
2845
2846 bio = r1_bio->bios[i];
2847 rp = get_resync_pages(bio);
2848 if (bio->bi_end_io) {
2849 page = resync_fetch_page(rp, page_idx);
2850
2851 /*
2852 * won't fail because the vec table is big
2853 * enough to hold all these pages
2854 */
2855 bio_add_page(bio, page, len, 0);
2856 }
2857 }
2858 nr_sectors += len>>9;
2859 sector_nr += len>>9;
2860 sync_blocks -= (len>>9);
2861 } while (++page_idx < RESYNC_PAGES);
2862
2863 r1_bio->sectors = nr_sectors;
2864
2865 if (mddev_is_clustered(mddev) &&
2866 conf->cluster_sync_high < sector_nr + nr_sectors) {
2867 conf->cluster_sync_low = mddev->curr_resync_completed;
2868 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2869 /* Send resync message */
2870 md_cluster_ops->resync_info_update(mddev,
2871 conf->cluster_sync_low,
2872 conf->cluster_sync_high);
2873 }
2874
2875 /* For a user-requested sync, we read all readable devices and do a
2876 * compare
2877 */
2878 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2879 atomic_set(&r1_bio->remaining, read_targets);
2880 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2881 bio = r1_bio->bios[i];
2882 if (bio->bi_end_io == end_sync_read) {
2883 read_targets--;
2884 md_sync_acct_bio(bio, nr_sectors);
2885 if (read_targets == 1)
2886 bio->bi_opf &= ~MD_FAILFAST;
2887 generic_make_request(bio);
2888 }
2889 }
2890 } else {
2891 atomic_set(&r1_bio->remaining, 1);
2892 bio = r1_bio->bios[r1_bio->read_disk];
2893 md_sync_acct_bio(bio, nr_sectors);
2894 if (read_targets == 1)
2895 bio->bi_opf &= ~MD_FAILFAST;
2896 generic_make_request(bio);
2897
2898 }
2899 return nr_sectors;
2900}
2901
2902static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2903{
2904 if (sectors)
2905 return sectors;
2906
2907 return mddev->dev_sectors;
2908}
2909
2910static struct r1conf *setup_conf(struct mddev *mddev)
2911{
2912 struct r1conf *conf;
2913 int i;
2914 struct raid1_info *disk;
2915 struct md_rdev *rdev;
2916 int err = -ENOMEM;
2917
2918 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2919 if (!conf)
2920 goto abort;
2921
2922 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2923 sizeof(atomic_t), GFP_KERNEL);
2924 if (!conf->nr_pending)
2925 goto abort;
2926
2927 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2928 sizeof(atomic_t), GFP_KERNEL);
2929 if (!conf->nr_waiting)
2930 goto abort;
2931
2932 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2933 sizeof(atomic_t), GFP_KERNEL);
2934 if (!conf->nr_queued)
2935 goto abort;
2936
2937 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2938 sizeof(atomic_t), GFP_KERNEL);
2939 if (!conf->barrier)
2940 goto abort;
2941
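/*
 * Two mirror slots are allocated per raid disk: slots [0..raid_disks)
 * hold the active devices and slots [raid_disks..2*raid_disks) hold
 * any replacement devices.
 */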
2942 conf->mirrors = kzalloc(sizeof(struct raid1_info)
2943 * mddev->raid_disks * 2,
2944 GFP_KERNEL);
2945 if (!conf->mirrors)
2946 goto abort;
2947
2948 conf->tmppage = alloc_page(GFP_KERNEL);
2949 if (!conf->tmppage)
2950 goto abort;
2951
2952 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2953 if (!conf->poolinfo)
2954 goto abort;
2955 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2956 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2957 r1bio_pool_free,
2958 conf->poolinfo);
2959 if (!conf->r1bio_pool)
2960 goto abort;
2961
2962 conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
2963 if (!conf->bio_split)
2964 goto abort;
2965
2966 conf->poolinfo->mddev = mddev;
2967
2968 err = -EINVAL;
2969 spin_lock_init(&conf->device_lock);
2970 rdev_for_each(rdev, mddev) {
2971 int disk_idx = rdev->raid_disk;
2972 if (disk_idx >= mddev->raid_disks
2973 || disk_idx < 0)
2974 continue;
2975 if (test_bit(Replacement, &rdev->flags))
2976 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2977 else
2978 disk = conf->mirrors + disk_idx;
2979
2980 if (disk->rdev)
2981 goto abort;
2982 disk->rdev = rdev;
2983 disk->head_position = 0;
2984 disk->seq_start = MaxSector;
2985 }
2986 conf->raid_disks = mddev->raid_disks;
2987 conf->mddev = mddev;
2988 INIT_LIST_HEAD(&conf->retry_list);
2989 INIT_LIST_HEAD(&conf->bio_end_io_list);
2990
2991 spin_lock_init(&conf->resync_lock);
2992 init_waitqueue_head(&conf->wait_barrier);
2993
2994 bio_list_init(&conf->pending_bio_list);
2995 conf->pending_count = 0;
2996 conf->recovery_disabled = mddev->recovery_disabled - 1;
2997
2998 err = -EIO;
2999 for (i = 0; i < conf->raid_disks * 2; i++) {
3000
3001 disk = conf->mirrors + i;
3002
3003 if (i < conf->raid_disks &&
3004 disk[conf->raid_disks].rdev) {
3005 /* This slot has a replacement. */
3006 if (!disk->rdev) {
3007 /* No original, just make the replacement
3008 * a recovering spare
3009 */
3010 disk->rdev =
3011 disk[conf->raid_disks].rdev;
3012 disk[conf->raid_disks].rdev = NULL;
3013 } else if (!test_bit(In_sync, &disk->rdev->flags))
3014 /* Original is not in_sync - bad */
3015 goto abort;
3016 }
3017
3018 if (!disk->rdev ||
3019 !test_bit(In_sync, &disk->rdev->flags)) {
3020 disk->head_position = 0;
3021 if (disk->rdev &&
3022 (disk->rdev->saved_raid_disk < 0))
3023 conf->fullsync = 1;
3024 }
3025 }
3026
3027 err = -ENOMEM;
3028 conf->thread = md_register_thread(raid1d, mddev, "raid1");
3029 if (!conf->thread)
3030 goto abort;
3031
3032 return conf;
3033
3034 abort:
3035 if (conf) {
3036 mempool_destroy(conf->r1bio_pool);
3037 kfree(conf->mirrors);
3038 safe_put_page(conf->tmppage);
3039 kfree(conf->poolinfo);
3040 kfree(conf->nr_pending);
3041 kfree(conf->nr_waiting);
3042 kfree(conf->nr_queued);
3043 kfree(conf->barrier);
3044 if (conf->bio_split)
3045 bioset_free(conf->bio_split);
3046 kfree(conf);
3047 }
3048 return ERR_PTR(err);
3049}
3050
3051static void raid1_free(struct mddev *mddev, void *priv);
3052static int raid1_run(struct mddev *mddev)
3053{
3054 struct r1conf *conf;
3055 int i;
3056 struct md_rdev *rdev;
3057 int ret;
3058 bool discard_supported = false;
3059
3060 if (mddev->level != 1) {
3061 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3062 mdname(mddev), mddev->level);
3063 return -EIO;
3064 }
3065 if (mddev->reshape_position != MaxSector) {
3066 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3067 mdname(mddev));
3068 return -EIO;
3069 }
3070 if (mddev_init_writes_pending(mddev) < 0)
3071 return -ENOMEM;
3072 /*
3073 * copy the already verified devices into our private RAID1
3074 * bookkeeping area. [whatever we allocate in run(),
3075 * should be freed in raid1_free()]
3076 */
3077 if (mddev->private == NULL)
3078 conf = setup_conf(mddev);
3079 else
3080 conf = mddev->private;
3081
3082 if (IS_ERR(conf))
3083 return PTR_ERR(conf);
3084
3085 if (mddev->queue) {
3086 blk_queue_max_write_same_sectors(mddev->queue, 0);
3087 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3088 }
3089
3090 rdev_for_each(rdev, mddev) {
3091 if (!mddev->gendisk)
3092 continue;
3093 disk_stack_limits(mddev->gendisk, rdev->bdev,
3094 rdev->data_offset << 9);
3095 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3096 discard_supported = true;
3097 }
3098
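/*
 * Recompute the degraded count: any slot that is empty, not in sync,
 * or faulty leaves the array degraded.
 */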
3099 mddev->degraded = 0;
3100 for (i=0; i < conf->raid_disks; i++)
3101 if (conf->mirrors[i].rdev == NULL ||
3102 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3103 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3104 mddev->degraded++;
3105
3106 if (conf->raid_disks - mddev->degraded == 1)
3107 mddev->recovery_cp = MaxSector;
3108
3109 if (mddev->recovery_cp != MaxSector)
3110 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3111 mdname(mddev));
3112 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3113 mdname(mddev), mddev->raid_disks - mddev->degraded,
3114 mddev->raid_disks);
3115
3116 /*
3117 * Ok, everything is just fine now
3118 */
3119 mddev->thread = conf->thread;
3120 conf->thread = NULL;
3121 mddev->private = conf;
3122 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3123
3124 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3125
3126 if (mddev->queue) {
3127 if (discard_supported)
3128 blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3129 mddev->queue);
3130 else
3131 blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3132 mddev->queue);
3133 }
3134
3135 ret = md_integrity_register(mddev);
3136 if (ret) {
3137 md_unregister_thread(&mddev->thread);
3138 raid1_free(mddev, conf);
3139 }
3140 return ret;
3141}
3142
3143static void raid1_free(struct mddev *mddev, void *priv)
3144{
3145 struct r1conf *conf = priv;
3146
3147 mempool_destroy(conf->r1bio_pool);
3148 kfree(conf->mirrors);
3149 safe_put_page(conf->tmppage);
3150 kfree(conf->poolinfo);
3151 kfree(conf->nr_pending);
3152 kfree(conf->nr_waiting);
3153 kfree(conf->nr_queued);
3154 kfree(conf->barrier);
3155 if (conf->bio_split)
3156 bioset_free(conf->bio_split);
3157 kfree(conf);
3158}
3159
3160static int raid1_resize(struct mddev *mddev, sector_t sectors)
3161{
3162 /* no resync is happening, and there is enough space
3163 * on all devices, so we can resize.
3164 * We need to make sure resync covers any new space.
3165 * If the array is shrinking we should possibly wait until
3166 * any io in the removed space completes, but it hardly seems
3167 * worth it.
3168 */
3169 sector_t newsize = raid1_size(mddev, sectors, 0);
3170 if (mddev->external_size &&
3171 mddev->array_sectors > newsize)
3172 return -EINVAL;
3173 if (mddev->bitmap) {
3174 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3175 if (ret)
3176 return ret;
3177 }
3178 md_set_array_sectors(mddev, newsize);
3179 if (sectors > mddev->dev_sectors &&
3180 mddev->recovery_cp > mddev->dev_sectors) {
3181 mddev->recovery_cp = mddev->dev_sectors;
3182 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3183 }
3184 mddev->dev_sectors = sectors;
3185 mddev->resync_max_sectors = sectors;
3186 return 0;
3187}
3188
3189static int raid1_reshape(struct mddev *mddev)
3190{
3191 /* We need to:
3192 * 1/ resize the r1bio_pool
3193 * 2/ resize conf->mirrors
3194 *
3195 * We allocate a new r1bio_pool if we can.
3196 * Then raise a device barrier and wait until all IO stops.
3197 * Then resize conf->mirrors and swap in the new r1bio pool.
3198 *
3199 * At the same time, we "pack" the devices so that all the missing
3200 * devices have the higher raid_disk numbers.
3201 */
3202 mempool_t *newpool, *oldpool;
3203 struct pool_info *newpoolinfo;
3204 struct raid1_info *newmirrors;
3205 struct r1conf *conf = mddev->private;
3206 int cnt, raid_disks;
3207 unsigned long flags;
3208 int d, d2;
3209
3210 /* Cannot change chunk_size, layout, or level */
3211 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3212 mddev->layout != mddev->new_layout ||
3213 mddev->level != mddev->new_level) {
3214 mddev->new_chunk_sectors = mddev->chunk_sectors;
3215 mddev->new_layout = mddev->layout;
3216 mddev->new_level = mddev->level;
3217 return -EINVAL;
3218 }
3219
3220 if (!mddev_is_clustered(mddev))
3221 md_allow_write(mddev);
3222
3223 raid_disks = mddev->raid_disks + mddev->delta_disks;
3224
3225 if (raid_disks < conf->raid_disks) {
3226 cnt=0;
3227 for (d= 0; d < conf->raid_disks; d++)
3228 if (conf->mirrors[d].rdev)
3229 cnt++;
3230 if (cnt > raid_disks)
3231 return -EBUSY;
3232 }
3233
3234 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3235 if (!newpoolinfo)
3236 return -ENOMEM;
3237 newpoolinfo->mddev = mddev;
3238 newpoolinfo->raid_disks = raid_disks * 2;
3239
3240 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3241 r1bio_pool_free, newpoolinfo);
3242 if (!newpool) {
3243 kfree(newpoolinfo);
3244 return -ENOMEM;
3245 }
3246 newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3247 GFP_KERNEL);
3248 if (!newmirrors) {
3249 kfree(newpoolinfo);
3250 mempool_destroy(newpool);
3251 return -ENOMEM;
3252 }
3253
3254 freeze_array(conf, 0);
3255
3256 /* ok, everything is stopped */
3257 oldpool = conf->r1bio_pool;
3258 conf->r1bio_pool = newpool;
3259
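/*
 * Pack the remaining devices into the lowest numbered slots and move
 * their sysfs "rd%d" links to match the new raid_disk numbers.
 */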
3260 for (d = d2 = 0; d < conf->raid_disks; d++) {
3261 struct md_rdev *rdev = conf->mirrors[d].rdev;
3262 if (rdev && rdev->raid_disk != d2) {
3263 sysfs_unlink_rdev(mddev, rdev);
3264 rdev->raid_disk = d2;
3265 sysfs_unlink_rdev(mddev, rdev);
3266 if (sysfs_link_rdev(mddev, rdev))
3267 pr_warn("md/raid1:%s: cannot register rd%d\n",
3268 mdname(mddev), rdev->raid_disk);
3269 }
3270 if (rdev)
3271 newmirrors[d2++].rdev = rdev;
3272 }
3273 kfree(conf->mirrors);
3274 conf->mirrors = newmirrors;
3275 kfree(conf->poolinfo);
3276 conf->poolinfo = newpoolinfo;
3277
3278 spin_lock_irqsave(&conf->device_lock, flags);
3279 mddev->degraded += (raid_disks - conf->raid_disks);
3280 spin_unlock_irqrestore(&conf->device_lock, flags);
3281 conf->raid_disks = mddev->raid_disks = raid_disks;
3282 mddev->delta_disks = 0;
3283
3284 unfreeze_array(conf);
3285
3286 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3287 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3288 md_wakeup_thread(mddev->thread);
3289
3290 mempool_destroy(oldpool);
3291 return 0;
3292}
3293
3294static void raid1_quiesce(struct mddev *mddev, int quiesce)
3295{
3296 struct r1conf *conf = mddev->private;
3297
3298 if (quiesce)
3299 freeze_array(conf, 0);
3300 else
3301 unfreeze_array(conf);
3302}
3303
3304static void *raid1_takeover(struct mddev *mddev)
3305{
3306 /* raid1 can take over:
3307 * raid5 with 2 devices, any layout or chunk size
3308 */
3309 if (mddev->level == 5 && mddev->raid_disks == 2) {
3310 struct r1conf *conf;
3311 mddev->new_level = 1;
3312 mddev->new_layout = 0;
3313 mddev->new_chunk_sectors = 0;
3314 conf = setup_conf(mddev);
3315 if (!IS_ERR(conf)) {
3316 /* Array must appear to be quiesced */
3317 conf->array_frozen = 1;
3318 mddev_clear_unsupported_flags(mddev,
3319 UNSUPPORTED_MDDEV_FLAGS);
3320 }
3321 return conf;
3322 }
3323 return ERR_PTR(-EINVAL);
3324}
3325
3326static struct md_personality raid1_personality =
3327{
3328 .name = "raid1",
3329 .level = 1,
3330 .owner = THIS_MODULE,
3331 .make_request = raid1_make_request,
3332 .run = raid1_run,
3333 .free = raid1_free,
3334 .status = raid1_status,
3335 .error_handler = raid1_error,
3336 .hot_add_disk = raid1_add_disk,
3337 .hot_remove_disk= raid1_remove_disk,
3338 .spare_active = raid1_spare_active,
3339 .sync_request = raid1_sync_request,
3340 .resize = raid1_resize,
3341 .size = raid1_size,
3342 .check_reshape = raid1_reshape,
3343 .quiesce = raid1_quiesce,
3344 .takeover = raid1_takeover,
3345 .congested = raid1_congested,
3346};
3347
3348static int __init raid_init(void)
3349{
3350 return register_md_personality(&raid1_personality);
3351}
3352
3353static void raid_exit(void)
3354{
3355 unregister_md_personality(&raid1_personality);
3356}
3357
3358module_init(raid_init);
3359module_exit(raid_exit);
3360MODULE_LICENSE("GPL");
3361MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3362MODULE_ALIAS("md-personality-3"); /* RAID1 */
3363MODULE_ALIAS("md-raid1");
3364MODULE_ALIAS("md-level-1");
3365
3366module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);