1/*
2 * raid1.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5 *
6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7 *
8 * RAID-1 management functions.
9 *
10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11 *
12 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14 *
15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16 * bitmapped intelligence in resync:
17 *
18 * - bitmap marked during normal i/o
19 * - bitmap used to skip nondirty blocks during sync
20 *
21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22 * - persistent bitmap code
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
27 * any later version.
28 *
29 * You should have received a copy of the GNU General Public License
30 * (for example /usr/src/linux/COPYING); if not, write to the Free
31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/slab.h>
35#include <linux/delay.h>
36#include <linux/blkdev.h>
37#include <linux/module.h>
38#include <linux/seq_file.h>
39#include <linux/ratelimit.h>
40#include <trace/events/block.h>
41#include "md.h"
42#include "raid1.h"
43#include "bitmap.h"
44
45#define UNSUPPORTED_MDDEV_FLAGS \
46 ((1L << MD_HAS_JOURNAL) | \
47 (1L << MD_JOURNAL_CLEAN))
48
49/*
50 * Number of guaranteed r1bios in case of extreme VM load:
51 */
52#define NR_RAID1_BIOS 256
53
54/* when we get a read error on a read-only array, we redirect to another
55 * device without failing the first device, or trying to over-write to
56 * correct the read error. To keep track of bad blocks on a per-bio
57 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
58 */
59#define IO_BLOCKED ((struct bio *)1)
60/* When we successfully write to a known bad-block, we need to remove the
61 * bad-block marking which must be done from process context. So we record
62 * the success by setting devs[n].bio to IO_MADE_GOOD
63 */
64#define IO_MADE_GOOD ((struct bio *)2)
65
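/*
 * True for NULL and the IO_BLOCKED/IO_MADE_GOOD sentinels above, i.e.
 * anything that is not a real bio pointer.
 */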
66#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
67
68/* When there are this many requests queued to be written by
69 * the raid1 thread, we become 'congested' to provide back-pressure
70 * for writeback.
71 */
72static int max_queued_requests = 1024;
73
74static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
75 sector_t bi_sector);
76static void lower_barrier(struct r1conf *conf);
77
78#define raid1_log(md, fmt, args...) \
79 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
80
81static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
82{
83 struct pool_info *pi = data;
84 int size = offsetof(struct r1bio, bios[pi->raid_disks]);
85
86 /* allocate a r1bio with room for raid_disks entries in the bios array */
87 return kzalloc(size, gfp_flags);
88}
89
90static void r1bio_pool_free(void *r1_bio, void *data)
91{
92 kfree(r1_bio);
93}
94
95#define RESYNC_BLOCK_SIZE (64*1024)
96#define RESYNC_DEPTH 32
97#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
98#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
99#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
100#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
101#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
102#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
103#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
104
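/*
 * Allocate a resync buffer: an r1bio plus one bio per device, with
 * RESYNC_PAGES data pages attached (the pages are shared across bios
 * unless this is a user-requested check/repair).
 */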
105static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
106{
107 struct pool_info *pi = data;
108 struct r1bio *r1_bio;
109 struct bio *bio;
110 int need_pages;
111 int i, j;
112
113 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
114 if (!r1_bio)
115 return NULL;
116
117 /*
118 * Allocate bios : 1 for reading, n-1 for writing
119 */
120 for (j = pi->raid_disks ; j-- ; ) {
121 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
122 if (!bio)
123 goto out_free_bio;
124 r1_bio->bios[j] = bio;
125 }
126 /*
127 * Allocate RESYNC_PAGES data pages and attach them to
128 * the first bio.
129 * If this is a user-requested check/repair, allocate
130 * RESYNC_PAGES for each bio.
131 */
132 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
133 need_pages = pi->raid_disks;
134 else
135 need_pages = 1;
136 for (j = 0; j < need_pages; j++) {
137 bio = r1_bio->bios[j];
138 bio->bi_vcnt = RESYNC_PAGES;
139
140 if (bio_alloc_pages(bio, gfp_flags))
141 goto out_free_pages;
142 }
143	/* If not a user-requested check/repair, copy the page pointers to all bios */
144 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
145 for (i=0; i<RESYNC_PAGES ; i++)
146 for (j=1; j<pi->raid_disks; j++)
147 r1_bio->bios[j]->bi_io_vec[i].bv_page =
148 r1_bio->bios[0]->bi_io_vec[i].bv_page;
149 }
150
151 r1_bio->master_bio = NULL;
152
153 return r1_bio;
154
155out_free_pages:
156 while (--j >= 0)
157 bio_free_pages(r1_bio->bios[j]);
158
159out_free_bio:
160 while (++j < pi->raid_disks)
161 bio_put(r1_bio->bios[j]);
162 r1bio_pool_free(r1_bio, data);
163 return NULL;
164}
165
166static void r1buf_pool_free(void *__r1_bio, void *data)
167{
168 struct pool_info *pi = data;
169 int i,j;
170 struct r1bio *r1bio = __r1_bio;
171
172 for (i = 0; i < RESYNC_PAGES; i++)
173 for (j = pi->raid_disks; j-- ;) {
174 if (j == 0 ||
175 r1bio->bios[j]->bi_io_vec[i].bv_page !=
176 r1bio->bios[0]->bi_io_vec[i].bv_page)
177 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
178 }
179 for (i=0 ; i < pi->raid_disks; i++)
180 bio_put(r1bio->bios[i]);
181
182 r1bio_pool_free(r1bio, data);
183}
184
185static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
186{
187 int i;
188
189 for (i = 0; i < conf->raid_disks * 2; i++) {
190 struct bio **bio = r1_bio->bios + i;
191 if (!BIO_SPECIAL(*bio))
192 bio_put(*bio);
193 *bio = NULL;
194 }
195}
196
197static void free_r1bio(struct r1bio *r1_bio)
198{
199 struct r1conf *conf = r1_bio->mddev->private;
200
201 put_all_bios(conf, r1_bio);
202 mempool_free(r1_bio, conf->r1bio_pool);
203}
204
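/*
 * Return a resync r1bio to r1buf_pool, dropping the rdev references it
 * holds and lowering the resync barrier that was raised for it.
 */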
205static void put_buf(struct r1bio *r1_bio)
206{
207 struct r1conf *conf = r1_bio->mddev->private;
208 int i;
209
210 for (i = 0; i < conf->raid_disks * 2; i++) {
211 struct bio *bio = r1_bio->bios[i];
212 if (bio->bi_end_io)
213 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
214 }
215
216 mempool_free(r1_bio, conf->r1buf_pool);
217
218 lower_barrier(conf);
219}
220
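/*
 * Hand an r1bio to the raid1d thread for retry/error handling and wake
 * it up.
 */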
221static void reschedule_retry(struct r1bio *r1_bio)
222{
223 unsigned long flags;
224 struct mddev *mddev = r1_bio->mddev;
225 struct r1conf *conf = mddev->private;
226
227 spin_lock_irqsave(&conf->device_lock, flags);
228 list_add(&r1_bio->retry_list, &conf->retry_list);
229 conf->nr_queued ++;
230 spin_unlock_irqrestore(&conf->device_lock, flags);
231
232 wake_up(&conf->wait_barrier);
233 md_wakeup_thread(mddev->thread);
234}
235
236/*
237 * raid_end_bio_io() is called when we have finished servicing a mirrored
238 * operation and are ready to return a success/failure code to the buffer
239 * cache layer.
240 */
241static void call_bio_endio(struct r1bio *r1_bio)
242{
243 struct bio *bio = r1_bio->master_bio;
244 int done;
245 struct r1conf *conf = r1_bio->mddev->private;
246 sector_t start_next_window = r1_bio->start_next_window;
247 sector_t bi_sector = bio->bi_iter.bi_sector;
248
249 if (bio->bi_phys_segments) {
250 unsigned long flags;
251 spin_lock_irqsave(&conf->device_lock, flags);
252 bio->bi_phys_segments--;
253 done = (bio->bi_phys_segments == 0);
254 spin_unlock_irqrestore(&conf->device_lock, flags);
255 /*
256 * make_request() might be waiting for
257 * bi_phys_segments to decrease
258 */
259 wake_up(&conf->wait_barrier);
260 } else
261 done = 1;
262
263 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
264 bio->bi_error = -EIO;
265
266 if (done) {
267 bio_endio(bio);
268 /*
269 * Wake up any possible resync thread that waits for the device
270 * to go idle.
271 */
272 allow_barrier(conf, start_next_window, bi_sector);
273 }
274}
275
276static void raid_end_bio_io(struct r1bio *r1_bio)
277{
278 struct bio *bio = r1_bio->master_bio;
279
280 /* if nobody has done the final endio yet, do it now */
281 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
282 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
283 (bio_data_dir(bio) == WRITE) ? "write" : "read",
284 (unsigned long long) bio->bi_iter.bi_sector,
285 (unsigned long long) bio_end_sector(bio) - 1);
286
287 call_bio_endio(r1_bio);
288 }
289 free_r1bio(r1_bio);
290}
291
292/*
293 * Update disk head position estimator based on IRQ completion info.
294 */
295static inline void update_head_pos(int disk, struct r1bio *r1_bio)
296{
297 struct r1conf *conf = r1_bio->mddev->private;
298
299 conf->mirrors[disk].head_position =
300 r1_bio->sector + (r1_bio->sectors);
301}
302
303/*
304 * Find the disk number which triggered given bio
305 */
306static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
307{
308 int mirror;
309 struct r1conf *conf = r1_bio->mddev->private;
310 int raid_disks = conf->raid_disks;
311
312 for (mirror = 0; mirror < raid_disks * 2; mirror++)
313 if (r1_bio->bios[mirror] == bio)
314 break;
315
316 BUG_ON(mirror == raid_disks * 2);
317 update_head_pos(mirror, r1_bio);
318
319 return mirror;
320}
321
322static void raid1_end_read_request(struct bio *bio)
323{
324 int uptodate = !bio->bi_error;
325 struct r1bio *r1_bio = bio->bi_private;
326 struct r1conf *conf = r1_bio->mddev->private;
327 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
328
329 /*
330 * this branch is our 'one mirror IO has finished' event handler:
331 */
332 update_head_pos(r1_bio->read_disk, r1_bio);
333
334 if (uptodate)
335 set_bit(R1BIO_Uptodate, &r1_bio->state);
336 else if (test_bit(FailFast, &rdev->flags) &&
337 test_bit(R1BIO_FailFast, &r1_bio->state))
338 /* This was a fail-fast read so we definitely
339 * want to retry */
340 ;
341 else {
342 /* If all other devices have failed, we want to return
343 * the error upwards rather than fail the last device.
344 * Here we redefine "uptodate" to mean "Don't want to retry"
345 */
346 unsigned long flags;
347 spin_lock_irqsave(&conf->device_lock, flags);
348 if (r1_bio->mddev->degraded == conf->raid_disks ||
349 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
350 test_bit(In_sync, &rdev->flags)))
351 uptodate = 1;
352 spin_unlock_irqrestore(&conf->device_lock, flags);
353 }
354
355 if (uptodate) {
356 raid_end_bio_io(r1_bio);
357 rdev_dec_pending(rdev, conf->mddev);
358 } else {
359 /*
360 * oops, read error:
361 */
362 char b[BDEVNAME_SIZE];
363 pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
364 mdname(conf->mddev),
365 bdevname(rdev->bdev, b),
366 (unsigned long long)r1_bio->sector);
367 set_bit(R1BIO_ReadError, &r1_bio->state);
368 reschedule_retry(r1_bio);
369 /* don't drop the reference on read_disk yet */
370 }
371}
372
373static void close_write(struct r1bio *r1_bio)
374{
375 /* it really is the end of this request */
376 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
377 /* free extra copy of the data pages */
378 int i = r1_bio->behind_page_count;
379 while (i--)
380 safe_put_page(r1_bio->behind_bvecs[i].bv_page);
381 kfree(r1_bio->behind_bvecs);
382 r1_bio->behind_bvecs = NULL;
383 }
384 /* clear the bitmap if all writes complete successfully */
385 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
386 r1_bio->sectors,
387 !test_bit(R1BIO_Degraded, &r1_bio->state),
388 test_bit(R1BIO_BehindIO, &r1_bio->state));
389 md_write_end(r1_bio->mddev);
390}
391
392static void r1_bio_write_done(struct r1bio *r1_bio)
393{
394 if (!atomic_dec_and_test(&r1_bio->remaining))
395 return;
396
397 if (test_bit(R1BIO_WriteError, &r1_bio->state))
398 reschedule_retry(r1_bio);
399 else {
400 close_write(r1_bio);
401 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
402 reschedule_retry(r1_bio);
403 else
404 raid_end_bio_io(r1_bio);
405 }
406}
407
408static void raid1_end_write_request(struct bio *bio)
409{
410 struct r1bio *r1_bio = bio->bi_private;
411 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
412 struct r1conf *conf = r1_bio->mddev->private;
413 struct bio *to_put = NULL;
414 int mirror = find_bio_disk(r1_bio, bio);
415 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
416 bool discard_error;
417
418 discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
419
420 /*
421 * 'one mirror IO has finished' event handler:
422 */
423 if (bio->bi_error && !discard_error) {
424 set_bit(WriteErrorSeen, &rdev->flags);
425 if (!test_and_set_bit(WantReplacement, &rdev->flags))
426 set_bit(MD_RECOVERY_NEEDED, &
427 conf->mddev->recovery);
428
429 if (test_bit(FailFast, &rdev->flags) &&
430 (bio->bi_opf & MD_FAILFAST) &&
431 /* We never try FailFast to WriteMostly devices */
432 !test_bit(WriteMostly, &rdev->flags)) {
433 md_error(r1_bio->mddev, rdev);
434 if (!test_bit(Faulty, &rdev->flags))
435 /* This is the only remaining device,
436 * We need to retry the write without
437 * FailFast
438 */
439 set_bit(R1BIO_WriteError, &r1_bio->state);
440 else {
441 /* Finished with this branch */
442 r1_bio->bios[mirror] = NULL;
443 to_put = bio;
444 }
445 } else
446 set_bit(R1BIO_WriteError, &r1_bio->state);
447 } else {
448 /*
449 * Set R1BIO_Uptodate in our master bio, so that we
450		 * will return a good error code to the higher
451 * levels even if IO on some other mirrored buffer
452 * fails.
453 *
454 * The 'master' represents the composite IO operation
455 * to user-side. So if something waits for IO, then it
456 * will wait for the 'master' bio.
457 */
458 sector_t first_bad;
459 int bad_sectors;
460
461 r1_bio->bios[mirror] = NULL;
462 to_put = bio;
463 /*
464 * Do not set R1BIO_Uptodate if the current device is
465 * rebuilding or Faulty. This is because we cannot use
466		 * such a device for properly reading the data back (we could
467		 * potentially use it, if the current write would have landed
468		 * before rdev->recovery_offset, but for simplicity we don't
469		 * check this here).
470 */
471 if (test_bit(In_sync, &rdev->flags) &&
472 !test_bit(Faulty, &rdev->flags))
473 set_bit(R1BIO_Uptodate, &r1_bio->state);
474
475 /* Maybe we can clear some bad blocks. */
476 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
477 &first_bad, &bad_sectors) && !discard_error) {
478 r1_bio->bios[mirror] = IO_MADE_GOOD;
479 set_bit(R1BIO_MadeGood, &r1_bio->state);
480 }
481 }
482
483 if (behind) {
484 if (test_bit(WriteMostly, &rdev->flags))
485 atomic_dec(&r1_bio->behind_remaining);
486
487 /*
488 * In behind mode, we ACK the master bio once the I/O
489 * has safely reached all non-writemostly
490 * disks. Setting the Returned bit ensures that this
491 * gets done only once -- we don't ever want to return
492 * -EIO here, instead we'll wait
493 */
494 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
495 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
496 /* Maybe we can return now */
497 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
498 struct bio *mbio = r1_bio->master_bio;
499 pr_debug("raid1: behind end write sectors"
500 " %llu-%llu\n",
501 (unsigned long long) mbio->bi_iter.bi_sector,
502 (unsigned long long) bio_end_sector(mbio) - 1);
503 call_bio_endio(r1_bio);
504 }
505 }
506 }
507 if (r1_bio->bios[mirror] == NULL)
508 rdev_dec_pending(rdev, conf->mddev);
509
510 /*
511 * Let's see if all mirrored write operations have finished
512 * already.
513 */
514 r1_bio_write_done(r1_bio);
515
516 if (to_put)
517 bio_put(to_put);
518}
519
520/*
521 * This routine returns the disk from which the requested read should
522 * be done. There is a per-array 'next expected sequential IO' sector
523 * number - if this matches on the next IO then we use the last disk.
524 * There is also a per-disk 'last known head position' sector that is
525 * maintained from IRQ contexts; both the normal and the resync IO
526 * completion handlers update this position correctly. If there is no
527 * perfect sequential match then we pick the disk whose head is closest.
528 *
529 * If there are 2 mirrors in the same 2 devices, performance degrades
530 * because position is mirror, not device based.
531 *
532 * The rdev for the device selected will have nr_pending incremented.
533 */
534static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
535{
536 const sector_t this_sector = r1_bio->sector;
537 int sectors;
538 int best_good_sectors;
539 int best_disk, best_dist_disk, best_pending_disk;
540 int has_nonrot_disk;
541 int disk;
542 sector_t best_dist;
543 unsigned int min_pending;
544 struct md_rdev *rdev;
545 int choose_first;
546 int choose_next_idle;
547
548 rcu_read_lock();
549 /*
550 * Check if we can balance. We can balance on the whole
551 * device if no resync is going on, or below the resync window.
552 * We take the first readable disk when above the resync window.
553 */
554 retry:
555 sectors = r1_bio->sectors;
556 best_disk = -1;
557 best_dist_disk = -1;
558 best_dist = MaxSector;
559 best_pending_disk = -1;
560 min_pending = UINT_MAX;
561 best_good_sectors = 0;
562 has_nonrot_disk = 0;
563 choose_next_idle = 0;
564 clear_bit(R1BIO_FailFast, &r1_bio->state);
565
566 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
567 (mddev_is_clustered(conf->mddev) &&
568 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
569 this_sector + sectors)))
570 choose_first = 1;
571 else
572 choose_first = 0;
573
574 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
575 sector_t dist;
576 sector_t first_bad;
577 int bad_sectors;
578 unsigned int pending;
579 bool nonrot;
580
581 rdev = rcu_dereference(conf->mirrors[disk].rdev);
582 if (r1_bio->bios[disk] == IO_BLOCKED
583 || rdev == NULL
584 || test_bit(Faulty, &rdev->flags))
585 continue;
586 if (!test_bit(In_sync, &rdev->flags) &&
587 rdev->recovery_offset < this_sector + sectors)
588 continue;
589 if (test_bit(WriteMostly, &rdev->flags)) {
590 /* Don't balance among write-mostly, just
591 * use the first as a last resort */
592 if (best_dist_disk < 0) {
593 if (is_badblock(rdev, this_sector, sectors,
594 &first_bad, &bad_sectors)) {
595 if (first_bad <= this_sector)
596 /* Cannot use this */
597 continue;
598 best_good_sectors = first_bad - this_sector;
599 } else
600 best_good_sectors = sectors;
601 best_dist_disk = disk;
602 best_pending_disk = disk;
603 }
604 continue;
605 }
606 /* This is a reasonable device to use. It might
607 * even be best.
608 */
609 if (is_badblock(rdev, this_sector, sectors,
610 &first_bad, &bad_sectors)) {
611 if (best_dist < MaxSector)
612 /* already have a better device */
613 continue;
614 if (first_bad <= this_sector) {
615 /* cannot read here. If this is the 'primary'
616 * device, then we must not read beyond
617				 * bad_sectors from another device.
618 */
619 bad_sectors -= (this_sector - first_bad);
620 if (choose_first && sectors > bad_sectors)
621 sectors = bad_sectors;
622 if (best_good_sectors > sectors)
623 best_good_sectors = sectors;
624
625 } else {
626 sector_t good_sectors = first_bad - this_sector;
627 if (good_sectors > best_good_sectors) {
628 best_good_sectors = good_sectors;
629 best_disk = disk;
630 }
631 if (choose_first)
632 break;
633 }
634 continue;
635 } else
636 best_good_sectors = sectors;
637
638 if (best_disk >= 0)
639 /* At least two disks to choose from so failfast is OK */
640 set_bit(R1BIO_FailFast, &r1_bio->state);
641
642 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
643 has_nonrot_disk |= nonrot;
644 pending = atomic_read(&rdev->nr_pending);
645 dist = abs(this_sector - conf->mirrors[disk].head_position);
646 if (choose_first) {
647 best_disk = disk;
648 break;
649 }
650 /* Don't change to another disk for sequential reads */
651 if (conf->mirrors[disk].next_seq_sect == this_sector
652 || dist == 0) {
653 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
654 struct raid1_info *mirror = &conf->mirrors[disk];
655
656 best_disk = disk;
657 /*
658 * If buffered sequential IO size exceeds optimal
659 * iosize, check if there is idle disk. If yes, choose
660 * the idle disk. read_balance could already choose an
661 * idle disk before noticing it's a sequential IO in
662 * this disk. This doesn't matter because this disk
663 * will idle, next time it will be utilized after the
664 * first disk has IO size exceeds optimal iosize. In
665 * this way, iosize of the first disk will be optimal
666 * iosize at least. iosize of the second disk might be
667 * small, but not a big deal since when the second disk
668 * starts IO, the first disk is likely still busy.
669 */
670 if (nonrot && opt_iosize > 0 &&
671 mirror->seq_start != MaxSector &&
672 mirror->next_seq_sect > opt_iosize &&
673 mirror->next_seq_sect - opt_iosize >=
674 mirror->seq_start) {
675 choose_next_idle = 1;
676 continue;
677 }
678 break;
679 }
680
681 if (choose_next_idle)
682 continue;
683
684 if (min_pending > pending) {
685 min_pending = pending;
686 best_pending_disk = disk;
687 }
688
689 if (dist < best_dist) {
690 best_dist = dist;
691 best_dist_disk = disk;
692 }
693 }
694
695 /*
696 * If all disks are rotational, choose the closest disk. If any disk is
697	 * non-rotational, choose the disk with the fewest pending requests, even
698	 * if that disk is rotational, which may or may not be optimal for arrays
699	 * with mixed rotational/non-rotational disks depending on workload.
700 */
701 if (best_disk == -1) {
702 if (has_nonrot_disk || min_pending == 0)
703 best_disk = best_pending_disk;
704 else
705 best_disk = best_dist_disk;
706 }
707
708 if (best_disk >= 0) {
709 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
710 if (!rdev)
711 goto retry;
712 atomic_inc(&rdev->nr_pending);
713 sectors = best_good_sectors;
714
715 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
716 conf->mirrors[best_disk].seq_start = this_sector;
717
718 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
719 }
720 rcu_read_unlock();
721 *max_sectors = sectors;
722
723 return best_disk;
724}
725
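/*
 * Report congestion to the writeback layer: congested if the queue of
 * pending raid1 writes is full, or if any member device's backing_dev
 * is congested.
 */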
726static int raid1_congested(struct mddev *mddev, int bits)
727{
728 struct r1conf *conf = mddev->private;
729 int i, ret = 0;
730
731 if ((bits & (1 << WB_async_congested)) &&
732 conf->pending_count >= max_queued_requests)
733 return 1;
734
735 rcu_read_lock();
736 for (i = 0; i < conf->raid_disks * 2; i++) {
737 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
738 if (rdev && !test_bit(Faulty, &rdev->flags)) {
739 struct request_queue *q = bdev_get_queue(rdev->bdev);
740
741 BUG_ON(!q);
742
743 /* Note the '|| 1' - when read_balance prefers
744 * non-congested targets, it can be removed
745 */
746 if ((bits & (1 << WB_async_congested)) || 1)
747 ret |= bdi_congested(&q->backing_dev_info, bits);
748 else
749 ret &= bdi_congested(&q->backing_dev_info, bits);
750 }
751 }
752 rcu_read_unlock();
753 return ret;
754}
755
756static void flush_pending_writes(struct r1conf *conf)
757{
758 /* Any writes that have been queued but are awaiting
759 * bitmap updates get flushed here.
760 */
761 spin_lock_irq(&conf->device_lock);
762
763 if (conf->pending_bio_list.head) {
764 struct bio *bio;
765 bio = bio_list_get(&conf->pending_bio_list);
766 conf->pending_count = 0;
767 spin_unlock_irq(&conf->device_lock);
768 /* flush any pending bitmap writes to
769 * disk before proceeding w/ I/O */
770 bitmap_unplug(conf->mddev->bitmap);
771 wake_up(&conf->wait_barrier);
772
773 while (bio) { /* submit pending writes */
774 struct bio *next = bio->bi_next;
775 struct md_rdev *rdev = (void*)bio->bi_bdev;
776 bio->bi_next = NULL;
777 bio->bi_bdev = rdev->bdev;
778 if (test_bit(Faulty, &rdev->flags)) {
779 bio->bi_error = -EIO;
780 bio_endio(bio);
781 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
782 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
783 /* Just ignore it */
784 bio_endio(bio);
785 else
786 generic_make_request(bio);
787 bio = next;
788 }
789 } else
790 spin_unlock_irq(&conf->device_lock);
791}
792
793/* Barriers....
794 * Sometimes we need to suspend IO while we do something else,
795 * either some resync/recovery, or reconfigure the array.
796 * To do this we raise a 'barrier'.
797 * The 'barrier' is a counter that can be raised multiple times
798 * to count how many activities are happening which preclude
799 * normal IO.
800 * We can only raise the barrier if there is no pending IO.
801 * i.e. if nr_pending == 0.
802 * We choose only to raise the barrier if no-one is waiting for the
803 * barrier to go down. This means that as soon as an IO request
804 * is ready, no other operations which require a barrier will start
805 * until the IO request has had a chance.
806 *
807 * So: regular IO calls 'wait_barrier'. When that returns there
808 * is no background IO happening. It must arrange to call
809 * allow_barrier when it has finished its IO.
810 * Background IO calls must call raise_barrier. Once that returns
811 * there is no normal IO happening. It must arrange to call
812 * lower_barrier when the particular background IO completes.
813 */
814static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
815{
816 spin_lock_irq(&conf->resync_lock);
817
818 /* Wait until no block IO is waiting */
819 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
820 conf->resync_lock);
821
822 /* block any new IO from starting */
823 conf->barrier++;
824 conf->next_resync = sector_nr;
825
826 /* For these conditions we must wait:
827 * A: while the array is in frozen state
828	 * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
829	 *    the maximum allowed depth.
830	 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning the
831	 *    next resync will reach into the window which normal bios are
832 * handling.
833 * D: while there are any active requests in the current window.
834 */
835 wait_event_lock_irq(conf->wait_barrier,
836 !conf->array_frozen &&
837 conf->barrier < RESYNC_DEPTH &&
838 conf->current_window_requests == 0 &&
839 (conf->start_next_window >=
840 conf->next_resync + RESYNC_SECTORS),
841 conf->resync_lock);
842
843 conf->nr_pending++;
844 spin_unlock_irq(&conf->resync_lock);
845}
846
847static void lower_barrier(struct r1conf *conf)
848{
849 unsigned long flags;
850 BUG_ON(conf->barrier <= 0);
851 spin_lock_irqsave(&conf->resync_lock, flags);
852 conf->barrier--;
853 conf->nr_pending--;
854 spin_unlock_irqrestore(&conf->resync_lock, flags);
855 wake_up(&conf->wait_barrier);
856}
857
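/*
 * Decide whether a request must wait for the resync barrier.  Reads never
 * wait unless the array is frozen; writes may proceed if they lie entirely
 * behind the completed resync point or far enough beyond the current
 * resync window.
 */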
858static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
859{
860 bool wait = false;
861
862 if (conf->array_frozen || !bio)
863 wait = true;
864 else if (conf->barrier && bio_data_dir(bio) == WRITE) {
865 if ((conf->mddev->curr_resync_completed
866 >= bio_end_sector(bio)) ||
867 (conf->start_next_window + NEXT_NORMALIO_DISTANCE
868 <= bio->bi_iter.bi_sector))
869 wait = false;
870 else
871 wait = true;
872 }
873
874 return wait;
875}
876
877static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
878{
879 sector_t sector = 0;
880
881 spin_lock_irq(&conf->resync_lock);
882 if (need_to_wait_for_sync(conf, bio)) {
883 conf->nr_waiting++;
884 /* Wait for the barrier to drop.
885 * However if there are already pending
886 * requests (preventing the barrier from
887 * rising completely), and the
888 * per-process bio queue isn't empty,
889 * then don't wait, as we need to empty
890 * that queue to allow conf->start_next_window
891 * to increase.
892 */
893 raid1_log(conf->mddev, "wait barrier");
894 wait_event_lock_irq(conf->wait_barrier,
895 !conf->array_frozen &&
896 (!conf->barrier ||
897 ((conf->start_next_window <
898 conf->next_resync + RESYNC_SECTORS) &&
899 current->bio_list &&
900 !bio_list_empty(current->bio_list))),
901 conf->resync_lock);
902 conf->nr_waiting--;
903 }
904
905 if (bio && bio_data_dir(bio) == WRITE) {
906 if (bio->bi_iter.bi_sector >= conf->next_resync) {
907 if (conf->start_next_window == MaxSector)
908 conf->start_next_window =
909 conf->next_resync +
910 NEXT_NORMALIO_DISTANCE;
911
912 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
913 <= bio->bi_iter.bi_sector)
914 conf->next_window_requests++;
915 else
916 conf->current_window_requests++;
917 sector = conf->start_next_window;
918 }
919 }
920
921 conf->nr_pending++;
922 spin_unlock_irq(&conf->resync_lock);
923 return sector;
924}
925
926static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
927 sector_t bi_sector)
928{
929 unsigned long flags;
930
931 spin_lock_irqsave(&conf->resync_lock, flags);
932 conf->nr_pending--;
933 if (start_next_window) {
934 if (start_next_window == conf->start_next_window) {
935 if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
936 <= bi_sector)
937 conf->next_window_requests--;
938 else
939 conf->current_window_requests--;
940 } else
941 conf->current_window_requests--;
942
943 if (!conf->current_window_requests) {
944 if (conf->next_window_requests) {
945 conf->current_window_requests =
946 conf->next_window_requests;
947 conf->next_window_requests = 0;
948 conf->start_next_window +=
949 NEXT_NORMALIO_DISTANCE;
950 } else
951 conf->start_next_window = MaxSector;
952 }
953 }
954 spin_unlock_irqrestore(&conf->resync_lock, flags);
955 wake_up(&conf->wait_barrier);
956}
957
958static void freeze_array(struct r1conf *conf, int extra)
959{
960 /* stop syncio and normal IO and wait for everything to
961	 * go quiet.
962	 * We wait until nr_pending matches nr_queued+extra.
963 * This is called in the context of one normal IO request
964 * that has failed. Thus any sync request that might be pending
965 * will be blocked by nr_pending, and we need to wait for
966 * pending IO requests to complete or be queued for re-try.
967 * Thus the number queued (nr_queued) plus this request (extra)
968 * must match the number of pending IOs (nr_pending) before
969 * we continue.
970 */
971 spin_lock_irq(&conf->resync_lock);
972 conf->array_frozen = 1;
973 raid1_log(conf->mddev, "wait freeze");
974 wait_event_lock_irq_cmd(conf->wait_barrier,
975 conf->nr_pending == conf->nr_queued+extra,
976 conf->resync_lock,
977 flush_pending_writes(conf));
978 spin_unlock_irq(&conf->resync_lock);
979}
980static void unfreeze_array(struct r1conf *conf)
981{
982 /* reverse the effect of the freeze */
983 spin_lock_irq(&conf->resync_lock);
984 conf->array_frozen = 0;
985 wake_up(&conf->wait_barrier);
986 spin_unlock_irq(&conf->resync_lock);
987}
988
989/* duplicate the data pages for behind I/O
990 */
991static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
992{
993 int i;
994 struct bio_vec *bvec;
995 struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
996 GFP_NOIO);
997 if (unlikely(!bvecs))
998 return;
999
1000 bio_for_each_segment_all(bvec, bio, i) {
1001 bvecs[i] = *bvec;
1002 bvecs[i].bv_page = alloc_page(GFP_NOIO);
1003 if (unlikely(!bvecs[i].bv_page))
1004 goto do_sync_io;
1005 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
1006 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
1007 kunmap(bvecs[i].bv_page);
1008 kunmap(bvec->bv_page);
1009 }
1010 r1_bio->behind_bvecs = bvecs;
1011 r1_bio->behind_page_count = bio->bi_vcnt;
1012 set_bit(R1BIO_BehindIO, &r1_bio->state);
1013 return;
1014
1015do_sync_io:
1016 for (i = 0; i < bio->bi_vcnt; i++)
1017 if (bvecs[i].bv_page)
1018 put_page(bvecs[i].bv_page);
1019 kfree(bvecs);
1020 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1021 bio->bi_iter.bi_size);
1022}
1023
1024struct raid1_plug_cb {
1025 struct blk_plug_cb cb;
1026 struct bio_list pending;
1027 int pending_cnt;
1028};
1029
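/*
 * blk_plug callback: when unplugged from scheduler context (or while
 * another bio list is being processed) hand the queued writes to raid1d,
 * otherwise flush the bitmap and submit them directly.
 */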
1030static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1031{
1032 struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1033 cb);
1034 struct mddev *mddev = plug->cb.data;
1035 struct r1conf *conf = mddev->private;
1036 struct bio *bio;
1037
1038 if (from_schedule || current->bio_list) {
1039 spin_lock_irq(&conf->device_lock);
1040 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1041 conf->pending_count += plug->pending_cnt;
1042 spin_unlock_irq(&conf->device_lock);
1043 wake_up(&conf->wait_barrier);
1044 md_wakeup_thread(mddev->thread);
1045 kfree(plug);
1046 return;
1047 }
1048
1049 /* we aren't scheduling, so we can do the write-out directly. */
1050 bio = bio_list_get(&plug->pending);
1051 bitmap_unplug(mddev->bitmap);
1052 wake_up(&conf->wait_barrier);
1053
1054 while (bio) { /* submit pending writes */
1055 struct bio *next = bio->bi_next;
1056 struct md_rdev *rdev = (void*)bio->bi_bdev;
1057 bio->bi_next = NULL;
1058 bio->bi_bdev = rdev->bdev;
1059 if (test_bit(Faulty, &rdev->flags)) {
1060 bio->bi_error = -EIO;
1061 bio_endio(bio);
1062 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1063 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1064 /* Just ignore it */
1065 bio_endio(bio);
1066 else
1067 generic_make_request(bio);
1068 bio = next;
1069 }
1070 kfree(plug);
1071}
1072
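/*
 * Handle a READ: pick a mirror with read_balance(), clone the bio to it,
 * and split into further r1bios if bad blocks limit how much can be read
 * from the chosen device.
 */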
1073static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1074 struct r1bio *r1_bio)
1075{
1076 struct r1conf *conf = mddev->private;
1077 struct raid1_info *mirror;
1078 struct bio *read_bio;
1079 struct bitmap *bitmap = mddev->bitmap;
1080 const int op = bio_op(bio);
1081 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1082 int sectors_handled;
1083 int max_sectors;
1084 int rdisk;
1085
1086 wait_barrier(conf, bio);
1087
1088read_again:
1089 rdisk = read_balance(conf, r1_bio, &max_sectors);
1090
1091 if (rdisk < 0) {
1092 /* couldn't find anywhere to read from */
1093 raid_end_bio_io(r1_bio);
1094 return;
1095 }
1096 mirror = conf->mirrors + rdisk;
1097
1098 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1099 bitmap) {
1100 /*
1101 * Reading from a write-mostly device must take care not to
1102 * over-take any writes that are 'behind'
1103 */
1104 raid1_log(mddev, "wait behind writes");
1105 wait_event(bitmap->behind_wait,
1106 atomic_read(&bitmap->behind_writes) == 0);
1107 }
1108 r1_bio->read_disk = rdisk;
1109 r1_bio->start_next_window = 0;
1110
1111 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1112 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1113 max_sectors);
1114
1115 r1_bio->bios[rdisk] = read_bio;
1116
1117 read_bio->bi_iter.bi_sector = r1_bio->sector +
1118 mirror->rdev->data_offset;
1119 read_bio->bi_bdev = mirror->rdev->bdev;
1120 read_bio->bi_end_io = raid1_end_read_request;
1121 bio_set_op_attrs(read_bio, op, do_sync);
1122 if (test_bit(FailFast, &mirror->rdev->flags) &&
1123 test_bit(R1BIO_FailFast, &r1_bio->state))
1124 read_bio->bi_opf |= MD_FAILFAST;
1125 read_bio->bi_private = r1_bio;
1126
1127 if (mddev->gendisk)
1128 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1129 read_bio, disk_devt(mddev->gendisk),
1130 r1_bio->sector);
1131
1132 if (max_sectors < r1_bio->sectors) {
1133 /*
1134 * could not read all from this device, so we will need another
1135 * r1_bio.
1136 */
1137 sectors_handled = (r1_bio->sector + max_sectors
1138 - bio->bi_iter.bi_sector);
1139 r1_bio->sectors = max_sectors;
1140 spin_lock_irq(&conf->device_lock);
1141 if (bio->bi_phys_segments == 0)
1142 bio->bi_phys_segments = 2;
1143 else
1144 bio->bi_phys_segments++;
1145 spin_unlock_irq(&conf->device_lock);
1146
1147 /*
1148 * Cannot call generic_make_request directly as that will be
1149 * queued in __make_request and subsequent mempool_alloc might
1150 * block waiting for it. So hand bio over to raid1d.
1151 */
1152 reschedule_retry(r1_bio);
1153
1154 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1155
1156 r1_bio->master_bio = bio;
1157 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1158 r1_bio->state = 0;
1159 r1_bio->mddev = mddev;
1160 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1161 goto read_again;
1162 } else
1163 generic_make_request(read_bio);
1164}
1165
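/*
 * Handle a WRITE: wait for any resync barrier, clone the bio to every
 * usable mirror (splitting around known bad blocks if necessary), and
 * queue the clones for submission via the plug callback or raid1d.
 */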
1166static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1167 struct r1bio *r1_bio)
1168{
1169 struct r1conf *conf = mddev->private;
1170 int i, disks;
1171 struct bitmap *bitmap = mddev->bitmap;
1172 unsigned long flags;
1173 const int op = bio_op(bio);
1174 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1175 const unsigned long do_flush_fua = (bio->bi_opf &
1176 (REQ_PREFLUSH | REQ_FUA));
1177 struct md_rdev *blocked_rdev;
1178 struct blk_plug_cb *cb;
1179 struct raid1_plug_cb *plug = NULL;
1180 int first_clone;
1181 int sectors_handled;
1182 int max_sectors;
1183 sector_t start_next_window;
1184
1185 /*
1186 * Register the new request and wait if the reconstruction
1187 * thread has put up a bar for new requests.
1188 * Continue immediately if no resync is active currently.
1189 */
1190
1191 md_write_start(mddev, bio); /* wait on superblock update early */
1192
1193 if ((bio_end_sector(bio) > mddev->suspend_lo &&
1194 bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1195 (mddev_is_clustered(mddev) &&
1196 md_cluster_ops->area_resyncing(mddev, WRITE,
1197 bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
1198
1199 /*
1200 * As the suspend_* range is controlled by userspace, we want
1201 * an interruptible wait.
1202 */
1203 DEFINE_WAIT(w);
1204 for (;;) {
1205 flush_signals(current);
1206 prepare_to_wait(&conf->wait_barrier,
1207 &w, TASK_INTERRUPTIBLE);
1208 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1209 bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1210 (mddev_is_clustered(mddev) &&
1211 !md_cluster_ops->area_resyncing(mddev, WRITE,
1212 bio->bi_iter.bi_sector,
1213 bio_end_sector(bio))))
1214 break;
1215 schedule();
1216 }
1217 finish_wait(&conf->wait_barrier, &w);
1218 }
1219 start_next_window = wait_barrier(conf, bio);
1220
1221 if (conf->pending_count >= max_queued_requests) {
1222 md_wakeup_thread(mddev->thread);
1223 raid1_log(mddev, "wait queued");
1224 wait_event(conf->wait_barrier,
1225 conf->pending_count < max_queued_requests);
1226 }
1227 /* first select target devices under rcu_lock and
1228 * inc refcount on their rdev. Record them by setting
1229 * bios[x] to bio
1230 * If there are known/acknowledged bad blocks on any device on
1231 * which we have seen a write error, we want to avoid writing those
1232 * blocks.
1233 * This potentially requires several writes to write around
1234	 * the bad blocks. Each set of writes gets its own r1bio
1235 * with a set of bios attached.
1236 */
1237
1238 disks = conf->raid_disks * 2;
1239 retry_write:
1240 r1_bio->start_next_window = start_next_window;
1241 blocked_rdev = NULL;
1242 rcu_read_lock();
1243 max_sectors = r1_bio->sectors;
1244 for (i = 0; i < disks; i++) {
1245 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1246 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1247 atomic_inc(&rdev->nr_pending);
1248 blocked_rdev = rdev;
1249 break;
1250 }
1251 r1_bio->bios[i] = NULL;
1252 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1253 if (i < conf->raid_disks)
1254 set_bit(R1BIO_Degraded, &r1_bio->state);
1255 continue;
1256 }
1257
1258 atomic_inc(&rdev->nr_pending);
1259 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1260 sector_t first_bad;
1261 int bad_sectors;
1262 int is_bad;
1263
1264 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1265 &first_bad, &bad_sectors);
1266 if (is_bad < 0) {
1267 /* mustn't write here until the bad block is
1268 * acknowledged*/
1269 set_bit(BlockedBadBlocks, &rdev->flags);
1270 blocked_rdev = rdev;
1271 break;
1272 }
1273 if (is_bad && first_bad <= r1_bio->sector) {
1274 /* Cannot write here at all */
1275 bad_sectors -= (r1_bio->sector - first_bad);
1276 if (bad_sectors < max_sectors)
1277 /* mustn't write more than bad_sectors
1278 * to other devices yet
1279 */
1280 max_sectors = bad_sectors;
1281 rdev_dec_pending(rdev, mddev);
1282 /* We don't set R1BIO_Degraded as that
1283 * only applies if the disk is
1284 * missing, so it might be re-added,
1285 * and we want to know to recover this
1286 * chunk.
1287 * In this case the device is here,
1288 * and the fact that this chunk is not
1289 * in-sync is recorded in the bad
1290 * block log
1291 */
1292 continue;
1293 }
1294 if (is_bad) {
1295 int good_sectors = first_bad - r1_bio->sector;
1296 if (good_sectors < max_sectors)
1297 max_sectors = good_sectors;
1298 }
1299 }
1300 r1_bio->bios[i] = bio;
1301 }
1302 rcu_read_unlock();
1303
1304 if (unlikely(blocked_rdev)) {
1305 /* Wait for this device to become unblocked */
1306 int j;
1307 sector_t old = start_next_window;
1308
1309 for (j = 0; j < i; j++)
1310 if (r1_bio->bios[j])
1311 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1312 r1_bio->state = 0;
1313 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
1314 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1315 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1316 start_next_window = wait_barrier(conf, bio);
1317 /*
1318 * We must make sure the multi r1bios of bio have
1319 * the same value of bi_phys_segments
1320 */
1321 if (bio->bi_phys_segments && old &&
1322 old != start_next_window)
1323 /* Wait for the former r1bio(s) to complete */
1324 wait_event(conf->wait_barrier,
1325 bio->bi_phys_segments == 1);
1326 goto retry_write;
1327 }
1328
1329 if (max_sectors < r1_bio->sectors) {
1330 /* We are splitting this write into multiple parts, so
1331 * we need to prepare for allocating another r1_bio.
1332 */
1333 r1_bio->sectors = max_sectors;
1334 spin_lock_irq(&conf->device_lock);
1335 if (bio->bi_phys_segments == 0)
1336 bio->bi_phys_segments = 2;
1337 else
1338 bio->bi_phys_segments++;
1339 spin_unlock_irq(&conf->device_lock);
1340 }
1341 sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
1342
1343 atomic_set(&r1_bio->remaining, 1);
1344 atomic_set(&r1_bio->behind_remaining, 0);
1345
1346 first_clone = 1;
1347 for (i = 0; i < disks; i++) {
1348 struct bio *mbio;
1349 if (!r1_bio->bios[i])
1350 continue;
1351
1352 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1353 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
1354 max_sectors);
1355
1356 if (first_clone) {
1357 /* do behind I/O ?
1358 * Not if there are too many, or cannot
1359 * allocate memory, or a reader on WriteMostly
1360 * is waiting for behind writes to flush */
1361 if (bitmap &&
1362 (atomic_read(&bitmap->behind_writes)
1363 < mddev->bitmap_info.max_write_behind) &&
1364 !waitqueue_active(&bitmap->behind_wait))
1365 alloc_behind_pages(mbio, r1_bio);
1366
1367 bitmap_startwrite(bitmap, r1_bio->sector,
1368 r1_bio->sectors,
1369 test_bit(R1BIO_BehindIO,
1370 &r1_bio->state));
1371 first_clone = 0;
1372 }
1373 if (r1_bio->behind_bvecs) {
1374 struct bio_vec *bvec;
1375 int j;
1376
1377 /*
1378 * We trimmed the bio, so _all is legit
1379 */
1380 bio_for_each_segment_all(bvec, mbio, j)
1381 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1382 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1383 atomic_inc(&r1_bio->behind_remaining);
1384 }
1385
1386 r1_bio->bios[i] = mbio;
1387
1388 mbio->bi_iter.bi_sector = (r1_bio->sector +
1389 conf->mirrors[i].rdev->data_offset);
1390 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1391 mbio->bi_end_io = raid1_end_write_request;
1392 bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
1393 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1394 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1395 conf->raid_disks - mddev->degraded > 1)
1396 mbio->bi_opf |= MD_FAILFAST;
1397 mbio->bi_private = r1_bio;
1398
1399 atomic_inc(&r1_bio->remaining);
1400
1401 if (mddev->gendisk)
1402 trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
1403 mbio, disk_devt(mddev->gendisk),
1404 r1_bio->sector);
1405 /* flush_pending_writes() needs access to the rdev so...*/
1406 mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
1407
1408 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1409 if (cb)
1410 plug = container_of(cb, struct raid1_plug_cb, cb);
1411 else
1412 plug = NULL;
1413 spin_lock_irqsave(&conf->device_lock, flags);
1414 if (plug) {
1415 bio_list_add(&plug->pending, mbio);
1416 plug->pending_cnt++;
1417 } else {
1418 bio_list_add(&conf->pending_bio_list, mbio);
1419 conf->pending_count++;
1420 }
1421 spin_unlock_irqrestore(&conf->device_lock, flags);
1422 if (!plug)
1423 md_wakeup_thread(mddev->thread);
1424 }
1425 /* Mustn't call r1_bio_write_done before this next test,
1426 * as it could result in the bio being freed.
1427 */
1428 if (sectors_handled < bio_sectors(bio)) {
1429 r1_bio_write_done(r1_bio);
1430 /* We need another r1_bio. It has already been counted
1431 * in bio->bi_phys_segments
1432 */
1433 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1434 r1_bio->master_bio = bio;
1435 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1436 r1_bio->state = 0;
1437 r1_bio->mddev = mddev;
1438 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1439 goto retry_write;
1440 }
1441
1442 r1_bio_write_done(r1_bio);
1443
1444 /* In case raid1d snuck in to freeze_array */
1445 wake_up(&conf->wait_barrier);
1446}
1447
1448static void raid1_make_request(struct mddev *mddev, struct bio *bio)
1449{
1450 struct r1conf *conf = mddev->private;
1451 struct r1bio *r1_bio;
1452
1453 /*
1454 * make_request() can abort the operation when read-ahead is being
1455 * used and no empty request is available.
1456 *
1457 */
1458 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1459
1460 r1_bio->master_bio = bio;
1461 r1_bio->sectors = bio_sectors(bio);
1462 r1_bio->state = 0;
1463 r1_bio->mddev = mddev;
1464 r1_bio->sector = bio->bi_iter.bi_sector;
1465
1466 /*
1467 * We might need to issue multiple reads to different devices if there
1468 * are bad blocks around, so we keep track of the number of reads in
1469 * bio->bi_phys_segments. If this is 0, there is only one r1_bio and
1470 * no locking will be needed when requests complete. If it is
1471 * non-zero, then it is the number of not-completed requests.
1472 */
1473 bio->bi_phys_segments = 0;
1474 bio_clear_flag(bio, BIO_SEG_VALID);
1475
1476 if (bio_data_dir(bio) == READ)
1477 raid1_read_request(mddev, bio, r1_bio);
1478 else
1479 raid1_write_request(mddev, bio, r1_bio);
1480}
1481
1482static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1483{
1484 struct r1conf *conf = mddev->private;
1485 int i;
1486
1487 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1488 conf->raid_disks - mddev->degraded);
1489 rcu_read_lock();
1490 for (i = 0; i < conf->raid_disks; i++) {
1491 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1492 seq_printf(seq, "%s",
1493 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1494 }
1495 rcu_read_unlock();
1496 seq_printf(seq, "]");
1497}
1498
1499static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1500{
1501 char b[BDEVNAME_SIZE];
1502 struct r1conf *conf = mddev->private;
1503 unsigned long flags;
1504
1505 /*
1506 * If it is not operational, then we have already marked it as dead
1507	 * else if it is the last working disk, ignore the error, let the
1508 * next level up know.
1509 * else mark the drive as failed
1510 */
1511 spin_lock_irqsave(&conf->device_lock, flags);
1512 if (test_bit(In_sync, &rdev->flags)
1513 && (conf->raid_disks - mddev->degraded) == 1) {
1514 /*
1515 * Don't fail the drive, act as though we were just a
1516 * normal single drive.
1517 * However don't try a recovery from this drive as
1518 * it is very likely to fail.
1519 */
1520 conf->recovery_disabled = mddev->recovery_disabled;
1521 spin_unlock_irqrestore(&conf->device_lock, flags);
1522 return;
1523 }
1524 set_bit(Blocked, &rdev->flags);
1525 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1526 mddev->degraded++;
1527 set_bit(Faulty, &rdev->flags);
1528 } else
1529 set_bit(Faulty, &rdev->flags);
1530 spin_unlock_irqrestore(&conf->device_lock, flags);
1531 /*
1532 * if recovery is running, make sure it aborts.
1533 */
1534 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1535 set_mask_bits(&mddev->sb_flags, 0,
1536 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1537 pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1538 "md/raid1:%s: Operation continuing on %d devices.\n",
1539 mdname(mddev), bdevname(rdev->bdev, b),
1540 mdname(mddev), conf->raid_disks - mddev->degraded);
1541}
1542
1543static void print_conf(struct r1conf *conf)
1544{
1545 int i;
1546
1547 pr_debug("RAID1 conf printout:\n");
1548 if (!conf) {
1549 pr_debug("(!conf)\n");
1550 return;
1551 }
1552 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1553 conf->raid_disks);
1554
1555 rcu_read_lock();
1556 for (i = 0; i < conf->raid_disks; i++) {
1557 char b[BDEVNAME_SIZE];
1558 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1559 if (rdev)
1560 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1561 i, !test_bit(In_sync, &rdev->flags),
1562 !test_bit(Faulty, &rdev->flags),
1563 bdevname(rdev->bdev,b));
1564 }
1565 rcu_read_unlock();
1566}
1567
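/*
 * A resync/recovery pass has finished: wait for the barrier machinery to
 * settle, free the resync buffer pool and reset the resync window
 * accounting.
 */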
1568static void close_sync(struct r1conf *conf)
1569{
1570 wait_barrier(conf, NULL);
1571 allow_barrier(conf, 0, 0);
1572
1573 mempool_destroy(conf->r1buf_pool);
1574 conf->r1buf_pool = NULL;
1575
1576 spin_lock_irq(&conf->resync_lock);
1577 conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
1578 conf->start_next_window = MaxSector;
1579 conf->current_window_requests +=
1580 conf->next_window_requests;
1581 conf->next_window_requests = 0;
1582 spin_unlock_irq(&conf->resync_lock);
1583}
1584
1585static int raid1_spare_active(struct mddev *mddev)
1586{
1587 int i;
1588 struct r1conf *conf = mddev->private;
1589 int count = 0;
1590 unsigned long flags;
1591
1592 /*
1593 * Find all failed disks within the RAID1 configuration
1594 * and mark them readable.
1595 * Called under mddev lock, so rcu protection not needed.
1596 * device_lock used to avoid races with raid1_end_read_request
1597 * which expects 'In_sync' flags and ->degraded to be consistent.
1598 */
1599 spin_lock_irqsave(&conf->device_lock, flags);
1600 for (i = 0; i < conf->raid_disks; i++) {
1601 struct md_rdev *rdev = conf->mirrors[i].rdev;
1602 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1603 if (repl
1604 && !test_bit(Candidate, &repl->flags)
1605 && repl->recovery_offset == MaxSector
1606 && !test_bit(Faulty, &repl->flags)
1607 && !test_and_set_bit(In_sync, &repl->flags)) {
1608 /* replacement has just become active */
1609 if (!rdev ||
1610 !test_and_clear_bit(In_sync, &rdev->flags))
1611 count++;
1612 if (rdev) {
1613 /* Replaced device not technically
1614 * faulty, but we need to be sure
1615 * it gets removed and never re-added
1616 */
1617 set_bit(Faulty, &rdev->flags);
1618 sysfs_notify_dirent_safe(
1619 rdev->sysfs_state);
1620 }
1621 }
1622 if (rdev
1623 && rdev->recovery_offset == MaxSector
1624 && !test_bit(Faulty, &rdev->flags)
1625 && !test_and_set_bit(In_sync, &rdev->flags)) {
1626 count++;
1627 sysfs_notify_dirent_safe(rdev->sysfs_state);
1628 }
1629 }
1630 mddev->degraded -= count;
1631 spin_unlock_irqrestore(&conf->device_lock, flags);
1632
1633 print_conf(conf);
1634 return count;
1635}
1636
1637static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1638{
1639 struct r1conf *conf = mddev->private;
1640 int err = -EEXIST;
1641 int mirror = 0;
1642 struct raid1_info *p;
1643 int first = 0;
1644 int last = conf->raid_disks - 1;
1645
1646 if (mddev->recovery_disabled == conf->recovery_disabled)
1647 return -EBUSY;
1648
1649 if (md_integrity_add_rdev(rdev, mddev))
1650 return -ENXIO;
1651
1652 if (rdev->raid_disk >= 0)
1653 first = last = rdev->raid_disk;
1654
1655 /*
1656 * find the disk ... but prefer rdev->saved_raid_disk
1657 * if possible.
1658 */
1659 if (rdev->saved_raid_disk >= 0 &&
1660 rdev->saved_raid_disk >= first &&
1661 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1662 first = last = rdev->saved_raid_disk;
1663
1664 for (mirror = first; mirror <= last; mirror++) {
1665 p = conf->mirrors+mirror;
1666 if (!p->rdev) {
1667
1668 if (mddev->gendisk)
1669 disk_stack_limits(mddev->gendisk, rdev->bdev,
1670 rdev->data_offset << 9);
1671
1672 p->head_position = 0;
1673 rdev->raid_disk = mirror;
1674 err = 0;
1675 /* As all devices are equivalent, we don't need a full recovery
1676			 * if this device was recently a member of the array
1677 */
1678 if (rdev->saved_raid_disk < 0)
1679 conf->fullsync = 1;
1680 rcu_assign_pointer(p->rdev, rdev);
1681 break;
1682 }
1683 if (test_bit(WantReplacement, &p->rdev->flags) &&
1684 p[conf->raid_disks].rdev == NULL) {
1685 /* Add this device as a replacement */
1686 clear_bit(In_sync, &rdev->flags);
1687 set_bit(Replacement, &rdev->flags);
1688 rdev->raid_disk = mirror;
1689 err = 0;
1690 conf->fullsync = 1;
1691 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1692 break;
1693 }
1694 }
1695 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1696 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1697 print_conf(conf);
1698 return err;
1699}
1700
1701static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1702{
1703 struct r1conf *conf = mddev->private;
1704 int err = 0;
1705 int number = rdev->raid_disk;
1706 struct raid1_info *p = conf->mirrors + number;
1707
1708 if (rdev != p->rdev)
1709 p = conf->mirrors + conf->raid_disks + number;
1710
1711 print_conf(conf);
1712 if (rdev == p->rdev) {
1713 if (test_bit(In_sync, &rdev->flags) ||
1714 atomic_read(&rdev->nr_pending)) {
1715 err = -EBUSY;
1716 goto abort;
1717 }
1718 /* Only remove non-faulty devices if recovery
1719 * is not possible.
1720 */
1721 if (!test_bit(Faulty, &rdev->flags) &&
1722 mddev->recovery_disabled != conf->recovery_disabled &&
1723 mddev->degraded < conf->raid_disks) {
1724 err = -EBUSY;
1725 goto abort;
1726 }
1727 p->rdev = NULL;
1728 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1729 synchronize_rcu();
1730 if (atomic_read(&rdev->nr_pending)) {
1731 /* lost the race, try later */
1732 err = -EBUSY;
1733 p->rdev = rdev;
1734 goto abort;
1735 }
1736 }
1737 if (conf->mirrors[conf->raid_disks + number].rdev) {
1738 /* We just removed a device that is being replaced.
1739 * Move down the replacement. We drain all IO before
1740 * doing this to avoid confusion.
1741 */
1742 struct md_rdev *repl =
1743 conf->mirrors[conf->raid_disks + number].rdev;
1744 freeze_array(conf, 0);
1745 clear_bit(Replacement, &repl->flags);
1746 p->rdev = repl;
1747 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1748 unfreeze_array(conf);
1749 clear_bit(WantReplacement, &rdev->flags);
1750 } else
1751 clear_bit(WantReplacement, &rdev->flags);
1752 err = md_integrity_register(mddev);
1753 }
1754abort:
1755
1756 print_conf(conf);
1757 return err;
1758}
1759
1760static void end_sync_read(struct bio *bio)
1761{
1762 struct r1bio *r1_bio = bio->bi_private;
1763
1764 update_head_pos(r1_bio->read_disk, r1_bio);
1765
1766 /*
1767 * we have read a block, now it needs to be re-written,
1768 * or re-read if the read failed.
1769 * We don't do much here, just schedule handling by raid1d
1770 */
1771 if (!bio->bi_error)
1772 set_bit(R1BIO_Uptodate, &r1_bio->state);
1773
1774 if (atomic_dec_and_test(&r1_bio->remaining))
1775 reschedule_retry(r1_bio);
1776}
1777
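/*
 * Completion handler for resync/recovery writes: on error keep the bitmap
 * bits set (so the range is tried again) and flag a write error; on
 * success note any known-bad blocks that have now been made good.
 */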
1778static void end_sync_write(struct bio *bio)
1779{
1780 int uptodate = !bio->bi_error;
1781 struct r1bio *r1_bio = bio->bi_private;
1782 struct mddev *mddev = r1_bio->mddev;
1783 struct r1conf *conf = mddev->private;
1784 sector_t first_bad;
1785 int bad_sectors;
1786 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1787
1788 if (!uptodate) {
1789 sector_t sync_blocks = 0;
1790 sector_t s = r1_bio->sector;
1791 long sectors_to_go = r1_bio->sectors;
1792		/* make sure these bits don't get cleared. */
1793 do {
1794 bitmap_end_sync(mddev->bitmap, s,
1795 &sync_blocks, 1);
1796 s += sync_blocks;
1797 sectors_to_go -= sync_blocks;
1798 } while (sectors_to_go > 0);
1799 set_bit(WriteErrorSeen, &rdev->flags);
1800 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1801 set_bit(MD_RECOVERY_NEEDED, &
1802 mddev->recovery);
1803 set_bit(R1BIO_WriteError, &r1_bio->state);
1804 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1805 &first_bad, &bad_sectors) &&
1806 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1807 r1_bio->sector,
1808 r1_bio->sectors,
1809 &first_bad, &bad_sectors)
1810 )
1811 set_bit(R1BIO_MadeGood, &r1_bio->state);
1812
1813 if (atomic_dec_and_test(&r1_bio->remaining)) {
1814 int s = r1_bio->sectors;
1815 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1816 test_bit(R1BIO_WriteError, &r1_bio->state))
1817 reschedule_retry(r1_bio);
1818 else {
1819 put_buf(r1_bio);
1820 md_done_sync(mddev, s, uptodate);
1821 }
1822 }
1823}
1824
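/*
 * Synchronously read or write one range of a device.  On failure record a
 * bad block, or fail the device if the bad block cannot be recorded.
 */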
1825static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1826 int sectors, struct page *page, int rw)
1827{
1828 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1829 /* success */
1830 return 1;
1831 if (rw == WRITE) {
1832 set_bit(WriteErrorSeen, &rdev->flags);
1833 if (!test_and_set_bit(WantReplacement,
1834 &rdev->flags))
1835 set_bit(MD_RECOVERY_NEEDED, &
1836 rdev->mddev->recovery);
1837 }
1838 /* need to record an error - either for the block or the device */
1839 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1840 md_error(rdev->mddev, rdev);
1841 return 0;
1842}
1843
1844static int fix_sync_read_error(struct r1bio *r1_bio)
1845{
1846 /* Try some synchronous reads of other devices to get
1847 * good data, much like with normal read errors. Only
1848 * read into the pages we already have so we don't
1849 * need to re-issue the read request.
1850 * We don't need to freeze the array, because being in an
1851 * active sync request, there is no normal IO, and
1852 * no overlapping syncs.
1853 * We don't need to check is_badblock() again as we
1854 * made sure that anything with a bad block in range
1855 * will have bi_end_io clear.
1856 */
1857 struct mddev *mddev = r1_bio->mddev;
1858 struct r1conf *conf = mddev->private;
1859 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1860 sector_t sect = r1_bio->sector;
1861 int sectors = r1_bio->sectors;
1862 int idx = 0;
1863 struct md_rdev *rdev;
1864
1865 rdev = conf->mirrors[r1_bio->read_disk].rdev;
1866 if (test_bit(FailFast, &rdev->flags)) {
1867 /* Don't try recovering from here - just fail it
1868 * ... unless it is the last working device of course */
1869 md_error(mddev, rdev);
1870 if (test_bit(Faulty, &rdev->flags))
1871 /* Don't try to read from here, but make sure
1872 * put_buf does its thing
1873 */
1874 bio->bi_end_io = end_sync_write;
1875 }
1876
1877 while(sectors) {
1878 int s = sectors;
1879 int d = r1_bio->read_disk;
1880 int success = 0;
1881 int start;
1882
1883 if (s > (PAGE_SIZE>>9))
1884 s = PAGE_SIZE >> 9;
1885 do {
1886 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1887 /* No rcu protection needed here; devices
1888 * can only be removed when no resync is
1889 * active, and resync is currently active
1890 */
1891 rdev = conf->mirrors[d].rdev;
1892 if (sync_page_io(rdev, sect, s<<9,
1893 bio->bi_io_vec[idx].bv_page,
1894 REQ_OP_READ, 0, false)) {
1895 success = 1;
1896 break;
1897 }
1898 }
1899 d++;
1900 if (d == conf->raid_disks * 2)
1901 d = 0;
1902 } while (!success && d != r1_bio->read_disk);
1903
1904 if (!success) {
1905 char b[BDEVNAME_SIZE];
1906 int abort = 0;
1907 /* Cannot read from anywhere, this block is lost.
1908 * Record a bad block on each device. If that doesn't
1909 * work just disable and interrupt the recovery.
1910 * Don't fail devices as that won't really help.
1911 */
1912 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1913 mdname(mddev),
1914 bdevname(bio->bi_bdev, b),
1915 (unsigned long long)r1_bio->sector);
1916 for (d = 0; d < conf->raid_disks * 2; d++) {
1917 rdev = conf->mirrors[d].rdev;
1918 if (!rdev || test_bit(Faulty, &rdev->flags))
1919 continue;
1920 if (!rdev_set_badblocks(rdev, sect, s, 0))
1921 abort = 1;
1922 }
1923 if (abort) {
1924 conf->recovery_disabled =
1925 mddev->recovery_disabled;
1926 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1927 md_done_sync(mddev, r1_bio->sectors, 0);
1928 put_buf(r1_bio);
1929 return 0;
1930 }
1931 /* Try next page */
1932 sectors -= s;
1933 sect += s;
1934 idx++;
1935 continue;
1936 }
1937
1938 start = d;
1939 /* write it back and re-read */
1940 while (d != r1_bio->read_disk) {
1941 if (d == 0)
1942 d = conf->raid_disks * 2;
1943 d--;
1944 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1945 continue;
1946 rdev = conf->mirrors[d].rdev;
1947 if (r1_sync_page_io(rdev, sect, s,
1948 bio->bi_io_vec[idx].bv_page,
1949 WRITE) == 0) {
1950 r1_bio->bios[d]->bi_end_io = NULL;
1951 rdev_dec_pending(rdev, mddev);
1952 }
1953 }
1954 d = start;
1955 while (d != r1_bio->read_disk) {
1956 if (d == 0)
1957 d = conf->raid_disks * 2;
1958 d--;
1959 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1960 continue;
1961 rdev = conf->mirrors[d].rdev;
1962 if (r1_sync_page_io(rdev, sect, s,
1963 bio->bi_io_vec[idx].bv_page,
1964 READ) != 0)
1965 atomic_add(s, &rdev->corrected_errors);
1966 }
1967 sectors -= s;
1968 sect += s;
1969 idx++;
1970 }
1971 set_bit(R1BIO_Uptodate, &r1_bio->state);
1972 bio->bi_error = 0;
1973 return 1;
1974}
1975
1976static void process_checks(struct r1bio *r1_bio)
1977{
1978 /* We have read all readable devices. If we haven't
1979 * got the block, then there is no hope left.
1980 * If we have, then we want to do a comparison
1981 * and skip the write if everything is the same.
1982 * If any blocks failed to read, then we need to
1983 * attempt an over-write
1984 */
1985 struct mddev *mddev = r1_bio->mddev;
1986 struct r1conf *conf = mddev->private;
1987 int primary;
1988 int i;
1989 int vcnt;
1990
1991 /* Fix variable parts of all bios */
1992 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
1993 for (i = 0; i < conf->raid_disks * 2; i++) {
1994 int j;
1995 int size;
1996 int error;
1997 struct bio *b = r1_bio->bios[i];
1998 if (b->bi_end_io != end_sync_read)
1999 continue;
2000 /* fixup the bio for reuse, but preserve errno */
2001 error = b->bi_error;
2002 bio_reset(b);
2003 b->bi_error = error;
2004 b->bi_vcnt = vcnt;
2005 b->bi_iter.bi_size = r1_bio->sectors << 9;
2006 b->bi_iter.bi_sector = r1_bio->sector +
2007 conf->mirrors[i].rdev->data_offset;
2008 b->bi_bdev = conf->mirrors[i].rdev->bdev;
2009 b->bi_end_io = end_sync_read;
2010 b->bi_private = r1_bio;
2011
2012 size = b->bi_iter.bi_size;
2013 for (j = 0; j < vcnt ; j++) {
2014 struct bio_vec *bi;
2015 bi = &b->bi_io_vec[j];
2016 bi->bv_offset = 0;
2017 if (size > PAGE_SIZE)
2018 bi->bv_len = PAGE_SIZE;
2019 else
2020 bi->bv_len = size;
2021 size -= PAGE_SIZE;
2022 }
2023 }
2024 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2025 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2026 !r1_bio->bios[primary]->bi_error) {
2027 r1_bio->bios[primary]->bi_end_io = NULL;
2028 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2029 break;
2030 }
2031 r1_bio->read_disk = primary;
2032 for (i = 0; i < conf->raid_disks * 2; i++) {
2033 int j;
2034 struct bio *pbio = r1_bio->bios[primary];
2035 struct bio *sbio = r1_bio->bios[i];
2036 int error = sbio->bi_error;
2037
2038 if (sbio->bi_end_io != end_sync_read)
2039 continue;
2040 /* Now we can 'fixup' the error value */
2041 sbio->bi_error = 0;
2042
2043 if (!error) {
2044 for (j = vcnt; j-- ; ) {
2045 struct page *p, *s;
2046 p = pbio->bi_io_vec[j].bv_page;
2047 s = sbio->bi_io_vec[j].bv_page;
2048 if (memcmp(page_address(p),
2049 page_address(s),
2050 sbio->bi_io_vec[j].bv_len))
2051 break;
2052 }
2053 } else
2054 j = 0;
2055 if (j >= 0)
2056 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2057 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2058 && !error)) {
2059 /* No need to write to this device. */
2060 sbio->bi_end_io = NULL;
2061 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2062 continue;
2063 }
2064
2065 bio_copy_data(sbio, pbio);
2066 }
2067}
2068
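/*
 * All resync/recovery reads for this r1bio have completed. Repair any
 * read error, run the copy comparison for a user-requested check, and
 * then issue the writes that bring the out-of-sync mirrors up to date.
 */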
2069static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2070{
2071 struct r1conf *conf = mddev->private;
2072 int i;
2073 int disks = conf->raid_disks * 2;
2074 struct bio *bio, *wbio;
2075
2076 bio = r1_bio->bios[r1_bio->read_disk];
2077
2078 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2079 /* ouch - failed to read all of that. */
2080 if (!fix_sync_read_error(r1_bio))
2081 return;
2082
2083 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2084 process_checks(r1_bio);
2085
2086 /*
2087 * schedule writes
2088 */
2089 atomic_set(&r1_bio->remaining, 1);
2090 for (i = 0; i < disks ; i++) {
2091 wbio = r1_bio->bios[i];
2092 if (wbio->bi_end_io == NULL ||
2093 (wbio->bi_end_io == end_sync_read &&
2094 (i == r1_bio->read_disk ||
2095 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2096 continue;
2097
2098 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2099 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2100 wbio->bi_opf |= MD_FAILFAST;
2101
2102 wbio->bi_end_io = end_sync_write;
2103 atomic_inc(&r1_bio->remaining);
2104 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2105
2106 generic_make_request(wbio);
2107 }
2108
2109 if (atomic_dec_and_test(&r1_bio->remaining)) {
2110 /* if we're here, all write(s) have completed, so clean up */
2111 int s = r1_bio->sectors;
2112 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2113 test_bit(R1BIO_WriteError, &r1_bio->state))
2114 reschedule_retry(r1_bio);
2115 else {
2116 put_buf(r1_bio);
2117 md_done_sync(mddev, s, 1);
2118 }
2119 }
2120}
2121
2122/*
2123 * This is a kernel thread which:
2124 *
2125 * 1. Retries failed read operations on working mirrors.
2126 * 2. Updates the raid superblock when problems are encountered.
2127 * 3. Performs writes following reads for array synchronising.
2128 */
2129
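/*
 * Try to repair a failed read on @read_disk: find another mirror that
 * can supply each chunk, write the good data back over the failing
 * area and re-read it to verify. The caller (handle_read_error) has
 * frozen the array so no other IO interferes. If no mirror can supply
 * the data, the range is marked bad on the original device.
 */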
2130static void fix_read_error(struct r1conf *conf, int read_disk,
2131 sector_t sect, int sectors)
2132{
2133 struct mddev *mddev = conf->mddev;
2134 while(sectors) {
2135 int s = sectors;
2136 int d = read_disk;
2137 int success = 0;
2138 int start;
2139 struct md_rdev *rdev;
2140
2141 if (s > (PAGE_SIZE>>9))
2142 s = PAGE_SIZE >> 9;
2143
2144 do {
2145 sector_t first_bad;
2146 int bad_sectors;
2147
2148 rcu_read_lock();
2149 rdev = rcu_dereference(conf->mirrors[d].rdev);
2150 if (rdev &&
2151 (test_bit(In_sync, &rdev->flags) ||
2152 (!test_bit(Faulty, &rdev->flags) &&
2153 rdev->recovery_offset >= sect + s)) &&
2154 is_badblock(rdev, sect, s,
2155 &first_bad, &bad_sectors) == 0) {
2156 atomic_inc(&rdev->nr_pending);
2157 rcu_read_unlock();
2158 if (sync_page_io(rdev, sect, s<<9,
2159 conf->tmppage, REQ_OP_READ, 0, false))
2160 success = 1;
2161 rdev_dec_pending(rdev, mddev);
2162 if (success)
2163 break;
2164 } else
2165 rcu_read_unlock();
2166 d++;
2167 if (d == conf->raid_disks * 2)
2168 d = 0;
2169 } while (!success && d != read_disk);
2170
2171 if (!success) {
2172 /* Cannot read from anywhere - mark it bad */
2173 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2174 if (!rdev_set_badblocks(rdev, sect, s, 0))
2175 md_error(mddev, rdev);
2176 break;
2177 }
2178 /* write it back and re-read */
2179 start = d;
2180 while (d != read_disk) {
2181 if (d == 0)
2182 d = conf->raid_disks * 2;
2183 d--;
2184 rcu_read_lock();
2185 rdev = rcu_dereference(conf->mirrors[d].rdev);
2186 if (rdev &&
2187 !test_bit(Faulty, &rdev->flags)) {
2188 atomic_inc(&rdev->nr_pending);
2189 rcu_read_unlock();
2190 r1_sync_page_io(rdev, sect, s,
2191 conf->tmppage, WRITE);
2192 rdev_dec_pending(rdev, mddev);
2193 } else
2194 rcu_read_unlock();
2195 }
2196 d = start;
2197 while (d != read_disk) {
2198 char b[BDEVNAME_SIZE];
2199 if (d == 0)
2200 d = conf->raid_disks * 2;
2201 d--;
2202 rcu_read_lock();
2203 rdev = rcu_dereference(conf->mirrors[d].rdev);
2204 if (rdev &&
2205 !test_bit(Faulty, &rdev->flags)) {
2206 atomic_inc(&rdev->nr_pending);
2207 rcu_read_unlock();
2208 if (r1_sync_page_io(rdev, sect, s,
2209 conf->tmppage, READ)) {
2210 atomic_add(s, &rdev->corrected_errors);
2211 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2212 mdname(mddev), s,
2213 (unsigned long long)(sect +
2214 rdev->data_offset),
2215 bdevname(rdev->bdev, b));
2216 }
2217 rdev_dec_pending(rdev, mddev);
2218 } else
2219 rcu_read_unlock();
2220 }
2221 sectors -= s;
2222 sect += s;
2223 }
2224}
2225
2226static int narrow_write_error(struct r1bio *r1_bio, int i)
2227{
2228 struct mddev *mddev = r1_bio->mddev;
2229 struct r1conf *conf = mddev->private;
2230 struct md_rdev *rdev = conf->mirrors[i].rdev;
2231
2232 /* bio has the data to be written to device 'i' where
2233 * we just recently had a write error.
2234 * We repeatedly clone the bio and trim down to one block,
2235 * then try the write. Where the write fails we record
2236 * a bad block.
2237 * It is conceivable that the bio doesn't exactly align with
2238 * blocks. We must handle this somehow.
2239 *
2240 * We currently own a reference on the rdev.
2241 */
2242
2243 int block_sectors;
2244 sector_t sector;
2245 int sectors;
2246 int sect_to_write = r1_bio->sectors;
2247 int ok = 1;
2248
2249 if (rdev->badblocks.shift < 0)
2250 return 0;
2251
2252 block_sectors = roundup(1 << rdev->badblocks.shift,
2253 bdev_logical_block_size(rdev->bdev) >> 9);
2254 sector = r1_bio->sector;
2255 sectors = ((sector + block_sectors)
2256 & ~(sector_t)(block_sectors - 1))
2257 - sector;
2258
2259 while (sect_to_write) {
2260 struct bio *wbio;
2261 if (sectors > sect_to_write)
2262 sectors = sect_to_write;
2263 /* Write at 'sector' for 'sectors'*/
2264
2265 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2266 unsigned vcnt = r1_bio->behind_page_count;
2267 struct bio_vec *vec = r1_bio->behind_bvecs;
2268
2269 while (!vec->bv_page) {
2270 vec++;
2271 vcnt--;
2272 }
2273
2274 wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2275 memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2276
2277 wbio->bi_vcnt = vcnt;
2278 } else {
2279 wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2280 }
2281
2282 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2283 wbio->bi_iter.bi_sector = r1_bio->sector;
2284 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2285
2286 bio_trim(wbio, sector - r1_bio->sector, sectors);
2287 wbio->bi_iter.bi_sector += rdev->data_offset;
2288 wbio->bi_bdev = rdev->bdev;
2289
2290 if (submit_bio_wait(wbio) < 0)
2291 /* failure! */
2292 ok = rdev_set_badblocks(rdev, sector,
2293 sectors, 0)
2294 && ok;
2295
2296 bio_put(wbio);
2297 sect_to_write -= sectors;
2298 sector += sectors;
2299 sectors = block_sectors;
2300 }
2301 return ok;
2302}
2303
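/*
 * Process-context completion for a resync r1bio whose writes either
 * failed or succeeded over known bad blocks: clear bad-block records
 * where the write made the data good, record new bad blocks (or fail
 * the device) where the write failed, then account the synced range.
 */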
2304static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2305{
2306 int m;
2307 int s = r1_bio->sectors;
2308 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2309 struct md_rdev *rdev = conf->mirrors[m].rdev;
2310 struct bio *bio = r1_bio->bios[m];
2311 if (bio->bi_end_io == NULL)
2312 continue;
2313 if (!bio->bi_error &&
2314 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2315 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2316 }
2317 if (bio->bi_error &&
2318 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2319 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2320 md_error(conf->mddev, rdev);
2321 }
2322 }
2323 put_buf(r1_bio);
2324 md_done_sync(conf->mddev, s, 1);
2325}
2326
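/*
 * Process-context completion for a normal write r1bio: clear bad-block
 * records for writes flagged IO_MADE_GOOD and retry failed writes in
 * smaller chunks via narrow_write_error(). Requests that saw a write
 * error are parked on bio_end_io_list so raid1d only completes them
 * once any pending superblock update has been written out.
 */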
2327static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2328{
2329 int m;
2330 bool fail = false;
2331 for (m = 0; m < conf->raid_disks * 2 ; m++)
2332 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2333 struct md_rdev *rdev = conf->mirrors[m].rdev;
2334 rdev_clear_badblocks(rdev,
2335 r1_bio->sector,
2336 r1_bio->sectors, 0);
2337 rdev_dec_pending(rdev, conf->mddev);
2338 } else if (r1_bio->bios[m] != NULL) {
2339 /* This drive got a write error. We need to
2340 * narrow down and record precise write
2341 * errors.
2342 */
2343 fail = true;
2344 if (!narrow_write_error(r1_bio, m)) {
2345 md_error(conf->mddev,
2346 conf->mirrors[m].rdev);
2347 /* an I/O failed, we can't clear the bitmap */
2348 set_bit(R1BIO_Degraded, &r1_bio->state);
2349 }
2350 rdev_dec_pending(conf->mirrors[m].rdev,
2351 conf->mddev);
2352 }
2353 if (fail) {
2354 spin_lock_irq(&conf->device_lock);
2355 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2356 conf->nr_queued++;
2357 spin_unlock_irq(&conf->device_lock);
2358 md_wakeup_thread(conf->mddev->thread);
2359 } else {
2360 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2361 close_write(r1_bio);
2362 raid_end_bio_io(r1_bio);
2363 }
2364}
2365
2366static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2367{
2368 int disk;
2369 int max_sectors;
2370 struct mddev *mddev = conf->mddev;
2371 struct bio *bio;
2372 char b[BDEVNAME_SIZE];
2373 struct md_rdev *rdev;
2374 dev_t bio_dev;
2375 sector_t bio_sector;
2376
2377 clear_bit(R1BIO_ReadError, &r1_bio->state);
2378 /* we got a read error. Maybe the drive is bad. Maybe just
2379 * the block and we can fix it.
2380 * We freeze all other IO, and try reading the block from
2381 * other devices. When we find one, we write the good data back
2382 * and re-read to check that this fixes the read error.
2383 * This is all done synchronously while the array is
2384 * frozen
2385 */
2386
2387 bio = r1_bio->bios[r1_bio->read_disk];
2388 bdevname(bio->bi_bdev, b);
2389 bio_dev = bio->bi_bdev->bd_dev;
2390 bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
2391 bio_put(bio);
2392 r1_bio->bios[r1_bio->read_disk] = NULL;
2393
2394 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2395 if (mddev->ro == 0
2396 && !test_bit(FailFast, &rdev->flags)) {
2397 freeze_array(conf, 1);
2398 fix_read_error(conf, r1_bio->read_disk,
2399 r1_bio->sector, r1_bio->sectors);
2400 unfreeze_array(conf);
2401 } else {
2402 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2403 }
2404
2405 rdev_dec_pending(rdev, conf->mddev);
2406
2407read_more:
2408 disk = read_balance(conf, r1_bio, &max_sectors);
2409 if (disk == -1) {
2410 pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2411 mdname(mddev), b, (unsigned long long)r1_bio->sector);
2412 raid_end_bio_io(r1_bio);
2413 } else {
2414 const unsigned long do_sync
2415 = r1_bio->master_bio->bi_opf & REQ_SYNC;
2416 r1_bio->read_disk = disk;
2417 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2418 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2419 max_sectors);
2420 r1_bio->bios[r1_bio->read_disk] = bio;
2421 rdev = conf->mirrors[disk].rdev;
2422 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
2423 mdname(mddev),
2424 (unsigned long long)r1_bio->sector,
2425 bdevname(rdev->bdev, b));
2426 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2427 bio->bi_bdev = rdev->bdev;
2428 bio->bi_end_io = raid1_end_read_request;
2429 bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
2430 if (test_bit(FailFast, &rdev->flags) &&
2431 test_bit(R1BIO_FailFast, &r1_bio->state))
2432 bio->bi_opf |= MD_FAILFAST;
2433 bio->bi_private = r1_bio;
2434 if (max_sectors < r1_bio->sectors) {
2435 /* Drat - have to split this up more */
2436 struct bio *mbio = r1_bio->master_bio;
2437 int sectors_handled = (r1_bio->sector + max_sectors
2438 - mbio->bi_iter.bi_sector);
2439 r1_bio->sectors = max_sectors;
2440 spin_lock_irq(&conf->device_lock);
2441 if (mbio->bi_phys_segments == 0)
2442 mbio->bi_phys_segments = 2;
2443 else
2444 mbio->bi_phys_segments++;
2445 spin_unlock_irq(&conf->device_lock);
2446 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
2447 bio, bio_dev, bio_sector);
2448 generic_make_request(bio);
2449 bio = NULL;
2450
2451 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2452
2453 r1_bio->master_bio = mbio;
2454 r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
2455 r1_bio->state = 0;
2456 set_bit(R1BIO_ReadError, &r1_bio->state);
2457 r1_bio->mddev = mddev;
2458 r1_bio->sector = mbio->bi_iter.bi_sector +
2459 sectors_handled;
2460
2461 goto read_more;
2462 } else {
2463 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
2464 bio, bio_dev, bio_sector);
2465 generic_make_request(bio);
2466 }
2467 }
2468}
2469
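/*
 * The raid1 daemon: completes deferred writes once the superblock is
 * up to date, flushes the queue of pending writes, and works through
 * the retry list (resync write errors, failed writes to be narrowed,
 * read errors to be redirected, and deferred partial reads).
 */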
2470static void raid1d(struct md_thread *thread)
2471{
2472 struct mddev *mddev = thread->mddev;
2473 struct r1bio *r1_bio;
2474 unsigned long flags;
2475 struct r1conf *conf = mddev->private;
2476 struct list_head *head = &conf->retry_list;
2477 struct blk_plug plug;
2478
2479 md_check_recovery(mddev);
2480
2481 if (!list_empty_careful(&conf->bio_end_io_list) &&
2482 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2483 LIST_HEAD(tmp);
2484 spin_lock_irqsave(&conf->device_lock, flags);
2485 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2486 while (!list_empty(&conf->bio_end_io_list)) {
2487 list_move(conf->bio_end_io_list.prev, &tmp);
2488 conf->nr_queued--;
2489 }
2490 }
2491 spin_unlock_irqrestore(&conf->device_lock, flags);
2492 while (!list_empty(&tmp)) {
2493 r1_bio = list_first_entry(&tmp, struct r1bio,
2494 retry_list);
2495 list_del(&r1_bio->retry_list);
2496 if (mddev->degraded)
2497 set_bit(R1BIO_Degraded, &r1_bio->state);
2498 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2499 close_write(r1_bio);
2500 raid_end_bio_io(r1_bio);
2501 }
2502 }
2503
2504 blk_start_plug(&plug);
2505 for (;;) {
2506
2507 flush_pending_writes(conf);
2508
2509 spin_lock_irqsave(&conf->device_lock, flags);
2510 if (list_empty(head)) {
2511 spin_unlock_irqrestore(&conf->device_lock, flags);
2512 break;
2513 }
2514 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2515 list_del(head->prev);
2516 conf->nr_queued--;
2517 spin_unlock_irqrestore(&conf->device_lock, flags);
2518
2519 mddev = r1_bio->mddev;
2520 conf = mddev->private;
2521 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2522 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2523 test_bit(R1BIO_WriteError, &r1_bio->state))
2524 handle_sync_write_finished(conf, r1_bio);
2525 else
2526 sync_request_write(mddev, r1_bio);
2527 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2528 test_bit(R1BIO_WriteError, &r1_bio->state))
2529 handle_write_finished(conf, r1_bio);
2530 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2531 handle_read_error(conf, r1_bio);
2532 else
2533 /* just a partial read to be scheduled from separate
2534 * context
2535 */
2536 generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2537
2538 cond_resched();
2539 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2540 md_check_recovery(mddev);
2541 }
2542 blk_finish_plug(&plug);
2543}
2544
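/* Allocate the pool of resync buffers used by raid1_sync_request(). */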
2545static int init_resync(struct r1conf *conf)
2546{
2547 int buffs;
2548
2549 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2550 BUG_ON(conf->r1buf_pool);
2551 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2552 conf->poolinfo);
2553 if (!conf->r1buf_pool)
2554 return -ENOMEM;
2555 conf->next_resync = 0;
2556 return 0;
2557}
2558
2559/*
2560 * perform a "sync" on one "block"
2561 *
2562 * We need to make sure that no normal I/O request - particularly write
2563 * requests - conflict with active sync requests.
2564 *
2565 * This is achieved by tracking pending requests and a 'barrier' concept
2566 * that can be installed to exclude normal IO requests.
2567 */
2568
2569static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2570 int *skipped)
2571{
2572 struct r1conf *conf = mddev->private;
2573 struct r1bio *r1_bio;
2574 struct bio *bio;
2575 sector_t max_sector, nr_sectors;
2576 int disk = -1;
2577 int i;
2578 int wonly = -1;
2579 int write_targets = 0, read_targets = 0;
2580 sector_t sync_blocks;
2581 int still_degraded = 0;
2582 int good_sectors = RESYNC_SECTORS;
2583 int min_bad = 0; /* number of sectors that are bad in all devices */
2584
2585 if (!conf->r1buf_pool)
2586 if (init_resync(conf))
2587 return 0;
2588
2589 max_sector = mddev->dev_sectors;
2590 if (sector_nr >= max_sector) {
2591 /* If we aborted, we need to abort the
2592 * sync on the 'current' bitmap chunk (there will
2593 * only be one in raid1 resync).
2594 * We can find the current address in mddev->curr_resync
2595 */
2596 if (mddev->curr_resync < max_sector) /* aborted */
2597 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2598 &sync_blocks, 1);
2599 else /* completed sync */
2600 conf->fullsync = 0;
2601
2602 bitmap_close_sync(mddev->bitmap);
2603 close_sync(conf);
2604
2605 if (mddev_is_clustered(mddev)) {
2606 conf->cluster_sync_low = 0;
2607 conf->cluster_sync_high = 0;
2608 }
2609 return 0;
2610 }
2611
2612 if (mddev->bitmap == NULL &&
2613 mddev->recovery_cp == MaxSector &&
2614 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2615 conf->fullsync == 0) {
2616 *skipped = 1;
2617 return max_sector - sector_nr;
2618 }
2619 /* before building a request, check if we can skip these blocks.
2620 * This call to bitmap_start_sync doesn't actually record anything
2621 */
2622 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2623 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2624 /* We can skip this block, and probably several more */
2625 *skipped = 1;
2626 return sync_blocks;
2627 }
2628
2629 /*
2630 * If there is non-resync activity waiting for a turn, then let it
2631 * through before starting on this new sync request.
2632 */
2633 if (conf->nr_waiting)
2634 schedule_timeout_uninterruptible(1);
2635
2636 /* we are incrementing sector_nr below. To be safe, we check against
2637 * sector_nr + two times RESYNC_SECTORS
2638 */
2639
2640 bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2641 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2642 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2643
2644 raise_barrier(conf, sector_nr);
2645
2646 rcu_read_lock();
2647 /*
2648 * If we get a correctably read error during resync or recovery,
2649 * we might want to read from a different device. So we
2650 * flag all drives that could conceivably be read from for READ,
2651 * and any others (which will be non-In_sync devices) for WRITE.
2652 * If a read fails, we try reading from something else for which READ
2653 * is OK.
2654 */
2655
2656 r1_bio->mddev = mddev;
2657 r1_bio->sector = sector_nr;
2658 r1_bio->state = 0;
2659 set_bit(R1BIO_IsSync, &r1_bio->state);
2660
2661 for (i = 0; i < conf->raid_disks * 2; i++) {
2662 struct md_rdev *rdev;
2663 bio = r1_bio->bios[i];
2664 bio_reset(bio);
2665
2666 rdev = rcu_dereference(conf->mirrors[i].rdev);
2667 if (rdev == NULL ||
2668 test_bit(Faulty, &rdev->flags)) {
2669 if (i < conf->raid_disks)
2670 still_degraded = 1;
2671 } else if (!test_bit(In_sync, &rdev->flags)) {
2672 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2673 bio->bi_end_io = end_sync_write;
2674 write_targets++;
2675 } else {
2676 /* may need to read from here */
2677 sector_t first_bad = MaxSector;
2678 int bad_sectors;
2679
2680 if (is_badblock(rdev, sector_nr, good_sectors,
2681 &first_bad, &bad_sectors)) {
2682 if (first_bad > sector_nr)
2683 good_sectors = first_bad - sector_nr;
2684 else {
2685 bad_sectors -= (sector_nr - first_bad);
2686 if (min_bad == 0 ||
2687 min_bad > bad_sectors)
2688 min_bad = bad_sectors;
2689 }
2690 }
2691 if (sector_nr < first_bad) {
2692 if (test_bit(WriteMostly, &rdev->flags)) {
2693 if (wonly < 0)
2694 wonly = i;
2695 } else {
2696 if (disk < 0)
2697 disk = i;
2698 }
2699 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2700 bio->bi_end_io = end_sync_read;
2701 read_targets++;
2702 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2703 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2704 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2705 /*
2706 * The device is suitable for reading (InSync),
2707 * but has bad block(s) here. Let's try to correct them,
2708 * if we are doing resync or repair. Otherwise, leave
2709 * this device alone for this sync request.
2710 */
2711 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2712 bio->bi_end_io = end_sync_write;
2713 write_targets++;
2714 }
2715 }
2716 if (bio->bi_end_io) {
2717 atomic_inc(&rdev->nr_pending);
2718 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2719 bio->bi_bdev = rdev->bdev;
2720 bio->bi_private = r1_bio;
2721 if (test_bit(FailFast, &rdev->flags))
2722 bio->bi_opf |= MD_FAILFAST;
2723 }
2724 }
2725 rcu_read_unlock();
2726 if (disk < 0)
2727 disk = wonly;
2728 r1_bio->read_disk = disk;
2729
2730 if (read_targets == 0 && min_bad > 0) {
2731 /* These sectors are bad on all InSync devices, so we
2732 * need to mark them bad on all write targets
2733 */
2734 int ok = 1;
2735 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2736 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2737 struct md_rdev *rdev = conf->mirrors[i].rdev;
2738 ok = rdev_set_badblocks(rdev, sector_nr,
2739 min_bad, 0
2740 ) && ok;
2741 }
2742 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2743 *skipped = 1;
2744 put_buf(r1_bio);
2745
2746 if (!ok) {
2747 /* Cannot record the badblocks, so need to
2748 * abort the resync.
2749 * If there are multiple read targets, could just
2750 * fail the really bad ones ???
2751 */
2752 conf->recovery_disabled = mddev->recovery_disabled;
2753 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2754 return 0;
2755 } else
2756 return min_bad;
2757
2758 }
2759 if (min_bad > 0 && min_bad < good_sectors) {
2760 /* only resync enough to reach the next bad->good
2761 * transition */
2762 good_sectors = min_bad;
2763 }
2764
2765 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2766 /* extra read targets are also write targets */
2767 write_targets += read_targets-1;
2768
2769 if (write_targets == 0 || read_targets == 0) {
2770 /* There is nowhere to write, so all non-sync
2771 * drives must be failed, which means we are finished
2772 */
2773 sector_t rv;
2774 if (min_bad > 0)
2775 max_sector = sector_nr + min_bad;
2776 rv = max_sector - sector_nr;
2777 *skipped = 1;
2778 put_buf(r1_bio);
2779 return rv;
2780 }
2781
2782 if (max_sector > mddev->resync_max)
2783 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2784 if (max_sector > sector_nr + good_sectors)
2785 max_sector = sector_nr + good_sectors;
2786 nr_sectors = 0;
2787 sync_blocks = 0;
2788 do {
2789 struct page *page;
2790 int len = PAGE_SIZE;
2791 if (sector_nr + (len>>9) > max_sector)
2792 len = (max_sector - sector_nr) << 9;
2793 if (len == 0)
2794 break;
2795 if (sync_blocks == 0) {
2796 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2797 &sync_blocks, still_degraded) &&
2798 !conf->fullsync &&
2799 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2800 break;
2801 if ((len >> 9) > sync_blocks)
2802 len = sync_blocks<<9;
2803 }
2804
2805 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2806 bio = r1_bio->bios[i];
2807 if (bio->bi_end_io) {
2808 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2809 if (bio_add_page(bio, page, len, 0) == 0) {
2810 /* stop here */
2811 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2812 while (i > 0) {
2813 i--;
2814 bio = r1_bio->bios[i];
2815 if (bio->bi_end_io==NULL)
2816 continue;
2817 /* remove last page from this bio */
2818 bio->bi_vcnt--;
2819 bio->bi_iter.bi_size -= len;
2820 bio_clear_flag(bio, BIO_SEG_VALID);
2821 }
2822 goto bio_full;
2823 }
2824 }
2825 }
2826 nr_sectors += len>>9;
2827 sector_nr += len>>9;
2828 sync_blocks -= (len>>9);
2829 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2830 bio_full:
2831 r1_bio->sectors = nr_sectors;
2832
2833 if (mddev_is_clustered(mddev) &&
2834 conf->cluster_sync_high < sector_nr + nr_sectors) {
2835 conf->cluster_sync_low = mddev->curr_resync_completed;
2836 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2837 /* Send resync message */
2838 md_cluster_ops->resync_info_update(mddev,
2839 conf->cluster_sync_low,
2840 conf->cluster_sync_high);
2841 }
2842
2843 /* For a user-requested sync, we read all readable devices and do a
2844 * compare
2845 */
2846 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2847 atomic_set(&r1_bio->remaining, read_targets);
2848 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2849 bio = r1_bio->bios[i];
2850 if (bio->bi_end_io == end_sync_read) {
2851 read_targets--;
2852 md_sync_acct(bio->bi_bdev, nr_sectors);
2853 if (read_targets == 1)
2854 bio->bi_opf &= ~MD_FAILFAST;
2855 generic_make_request(bio);
2856 }
2857 }
2858 } else {
2859 atomic_set(&r1_bio->remaining, 1);
2860 bio = r1_bio->bios[r1_bio->read_disk];
2861 md_sync_acct(bio->bi_bdev, nr_sectors);
2862 if (read_targets == 1)
2863 bio->bi_opf &= ~MD_FAILFAST;
2864 generic_make_request(bio);
2865
2866 }
2867 return nr_sectors;
2868}
2869
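/*
 * Every member holds a full copy of the data, so the array size is
 * simply the per-device data size (unless an explicit size is given).
 */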
2870static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2871{
2872 if (sectors)
2873 return sectors;
2874
2875 return mddev->dev_sectors;
2876}
2877
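/*
 * Allocate and populate the per-array r1conf: the mirror table (with
 * replacement devices in its upper half), the r1bio mempool and the
 * temporary page, then register the raid1d thread. The thread is
 * handed over to the mddev later, in raid1_run().
 */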
2878static struct r1conf *setup_conf(struct mddev *mddev)
2879{
2880 struct r1conf *conf;
2881 int i;
2882 struct raid1_info *disk;
2883 struct md_rdev *rdev;
2884 int err = -ENOMEM;
2885
2886 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2887 if (!conf)
2888 goto abort;
2889
2890 conf->mirrors = kzalloc(sizeof(struct raid1_info)
2891 * mddev->raid_disks * 2,
2892 GFP_KERNEL);
2893 if (!conf->mirrors)
2894 goto abort;
2895
2896 conf->tmppage = alloc_page(GFP_KERNEL);
2897 if (!conf->tmppage)
2898 goto abort;
2899
2900 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2901 if (!conf->poolinfo)
2902 goto abort;
2903 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2904 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2905 r1bio_pool_free,
2906 conf->poolinfo);
2907 if (!conf->r1bio_pool)
2908 goto abort;
2909
2910 conf->poolinfo->mddev = mddev;
2911
2912 err = -EINVAL;
2913 spin_lock_init(&conf->device_lock);
2914 rdev_for_each(rdev, mddev) {
2915 struct request_queue *q;
2916 int disk_idx = rdev->raid_disk;
2917 if (disk_idx >= mddev->raid_disks
2918 || disk_idx < 0)
2919 continue;
2920 if (test_bit(Replacement, &rdev->flags))
2921 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2922 else
2923 disk = conf->mirrors + disk_idx;
2924
2925 if (disk->rdev)
2926 goto abort;
2927 disk->rdev = rdev;
2928 q = bdev_get_queue(rdev->bdev);
2929
2930 disk->head_position = 0;
2931 disk->seq_start = MaxSector;
2932 }
2933 conf->raid_disks = mddev->raid_disks;
2934 conf->mddev = mddev;
2935 INIT_LIST_HEAD(&conf->retry_list);
2936 INIT_LIST_HEAD(&conf->bio_end_io_list);
2937
2938 spin_lock_init(&conf->resync_lock);
2939 init_waitqueue_head(&conf->wait_barrier);
2940
2941 bio_list_init(&conf->pending_bio_list);
2942 conf->pending_count = 0;
2943 conf->recovery_disabled = mddev->recovery_disabled - 1;
2944
2945 conf->start_next_window = MaxSector;
2946 conf->current_window_requests = conf->next_window_requests = 0;
2947
2948 err = -EIO;
2949 for (i = 0; i < conf->raid_disks * 2; i++) {
2950
2951 disk = conf->mirrors + i;
2952
2953 if (i < conf->raid_disks &&
2954 disk[conf->raid_disks].rdev) {
2955 /* This slot has a replacement. */
2956 if (!disk->rdev) {
2957 /* No original, just make the replacement
2958 * a recovering spare
2959 */
2960 disk->rdev =
2961 disk[conf->raid_disks].rdev;
2962 disk[conf->raid_disks].rdev = NULL;
2963 } else if (!test_bit(In_sync, &disk->rdev->flags))
2964 /* Original is not in_sync - bad */
2965 goto abort;
2966 }
2967
2968 if (!disk->rdev ||
2969 !test_bit(In_sync, &disk->rdev->flags)) {
2970 disk->head_position = 0;
2971 if (disk->rdev &&
2972 (disk->rdev->saved_raid_disk < 0))
2973 conf->fullsync = 1;
2974 }
2975 }
2976
2977 err = -ENOMEM;
2978 conf->thread = md_register_thread(raid1d, mddev, "raid1");
2979 if (!conf->thread)
2980 goto abort;
2981
2982 return conf;
2983
2984 abort:
2985 if (conf) {
2986 mempool_destroy(conf->r1bio_pool);
2987 kfree(conf->mirrors);
2988 safe_put_page(conf->tmppage);
2989 kfree(conf->poolinfo);
2990 kfree(conf);
2991 }
2992 return ERR_PTR(err);
2993}
2994
2995static void raid1_free(struct mddev *mddev, void *priv);
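/*
 * Personality ->run method: obtain (or build) the r1conf, apply queue
 * limits from the member devices, work out the initial degraded count
 * and attach the raid1d thread to the mddev.
 */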
2996static int raid1_run(struct mddev *mddev)
2997{
2998 struct r1conf *conf;
2999 int i;
3000 struct md_rdev *rdev;
3001 int ret;
3002 bool discard_supported = false;
3003
3004 if (mddev->level != 1) {
3005 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3006 mdname(mddev), mddev->level);
3007 return -EIO;
3008 }
3009 if (mddev->reshape_position != MaxSector) {
3010 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3011 mdname(mddev));
3012 return -EIO;
3013 }
3014 /*
3015 * copy the already verified devices into our private RAID1
3016 * bookkeeping area. [whatever we allocate in run(),
3017 * should be freed in raid1_free()]
3018 */
3019 if (mddev->private == NULL)
3020 conf = setup_conf(mddev);
3021 else
3022 conf = mddev->private;
3023
3024 if (IS_ERR(conf))
3025 return PTR_ERR(conf);
3026
3027 if (mddev->queue)
3028 blk_queue_max_write_same_sectors(mddev->queue, 0);
3029
3030 rdev_for_each(rdev, mddev) {
3031 if (!mddev->gendisk)
3032 continue;
3033 disk_stack_limits(mddev->gendisk, rdev->bdev,
3034 rdev->data_offset << 9);
3035 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3036 discard_supported = true;
3037 }
3038
3039 mddev->degraded = 0;
3040 for (i=0; i < conf->raid_disks; i++)
3041 if (conf->mirrors[i].rdev == NULL ||
3042 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3043 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3044 mddev->degraded++;
3045
3046 if (conf->raid_disks - mddev->degraded == 1)
3047 mddev->recovery_cp = MaxSector;
3048
3049 if (mddev->recovery_cp != MaxSector)
3050 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3051 mdname(mddev));
3052 pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3053 mdname(mddev), mddev->raid_disks - mddev->degraded,
3054 mddev->raid_disks);
3055
3056 /*
3057 * Ok, everything is just fine now
3058 */
3059 mddev->thread = conf->thread;
3060 conf->thread = NULL;
3061 mddev->private = conf;
3062 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3063
3064 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3065
3066 if (mddev->queue) {
3067 if (discard_supported)
3068 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3069 mddev->queue);
3070 else
3071 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3072 mddev->queue);
3073 }
3074
3075 ret = md_integrity_register(mddev);
3076 if (ret) {
3077 md_unregister_thread(&mddev->thread);
3078 raid1_free(mddev, conf);
3079 }
3080 return ret;
3081}
3082
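/* Release everything that setup_conf() allocated for this array. */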
3083static void raid1_free(struct mddev *mddev, void *priv)
3084{
3085 struct r1conf *conf = priv;
3086
3087 mempool_destroy(conf->r1bio_pool);
3088 kfree(conf->mirrors);
3089 safe_put_page(conf->tmppage);
3090 kfree(conf->poolinfo);
3091 kfree(conf);
3092}
3093
3094static int raid1_resize(struct mddev *mddev, sector_t sectors)
3095{
3096 /* no resync is happening, and there is enough space
3097 * on all devices, so we can resize.
3098 * We need to make sure resync covers any new space.
3099 * If the array is shrinking we should possibly wait until
3100 * any io in the removed space completes, but it hardly seems
3101 * worth it.
3102 */
3103 sector_t newsize = raid1_size(mddev, sectors, 0);
3104 if (mddev->external_size &&
3105 mddev->array_sectors > newsize)
3106 return -EINVAL;
3107 if (mddev->bitmap) {
3108 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3109 if (ret)
3110 return ret;
3111 }
3112 md_set_array_sectors(mddev, newsize);
3113 set_capacity(mddev->gendisk, mddev->array_sectors);
3114 revalidate_disk(mddev->gendisk);
3115 if (sectors > mddev->dev_sectors &&
3116 mddev->recovery_cp > mddev->dev_sectors) {
3117 mddev->recovery_cp = mddev->dev_sectors;
3118 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3119 }
3120 mddev->dev_sectors = sectors;
3121 mddev->resync_max_sectors = sectors;
3122 return 0;
3123}
3124
3125static int raid1_reshape(struct mddev *mddev)
3126{
3127 /* We need to:
3128 * 1/ resize the r1bio_pool
3129 * 2/ resize conf->mirrors
3130 *
3131 * We allocate a new r1bio_pool if we can.
3132 * Then raise a device barrier and wait until all IO stops.
3133 * Then resize conf->mirrors and swap in the new r1bio pool.
3134 *
3135 * At the same time, we "pack" the devices so that all the missing
3136 * devices have the higher raid_disk numbers.
3137 */
3138 mempool_t *newpool, *oldpool;
3139 struct pool_info *newpoolinfo;
3140 struct raid1_info *newmirrors;
3141 struct r1conf *conf = mddev->private;
3142 int cnt, raid_disks;
3143 unsigned long flags;
3144 int d, d2, err;
3145
3146 /* Cannot change chunk_size, layout, or level */
3147 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3148 mddev->layout != mddev->new_layout ||
3149 mddev->level != mddev->new_level) {
3150 mddev->new_chunk_sectors = mddev->chunk_sectors;
3151 mddev->new_layout = mddev->layout;
3152 mddev->new_level = mddev->level;
3153 return -EINVAL;
3154 }
3155
3156 if (!mddev_is_clustered(mddev)) {
3157 err = md_allow_write(mddev);
3158 if (err)
3159 return err;
3160 }
3161
3162 raid_disks = mddev->raid_disks + mddev->delta_disks;
3163
3164 if (raid_disks < conf->raid_disks) {
3165 cnt = 0;
3166 for (d = 0; d < conf->raid_disks; d++)
3167 if (conf->mirrors[d].rdev)
3168 cnt++;
3169 if (cnt > raid_disks)
3170 return -EBUSY;
3171 }
3172
3173 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3174 if (!newpoolinfo)
3175 return -ENOMEM;
3176 newpoolinfo->mddev = mddev;
3177 newpoolinfo->raid_disks = raid_disks * 2;
3178
3179 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3180 r1bio_pool_free, newpoolinfo);
3181 if (!newpool) {
3182 kfree(newpoolinfo);
3183 return -ENOMEM;
3184 }
3185 newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3186 GFP_KERNEL);
3187 if (!newmirrors) {
3188 kfree(newpoolinfo);
3189 mempool_destroy(newpool);
3190 return -ENOMEM;
3191 }
3192
3193 freeze_array(conf, 0);
3194
3195 /* ok, everything is stopped */
3196 oldpool = conf->r1bio_pool;
3197 conf->r1bio_pool = newpool;
3198
3199 for (d = d2 = 0; d < conf->raid_disks; d++) {
3200 struct md_rdev *rdev = conf->mirrors[d].rdev;
3201 if (rdev && rdev->raid_disk != d2) {
3202 sysfs_unlink_rdev(mddev, rdev);
3203 rdev->raid_disk = d2;
3204 sysfs_unlink_rdev(mddev, rdev);
3205 if (sysfs_link_rdev(mddev, rdev))
3206 pr_warn("md/raid1:%s: cannot register rd%d\n",
3207 mdname(mddev), rdev->raid_disk);
3208 }
3209 if (rdev)
3210 newmirrors[d2++].rdev = rdev;
3211 }
3212 kfree(conf->mirrors);
3213 conf->mirrors = newmirrors;
3214 kfree(conf->poolinfo);
3215 conf->poolinfo = newpoolinfo;
3216
3217 spin_lock_irqsave(&conf->device_lock, flags);
3218 mddev->degraded += (raid_disks - conf->raid_disks);
3219 spin_unlock_irqrestore(&conf->device_lock, flags);
3220 conf->raid_disks = mddev->raid_disks = raid_disks;
3221 mddev->delta_disks = 0;
3222
3223 unfreeze_array(conf);
3224
3225 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3226 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3227 md_wakeup_thread(mddev->thread);
3228
3229 mempool_destroy(oldpool);
3230 return 0;
3231}
3232
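/*
 * Quiesce callback: state 1 freezes the array so no IO is in flight,
 * state 0 thaws it again, and state 2 just wakes up barrier waiters so
 * that a suspend of the array can make progress.
 */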
3233static void raid1_quiesce(struct mddev *mddev, int state)
3234{
3235 struct r1conf *conf = mddev->private;
3236
3237 switch(state) {
3238 case 2: /* wake for suspend */
3239 wake_up(&conf->wait_barrier);
3240 break;
3241 case 1:
3242 freeze_array(conf, 0);
3243 break;
3244 case 0:
3245 unfreeze_array(conf);
3246 break;
3247 }
3248}
3249
3250static void *raid1_takeover(struct mddev *mddev)
3251{
3252 /* raid1 can take over:
3253 * raid5 with 2 devices, any layout or chunk size
3254 */
3255 if (mddev->level == 5 && mddev->raid_disks == 2) {
3256 struct r1conf *conf;
3257 mddev->new_level = 1;
3258 mddev->new_layout = 0;
3259 mddev->new_chunk_sectors = 0;
3260 conf = setup_conf(mddev);
3261 if (!IS_ERR(conf)) {
3262 /* Array must appear to be quiesced */
3263 conf->array_frozen = 1;
3264 mddev_clear_unsupported_flags(mddev,
3265 UNSUPPORTED_MDDEV_FLAGS);
3266 }
3267 return conf;
3268 }
3269 return ERR_PTR(-EINVAL);
3270}
3271
3272static struct md_personality raid1_personality =
3273{
3274 .name = "raid1",
3275 .level = 1,
3276 .owner = THIS_MODULE,
3277 .make_request = raid1_make_request,
3278 .run = raid1_run,
3279 .free = raid1_free,
3280 .status = raid1_status,
3281 .error_handler = raid1_error,
3282 .hot_add_disk = raid1_add_disk,
3283 .hot_remove_disk= raid1_remove_disk,
3284 .spare_active = raid1_spare_active,
3285 .sync_request = raid1_sync_request,
3286 .resize = raid1_resize,
3287 .size = raid1_size,
3288 .check_reshape = raid1_reshape,
3289 .quiesce = raid1_quiesce,
3290 .takeover = raid1_takeover,
3291 .congested = raid1_congested,
3292};
3293
3294static int __init raid_init(void)
3295{
3296 return register_md_personality(&raid1_personality);
3297}
3298
3299static void raid_exit(void)
3300{
3301 unregister_md_personality(&raid1_personality);
3302}
3303
3304module_init(raid_init);
3305module_exit(raid_exit);
3306MODULE_LICENSE("GPL");
3307MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3308MODULE_ALIAS("md-personality-3"); /* RAID1 */
3309MODULE_ALIAS("md-raid1");
3310MODULE_ALIAS("md-level-1");
3311
3312module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
1/*
2 * raid1.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5 *
6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7 *
8 * RAID-1 management functions.
9 *
10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11 *
12 * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk>
13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14 *
15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
16 * bitmapped intelligence in resync:
17 *
18 * - bitmap marked during normal i/o
19 * - bitmap used to skip nondirty blocks during sync
20 *
21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
22 * - persistent bitmap code
23 *
24 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by
26 * the Free Software Foundation; either version 2, or (at your option)
27 * any later version.
28 *
29 * You should have received a copy of the GNU General Public License
30 * (for example /usr/src/linux/COPYING); if not, write to the Free
31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/slab.h>
35#include <linux/delay.h>
36#include <linux/blkdev.h>
37#include <linux/module.h>
38#include <linux/seq_file.h>
39#include <linux/ratelimit.h>
40#include "md.h"
41#include "raid1.h"
42#include "bitmap.h"
43
44/*
45 * Number of guaranteed r1bios in case of extreme VM load:
46 */
47#define NR_RAID1_BIOS 256
48
49/* When there are this many requests queue to be written by
50 * the raid1 thread, we become 'congested' to provide back-pressure
51 * for writeback.
52 */
53static int max_queued_requests = 1024;
54
55static void allow_barrier(struct r1conf *conf);
56static void lower_barrier(struct r1conf *conf);
57
58static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
59{
60 struct pool_info *pi = data;
61 int size = offsetof(struct r1bio, bios[pi->raid_disks]);
62
63 /* allocate a r1bio with room for raid_disks entries in the bios array */
64 return kzalloc(size, gfp_flags);
65}
66
67static void r1bio_pool_free(void *r1_bio, void *data)
68{
69 kfree(r1_bio);
70}
71
72#define RESYNC_BLOCK_SIZE (64*1024)
73//#define RESYNC_BLOCK_SIZE PAGE_SIZE
74#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
75#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
76#define RESYNC_WINDOW (2048*1024)
77
78static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
79{
80 struct pool_info *pi = data;
81 struct page *page;
82 struct r1bio *r1_bio;
83 struct bio *bio;
84 int i, j;
85
86 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
87 if (!r1_bio)
88 return NULL;
89
90 /*
91 * Allocate bios : 1 for reading, n-1 for writing
92 */
93 for (j = pi->raid_disks ; j-- ; ) {
94 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
95 if (!bio)
96 goto out_free_bio;
97 r1_bio->bios[j] = bio;
98 }
99 /*
100 * Allocate RESYNC_PAGES data pages and attach them to
101 * the first bio.
102 * If this is a user-requested check/repair, allocate
103 * RESYNC_PAGES for each bio.
104 */
105 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
106 j = pi->raid_disks;
107 else
108 j = 1;
109 while(j--) {
110 bio = r1_bio->bios[j];
111 for (i = 0; i < RESYNC_PAGES; i++) {
112 page = alloc_page(gfp_flags);
113 if (unlikely(!page))
114 goto out_free_pages;
115
116 bio->bi_io_vec[i].bv_page = page;
117 bio->bi_vcnt = i+1;
118 }
119 }
120 /* If not user-requests, copy the page pointers to all bios */
121 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
122 for (i=0; i<RESYNC_PAGES ; i++)
123 for (j=1; j<pi->raid_disks; j++)
124 r1_bio->bios[j]->bi_io_vec[i].bv_page =
125 r1_bio->bios[0]->bi_io_vec[i].bv_page;
126 }
127
128 r1_bio->master_bio = NULL;
129
130 return r1_bio;
131
132out_free_pages:
133 for (j=0 ; j < pi->raid_disks; j++)
134 for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
135 put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
136 j = -1;
137out_free_bio:
138 while (++j < pi->raid_disks)
139 bio_put(r1_bio->bios[j]);
140 r1bio_pool_free(r1_bio, data);
141 return NULL;
142}
143
144static void r1buf_pool_free(void *__r1_bio, void *data)
145{
146 struct pool_info *pi = data;
147 int i,j;
148 struct r1bio *r1bio = __r1_bio;
149
150 for (i = 0; i < RESYNC_PAGES; i++)
151 for (j = pi->raid_disks; j-- ;) {
152 if (j == 0 ||
153 r1bio->bios[j]->bi_io_vec[i].bv_page !=
154 r1bio->bios[0]->bi_io_vec[i].bv_page)
155 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
156 }
157 for (i=0 ; i < pi->raid_disks; i++)
158 bio_put(r1bio->bios[i]);
159
160 r1bio_pool_free(r1bio, data);
161}
162
163static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
164{
165 int i;
166
167 for (i = 0; i < conf->raid_disks * 2; i++) {
168 struct bio **bio = r1_bio->bios + i;
169 if (!BIO_SPECIAL(*bio))
170 bio_put(*bio);
171 *bio = NULL;
172 }
173}
174
175static void free_r1bio(struct r1bio *r1_bio)
176{
177 struct r1conf *conf = r1_bio->mddev->private;
178
179 put_all_bios(conf, r1_bio);
180 mempool_free(r1_bio, conf->r1bio_pool);
181}
182
183static void put_buf(struct r1bio *r1_bio)
184{
185 struct r1conf *conf = r1_bio->mddev->private;
186 int i;
187
188 for (i = 0; i < conf->raid_disks * 2; i++) {
189 struct bio *bio = r1_bio->bios[i];
190 if (bio->bi_end_io)
191 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
192 }
193
194 mempool_free(r1_bio, conf->r1buf_pool);
195
196 lower_barrier(conf);
197}
198
199static void reschedule_retry(struct r1bio *r1_bio)
200{
201 unsigned long flags;
202 struct mddev *mddev = r1_bio->mddev;
203 struct r1conf *conf = mddev->private;
204
205 spin_lock_irqsave(&conf->device_lock, flags);
206 list_add(&r1_bio->retry_list, &conf->retry_list);
207 conf->nr_queued ++;
208 spin_unlock_irqrestore(&conf->device_lock, flags);
209
210 wake_up(&conf->wait_barrier);
211 md_wakeup_thread(mddev->thread);
212}
213
214/*
215 * raid_end_bio_io() is called when we have finished servicing a mirrored
216 * operation and are ready to return a success/failure code to the buffer
217 * cache layer.
218 */
219static void call_bio_endio(struct r1bio *r1_bio)
220{
221 struct bio *bio = r1_bio->master_bio;
222 int done;
223 struct r1conf *conf = r1_bio->mddev->private;
224
225 if (bio->bi_phys_segments) {
226 unsigned long flags;
227 spin_lock_irqsave(&conf->device_lock, flags);
228 bio->bi_phys_segments--;
229 done = (bio->bi_phys_segments == 0);
230 spin_unlock_irqrestore(&conf->device_lock, flags);
231 } else
232 done = 1;
233
234 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
235 clear_bit(BIO_UPTODATE, &bio->bi_flags);
236 if (done) {
237 bio_endio(bio, 0);
238 /*
239 * Wake up any possible resync thread that waits for the device
240 * to go idle.
241 */
242 allow_barrier(conf);
243 }
244}
245
246static void raid_end_bio_io(struct r1bio *r1_bio)
247{
248 struct bio *bio = r1_bio->master_bio;
249
250 /* if nobody has done the final endio yet, do it now */
251 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
252 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
253 (bio_data_dir(bio) == WRITE) ? "write" : "read",
254 (unsigned long long) bio->bi_sector,
255 (unsigned long long) bio->bi_sector +
256 (bio->bi_size >> 9) - 1);
257
258 call_bio_endio(r1_bio);
259 }
260 free_r1bio(r1_bio);
261}
262
263/*
264 * Update disk head position estimator based on IRQ completion info.
265 */
266static inline void update_head_pos(int disk, struct r1bio *r1_bio)
267{
268 struct r1conf *conf = r1_bio->mddev->private;
269
270 conf->mirrors[disk].head_position =
271 r1_bio->sector + (r1_bio->sectors);
272}
273
274/*
275 * Find the disk number which triggered given bio
276 */
277static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
278{
279 int mirror;
280 struct r1conf *conf = r1_bio->mddev->private;
281 int raid_disks = conf->raid_disks;
282
283 for (mirror = 0; mirror < raid_disks * 2; mirror++)
284 if (r1_bio->bios[mirror] == bio)
285 break;
286
287 BUG_ON(mirror == raid_disks * 2);
288 update_head_pos(mirror, r1_bio);
289
290 return mirror;
291}
292
293static void raid1_end_read_request(struct bio *bio, int error)
294{
295 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
296 struct r1bio *r1_bio = bio->bi_private;
297 int mirror;
298 struct r1conf *conf = r1_bio->mddev->private;
299
300 mirror = r1_bio->read_disk;
301 /*
302 * this branch is our 'one mirror IO has finished' event handler:
303 */
304 update_head_pos(mirror, r1_bio);
305
306 if (uptodate)
307 set_bit(R1BIO_Uptodate, &r1_bio->state);
308 else {
309 /* If all other devices have failed, we want to return
310 * the error upwards rather than fail the last device.
311 * Here we redefine "uptodate" to mean "Don't want to retry"
312 */
313 unsigned long flags;
314 spin_lock_irqsave(&conf->device_lock, flags);
315 if (r1_bio->mddev->degraded == conf->raid_disks ||
316 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
317 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
318 uptodate = 1;
319 spin_unlock_irqrestore(&conf->device_lock, flags);
320 }
321
322 if (uptodate)
323 raid_end_bio_io(r1_bio);
324 else {
325 /*
326 * oops, read error:
327 */
328 char b[BDEVNAME_SIZE];
329 printk_ratelimited(
330 KERN_ERR "md/raid1:%s: %s: "
331 "rescheduling sector %llu\n",
332 mdname(conf->mddev),
333 bdevname(conf->mirrors[mirror].rdev->bdev,
334 b),
335 (unsigned long long)r1_bio->sector);
336 set_bit(R1BIO_ReadError, &r1_bio->state);
337 reschedule_retry(r1_bio);
338 }
339
340 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
341}
342
343static void close_write(struct r1bio *r1_bio)
344{
345 /* it really is the end of this request */
346 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
347 /* free extra copy of the data pages */
348 int i = r1_bio->behind_page_count;
349 while (i--)
350 safe_put_page(r1_bio->behind_bvecs[i].bv_page);
351 kfree(r1_bio->behind_bvecs);
352 r1_bio->behind_bvecs = NULL;
353 }
354 /* clear the bitmap if all writes complete successfully */
355 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
356 r1_bio->sectors,
357 !test_bit(R1BIO_Degraded, &r1_bio->state),
358 test_bit(R1BIO_BehindIO, &r1_bio->state));
359 md_write_end(r1_bio->mddev);
360}
361
362static void r1_bio_write_done(struct r1bio *r1_bio)
363{
364 if (!atomic_dec_and_test(&r1_bio->remaining))
365 return;
366
367 if (test_bit(R1BIO_WriteError, &r1_bio->state))
368 reschedule_retry(r1_bio);
369 else {
370 close_write(r1_bio);
371 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
372 reschedule_retry(r1_bio);
373 else
374 raid_end_bio_io(r1_bio);
375 }
376}
377
378static void raid1_end_write_request(struct bio *bio, int error)
379{
380 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
381 struct r1bio *r1_bio = bio->bi_private;
382 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
383 struct r1conf *conf = r1_bio->mddev->private;
384 struct bio *to_put = NULL;
385
386 mirror = find_bio_disk(r1_bio, bio);
387
388 /*
389 * 'one mirror IO has finished' event handler:
390 */
391 if (!uptodate) {
392 set_bit(WriteErrorSeen,
393 &conf->mirrors[mirror].rdev->flags);
394 if (!test_and_set_bit(WantReplacement,
395 &conf->mirrors[mirror].rdev->flags))
396 set_bit(MD_RECOVERY_NEEDED, &
397 conf->mddev->recovery);
398
399 set_bit(R1BIO_WriteError, &r1_bio->state);
400 } else {
401 /*
402 * Set R1BIO_Uptodate in our master bio, so that we
403 * will return a good error code for to the higher
404 * levels even if IO on some other mirrored buffer
405 * fails.
406 *
407 * The 'master' represents the composite IO operation
408 * to user-side. So if something waits for IO, then it
409 * will wait for the 'master' bio.
410 */
411 sector_t first_bad;
412 int bad_sectors;
413
414 r1_bio->bios[mirror] = NULL;
415 to_put = bio;
416 set_bit(R1BIO_Uptodate, &r1_bio->state);
417
418 /* Maybe we can clear some bad blocks. */
419 if (is_badblock(conf->mirrors[mirror].rdev,
420 r1_bio->sector, r1_bio->sectors,
421 &first_bad, &bad_sectors)) {
422 r1_bio->bios[mirror] = IO_MADE_GOOD;
423 set_bit(R1BIO_MadeGood, &r1_bio->state);
424 }
425 }
426
427 if (behind) {
428 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
429 atomic_dec(&r1_bio->behind_remaining);
430
431 /*
432 * In behind mode, we ACK the master bio once the I/O
433 * has safely reached all non-writemostly
434 * disks. Setting the Returned bit ensures that this
435 * gets done only once -- we don't ever want to return
436 * -EIO here, instead we'll wait
437 */
438 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
439 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
440 /* Maybe we can return now */
441 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
442 struct bio *mbio = r1_bio->master_bio;
443 pr_debug("raid1: behind end write sectors"
444 " %llu-%llu\n",
445 (unsigned long long) mbio->bi_sector,
446 (unsigned long long) mbio->bi_sector +
447 (mbio->bi_size >> 9) - 1);
448 call_bio_endio(r1_bio);
449 }
450 }
451 }
452 if (r1_bio->bios[mirror] == NULL)
453 rdev_dec_pending(conf->mirrors[mirror].rdev,
454 conf->mddev);
455
456 /*
457 * Let's see if all mirrored write operations have finished
458 * already.
459 */
460 r1_bio_write_done(r1_bio);
461
462 if (to_put)
463 bio_put(to_put);
464}
465
466
467/*
468 * This routine returns the disk from which the requested read should
469 * be done. There is a per-array 'next expected sequential IO' sector
470 * number - if this matches on the next IO then we use the last disk.
471 * There is also a per-disk 'last known head position' sector that is
472 * maintained from IRQ contexts; both the normal and the resync IO
473 * completion handlers update this position correctly. If there is no
474 * perfect sequential match then we pick the disk whose head is closest.
475 *
476 * If there are 2 mirrors on the same 2 devices, performance degrades
477 * because the head position is tracked per mirror, not per device.
478 *
479 * The rdev for the device selected will have nr_pending incremented.
480 */
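/*
 * Roughly, the selection order below is: when a resync is in progress and
 * the request reaches past the resync point, just take the first usable
 * disk; otherwise prefer a disk whose head is already at this sector, then
 * an idle disk (nr_pending == 0), then the disk with the smallest head
 * distance. Write-mostly disks are only used as a last resort.
 */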
481static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
482{
483 const sector_t this_sector = r1_bio->sector;
484 int sectors;
485 int best_good_sectors;
486 int start_disk;
487 int best_disk;
488 int i;
489 sector_t best_dist;
490 struct md_rdev *rdev;
491 int choose_first;
492
493 rcu_read_lock();
494 /*
495 * Check if we can balance. We can balance on the whole
496 * device if no resync is going on, or below the resync window.
497 * We take the first readable disk when above the resync window.
498 */
499 retry:
500 sectors = r1_bio->sectors;
501 best_disk = -1;
502 best_dist = MaxSector;
503 best_good_sectors = 0;
504
505 if (conf->mddev->recovery_cp < MaxSector &&
506 (this_sector + sectors >= conf->next_resync)) {
507 choose_first = 1;
508 start_disk = 0;
509 } else {
510 choose_first = 0;
511 start_disk = conf->last_used;
512 }
513
514 for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
515 sector_t dist;
516 sector_t first_bad;
517 int bad_sectors;
518
519 int disk = start_disk + i;
520 if (disk >= conf->raid_disks * 2)
521 disk -= conf->raid_disks * 2;
522
523 rdev = rcu_dereference(conf->mirrors[disk].rdev);
524 if (r1_bio->bios[disk] == IO_BLOCKED
525 || rdev == NULL
526 || test_bit(Unmerged, &rdev->flags)
527 || test_bit(Faulty, &rdev->flags))
528 continue;
529 if (!test_bit(In_sync, &rdev->flags) &&
530 rdev->recovery_offset < this_sector + sectors)
531 continue;
532 if (test_bit(WriteMostly, &rdev->flags)) {
533 /* Don't balance among write-mostly, just
534 * use the first as a last resort */
535 if (best_disk < 0) {
536 if (is_badblock(rdev, this_sector, sectors,
537 &first_bad, &bad_sectors)) {
538 if (first_bad < this_sector)
539 /* Cannot use this */
540 continue;
541 best_good_sectors = first_bad - this_sector;
542 } else
543 best_good_sectors = sectors;
544 best_disk = disk;
545 }
546 continue;
547 }
548 /* This is a reasonable device to use. It might
549 * even be best.
550 */
551 if (is_badblock(rdev, this_sector, sectors,
552 &first_bad, &bad_sectors)) {
553 if (best_dist < MaxSector)
554 /* already have a better device */
555 continue;
556 if (first_bad <= this_sector) {
557 /* cannot read here. If this is the 'primary'
558 * device, then we must not read beyond
559 * bad_sectors from another device..
560 */
561 bad_sectors -= (this_sector - first_bad);
562 if (choose_first && sectors > bad_sectors)
563 sectors = bad_sectors;
564 if (best_good_sectors > sectors)
565 best_good_sectors = sectors;
566
567 } else {
568 sector_t good_sectors = first_bad - this_sector;
569 if (good_sectors > best_good_sectors) {
570 best_good_sectors = good_sectors;
571 best_disk = disk;
572 }
573 if (choose_first)
574 break;
575 }
576 continue;
577 } else
578 best_good_sectors = sectors;
579
580 dist = abs(this_sector - conf->mirrors[disk].head_position);
581 if (choose_first
582 /* Don't change to another disk for sequential reads */
583 || conf->next_seq_sect == this_sector
584 || dist == 0
585 /* If device is idle, use it */
586 || atomic_read(&rdev->nr_pending) == 0) {
587 best_disk = disk;
588 break;
589 }
590 if (dist < best_dist) {
591 best_dist = dist;
592 best_disk = disk;
593 }
594 }
595
596 if (best_disk >= 0) {
597 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
598 if (!rdev)
599 goto retry;
600 atomic_inc(&rdev->nr_pending);
601 if (test_bit(Faulty, &rdev->flags)) {
602 /* cannot risk returning a device that failed
603 * before we inc'ed nr_pending
604 */
605 rdev_dec_pending(rdev, conf->mddev);
606 goto retry;
607 }
608 sectors = best_good_sectors;
609 conf->next_seq_sect = this_sector + sectors;
610 conf->last_used = best_disk;
611 }
612 rcu_read_unlock();
613 *max_sectors = sectors;
614
615 return best_disk;
616}
617
618static int raid1_mergeable_bvec(struct request_queue *q,
619 struct bvec_merge_data *bvm,
620 struct bio_vec *biovec)
621{
622 struct mddev *mddev = q->queuedata;
623 struct r1conf *conf = mddev->private;
624 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
625 int max = biovec->bv_len;
626
627 if (mddev->merge_check_needed) {
628 int disk;
629 rcu_read_lock();
630 for (disk = 0; disk < conf->raid_disks * 2; disk++) {
631 struct md_rdev *rdev = rcu_dereference(
632 conf->mirrors[disk].rdev);
633 if (rdev && !test_bit(Faulty, &rdev->flags)) {
634 struct request_queue *q =
635 bdev_get_queue(rdev->bdev);
636 if (q->merge_bvec_fn) {
637 bvm->bi_sector = sector +
638 rdev->data_offset;
639 bvm->bi_bdev = rdev->bdev;
640 max = min(max, q->merge_bvec_fn(
641 q, bvm, biovec));
642 }
643 }
644 }
645 rcu_read_unlock();
646 }
647 return max;
648
649}
650
651int md_raid1_congested(struct mddev *mddev, int bits)
652{
653 struct r1conf *conf = mddev->private;
654 int i, ret = 0;
655
656 if ((bits & (1 << BDI_async_congested)) &&
657 conf->pending_count >= max_queued_requests)
658 return 1;
659
660 rcu_read_lock();
661 for (i = 0; i < conf->raid_disks * 2; i++) {
662 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
663 if (rdev && !test_bit(Faulty, &rdev->flags)) {
664 struct request_queue *q = bdev_get_queue(rdev->bdev);
665
666 BUG_ON(!q);
667
668 /* Note the '|| 1' - when read_balance prefers
669 * non-congested targets, it can be removed
670 */
671 if ((bits & (1<<BDI_async_congested)) || 1)
672 ret |= bdi_congested(&q->backing_dev_info, bits);
673 else
674 ret &= bdi_congested(&q->backing_dev_info, bits);
675 }
676 }
677 rcu_read_unlock();
678 return ret;
679}
680EXPORT_SYMBOL_GPL(md_raid1_congested);
681
682static int raid1_congested(void *data, int bits)
683{
684 struct mddev *mddev = data;
685
686 return mddev_congested(mddev, bits) ||
687 md_raid1_congested(mddev, bits);
688}
689
690static void flush_pending_writes(struct r1conf *conf)
691{
692 /* Any writes that have been queued but are awaiting
693 * bitmap updates get flushed here.
694 */
695 spin_lock_irq(&conf->device_lock);
696
697 if (conf->pending_bio_list.head) {
698 struct bio *bio;
699 bio = bio_list_get(&conf->pending_bio_list);
700 conf->pending_count = 0;
701 spin_unlock_irq(&conf->device_lock);
702 /* flush any pending bitmap writes to
703 * disk before proceeding w/ I/O */
704 bitmap_unplug(conf->mddev->bitmap);
705 wake_up(&conf->wait_barrier);
706
707 while (bio) { /* submit pending writes */
708 struct bio *next = bio->bi_next;
709 bio->bi_next = NULL;
710 generic_make_request(bio);
711 bio = next;
712 }
713 } else
714 spin_unlock_irq(&conf->device_lock);
715}
716
717/* Barriers....
718 * Sometimes we need to suspend IO while we do something else,
719 * either some resync/recovery, or reconfigure the array.
720 * To do this we raise a 'barrier'.
721 * The 'barrier' is a counter that can be raised multiple times
722 * to count how many activities are happening which preclude
723 * normal IO.
724 * We can only raise the barrier if there is no pending IO.
725 * i.e. if nr_pending == 0.
726 * We choose only to raise the barrier if no-one is waiting for the
727 * barrier to go down. This means that as soon as an IO request
728 * is ready, no other operations which require a barrier will start
729 * until the IO request has had a chance.
730 *
731 * So: regular IO calls 'wait_barrier'. When that returns there
732 * is no background IO happening. It must arrange to call
733 * allow_barrier when it has finished its IO.
734 * Background IO calls must call raise_barrier. Once that returns
735 * there is no normal IO happening. It must arrange to call
736 * lower_barrier when the particular background IO completes.
737 */
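/*
 * In short, the expected pairing is e.g.:
 *   regular IO:   wait_barrier(conf);  ...submit the IO...;  allow_barrier(conf);
 *   resync etc.:  raise_barrier(conf); ...background IO...;  lower_barrier(conf);
 */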
738#define RESYNC_DEPTH 32
739
740static void raise_barrier(struct r1conf *conf)
741{
742 spin_lock_irq(&conf->resync_lock);
743
744 /* Wait until no block IO is waiting */
745 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
746 conf->resync_lock, );
747
748 /* block any new IO from starting */
749 conf->barrier++;
750
751 /* Now wait for all pending IO to complete */
752 wait_event_lock_irq(conf->wait_barrier,
753 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
754 conf->resync_lock, );
755
756 spin_unlock_irq(&conf->resync_lock);
757}
758
759static void lower_barrier(struct r1conf *conf)
760{
761 unsigned long flags;
762 BUG_ON(conf->barrier <= 0);
763 spin_lock_irqsave(&conf->resync_lock, flags);
764 conf->barrier--;
765 spin_unlock_irqrestore(&conf->resync_lock, flags);
766 wake_up(&conf->wait_barrier);
767}
768
769static void wait_barrier(struct r1conf *conf)
770{
771 spin_lock_irq(&conf->resync_lock);
772 if (conf->barrier) {
773 conf->nr_waiting++;
774 /* Wait for the barrier to drop.
775 * However if there are already pending
776 * requests (preventing the barrier from
777 * rising completely), and the
778 * pre-process bio queue isn't empty,
779 * then don't wait, as we need to empty
780 * that queue to get the nr_pending
781 * count down.
782 */
783 wait_event_lock_irq(conf->wait_barrier,
784 !conf->barrier ||
785 (conf->nr_pending &&
786 current->bio_list &&
787 !bio_list_empty(current->bio_list)),
788 conf->resync_lock,
789 );
790 conf->nr_waiting--;
791 }
792 conf->nr_pending++;
793 spin_unlock_irq(&conf->resync_lock);
794}
795
796static void allow_barrier(struct r1conf *conf)
797{
798 unsigned long flags;
799 spin_lock_irqsave(&conf->resync_lock, flags);
800 conf->nr_pending--;
801 spin_unlock_irqrestore(&conf->resync_lock, flags);
802 wake_up(&conf->wait_barrier);
803}
804
805static void freeze_array(struct r1conf *conf)
806{
807	/* stop sync IO and normal IO and wait for everything to
808	 * go quiet.
809	 * We increment barrier and nr_waiting, and then
810	 * wait until nr_pending matches nr_queued+1.
811 * This is called in the context of one normal IO request
812 * that has failed. Thus any sync request that might be pending
813 * will be blocked by nr_pending, and we need to wait for
814 * pending IO requests to complete or be queued for re-try.
815 * Thus the number queued (nr_queued) plus this request (1)
816 * must match the number of pending IOs (nr_pending) before
817 * we continue.
818 */
819 spin_lock_irq(&conf->resync_lock);
820 conf->barrier++;
821 conf->nr_waiting++;
822 wait_event_lock_irq(conf->wait_barrier,
823 conf->nr_pending == conf->nr_queued+1,
824 conf->resync_lock,
825 flush_pending_writes(conf));
826 spin_unlock_irq(&conf->resync_lock);
827}
828static void unfreeze_array(struct r1conf *conf)
829{
830 /* reverse the effect of the freeze */
831 spin_lock_irq(&conf->resync_lock);
832 conf->barrier--;
833 conf->nr_waiting--;
834 wake_up(&conf->wait_barrier);
835 spin_unlock_irq(&conf->resync_lock);
836}
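/* A typical caller pairs freeze_array()/unfreeze_array() around synchronous
 * repair work, e.g. handle_read_error() below freezes the array, runs
 * fix_read_error(), then unfreezes, so the repair sees a quiescent array.
 */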
837
838
839/* duplicate the data pages for behind I/O
840 */
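/* With write-behind, the master bio may be completed before the writes to
 * WriteMostly devices have finished, so the caller's pages can be reused at
 * any time; hence the private copy made here. If allocation fails we simply
 * fall back to ordinary (synchronous) writes.
 */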
841static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
842{
843 int i;
844 struct bio_vec *bvec;
845 struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
846 GFP_NOIO);
847 if (unlikely(!bvecs))
848 return;
849
850 bio_for_each_segment(bvec, bio, i) {
851 bvecs[i] = *bvec;
852 bvecs[i].bv_page = alloc_page(GFP_NOIO);
853 if (unlikely(!bvecs[i].bv_page))
854 goto do_sync_io;
855 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
856 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
857 kunmap(bvecs[i].bv_page);
858 kunmap(bvec->bv_page);
859 }
860 r1_bio->behind_bvecs = bvecs;
861 r1_bio->behind_page_count = bio->bi_vcnt;
862 set_bit(R1BIO_BehindIO, &r1_bio->state);
863 return;
864
865do_sync_io:
866 for (i = 0; i < bio->bi_vcnt; i++)
867 if (bvecs[i].bv_page)
868 put_page(bvecs[i].bv_page);
869 kfree(bvecs);
870 pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
871}
872
873static void make_request(struct mddev *mddev, struct bio * bio)
874{
875 struct r1conf *conf = mddev->private;
876 struct mirror_info *mirror;
877 struct r1bio *r1_bio;
878 struct bio *read_bio;
879 int i, disks;
880 struct bitmap *bitmap;
881 unsigned long flags;
882 const int rw = bio_data_dir(bio);
883 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
884 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
885 struct md_rdev *blocked_rdev;
886 int first_clone;
887 int sectors_handled;
888 int max_sectors;
889
890 /*
891 * Register the new request and wait if the reconstruction
892	 * thread has put up a barrier for new requests.
893 * Continue immediately if no resync is active currently.
894 */
895
896 md_write_start(mddev, bio); /* wait on superblock update early */
897
898 if (bio_data_dir(bio) == WRITE &&
899 bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
900 bio->bi_sector < mddev->suspend_hi) {
901 /* As the suspend_* range is controlled by
902 * userspace, we want an interruptible
903 * wait.
904 */
905 DEFINE_WAIT(w);
906 for (;;) {
907 flush_signals(current);
908 prepare_to_wait(&conf->wait_barrier,
909 &w, TASK_INTERRUPTIBLE);
910 if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
911 bio->bi_sector >= mddev->suspend_hi)
912 break;
913 schedule();
914 }
915 finish_wait(&conf->wait_barrier, &w);
916 }
917
918 wait_barrier(conf);
919
920 bitmap = mddev->bitmap;
921
922 /*
923 * make_request() can abort the operation when READA is being
924 * used and no empty request is available.
925 *
926 */
927 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
928
929 r1_bio->master_bio = bio;
930 r1_bio->sectors = bio->bi_size >> 9;
931 r1_bio->state = 0;
932 r1_bio->mddev = mddev;
933 r1_bio->sector = bio->bi_sector;
934
935 /* We might need to issue multiple reads to different
936 * devices if there are bad blocks around, so we keep
937 * track of the number of reads in bio->bi_phys_segments.
938 * If this is 0, there is only one r1_bio and no locking
939 * will be needed when requests complete. If it is
940 * non-zero, then it is the number of not-completed requests.
941 */
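	/* For example, a read that hits a bad-block boundary on the chosen
	 * device is split: the first r1_bio covers only max_sectors and
	 * bi_phys_segments is bumped to 2 before the rest is retried (see
	 * the read_again/retry_write paths below).
	 */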
942 bio->bi_phys_segments = 0;
943 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
944
945 if (rw == READ) {
946 /*
947 * read balancing logic:
948 */
949 int rdisk;
950
951read_again:
952 rdisk = read_balance(conf, r1_bio, &max_sectors);
953
954 if (rdisk < 0) {
955 /* couldn't find anywhere to read from */
956 raid_end_bio_io(r1_bio);
957 return;
958 }
959 mirror = conf->mirrors + rdisk;
960
961 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
962 bitmap) {
963 /* Reading from a write-mostly device must
964 * take care not to over-take any writes
965 * that are 'behind'
966 */
967 wait_event(bitmap->behind_wait,
968 atomic_read(&bitmap->behind_writes) == 0);
969 }
970 r1_bio->read_disk = rdisk;
971
972 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
973 md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
974 max_sectors);
975
976 r1_bio->bios[rdisk] = read_bio;
977
978 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
979 read_bio->bi_bdev = mirror->rdev->bdev;
980 read_bio->bi_end_io = raid1_end_read_request;
981 read_bio->bi_rw = READ | do_sync;
982 read_bio->bi_private = r1_bio;
983
984 if (max_sectors < r1_bio->sectors) {
985 /* could not read all from this device, so we will
986 * need another r1_bio.
987 */
988
989 sectors_handled = (r1_bio->sector + max_sectors
990 - bio->bi_sector);
991 r1_bio->sectors = max_sectors;
992 spin_lock_irq(&conf->device_lock);
993 if (bio->bi_phys_segments == 0)
994 bio->bi_phys_segments = 2;
995 else
996 bio->bi_phys_segments++;
997 spin_unlock_irq(&conf->device_lock);
998 /* Cannot call generic_make_request directly
999 * as that will be queued in __make_request
1000 * and subsequent mempool_alloc might block waiting
1001 * for it. So hand bio over to raid1d.
1002 */
1003 reschedule_retry(r1_bio);
1004
1005 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1006
1007 r1_bio->master_bio = bio;
1008 r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1009 r1_bio->state = 0;
1010 r1_bio->mddev = mddev;
1011 r1_bio->sector = bio->bi_sector + sectors_handled;
1012 goto read_again;
1013 } else
1014 generic_make_request(read_bio);
1015 return;
1016 }
1017
1018 /*
1019 * WRITE:
1020 */
1021 if (conf->pending_count >= max_queued_requests) {
1022 md_wakeup_thread(mddev->thread);
1023 wait_event(conf->wait_barrier,
1024 conf->pending_count < max_queued_requests);
1025 }
1026 /* first select target devices under rcu_lock and
1027 * inc refcount on their rdev. Record them by setting
1028 * bios[x] to bio
1029 * If there are known/acknowledged bad blocks on any device on
1030 * which we have seen a write error, we want to avoid writing those
1031 * blocks.
1032 * This potentially requires several writes to write around
1033	 * the bad blocks. Each set of writes gets its own r1bio
1034 * with a set of bios attached.
1035 */
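	/* For example, if a known bad block starts part-way through the
	 * request on some device, max_sectors is trimmed below so this
	 * r1_bio covers only the leading good region; the remainder gets a
	 * further r1_bio at the bottom of this function.
	 */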
1036
1037 disks = conf->raid_disks * 2;
1038 retry_write:
1039 blocked_rdev = NULL;
1040 rcu_read_lock();
1041 max_sectors = r1_bio->sectors;
1042 for (i = 0; i < disks; i++) {
1043 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1044 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1045 atomic_inc(&rdev->nr_pending);
1046 blocked_rdev = rdev;
1047 break;
1048 }
1049 r1_bio->bios[i] = NULL;
1050 if (!rdev || test_bit(Faulty, &rdev->flags)
1051 || test_bit(Unmerged, &rdev->flags)) {
1052 if (i < conf->raid_disks)
1053 set_bit(R1BIO_Degraded, &r1_bio->state);
1054 continue;
1055 }
1056
1057 atomic_inc(&rdev->nr_pending);
1058 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1059 sector_t first_bad;
1060 int bad_sectors;
1061 int is_bad;
1062
1063 is_bad = is_badblock(rdev, r1_bio->sector,
1064 max_sectors,
1065 &first_bad, &bad_sectors);
1066 if (is_bad < 0) {
1067 /* mustn't write here until the bad block is
1068				 * acknowledged */
1069 set_bit(BlockedBadBlocks, &rdev->flags);
1070 blocked_rdev = rdev;
1071 break;
1072 }
1073 if (is_bad && first_bad <= r1_bio->sector) {
1074 /* Cannot write here at all */
1075 bad_sectors -= (r1_bio->sector - first_bad);
1076 if (bad_sectors < max_sectors)
1077 /* mustn't write more than bad_sectors
1078 * to other devices yet
1079 */
1080 max_sectors = bad_sectors;
1081 rdev_dec_pending(rdev, mddev);
1082 /* We don't set R1BIO_Degraded as that
1083 * only applies if the disk is
1084 * missing, so it might be re-added,
1085 * and we want to know to recover this
1086 * chunk.
1087 * In this case the device is here,
1088 * and the fact that this chunk is not
1089 * in-sync is recorded in the bad
1090 * block log
1091 */
1092 continue;
1093 }
1094 if (is_bad) {
1095 int good_sectors = first_bad - r1_bio->sector;
1096 if (good_sectors < max_sectors)
1097 max_sectors = good_sectors;
1098 }
1099 }
1100 r1_bio->bios[i] = bio;
1101 }
1102 rcu_read_unlock();
1103
1104 if (unlikely(blocked_rdev)) {
1105 /* Wait for this device to become unblocked */
1106 int j;
1107
1108 for (j = 0; j < i; j++)
1109 if (r1_bio->bios[j])
1110 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1111 r1_bio->state = 0;
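		/* Drop our hold on the barrier while we sleep, so that
		 * resync or freeze_array() is not stalled behind this
		 * blocked request, and re-take it before retrying.
		 */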
1112 allow_barrier(conf);
1113 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1114 wait_barrier(conf);
1115 goto retry_write;
1116 }
1117
1118 if (max_sectors < r1_bio->sectors) {
1119 /* We are splitting this write into multiple parts, so
1120 * we need to prepare for allocating another r1_bio.
1121 */
1122 r1_bio->sectors = max_sectors;
1123 spin_lock_irq(&conf->device_lock);
1124 if (bio->bi_phys_segments == 0)
1125 bio->bi_phys_segments = 2;
1126 else
1127 bio->bi_phys_segments++;
1128 spin_unlock_irq(&conf->device_lock);
1129 }
1130 sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
1131
1132 atomic_set(&r1_bio->remaining, 1);
1133 atomic_set(&r1_bio->behind_remaining, 0);
1134
1135 first_clone = 1;
1136 for (i = 0; i < disks; i++) {
1137 struct bio *mbio;
1138 if (!r1_bio->bios[i])
1139 continue;
1140
1141 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1142 md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
1143
1144 if (first_clone) {
1145 /* do behind I/O ?
1146 * Not if there are too many, or cannot
1147 * allocate memory, or a reader on WriteMostly
1148 * is waiting for behind writes to flush */
1149 if (bitmap &&
1150 (atomic_read(&bitmap->behind_writes)
1151 < mddev->bitmap_info.max_write_behind) &&
1152 !waitqueue_active(&bitmap->behind_wait))
1153 alloc_behind_pages(mbio, r1_bio);
1154
1155 bitmap_startwrite(bitmap, r1_bio->sector,
1156 r1_bio->sectors,
1157 test_bit(R1BIO_BehindIO,
1158 &r1_bio->state));
1159 first_clone = 0;
1160 }
1161 if (r1_bio->behind_bvecs) {
1162 struct bio_vec *bvec;
1163 int j;
1164
1165 /* Yes, I really want the '__' version so that
1166			 * we clear any unused pointers in the io_vec, rather
1167 * than leave them unchanged. This is important
1168 * because when we come to free the pages, we won't
1169 * know the original bi_idx, so we just free
1170 * them all
1171 */
1172 __bio_for_each_segment(bvec, mbio, j, 0)
1173 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1174 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1175 atomic_inc(&r1_bio->behind_remaining);
1176 }
1177
1178 r1_bio->bios[i] = mbio;
1179
1180 mbio->bi_sector = (r1_bio->sector +
1181 conf->mirrors[i].rdev->data_offset);
1182 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1183 mbio->bi_end_io = raid1_end_write_request;
1184 mbio->bi_rw = WRITE | do_flush_fua | do_sync;
1185 mbio->bi_private = r1_bio;
1186
1187 atomic_inc(&r1_bio->remaining);
1188 spin_lock_irqsave(&conf->device_lock, flags);
1189 bio_list_add(&conf->pending_bio_list, mbio);
1190 conf->pending_count++;
1191 spin_unlock_irqrestore(&conf->device_lock, flags);
1192 if (!mddev_check_plugged(mddev))
1193 md_wakeup_thread(mddev->thread);
1194 }
1195 /* Mustn't call r1_bio_write_done before this next test,
1196 * as it could result in the bio being freed.
1197 */
1198 if (sectors_handled < (bio->bi_size >> 9)) {
1199 r1_bio_write_done(r1_bio);
1200 /* We need another r1_bio. It has already been counted
1201 * in bio->bi_phys_segments
1202 */
1203 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1204 r1_bio->master_bio = bio;
1205 r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1206 r1_bio->state = 0;
1207 r1_bio->mddev = mddev;
1208 r1_bio->sector = bio->bi_sector + sectors_handled;
1209 goto retry_write;
1210 }
1211
1212 r1_bio_write_done(r1_bio);
1213
1214 /* In case raid1d snuck in to freeze_array */
1215 wake_up(&conf->wait_barrier);
1216}
1217
1218static void status(struct seq_file *seq, struct mddev *mddev)
1219{
1220 struct r1conf *conf = mddev->private;
1221 int i;
1222
1223 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1224 conf->raid_disks - mddev->degraded);
1225 rcu_read_lock();
1226 for (i = 0; i < conf->raid_disks; i++) {
1227 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1228 seq_printf(seq, "%s",
1229 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1230 }
1231 rcu_read_unlock();
1232 seq_printf(seq, "]");
1233}
1234
1235
1236static void error(struct mddev *mddev, struct md_rdev *rdev)
1237{
1238 char b[BDEVNAME_SIZE];
1239 struct r1conf *conf = mddev->private;
1240
1241 /*
1242 * If it is not operational, then we have already marked it as dead
1243 * else if it is the last working disks, ignore the error, let the
1244 * next level up know.
1245 * else mark the drive as failed
1246 */
1247 if (test_bit(In_sync, &rdev->flags)
1248 && (conf->raid_disks - mddev->degraded) == 1) {
1249 /*
1250 * Don't fail the drive, act as though we were just a
1251 * normal single drive.
1252 * However don't try a recovery from this drive as
1253 * it is very likely to fail.
1254 */
1255 conf->recovery_disabled = mddev->recovery_disabled;
1256 return;
1257 }
1258 set_bit(Blocked, &rdev->flags);
1259 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1260 unsigned long flags;
1261 spin_lock_irqsave(&conf->device_lock, flags);
1262 mddev->degraded++;
1263 set_bit(Faulty, &rdev->flags);
1264 spin_unlock_irqrestore(&conf->device_lock, flags);
1265 /*
1266 * if recovery is running, make sure it aborts.
1267 */
1268 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1269 } else
1270 set_bit(Faulty, &rdev->flags);
1271 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1272 printk(KERN_ALERT
1273 "md/raid1:%s: Disk failure on %s, disabling device.\n"
1274 "md/raid1:%s: Operation continuing on %d devices.\n",
1275 mdname(mddev), bdevname(rdev->bdev, b),
1276 mdname(mddev), conf->raid_disks - mddev->degraded);
1277}
1278
1279static void print_conf(struct r1conf *conf)
1280{
1281 int i;
1282
1283 printk(KERN_DEBUG "RAID1 conf printout:\n");
1284 if (!conf) {
1285 printk(KERN_DEBUG "(!conf)\n");
1286 return;
1287 }
1288 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1289 conf->raid_disks);
1290
1291 rcu_read_lock();
1292 for (i = 0; i < conf->raid_disks; i++) {
1293 char b[BDEVNAME_SIZE];
1294 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1295 if (rdev)
1296 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1297 i, !test_bit(In_sync, &rdev->flags),
1298 !test_bit(Faulty, &rdev->flags),
1299 bdevname(rdev->bdev,b));
1300 }
1301 rcu_read_unlock();
1302}
1303
1304static void close_sync(struct r1conf *conf)
1305{
1306 wait_barrier(conf);
1307 allow_barrier(conf);
1308
1309 mempool_destroy(conf->r1buf_pool);
1310 conf->r1buf_pool = NULL;
1311}
1312
1313static int raid1_spare_active(struct mddev *mddev)
1314{
1315 int i;
1316 struct r1conf *conf = mddev->private;
1317 int count = 0;
1318 unsigned long flags;
1319
1320 /*
1321 * Find all failed disks within the RAID1 configuration
1322 * and mark them readable.
1323 * Called under mddev lock, so rcu protection not needed.
1324 */
1325 for (i = 0; i < conf->raid_disks; i++) {
1326 struct md_rdev *rdev = conf->mirrors[i].rdev;
1327 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1328 if (repl
1329 && repl->recovery_offset == MaxSector
1330 && !test_bit(Faulty, &repl->flags)
1331 && !test_and_set_bit(In_sync, &repl->flags)) {
1332 /* replacement has just become active */
1333 if (!rdev ||
1334 !test_and_clear_bit(In_sync, &rdev->flags))
1335 count++;
1336 if (rdev) {
1337 /* Replaced device not technically
1338 * faulty, but we need to be sure
1339 * it gets removed and never re-added
1340 */
1341 set_bit(Faulty, &rdev->flags);
1342 sysfs_notify_dirent_safe(
1343 rdev->sysfs_state);
1344 }
1345 }
1346 if (rdev
1347 && !test_bit(Faulty, &rdev->flags)
1348 && !test_and_set_bit(In_sync, &rdev->flags)) {
1349 count++;
1350 sysfs_notify_dirent_safe(rdev->sysfs_state);
1351 }
1352 }
1353 spin_lock_irqsave(&conf->device_lock, flags);
1354 mddev->degraded -= count;
1355 spin_unlock_irqrestore(&conf->device_lock, flags);
1356
1357 print_conf(conf);
1358 return count;
1359}
1360
1361
1362static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1363{
1364 struct r1conf *conf = mddev->private;
1365 int err = -EEXIST;
1366 int mirror = 0;
1367 struct mirror_info *p;
1368 int first = 0;
1369 int last = conf->raid_disks - 1;
1370 struct request_queue *q = bdev_get_queue(rdev->bdev);
1371
1372 if (mddev->recovery_disabled == conf->recovery_disabled)
1373 return -EBUSY;
1374
1375 if (rdev->raid_disk >= 0)
1376 first = last = rdev->raid_disk;
1377
1378 if (q->merge_bvec_fn) {
1379 set_bit(Unmerged, &rdev->flags);
1380 mddev->merge_check_needed = 1;
1381 }
1382
1383 for (mirror = first; mirror <= last; mirror++) {
1384 p = conf->mirrors+mirror;
1385 if (!p->rdev) {
1386
1387 disk_stack_limits(mddev->gendisk, rdev->bdev,
1388 rdev->data_offset << 9);
1389
1390 p->head_position = 0;
1391 rdev->raid_disk = mirror;
1392 err = 0;
1393 /* As all devices are equivalent, we don't need a full recovery
1394			 * if this drive was recently part of the array
1395 */
1396 if (rdev->saved_raid_disk < 0)
1397 conf->fullsync = 1;
1398 rcu_assign_pointer(p->rdev, rdev);
1399 break;
1400 }
1401 if (test_bit(WantReplacement, &p->rdev->flags) &&
1402 p[conf->raid_disks].rdev == NULL) {
1403 /* Add this device as a replacement */
1404 clear_bit(In_sync, &rdev->flags);
1405 set_bit(Replacement, &rdev->flags);
1406 rdev->raid_disk = mirror;
1407 err = 0;
1408 conf->fullsync = 1;
1409 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1410 break;
1411 }
1412 }
1413 if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1414 /* Some requests might not have seen this new
1415 * merge_bvec_fn. We must wait for them to complete
1416 * before merging the device fully.
1417 * First we make sure any code which has tested
1418 * our function has submitted the request, then
1419 * we wait for all outstanding requests to complete.
1420 */
1421 synchronize_sched();
1422 raise_barrier(conf);
1423 lower_barrier(conf);
1424 clear_bit(Unmerged, &rdev->flags);
1425 }
1426 md_integrity_add_rdev(rdev, mddev);
1427 print_conf(conf);
1428 return err;
1429}
1430
1431static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1432{
1433 struct r1conf *conf = mddev->private;
1434 int err = 0;
1435 int number = rdev->raid_disk;
1436 struct mirror_info *p = conf->mirrors+ number;
1437
1438 if (rdev != p->rdev)
1439 p = conf->mirrors + conf->raid_disks + number;
1440
1441 print_conf(conf);
1442 if (rdev == p->rdev) {
1443 if (test_bit(In_sync, &rdev->flags) ||
1444 atomic_read(&rdev->nr_pending)) {
1445 err = -EBUSY;
1446 goto abort;
1447 }
1448 /* Only remove non-faulty devices if recovery
1449 * is not possible.
1450 */
1451 if (!test_bit(Faulty, &rdev->flags) &&
1452 mddev->recovery_disabled != conf->recovery_disabled &&
1453 mddev->degraded < conf->raid_disks) {
1454 err = -EBUSY;
1455 goto abort;
1456 }
1457 p->rdev = NULL;
1458 synchronize_rcu();
1459 if (atomic_read(&rdev->nr_pending)) {
1460 /* lost the race, try later */
1461 err = -EBUSY;
1462 p->rdev = rdev;
1463 goto abort;
1464 } else if (conf->mirrors[conf->raid_disks + number].rdev) {
1465 /* We just removed a device that is being replaced.
1466 * Move down the replacement. We drain all IO before
1467 * doing this to avoid confusion.
1468 */
1469 struct md_rdev *repl =
1470 conf->mirrors[conf->raid_disks + number].rdev;
1471 raise_barrier(conf);
1472 clear_bit(Replacement, &repl->flags);
1473 p->rdev = repl;
1474 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1475 lower_barrier(conf);
1476 clear_bit(WantReplacement, &rdev->flags);
1477 } else
1478 clear_bit(WantReplacement, &rdev->flags);
1479 err = md_integrity_register(mddev);
1480 }
1481abort:
1482
1483 print_conf(conf);
1484 return err;
1485}
1486
1487
1488static void end_sync_read(struct bio *bio, int error)
1489{
1490 struct r1bio *r1_bio = bio->bi_private;
1491
1492 update_head_pos(r1_bio->read_disk, r1_bio);
1493
1494 /*
1495 * we have read a block, now it needs to be re-written,
1496 * or re-read if the read failed.
1497 * We don't do much here, just schedule handling by raid1d
1498 */
1499 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1500 set_bit(R1BIO_Uptodate, &r1_bio->state);
1501
1502 if (atomic_dec_and_test(&r1_bio->remaining))
1503 reschedule_retry(r1_bio);
1504}
1505
1506static void end_sync_write(struct bio *bio, int error)
1507{
1508 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1509 struct r1bio *r1_bio = bio->bi_private;
1510 struct mddev *mddev = r1_bio->mddev;
1511 struct r1conf *conf = mddev->private;
1512 int mirror=0;
1513 sector_t first_bad;
1514 int bad_sectors;
1515
1516 mirror = find_bio_disk(r1_bio, bio);
1517
1518 if (!uptodate) {
1519 sector_t sync_blocks = 0;
1520 sector_t s = r1_bio->sector;
1521 long sectors_to_go = r1_bio->sectors;
1522		/* make sure these bits don't get cleared. */
1523 do {
1524 bitmap_end_sync(mddev->bitmap, s,
1525 &sync_blocks, 1);
1526 s += sync_blocks;
1527 sectors_to_go -= sync_blocks;
1528 } while (sectors_to_go > 0);
1529 set_bit(WriteErrorSeen,
1530 &conf->mirrors[mirror].rdev->flags);
1531 if (!test_and_set_bit(WantReplacement,
1532 &conf->mirrors[mirror].rdev->flags))
1533 set_bit(MD_RECOVERY_NEEDED, &
1534 mddev->recovery);
1535 set_bit(R1BIO_WriteError, &r1_bio->state);
1536 } else if (is_badblock(conf->mirrors[mirror].rdev,
1537 r1_bio->sector,
1538 r1_bio->sectors,
1539 &first_bad, &bad_sectors) &&
1540 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1541 r1_bio->sector,
1542 r1_bio->sectors,
1543 &first_bad, &bad_sectors)
1544 )
1545 set_bit(R1BIO_MadeGood, &r1_bio->state);
1546
1547 if (atomic_dec_and_test(&r1_bio->remaining)) {
1548 int s = r1_bio->sectors;
1549 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1550 test_bit(R1BIO_WriteError, &r1_bio->state))
1551 reschedule_retry(r1_bio);
1552 else {
1553 put_buf(r1_bio);
1554 md_done_sync(mddev, s, uptodate);
1555 }
1556 }
1557}
1558
1559static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1560 int sectors, struct page *page, int rw)
1561{
1562 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1563 /* success */
1564 return 1;
1565 if (rw == WRITE) {
1566 set_bit(WriteErrorSeen, &rdev->flags);
1567 if (!test_and_set_bit(WantReplacement,
1568 &rdev->flags))
1569 set_bit(MD_RECOVERY_NEEDED, &
1570 rdev->mddev->recovery);
1571 }
1572 /* need to record an error - either for the block or the device */
1573 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1574 md_error(rdev->mddev, rdev);
1575 return 0;
1576}
1577
1578static int fix_sync_read_error(struct r1bio *r1_bio)
1579{
1580 /* Try some synchronous reads of other devices to get
1581 * good data, much like with normal read errors. Only
1582 * read into the pages we already have so we don't
1583 * need to re-issue the read request.
1584 * We don't need to freeze the array, because being in an
1585 * active sync request, there is no normal IO, and
1586 * no overlapping syncs.
1587 * We don't need to check is_badblock() again as we
1588 * made sure that anything with a bad block in range
1589 * will have bi_end_io clear.
1590 */
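	/* Sketch of the loop below: work one page (at most PAGE_SIZE) at a
	 * time; try each device in turn until a read succeeds; if none does,
	 * record a bad block on every device (and abort the recovery if even
	 * that fails); otherwise write the good data back to the other
	 * devices and re-read it to verify.
	 */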
1591 struct mddev *mddev = r1_bio->mddev;
1592 struct r1conf *conf = mddev->private;
1593 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1594 sector_t sect = r1_bio->sector;
1595 int sectors = r1_bio->sectors;
1596 int idx = 0;
1597
1598 while(sectors) {
1599 int s = sectors;
1600 int d = r1_bio->read_disk;
1601 int success = 0;
1602 struct md_rdev *rdev;
1603 int start;
1604
1605 if (s > (PAGE_SIZE>>9))
1606 s = PAGE_SIZE >> 9;
1607 do {
1608 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1609				/* No rcu protection needed here; devices
1610				 * can only be removed when no resync is
1611				 * active, and resync is currently active.
1612 */
1613 rdev = conf->mirrors[d].rdev;
1614 if (sync_page_io(rdev, sect, s<<9,
1615 bio->bi_io_vec[idx].bv_page,
1616 READ, false)) {
1617 success = 1;
1618 break;
1619 }
1620 }
1621 d++;
1622 if (d == conf->raid_disks * 2)
1623 d = 0;
1624 } while (!success && d != r1_bio->read_disk);
1625
1626 if (!success) {
1627 char b[BDEVNAME_SIZE];
1628 int abort = 0;
1629 /* Cannot read from anywhere, this block is lost.
1630 * Record a bad block on each device. If that doesn't
1631 * work just disable and interrupt the recovery.
1632 * Don't fail devices as that won't really help.
1633 */
1634 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1635 " for block %llu\n",
1636 mdname(mddev),
1637 bdevname(bio->bi_bdev, b),
1638 (unsigned long long)r1_bio->sector);
1639 for (d = 0; d < conf->raid_disks * 2; d++) {
1640 rdev = conf->mirrors[d].rdev;
1641 if (!rdev || test_bit(Faulty, &rdev->flags))
1642 continue;
1643 if (!rdev_set_badblocks(rdev, sect, s, 0))
1644 abort = 1;
1645 }
1646 if (abort) {
1647 conf->recovery_disabled =
1648 mddev->recovery_disabled;
1649 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1650 md_done_sync(mddev, r1_bio->sectors, 0);
1651 put_buf(r1_bio);
1652 return 0;
1653 }
1654 /* Try next page */
1655 sectors -= s;
1656 sect += s;
1657 idx++;
1658 continue;
1659 }
1660
1661 start = d;
1662 /* write it back and re-read */
1663 while (d != r1_bio->read_disk) {
1664 if (d == 0)
1665 d = conf->raid_disks * 2;
1666 d--;
1667 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1668 continue;
1669 rdev = conf->mirrors[d].rdev;
1670 if (r1_sync_page_io(rdev, sect, s,
1671 bio->bi_io_vec[idx].bv_page,
1672 WRITE) == 0) {
1673 r1_bio->bios[d]->bi_end_io = NULL;
1674 rdev_dec_pending(rdev, mddev);
1675 }
1676 }
1677 d = start;
1678 while (d != r1_bio->read_disk) {
1679 if (d == 0)
1680 d = conf->raid_disks * 2;
1681 d--;
1682 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1683 continue;
1684 rdev = conf->mirrors[d].rdev;
1685 if (r1_sync_page_io(rdev, sect, s,
1686 bio->bi_io_vec[idx].bv_page,
1687 READ) != 0)
1688 atomic_add(s, &rdev->corrected_errors);
1689 }
1690 sectors -= s;
1691 sect += s;
1692 idx ++;
1693 }
1694 set_bit(R1BIO_Uptodate, &r1_bio->state);
1695 set_bit(BIO_UPTODATE, &bio->bi_flags);
1696 return 1;
1697}
1698
1699static int process_checks(struct r1bio *r1_bio)
1700{
1701 /* We have read all readable devices. If we haven't
1702 * got the block, then there is no hope left.
1703 * If we have, then we want to do a comparison
1704 * and skip the write if everything is the same.
1705 * If any blocks failed to read, then we need to
1706 * attempt an over-write
1707 */
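	/* In short: treat the first successfully read device as 'primary',
	 * compare every other read buffer against it page by page, and only
	 * leave a write pending for devices that differ or failed to read.
	 */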
1708 struct mddev *mddev = r1_bio->mddev;
1709 struct r1conf *conf = mddev->private;
1710 int primary;
1711 int i;
1712 int vcnt;
1713
1714 for (primary = 0; primary < conf->raid_disks * 2; primary++)
1715 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1716 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1717 r1_bio->bios[primary]->bi_end_io = NULL;
1718 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1719 break;
1720 }
1721 r1_bio->read_disk = primary;
1722 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
1723 for (i = 0; i < conf->raid_disks * 2; i++) {
1724 int j;
1725 struct bio *pbio = r1_bio->bios[primary];
1726 struct bio *sbio = r1_bio->bios[i];
1727 int size;
1728
1729 if (r1_bio->bios[i]->bi_end_io != end_sync_read)
1730 continue;
1731
1732 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1733 for (j = vcnt; j-- ; ) {
1734 struct page *p, *s;
1735 p = pbio->bi_io_vec[j].bv_page;
1736 s = sbio->bi_io_vec[j].bv_page;
1737 if (memcmp(page_address(p),
1738 page_address(s),
1739 sbio->bi_io_vec[j].bv_len))
1740 break;
1741 }
1742 } else
1743 j = 0;
1744 if (j >= 0)
1745 mddev->resync_mismatches += r1_bio->sectors;
1746 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1747 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
1748 /* No need to write to this device. */
1749 sbio->bi_end_io = NULL;
1750 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1751 continue;
1752 }
1753 /* fixup the bio for reuse */
1754 sbio->bi_vcnt = vcnt;
1755 sbio->bi_size = r1_bio->sectors << 9;
1756 sbio->bi_idx = 0;
1757 sbio->bi_phys_segments = 0;
1758 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1759 sbio->bi_flags |= 1 << BIO_UPTODATE;
1760 sbio->bi_next = NULL;
1761 sbio->bi_sector = r1_bio->sector +
1762 conf->mirrors[i].rdev->data_offset;
1763 sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1764 size = sbio->bi_size;
1765 for (j = 0; j < vcnt ; j++) {
1766 struct bio_vec *bi;
1767 bi = &sbio->bi_io_vec[j];
1768 bi->bv_offset = 0;
1769 if (size > PAGE_SIZE)
1770 bi->bv_len = PAGE_SIZE;
1771 else
1772 bi->bv_len = size;
1773 size -= PAGE_SIZE;
1774 memcpy(page_address(bi->bv_page),
1775 page_address(pbio->bi_io_vec[j].bv_page),
1776 PAGE_SIZE);
1777 }
1778 }
1779 return 0;
1780}
1781
1782static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
1783{
1784 struct r1conf *conf = mddev->private;
1785 int i;
1786 int disks = conf->raid_disks * 2;
1787 struct bio *bio, *wbio;
1788
1789 bio = r1_bio->bios[r1_bio->read_disk];
1790
1791 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
1792 /* ouch - failed to read all of that. */
1793 if (!fix_sync_read_error(r1_bio))
1794 return;
1795
1796 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1797 if (process_checks(r1_bio) < 0)
1798 return;
1799 /*
1800 * schedule writes
1801 */
1802 atomic_set(&r1_bio->remaining, 1);
1803 for (i = 0; i < disks ; i++) {
1804 wbio = r1_bio->bios[i];
1805 if (wbio->bi_end_io == NULL ||
1806 (wbio->bi_end_io == end_sync_read &&
1807 (i == r1_bio->read_disk ||
1808 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1809 continue;
1810
1811 wbio->bi_rw = WRITE;
1812 wbio->bi_end_io = end_sync_write;
1813 atomic_inc(&r1_bio->remaining);
1814 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1815
1816 generic_make_request(wbio);
1817 }
1818
1819 if (atomic_dec_and_test(&r1_bio->remaining)) {
1820 /* if we're here, all write(s) have completed, so clean up */
1821 int s = r1_bio->sectors;
1822 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1823 test_bit(R1BIO_WriteError, &r1_bio->state))
1824 reschedule_retry(r1_bio);
1825 else {
1826 put_buf(r1_bio);
1827 md_done_sync(mddev, s, 1);
1828 }
1829 }
1830}
1831
1832/*
1833 * This is a kernel thread which:
1834 *
1835 * 1. Retries failed read operations on working mirrors.
1836 * 2. Updates the raid superblock when problems are encountered.
1837 * 3. Performs writes following reads for array synchronising.
1838 */
1839
1840static void fix_read_error(struct r1conf *conf, int read_disk,
1841 sector_t sect, int sectors)
1842{
1843 struct mddev *mddev = conf->mddev;
1844 while(sectors) {
1845 int s = sectors;
1846 int d = read_disk;
1847 int success = 0;
1848 int start;
1849 struct md_rdev *rdev;
1850
1851 if (s > (PAGE_SIZE>>9))
1852 s = PAGE_SIZE >> 9;
1853
1854 do {
1855 /* Note: no rcu protection needed here
1856 * as this is synchronous in the raid1d thread
1857 * which is the thread that might remove
1858 * a device. If raid1d ever becomes multi-threaded....
1859 */
1860 sector_t first_bad;
1861 int bad_sectors;
1862
1863 rdev = conf->mirrors[d].rdev;
1864 if (rdev &&
1865 (test_bit(In_sync, &rdev->flags) ||
1866 (!test_bit(Faulty, &rdev->flags) &&
1867 rdev->recovery_offset >= sect + s)) &&
1868 is_badblock(rdev, sect, s,
1869 &first_bad, &bad_sectors) == 0 &&
1870 sync_page_io(rdev, sect, s<<9,
1871 conf->tmppage, READ, false))
1872 success = 1;
1873 else {
1874 d++;
1875 if (d == conf->raid_disks * 2)
1876 d = 0;
1877 }
1878 } while (!success && d != read_disk);
1879
1880 if (!success) {
1881 /* Cannot read from anywhere - mark it bad */
1882 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
1883 if (!rdev_set_badblocks(rdev, sect, s, 0))
1884 md_error(mddev, rdev);
1885 break;
1886 }
1887 /* write it back and re-read */
1888 start = d;
1889 while (d != read_disk) {
1890 if (d==0)
1891 d = conf->raid_disks * 2;
1892 d--;
1893 rdev = conf->mirrors[d].rdev;
1894 if (rdev &&
1895 test_bit(In_sync, &rdev->flags))
1896 r1_sync_page_io(rdev, sect, s,
1897 conf->tmppage, WRITE);
1898 }
1899 d = start;
1900 while (d != read_disk) {
1901 char b[BDEVNAME_SIZE];
1902 if (d==0)
1903 d = conf->raid_disks * 2;
1904 d--;
1905 rdev = conf->mirrors[d].rdev;
1906 if (rdev &&
1907 test_bit(In_sync, &rdev->flags)) {
1908 if (r1_sync_page_io(rdev, sect, s,
1909 conf->tmppage, READ)) {
1910 atomic_add(s, &rdev->corrected_errors);
1911 printk(KERN_INFO
1912 "md/raid1:%s: read error corrected "
1913 "(%d sectors at %llu on %s)\n",
1914 mdname(mddev), s,
1915 (unsigned long long)(sect +
1916 rdev->data_offset),
1917 bdevname(rdev->bdev, b));
1918 }
1919 }
1920 }
1921 sectors -= s;
1922 sect += s;
1923 }
1924}
1925
1926static void bi_complete(struct bio *bio, int error)
1927{
1928 complete((struct completion *)bio->bi_private);
1929}
1930
1931static int submit_bio_wait(int rw, struct bio *bio)
1932{
1933 struct completion event;
1934 rw |= REQ_SYNC;
1935
1936 init_completion(&event);
1937 bio->bi_private = &event;
1938 bio->bi_end_io = bi_complete;
1939 submit_bio(rw, bio);
1940 wait_for_completion(&event);
1941
1942 return test_bit(BIO_UPTODATE, &bio->bi_flags);
1943}
1944
1945static int narrow_write_error(struct r1bio *r1_bio, int i)
1946{
1947 struct mddev *mddev = r1_bio->mddev;
1948 struct r1conf *conf = mddev->private;
1949 struct md_rdev *rdev = conf->mirrors[i].rdev;
1950 int vcnt, idx;
1951 struct bio_vec *vec;
1952
1953 /* bio has the data to be written to device 'i' where
1954 * we just recently had a write error.
1955 * We repeatedly clone the bio and trim down to one block,
1956 * then try the write. Where the write fails we record
1957 * a bad block.
1958 * It is conceivable that the bio doesn't exactly align with
1959 * blocks. We must handle this somehow.
1960 *
1961 * We currently own a reference on the rdev.
1962 */
1963
1964 int block_sectors;
1965 sector_t sector;
1966 int sectors;
1967 int sect_to_write = r1_bio->sectors;
1968 int ok = 1;
1969
1970 if (rdev->badblocks.shift < 0)
1971 return 0;
1972
1973 block_sectors = 1 << rdev->badblocks.shift;
1974 sector = r1_bio->sector;
1975 sectors = ((sector + block_sectors)
1976 & ~(sector_t)(block_sectors - 1))
1977 - sector;
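	/* i.e. round up to the next badblocks-shift boundary: e.g. with
	 * block_sectors = 8 and sector = 13, the first write covers
	 * ((13 + 8) & ~7) - 13 = 3 sectors, so subsequent writes are
	 * block aligned.
	 */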
1978
1979 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
1980 vcnt = r1_bio->behind_page_count;
1981 vec = r1_bio->behind_bvecs;
1982 idx = 0;
1983 while (vec[idx].bv_page == NULL)
1984 idx++;
1985 } else {
1986 vcnt = r1_bio->master_bio->bi_vcnt;
1987 vec = r1_bio->master_bio->bi_io_vec;
1988 idx = r1_bio->master_bio->bi_idx;
1989 }
1990 while (sect_to_write) {
1991 struct bio *wbio;
1992 if (sectors > sect_to_write)
1993 sectors = sect_to_write;
1994		/* Write at 'sector' for 'sectors' */
1995
1996 wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
1997 memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
1998 wbio->bi_sector = r1_bio->sector;
1999 wbio->bi_rw = WRITE;
2000 wbio->bi_vcnt = vcnt;
2001 wbio->bi_size = r1_bio->sectors << 9;
2002 wbio->bi_idx = idx;
2003
2004 md_trim_bio(wbio, sector - r1_bio->sector, sectors);
2005 wbio->bi_sector += rdev->data_offset;
2006 wbio->bi_bdev = rdev->bdev;
2007 if (submit_bio_wait(WRITE, wbio) == 0)
2008 /* failure! */
2009 ok = rdev_set_badblocks(rdev, sector,
2010 sectors, 0)
2011 && ok;
2012
2013 bio_put(wbio);
2014 sect_to_write -= sectors;
2015 sector += sectors;
2016 sectors = block_sectors;
2017 }
2018 return ok;
2019}
2020
2021static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2022{
2023 int m;
2024 int s = r1_bio->sectors;
2025 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2026 struct md_rdev *rdev = conf->mirrors[m].rdev;
2027 struct bio *bio = r1_bio->bios[m];
2028 if (bio->bi_end_io == NULL)
2029 continue;
2030 if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
2031 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2032 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2033 }
2034 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
2035 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2036 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2037 md_error(conf->mddev, rdev);
2038 }
2039 }
2040 put_buf(r1_bio);
2041 md_done_sync(conf->mddev, s, 1);
2042}
2043
2044static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2045{
2046 int m;
2047 for (m = 0; m < conf->raid_disks * 2 ; m++)
2048 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2049 struct md_rdev *rdev = conf->mirrors[m].rdev;
2050 rdev_clear_badblocks(rdev,
2051 r1_bio->sector,
2052 r1_bio->sectors, 0);
2053 rdev_dec_pending(rdev, conf->mddev);
2054 } else if (r1_bio->bios[m] != NULL) {
2055 /* This drive got a write error. We need to
2056 * narrow down and record precise write
2057 * errors.
2058 */
2059 if (!narrow_write_error(r1_bio, m)) {
2060 md_error(conf->mddev,
2061 conf->mirrors[m].rdev);
2062 /* an I/O failed, we can't clear the bitmap */
2063 set_bit(R1BIO_Degraded, &r1_bio->state);
2064 }
2065 rdev_dec_pending(conf->mirrors[m].rdev,
2066 conf->mddev);
2067 }
2068 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2069 close_write(r1_bio);
2070 raid_end_bio_io(r1_bio);
2071}
2072
2073static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2074{
2075 int disk;
2076 int max_sectors;
2077 struct mddev *mddev = conf->mddev;
2078 struct bio *bio;
2079 char b[BDEVNAME_SIZE];
2080 struct md_rdev *rdev;
2081
2082 clear_bit(R1BIO_ReadError, &r1_bio->state);
2083 /* we got a read error. Maybe the drive is bad. Maybe just
2084 * the block and we can fix it.
2085 * We freeze all other IO, and try reading the block from
2086	 * other devices. When we find one, we re-write
2087	 * and check whether that fixes the read error.
2088 * This is all done synchronously while the array is
2089 * frozen
2090 */
2091 if (mddev->ro == 0) {
2092 freeze_array(conf);
2093 fix_read_error(conf, r1_bio->read_disk,
2094 r1_bio->sector, r1_bio->sectors);
2095 unfreeze_array(conf);
2096 } else
2097 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
2098
2099 bio = r1_bio->bios[r1_bio->read_disk];
2100 bdevname(bio->bi_bdev, b);
2101read_more:
2102 disk = read_balance(conf, r1_bio, &max_sectors);
2103 if (disk == -1) {
2104 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
2105 " read error for block %llu\n",
2106 mdname(mddev), b, (unsigned long long)r1_bio->sector);
2107 raid_end_bio_io(r1_bio);
2108 } else {
2109 const unsigned long do_sync
2110 = r1_bio->master_bio->bi_rw & REQ_SYNC;
2111 if (bio) {
2112 r1_bio->bios[r1_bio->read_disk] =
2113 mddev->ro ? IO_BLOCKED : NULL;
2114 bio_put(bio);
2115 }
2116 r1_bio->read_disk = disk;
2117 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2118 md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
2119 r1_bio->bios[r1_bio->read_disk] = bio;
2120 rdev = conf->mirrors[disk].rdev;
2121 printk_ratelimited(KERN_ERR
2122 "md/raid1:%s: redirecting sector %llu"
2123 " to other mirror: %s\n",
2124 mdname(mddev),
2125 (unsigned long long)r1_bio->sector,
2126 bdevname(rdev->bdev, b));
2127 bio->bi_sector = r1_bio->sector + rdev->data_offset;
2128 bio->bi_bdev = rdev->bdev;
2129 bio->bi_end_io = raid1_end_read_request;
2130 bio->bi_rw = READ | do_sync;
2131 bio->bi_private = r1_bio;
2132 if (max_sectors < r1_bio->sectors) {
2133 /* Drat - have to split this up more */
2134 struct bio *mbio = r1_bio->master_bio;
2135 int sectors_handled = (r1_bio->sector + max_sectors
2136 - mbio->bi_sector);
2137 r1_bio->sectors = max_sectors;
2138 spin_lock_irq(&conf->device_lock);
2139 if (mbio->bi_phys_segments == 0)
2140 mbio->bi_phys_segments = 2;
2141 else
2142 mbio->bi_phys_segments++;
2143 spin_unlock_irq(&conf->device_lock);
2144 generic_make_request(bio);
2145 bio = NULL;
2146
2147 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2148
2149 r1_bio->master_bio = mbio;
2150 r1_bio->sectors = (mbio->bi_size >> 9)
2151 - sectors_handled;
2152 r1_bio->state = 0;
2153 set_bit(R1BIO_ReadError, &r1_bio->state);
2154 r1_bio->mddev = mddev;
2155 r1_bio->sector = mbio->bi_sector + sectors_handled;
2156
2157 goto read_more;
2158 } else
2159 generic_make_request(bio);
2160 }
2161}
2162
2163static void raid1d(struct mddev *mddev)
2164{
2165 struct r1bio *r1_bio;
2166 unsigned long flags;
2167 struct r1conf *conf = mddev->private;
2168 struct list_head *head = &conf->retry_list;
2169 struct blk_plug plug;
2170
2171 md_check_recovery(mddev);
2172
2173 blk_start_plug(&plug);
2174 for (;;) {
2175
2176 if (atomic_read(&mddev->plug_cnt) == 0)
2177 flush_pending_writes(conf);
2178
2179 spin_lock_irqsave(&conf->device_lock, flags);
2180 if (list_empty(head)) {
2181 spin_unlock_irqrestore(&conf->device_lock, flags);
2182 break;
2183 }
2184 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2185 list_del(head->prev);
2186 conf->nr_queued--;
2187 spin_unlock_irqrestore(&conf->device_lock, flags);
2188
2189 mddev = r1_bio->mddev;
2190 conf = mddev->private;
2191 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2192 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2193 test_bit(R1BIO_WriteError, &r1_bio->state))
2194 handle_sync_write_finished(conf, r1_bio);
2195 else
2196 sync_request_write(mddev, r1_bio);
2197 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2198 test_bit(R1BIO_WriteError, &r1_bio->state))
2199 handle_write_finished(conf, r1_bio);
2200 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2201 handle_read_error(conf, r1_bio);
2202 else
2203 /* just a partial read to be scheduled from separate
2204 * context
2205 */
2206 generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2207
2208 cond_resched();
2209 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2210 md_check_recovery(mddev);
2211 }
2212 blk_finish_plug(&plug);
2213}
2214
2215
2216static int init_resync(struct r1conf *conf)
2217{
2218 int buffs;
2219
2220 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2221 BUG_ON(conf->r1buf_pool);
2222 conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2223 conf->poolinfo);
2224 if (!conf->r1buf_pool)
2225 return -ENOMEM;
2226 conf->next_resync = 0;
2227 return 0;
2228}
2229
2230/*
2231 * perform a "sync" on one "block"
2232 *
2233 * We need to make sure that no normal I/O request - particularly write
2234 * requests - conflict with active sync requests.
2235 *
2236 * This is achieved by tracking pending requests and a 'barrier' concept
2237 * that can be installed to exclude normal IO requests.
2238 */
2239
2240static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
2241{
2242 struct r1conf *conf = mddev->private;
2243 struct r1bio *r1_bio;
2244 struct bio *bio;
2245 sector_t max_sector, nr_sectors;
2246 int disk = -1;
2247 int i;
2248 int wonly = -1;
2249 int write_targets = 0, read_targets = 0;
2250 sector_t sync_blocks;
2251 int still_degraded = 0;
2252 int good_sectors = RESYNC_SECTORS;
2253 int min_bad = 0; /* number of sectors that are bad in all devices */
2254
2255 if (!conf->r1buf_pool)
2256 if (init_resync(conf))
2257 return 0;
2258
2259 max_sector = mddev->dev_sectors;
2260 if (sector_nr >= max_sector) {
2261 /* If we aborted, we need to abort the
2262 * sync on the 'current' bitmap chunk (there will
2263		 * only be one in raid1 resync).
2264		 * We can find the current address in mddev->curr_resync.
2265 */
2266 if (mddev->curr_resync < max_sector) /* aborted */
2267 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2268 &sync_blocks, 1);
2269 else /* completed sync */
2270 conf->fullsync = 0;
2271
2272 bitmap_close_sync(mddev->bitmap);
2273 close_sync(conf);
2274 return 0;
2275 }
2276
2277 if (mddev->bitmap == NULL &&
2278 mddev->recovery_cp == MaxSector &&
2279 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2280 conf->fullsync == 0) {
2281 *skipped = 1;
2282 return max_sector - sector_nr;
2283 }
2284	/* before building a request, check if we can skip these blocks...
2285	 * This call to bitmap_start_sync doesn't actually record anything.
2286 */
2287 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2288 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2289 /* We can skip this block, and probably several more */
2290 *skipped = 1;
2291 return sync_blocks;
2292 }
2293 /*
2294 * If there is non-resync activity waiting for a turn,
2295 * and resync is going fast enough,
2296	 * then let it through before starting on this new sync request.
2297 */
2298 if (!go_faster && conf->nr_waiting)
2299 msleep_interruptible(1000);
2300
2301 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2302 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2303 raise_barrier(conf);
2304
2305 conf->next_resync = sector_nr;
2306
2307 rcu_read_lock();
2308 /*
2309 * If we get a correctably read error during resync or recovery,
2310 * we might want to read from a different device. So we
2311 * flag all drives that could conceivably be read from for READ,
2312 * and any others (which will be non-In_sync devices) for WRITE.
2313 * If a read fails, we try reading from something else for which READ
2314 * is OK.
2315 */
2316
2317 r1_bio->mddev = mddev;
2318 r1_bio->sector = sector_nr;
2319 r1_bio->state = 0;
2320 set_bit(R1BIO_IsSync, &r1_bio->state);
2321
2322 for (i = 0; i < conf->raid_disks * 2; i++) {
2323 struct md_rdev *rdev;
2324 bio = r1_bio->bios[i];
2325
2326 /* take from bio_init */
2327 bio->bi_next = NULL;
2328 bio->bi_flags &= ~(BIO_POOL_MASK-1);
2329 bio->bi_flags |= 1 << BIO_UPTODATE;
2330 bio->bi_rw = READ;
2331 bio->bi_vcnt = 0;
2332 bio->bi_idx = 0;
2333 bio->bi_phys_segments = 0;
2334 bio->bi_size = 0;
2335 bio->bi_end_io = NULL;
2336 bio->bi_private = NULL;
2337
2338 rdev = rcu_dereference(conf->mirrors[i].rdev);
2339 if (rdev == NULL ||
2340 test_bit(Faulty, &rdev->flags)) {
2341 if (i < conf->raid_disks)
2342 still_degraded = 1;
2343 } else if (!test_bit(In_sync, &rdev->flags)) {
2344 bio->bi_rw = WRITE;
2345 bio->bi_end_io = end_sync_write;
2346 write_targets++;
2347 } else {
2348 /* may need to read from here */
2349 sector_t first_bad = MaxSector;
2350 int bad_sectors;
2351
2352 if (is_badblock(rdev, sector_nr, good_sectors,
2353 &first_bad, &bad_sectors)) {
2354 if (first_bad > sector_nr)
2355 good_sectors = first_bad - sector_nr;
2356 else {
2357 bad_sectors -= (sector_nr - first_bad);
2358 if (min_bad == 0 ||
2359 min_bad > bad_sectors)
2360 min_bad = bad_sectors;
2361 }
2362 }
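			/* Illustrative example: good_sectors starts at
			 * RESYNC_SECTORS (128).  With sector_nr = 1000 and a
			 * bad range beginning at first_bad = 1040,
			 * good_sectors is trimmed to 40.  If instead the bad
			 * range covers sector_nr itself, bad_sectors becomes
			 * the count of bad sectors from sector_nr onward and
			 * min_bad remembers the smallest such count across
			 * the readable devices.
			 */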
2363 if (sector_nr < first_bad) {
2364 if (test_bit(WriteMostly, &rdev->flags)) {
2365 if (wonly < 0)
2366 wonly = i;
2367 } else {
2368 if (disk < 0)
2369 disk = i;
2370 }
2371 bio->bi_rw = READ;
2372 bio->bi_end_io = end_sync_read;
2373 read_targets++;
2374 }
2375 }
2376 if (bio->bi_end_io) {
2377 atomic_inc(&rdev->nr_pending);
2378 bio->bi_sector = sector_nr + rdev->data_offset;
2379 bio->bi_bdev = rdev->bdev;
2380 bio->bi_private = r1_bio;
2381 }
2382 }
2383 rcu_read_unlock();
2384 if (disk < 0)
2385 disk = wonly;
2386 r1_bio->read_disk = disk;
2387
2388 if (read_targets == 0 && min_bad > 0) {
2389 /* These sectors are bad on all InSync devices, so we
2390 * need to mark them bad on all write targets
2391 */
2392 int ok = 1;
2393 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2394 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2395 struct md_rdev *rdev = conf->mirrors[i].rdev;
2396 ok = rdev_set_badblocks(rdev, sector_nr,
2397 min_bad, 0
2398 ) && ok;
2399 }
2400 set_bit(MD_CHANGE_DEVS, &mddev->flags);
2401 *skipped = 1;
2402 put_buf(r1_bio);
2403
2404 if (!ok) {
2405 /* Cannot record the bad blocks, so we need to
2406 * abort the resync.
2407 * If there are multiple read targets, we could just
2408 * fail the really bad ones ???
2409 */
2410 conf->recovery_disabled = mddev->recovery_disabled;
2411 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2412 return 0;
2413 } else
2414 return min_bad;
2415
2416 }
2417 if (min_bad > 0 && min_bad < good_sectors) {
2418 /* only resync enough to reach the next bad->good
2419 * transition */
2420 good_sectors = min_bad;
2421 }
2422
2423 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2424 /* extra read targets are also write targets */
2425 write_targets += read_targets-1;
2426
2427 if (write_targets == 0 || read_targets == 0) {
2428 /* There is nowhere to write, so all non-sync
2429 * drives must have failed - we are finished
2430 */
2431 sector_t rv;
2432 if (min_bad > 0)
2433 max_sector = sector_nr + min_bad;
2434 rv = max_sector - sector_nr;
2435 *skipped = 1;
2436 put_buf(r1_bio);
2437 return rv;
2438 }
2439
2440 if (max_sector > mddev->resync_max)
2441 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2442 if (max_sector > sector_nr + good_sectors)
2443 max_sector = sector_nr + good_sectors;
2444 nr_sectors = 0;
2445 sync_blocks = 0;
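	/* Build the bios one page at a time: each pass handles at most
	 * RESYNC_PAGES pages, i.e. RESYNC_BLOCK_SIZE (64KiB, or
	 * RESYNC_SECTORS sectors), further limited by max_sector, the bitmap
	 * chunk size (sync_blocks) and any bad-block boundary (good_sectors).
	 */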
2446 do {
2447 struct page *page;
2448 int len = PAGE_SIZE;
2449 if (sector_nr + (len>>9) > max_sector)
2450 len = (max_sector - sector_nr) << 9;
2451 if (len == 0)
2452 break;
2453 if (sync_blocks == 0) {
2454 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2455 &sync_blocks, still_degraded) &&
2456 !conf->fullsync &&
2457 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2458 break;
2459 BUG_ON(sync_blocks < (PAGE_SIZE>>9));
2460 if ((len >> 9) > sync_blocks)
2461 len = sync_blocks<<9;
2462 }
2463
2464 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2465 bio = r1_bio->bios[i];
2466 if (bio->bi_end_io) {
2467 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2468 if (bio_add_page(bio, page, len, 0) == 0) {
2469 /* stop here */
2470 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2471 while (i > 0) {
2472 i--;
2473 bio = r1_bio->bios[i];
2474 if (bio->bi_end_io == NULL)
2475 continue;
2476 /* remove last page from this bio */
2477 bio->bi_vcnt--;
2478 bio->bi_size -= len;
2479 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
2480 }
2481 goto bio_full;
2482 }
2483 }
2484 }
2485 nr_sectors += len>>9;
2486 sector_nr += len>>9;
2487 sync_blocks -= (len>>9);
2488 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2489 bio_full:
2490 r1_bio->sectors = nr_sectors;
2491
2492 /* For a user-requested sync, we read all readable devices and do a
2493 * compare
2494 */
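	/* Otherwise only r1_bio->read_disk is read here; the data is copied
	 * to the out-of-sync targets by the raid1 thread once that read
	 * completes.
	 */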
2495 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2496 atomic_set(&r1_bio->remaining, read_targets);
2497 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2498 bio = r1_bio->bios[i];
2499 if (bio->bi_end_io == end_sync_read) {
2500 read_targets--;
2501 md_sync_acct(bio->bi_bdev, nr_sectors);
2502 generic_make_request(bio);
2503 }
2504 }
2505 } else {
2506 atomic_set(&r1_bio->remaining, 1);
2507 bio = r1_bio->bios[r1_bio->read_disk];
2508 md_sync_acct(bio->bi_bdev, nr_sectors);
2509 generic_make_request(bio);
2510
2511 }
2512 return nr_sectors;
2513}
2514
2515static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2516{
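	/* Mirroring adds redundancy, not capacity: the array is as large as a
	 * single member device (mddev->dev_sectors) unless an explicit size
	 * was requested.
	 */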
2517 if (sectors)
2518 return sectors;
2519
2520 return mddev->dev_sectors;
2521}
2522
2523static struct r1conf *setup_conf(struct mddev *mddev)
2524{
2525 struct r1conf *conf;
2526 int i;
2527 struct mirror_info *disk;
2528 struct md_rdev *rdev;
2529 int err = -ENOMEM;
2530
2531 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2532 if (!conf)
2533 goto abort;
2534
2535 conf->mirrors = kzalloc(sizeof(struct mirror_info)
2536 * mddev->raid_disks * 2,
2537 GFP_KERNEL);
2538 if (!conf->mirrors)
2539 goto abort;
2540
2541 conf->tmppage = alloc_page(GFP_KERNEL);
2542 if (!conf->tmppage)
2543 goto abort;
2544
2545 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2546 if (!conf->poolinfo)
2547 goto abort;
2548 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2549 conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2550 r1bio_pool_free,
2551 conf->poolinfo);
2552 if (!conf->r1bio_pool)
2553 goto abort;
2554
2555 conf->poolinfo->mddev = mddev;
2556
2557 err = -EINVAL;
2558 spin_lock_init(&conf->device_lock);
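	/* conf->mirrors has 2 * raid_disks slots: indexes 0..raid_disks-1
	 * hold the primary devices, and raid_disks..2*raid_disks-1 hold any
	 * replacement device being built for the corresponding slot.
	 */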
2559 rdev_for_each(rdev, mddev) {
2560 struct request_queue *q;
2561 int disk_idx = rdev->raid_disk;
2562 if (disk_idx >= mddev->raid_disks
2563 || disk_idx < 0)
2564 continue;
2565 if (test_bit(Replacement, &rdev->flags))
2566 disk = conf->mirrors + conf->raid_disks + disk_idx;
2567 else
2568 disk = conf->mirrors + disk_idx;
2569
2570 if (disk->rdev)
2571 goto abort;
2572 disk->rdev = rdev;
2573 q = bdev_get_queue(rdev->bdev);
2574 if (q->merge_bvec_fn)
2575 mddev->merge_check_needed = 1;
2576
2577 disk->head_position = 0;
2578 }
2579 conf->raid_disks = mddev->raid_disks;
2580 conf->mddev = mddev;
2581 INIT_LIST_HEAD(&conf->retry_list);
2582
2583 spin_lock_init(&conf->resync_lock);
2584 init_waitqueue_head(&conf->wait_barrier);
2585
2586 bio_list_init(&conf->pending_bio_list);
2587 conf->pending_count = 0;
2588 conf->recovery_disabled = mddev->recovery_disabled - 1;
2589
2590 err = -EIO;
2591 conf->last_used = -1;
2592 for (i = 0; i < conf->raid_disks * 2; i++) {
2593
2594 disk = conf->mirrors + i;
2595
2596 if (i < conf->raid_disks &&
2597 disk[conf->raid_disks].rdev) {
2598 /* This slot has a replacement. */
2599 if (!disk->rdev) {
2600 /* No original, just make the replacement
2601 * a recovering spare
2602 */
2603 disk->rdev =
2604 disk[conf->raid_disks].rdev;
2605 disk[conf->raid_disks].rdev = NULL;
2606 } else if (!test_bit(In_sync, &disk->rdev->flags))
2607 /* Original is not in_sync - bad */
2608 goto abort;
2609 }
2610
2611 if (!disk->rdev ||
2612 !test_bit(In_sync, &disk->rdev->flags)) {
2613 disk->head_position = 0;
2614 if (disk->rdev &&
2615 (disk->rdev->saved_raid_disk < 0))
2616 conf->fullsync = 1;
2617 } else if (conf->last_used < 0)
2618 /*
2619 * The first working device is used as a
2620 * starting point for read balancing.
2621 */
2622 conf->last_used = i;
2623 }
2624
2625 if (conf->last_used < 0) {
2626 printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
2627 mdname(mddev));
2628 goto abort;
2629 }
2630 err = -ENOMEM;
2631 conf->thread = md_register_thread(raid1d, mddev, "raid1");
2632 if (!conf->thread) {
2633 printk(KERN_ERR
2634 "md/raid1:%s: couldn't allocate thread\n",
2635 mdname(mddev));
2636 goto abort;
2637 }
2638
2639 return conf;
2640
2641 abort:
2642 if (conf) {
2643 if (conf->r1bio_pool)
2644 mempool_destroy(conf->r1bio_pool);
2645 kfree(conf->mirrors);
2646 safe_put_page(conf->tmppage);
2647 kfree(conf->poolinfo);
2648 kfree(conf);
2649 }
2650 return ERR_PTR(err);
2651}
2652
2653static int stop(struct mddev *mddev);
2654static int run(struct mddev *mddev)
2655{
2656 struct r1conf *conf;
2657 int i;
2658 struct md_rdev *rdev;
2659 int ret;
2660
2661 if (mddev->level != 1) {
2662 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2663 mdname(mddev), mddev->level);
2664 return -EIO;
2665 }
2666 if (mddev->reshape_position != MaxSector) {
2667 printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2668 mdname(mddev));
2669 return -EIO;
2670 }
2671 /*
2672 * copy the already verified devices into our private RAID1
2673 * bookkeeping area. [whatever we allocate in run(),
2674 * should be freed in stop()]
2675 */
2676 if (mddev->private == NULL)
2677 conf = setup_conf(mddev);
2678 else
2679 conf = mddev->private;
2680
2681 if (IS_ERR(conf))
2682 return PTR_ERR(conf);
2683
2684 rdev_for_each(rdev, mddev) {
2685 if (!mddev->gendisk)
2686 continue;
2687 disk_stack_limits(mddev->gendisk, rdev->bdev,
2688 rdev->data_offset << 9);
2689 }
2690
2691 mddev->degraded = 0;
2692 for (i=0; i < conf->raid_disks; i++)
2693 if (conf->mirrors[i].rdev == NULL ||
2694 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2695 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2696 mddev->degraded++;
2697
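	/* With only a single working mirror there is nothing to resync
	 * against, so the array can be treated as clean.
	 */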
2698 if (conf->raid_disks - mddev->degraded == 1)
2699 mddev->recovery_cp = MaxSector;
2700
2701 if (mddev->recovery_cp != MaxSector)
2702 printk(KERN_NOTICE "md/raid1:%s: not clean"
2703 " -- starting background reconstruction\n",
2704 mdname(mddev));
2705 printk(KERN_INFO
2706 "md/raid1:%s: active with %d out of %d mirrors\n",
2707 mdname(mddev), mddev->raid_disks - mddev->degraded,
2708 mddev->raid_disks);
2709
2710 /*
2711 * Ok, everything is just fine now
2712 */
2713 mddev->thread = conf->thread;
2714 conf->thread = NULL;
2715 mddev->private = conf;
2716
2717 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2718
2719 if (mddev->queue) {
2720 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2721 mddev->queue->backing_dev_info.congested_data = mddev;
2722 blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
2723 }
2724
2725 ret = md_integrity_register(mddev);
2726 if (ret)
2727 stop(mddev);
2728 return ret;
2729}
2730
2731static int stop(struct mddev *mddev)
2732{
2733 struct r1conf *conf = mddev->private;
2734 struct bitmap *bitmap = mddev->bitmap;
2735
2736 /* wait for behind writes to complete */
2737 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2738 printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
2739 mdname(mddev));
2740 /* need to kick something here to make sure I/O goes? */
2741 wait_event(bitmap->behind_wait,
2742 atomic_read(&bitmap->behind_writes) == 0);
2743 }
2744
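	/* Raising and immediately lowering the barrier waits for all
	 * in-flight normal I/O to drain before the structures are freed.
	 */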
2745 raise_barrier(conf);
2746 lower_barrier(conf);
2747
2748 md_unregister_thread(&mddev->thread);
2749 if (conf->r1bio_pool)
2750 mempool_destroy(conf->r1bio_pool);
2751 kfree(conf->mirrors);
2752 kfree(conf->poolinfo);
2753 kfree(conf);
2754 mddev->private = NULL;
2755 return 0;
2756}
2757
2758static int raid1_resize(struct mddev *mddev, sector_t sectors)
2759{
2760 /* no resync is happening, and there is enough space
2761 * on all devices, so we can resize.
2762 * We need to make sure resync covers any new space.
2763 * If the array is shrinking we should possibly wait until
2764 * any I/O in the removed space completes, but it hardly seems
2765 * worth it.
2766 */
2767 sector_t newsize = raid1_size(mddev, sectors, 0);
2768 if (mddev->external_size &&
2769 mddev->array_sectors > newsize)
2770 return -EINVAL;
2771 if (mddev->bitmap) {
2772 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
2773 if (ret)
2774 return ret;
2775 }
2776 md_set_array_sectors(mddev, newsize);
2777 set_capacity(mddev->gendisk, mddev->array_sectors);
2778 revalidate_disk(mddev->gendisk);
2779 if (sectors > mddev->dev_sectors &&
2780 mddev->recovery_cp > mddev->dev_sectors) {
2781 mddev->recovery_cp = mddev->dev_sectors;
2782 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2783 }
2784 mddev->dev_sectors = sectors;
2785 mddev->resync_max_sectors = sectors;
2786 return 0;
2787}
2788
2789static int raid1_reshape(struct mddev *mddev)
2790{
2791 /* We need to:
2792 * 1/ resize the r1bio_pool
2793 * 2/ resize conf->mirrors
2794 *
2795 * We allocate a new r1bio_pool if we can.
2796 * Then raise a device barrier and wait until all IO stops.
2797 * Then resize conf->mirrors and swap in the new r1bio pool.
2798 *
2799 * At the same time, we "pack" the devices so that all the missing
2800 * devices have the higher raid_disk numbers.
2801 */
2802 mempool_t *newpool, *oldpool;
2803 struct pool_info *newpoolinfo;
2804 struct mirror_info *newmirrors;
2805 struct r1conf *conf = mddev->private;
2806 int cnt, raid_disks;
2807 unsigned long flags;
2808 int d, d2, err;
2809
2810 /* Cannot change chunk_size, layout, or level */
2811 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2812 mddev->layout != mddev->new_layout ||
2813 mddev->level != mddev->new_level) {
2814 mddev->new_chunk_sectors = mddev->chunk_sectors;
2815 mddev->new_layout = mddev->layout;
2816 mddev->new_level = mddev->level;
2817 return -EINVAL;
2818 }
2819
2820 err = md_allow_write(mddev);
2821 if (err)
2822 return err;
2823
2824 raid_disks = mddev->raid_disks + mddev->delta_disks;
2825
2826 if (raid_disks < conf->raid_disks) {
2827 cnt = 0;
2828 for (d = 0; d < conf->raid_disks; d++)
2829 if (conf->mirrors[d].rdev)
2830 cnt++;
2831 if (cnt > raid_disks)
2832 return -EBUSY;
2833 }
2834
2835 newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2836 if (!newpoolinfo)
2837 return -ENOMEM;
2838 newpoolinfo->mddev = mddev;
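	/* As in setup_conf(), allocate twice raid_disks slots so each disk
	 * can also have a replacement device.
	 */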
2839 newpoolinfo->raid_disks = raid_disks * 2;
2840
2841 newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2842 r1bio_pool_free, newpoolinfo);
2843 if (!newpool) {
2844 kfree(newpoolinfo);
2845 return -ENOMEM;
2846 }
2847 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
2848 GFP_KERNEL);
2849 if (!newmirrors) {
2850 kfree(newpoolinfo);
2851 mempool_destroy(newpool);
2852 return -ENOMEM;
2853 }
2854
2855 raise_barrier(conf);
2856
2857 /* ok, everything is stopped */
2858 oldpool = conf->r1bio_pool;
2859 conf->r1bio_pool = newpool;
2860
2861 for (d = d2 = 0; d < conf->raid_disks; d++) {
2862 struct md_rdev *rdev = conf->mirrors[d].rdev;
2863 if (rdev && rdev->raid_disk != d2) {
2864 sysfs_unlink_rdev(mddev, rdev);
2865 rdev->raid_disk = d2;
2866 sysfs_unlink_rdev(mddev, rdev);
2867 if (sysfs_link_rdev(mddev, rdev))
2868 printk(KERN_WARNING
2869 "md/raid1:%s: cannot register rd%d\n",
2870 mdname(mddev), rdev->raid_disk);
2871 }
2872 if (rdev)
2873 newmirrors[d2++].rdev = rdev;
2874 }
2875 kfree(conf->mirrors);
2876 conf->mirrors = newmirrors;
2877 kfree(conf->poolinfo);
2878 conf->poolinfo = newpoolinfo;
2879
2880 spin_lock_irqsave(&conf->device_lock, flags);
2881 mddev->degraded += (raid_disks - conf->raid_disks);
2882 spin_unlock_irqrestore(&conf->device_lock, flags);
2883 conf->raid_disks = mddev->raid_disks = raid_disks;
2884 mddev->delta_disks = 0;
2885
2886 conf->last_used = 0; /* just make sure it is in-range */
2887 lower_barrier(conf);
2888
2889 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2890 md_wakeup_thread(mddev->thread);
2891
2892 mempool_destroy(oldpool);
2893 return 0;
2894}
2895
2896static void raid1_quiesce(struct mddev *mddev, int state)
2897{
2898 struct r1conf *conf = mddev->private;
2899
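	/* Quiesce states used by md: 1 freezes the array by raising the
	 * resync barrier, 0 releases it again, and 2 just wakes anyone
	 * waiting on the barrier so a suspend can make progress.
	 */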
2900 switch(state) {
2901 case 2: /* wake for suspend */
2902 wake_up(&conf->wait_barrier);
2903 break;
2904 case 1:
2905 raise_barrier(conf);
2906 break;
2907 case 0:
2908 lower_barrier(conf);
2909 break;
2910 }
2911}
2912
2913static void *raid1_takeover(struct mddev *mddev)
2914{
2915 /* raid1 can take over:
2916 * raid5 with 2 devices, any layout or chunk size
2917 */
2918 if (mddev->level == 5 && mddev->raid_disks == 2) {
2919 struct r1conf *conf;
2920 mddev->new_level = 1;
2921 mddev->new_layout = 0;
2922 mddev->new_chunk_sectors = 0;
2923 conf = setup_conf(mddev);
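		/* Start with the barrier raised so the new array appears
		 * quiesced; md lowers it again through raid1_quiesce() when
		 * the device is resumed after the level change.
		 */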
2924 if (!IS_ERR(conf))
2925 conf->barrier = 1;
2926 return conf;
2927 }
2928 return ERR_PTR(-EINVAL);
2929}
2930
2931static struct md_personality raid1_personality =
2932{
2933 .name = "raid1",
2934 .level = 1,
2935 .owner = THIS_MODULE,
2936 .make_request = make_request,
2937 .run = run,
2938 .stop = stop,
2939 .status = status,
2940 .error_handler = error,
2941 .hot_add_disk = raid1_add_disk,
2942 .hot_remove_disk= raid1_remove_disk,
2943 .spare_active = raid1_spare_active,
2944 .sync_request = sync_request,
2945 .resize = raid1_resize,
2946 .size = raid1_size,
2947 .check_reshape = raid1_reshape,
2948 .quiesce = raid1_quiesce,
2949 .takeover = raid1_takeover,
2950};
2951
2952static int __init raid_init(void)
2953{
2954 return register_md_personality(&raid1_personality);
2955}
2956
2957static void raid_exit(void)
2958{
2959 unregister_md_personality(&raid1_personality);
2960}
2961
2962module_init(raid_init);
2963module_exit(raid_exit);
2964MODULE_LICENSE("GPL");
2965MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
2966MODULE_ALIAS("md-personality-3"); /* RAID1 */
2967MODULE_ALIAS("md-raid1");
2968MODULE_ALIAS("md-level-1");
2969
2970module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);