1/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
8 * Based on code in raid1.c. See raid1.c for further copyright information.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/blkdev.h>
24#include <linux/seq_file.h>
25#include <linux/ratelimit.h>
26#include "md.h"
27#include "raid10.h"
28#include "raid0.h"
29#include "bitmap.h"
30
31/*
32 * RAID10 provides a combination of RAID0 and RAID1 functionality.
33 * The layout of data is defined by
34 * chunk_size
35 * raid_disks
36 * near_copies (stored in low byte of layout)
37 * far_copies (stored in second byte of layout)
38 * far_offset (stored in bit 16 of layout)
39 *
40 * The data to be stored is divided into chunks using chunksize.
41 * Each device is divided into far_copies sections.
42 * In each section, chunks are laid out in a style similar to raid0, but
43 * near_copies copies of each chunk are stored (each on a different drive).
44 * The starting device for each section is offset near_copies from the starting
45 * device of the previous section.
46 * Thus there are (near_copies*far_copies) copies of each chunk, and each copy is
47 * on a different drive.
48 * near_copies and far_copies must be at least one, and their product is at most
49 * raid_disks.
50 *
51 * If far_offset is true, then the far_copies are handled a bit differently.
52 * The copies are still in different stripes, but instead of being very far apart
53 * on disk, they are adjacent stripes.
54 */
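/*
 * An illustrative sketch (example geometries chosen here, not taken from
 * the driver): with raid_disks=4, near_copies=2, far_copies=1 the chunks
 * A, B, C, D, ... are laid out as
 *
 *	disk0  disk1  disk2  disk3
 *	  A      A      B      B
 *	  C      C      D      D
 *
 * With raid_disks=2, near_copies=1, far_copies=2 each device is split into
 * two sections, and the second section repeats the data shifted by
 * near_copies devices:
 *
 *	disk0  disk1
 *	  A      B
 *	  C      D
 *	  ...    ...
 *	  B      A
 *	  D      C
 */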
55
56/*
57 * Number of guaranteed r10bios in case of extreme VM load:
58 */
59#define NR_RAID10_BIOS 256
60
61static void allow_barrier(conf_t *conf);
62static void lower_barrier(conf_t *conf);
63
64static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
65{
66 conf_t *conf = data;
67 int size = offsetof(struct r10bio_s, devs[conf->copies]);
68
69 /* allocate a r10bio with room for conf->copies entries in the devs array */
70 return kzalloc(size, gfp_flags);
71}
72
73static void r10bio_pool_free(void *r10_bio, void *data)
74{
75 kfree(r10_bio);
76}
77
78/* Maximum size of each resync request */
79#define RESYNC_BLOCK_SIZE (64*1024)
80#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
81/* amount of memory to reserve for resync requests */
82#define RESYNC_WINDOW (1024*1024)
83/* maximum number of concurrent requests, memory permitting */
84#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
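/*
 * For reference (derived from the values above, assuming 4KiB pages):
 * RESYNC_PAGES works out to 16 pages per 64KiB request, and RESYNC_DEPTH
 * to 32MiB/64KiB = 512 requests in flight at most.
 */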
85
86/*
87 * When performing a resync, we need to read and compare, so
88 * we need as many pages as there are copies.
89 * When performing a recovery, we need 2 bios, one for read,
90 * one for write (we recover only one drive per r10buf)
91 *
92 */
93static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
94{
95 conf_t *conf = data;
96 struct page *page;
97 r10bio_t *r10_bio;
98 struct bio *bio;
99 int i, j;
100 int nalloc;
101
102 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
103 if (!r10_bio)
104 return NULL;
105
106 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
107 nalloc = conf->copies; /* resync */
108 else
109 nalloc = 2; /* recovery */
110
111 /*
112 * Allocate bios.
113 */
114 for (j = nalloc ; j-- ; ) {
115 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
116 if (!bio)
117 goto out_free_bio;
118 r10_bio->devs[j].bio = bio;
119 }
120 /*
121 * Allocate RESYNC_PAGES data pages and attach them
122 * where needed.
123 */
124 for (j = 0 ; j < nalloc; j++) {
125 bio = r10_bio->devs[j].bio;
126 for (i = 0; i < RESYNC_PAGES; i++) {
127 if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
128 &conf->mddev->recovery)) {
129 /* we can share bv_page's during recovery */
130 struct bio *rbio = r10_bio->devs[0].bio;
131 page = rbio->bi_io_vec[i].bv_page;
132 get_page(page);
133 } else
134 page = alloc_page(gfp_flags);
135 if (unlikely(!page))
136 goto out_free_pages;
137
138 bio->bi_io_vec[i].bv_page = page;
139 }
140 }
141
142 return r10_bio;
143
144out_free_pages:
145 for ( ; i > 0 ; i--)
146 safe_put_page(bio->bi_io_vec[i-1].bv_page);
147 while (j--)
148 for (i = 0; i < RESYNC_PAGES ; i++)
149 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
150 j = -1;
151out_free_bio:
152 while ( ++j < nalloc )
153 bio_put(r10_bio->devs[j].bio);
154 r10bio_pool_free(r10_bio, conf);
155 return NULL;
156}
157
158static void r10buf_pool_free(void *__r10_bio, void *data)
159{
160 int i;
161 conf_t *conf = data;
162 r10bio_t *r10bio = __r10_bio;
163 int j;
164
165 for (j=0; j < conf->copies; j++) {
166 struct bio *bio = r10bio->devs[j].bio;
167 if (bio) {
168 for (i = 0; i < RESYNC_PAGES; i++) {
169 safe_put_page(bio->bi_io_vec[i].bv_page);
170 bio->bi_io_vec[i].bv_page = NULL;
171 }
172 bio_put(bio);
173 }
174 }
175 r10bio_pool_free(r10bio, conf);
176}
177
178static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
179{
180 int i;
181
182 for (i = 0; i < conf->copies; i++) {
183 struct bio **bio = & r10_bio->devs[i].bio;
184 if (!BIO_SPECIAL(*bio))
185 bio_put(*bio);
186 *bio = NULL;
187 }
188}
189
190static void free_r10bio(r10bio_t *r10_bio)
191{
192 conf_t *conf = r10_bio->mddev->private;
193
194 put_all_bios(conf, r10_bio);
195 mempool_free(r10_bio, conf->r10bio_pool);
196}
197
198static void put_buf(r10bio_t *r10_bio)
199{
200 conf_t *conf = r10_bio->mddev->private;
201
202 mempool_free(r10_bio, conf->r10buf_pool);
203
204 lower_barrier(conf);
205}
206
207static void reschedule_retry(r10bio_t *r10_bio)
208{
209 unsigned long flags;
210 mddev_t *mddev = r10_bio->mddev;
211 conf_t *conf = mddev->private;
212
213 spin_lock_irqsave(&conf->device_lock, flags);
214 list_add(&r10_bio->retry_list, &conf->retry_list);
215 conf->nr_queued ++;
216 spin_unlock_irqrestore(&conf->device_lock, flags);
217
218 /* wake up frozen array... */
219 wake_up(&conf->wait_barrier);
220
221 md_wakeup_thread(mddev->thread);
222}
223
224/*
225 * raid_end_bio_io() is called when we have finished servicing a mirrored
226 * operation and are ready to return a success/failure code to the buffer
227 * cache layer.
228 */
229static void raid_end_bio_io(r10bio_t *r10_bio)
230{
231 struct bio *bio = r10_bio->master_bio;
232 int done;
233 conf_t *conf = r10_bio->mddev->private;
234
235 if (bio->bi_phys_segments) {
236 unsigned long flags;
237 spin_lock_irqsave(&conf->device_lock, flags);
238 bio->bi_phys_segments--;
239 done = (bio->bi_phys_segments == 0);
240 spin_unlock_irqrestore(&conf->device_lock, flags);
241 } else
242 done = 1;
243 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
244 clear_bit(BIO_UPTODATE, &bio->bi_flags);
245 if (done) {
246 bio_endio(bio, 0);
247 /*
248 * Wake up any possible resync thread that waits for the device
249 * to go idle.
250 */
251 allow_barrier(conf);
252 }
253 free_r10bio(r10_bio);
254}
255
256/*
257 * Update disk head position estimator based on IRQ completion info.
258 */
259static inline void update_head_pos(int slot, r10bio_t *r10_bio)
260{
261 conf_t *conf = r10_bio->mddev->private;
262
263 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
264 r10_bio->devs[slot].addr + (r10_bio->sectors);
265}
266
267/*
268 * Find the disk number which triggered the given bio
269 */
270static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
271 struct bio *bio, int *slotp)
272{
273 int slot;
274
275 for (slot = 0; slot < conf->copies; slot++)
276 if (r10_bio->devs[slot].bio == bio)
277 break;
278
279 BUG_ON(slot == conf->copies);
280 update_head_pos(slot, r10_bio);
281
282 if (slotp)
283 *slotp = slot;
284 return r10_bio->devs[slot].devnum;
285}
286
287static void raid10_end_read_request(struct bio *bio, int error)
288{
289 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
290 r10bio_t *r10_bio = bio->bi_private;
291 int slot, dev;
292 conf_t *conf = r10_bio->mddev->private;
293
294
295 slot = r10_bio->read_slot;
296 dev = r10_bio->devs[slot].devnum;
297 /*
298 * this branch is our 'one mirror IO has finished' event handler:
299 */
300 update_head_pos(slot, r10_bio);
301
302 if (uptodate) {
303 /*
304 * Set R10BIO_Uptodate in our master bio, so that
305 * we will return a good error code to the higher
306 * levels even if IO on some other mirrored buffer fails.
307 *
308 * The 'master' represents the composite IO operation to
309 * user-side. So if something waits for IO, then it will
310 * wait for the 'master' bio.
311 */
312 set_bit(R10BIO_Uptodate, &r10_bio->state);
313 raid_end_bio_io(r10_bio);
314 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
315 } else {
316 /*
317 * oops, read error - keep the refcount on the rdev
318 */
319 char b[BDEVNAME_SIZE];
320 printk_ratelimited(KERN_ERR
321 "md/raid10:%s: %s: rescheduling sector %llu\n",
322 mdname(conf->mddev),
323 bdevname(conf->mirrors[dev].rdev->bdev, b),
324 (unsigned long long)r10_bio->sector);
325 set_bit(R10BIO_ReadError, &r10_bio->state);
326 reschedule_retry(r10_bio);
327 }
328}
329
330static void close_write(r10bio_t *r10_bio)
331{
332 /* clear the bitmap if all writes complete successfully */
333 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
334 r10_bio->sectors,
335 !test_bit(R10BIO_Degraded, &r10_bio->state),
336 0);
337 md_write_end(r10_bio->mddev);
338}
339
340static void one_write_done(r10bio_t *r10_bio)
341{
342 if (atomic_dec_and_test(&r10_bio->remaining)) {
343 if (test_bit(R10BIO_WriteError, &r10_bio->state))
344 reschedule_retry(r10_bio);
345 else {
346 close_write(r10_bio);
347 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
348 reschedule_retry(r10_bio);
349 else
350 raid_end_bio_io(r10_bio);
351 }
352 }
353}
354
355static void raid10_end_write_request(struct bio *bio, int error)
356{
357 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
358 r10bio_t *r10_bio = bio->bi_private;
359 int dev;
360 int dec_rdev = 1;
361 conf_t *conf = r10_bio->mddev->private;
362 int slot;
363
364 dev = find_bio_disk(conf, r10_bio, bio, &slot);
365
366 /*
367 * this branch is our 'one mirror IO has finished' event handler:
368 */
369 if (!uptodate) {
370 set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
371 set_bit(R10BIO_WriteError, &r10_bio->state);
372 dec_rdev = 0;
373 } else {
374 /*
375 * Set R10BIO_Uptodate in our master bio, so that
376 * we will return a good error code to the higher
377 * levels even if IO on some other mirrored buffer fails.
378 *
379 * The 'master' represents the composite IO operation to
380 * user-side. So if something waits for IO, then it will
381 * wait for the 'master' bio.
382 */
383 sector_t first_bad;
384 int bad_sectors;
385
386 set_bit(R10BIO_Uptodate, &r10_bio->state);
387
388 /* Maybe we can clear some bad blocks. */
389 if (is_badblock(conf->mirrors[dev].rdev,
390 r10_bio->devs[slot].addr,
391 r10_bio->sectors,
392 &first_bad, &bad_sectors)) {
393 bio_put(bio);
394 r10_bio->devs[slot].bio = IO_MADE_GOOD;
395 dec_rdev = 0;
396 set_bit(R10BIO_MadeGood, &r10_bio->state);
397 }
398 }
399
400 /*
401 *
402 * Let's see if all mirrored write operations have finished
403 * already.
404 */
405 one_write_done(r10_bio);
406 if (dec_rdev)
407 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
408}
409
410
411/*
412 * RAID10 layout manager
413 * As well as the chunksize and raid_disks count, there are two
414 * parameters: near_copies and far_copies.
415 * near_copies * far_copies must be <= raid_disks.
416 * Normally one of these will be 1.
417 * If both are 1, we get raid0.
418 * If near_copies == raid_disks, we get raid1.
419 *
420 * Chunks are laid out in raid0 style with near_copies copies of the
421 * first chunk, followed by near_copies copies of the next chunk and
422 * so on.
423 * If far_copies > 1, then after 1/far_copies of the array has been assigned
424 * as described above, we start again with a device offset of near_copies.
425 * So we effectively have another copy of the whole array further down all
426 * the drives, but with blocks on different drives.
427 * With this layout, a block is never stored twice on the same device.
428 *
429 * raid10_find_phys finds the sector offset of a given virtual sector
430 * on each device that it is on.
431 *
432 * raid10_find_virt does the reverse mapping, from a device and a
433 * sector offset to a virtual address
434 */
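/*
 * Worked example (geometry chosen for illustration only): with
 * raid_disks=4, near_copies=2, far_copies=1 and 64KiB (128-sector) chunks,
 * virtual sector 300 falls in chunk 2 at offset 44. chunk*near_copies = 4
 * gives dev = 4 % 4 = 0 and stripe = 1, so raid10_find_phys records both
 * copies at device sector 44 + 1*128 = 172, on disks 0 and 1.
 */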
435
436static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
437{
438 int n,f;
439 sector_t sector;
440 sector_t chunk;
441 sector_t stripe;
442 int dev;
443
444 int slot = 0;
445
446 /* now calculate first sector/dev */
447 chunk = r10bio->sector >> conf->chunk_shift;
448 sector = r10bio->sector & conf->chunk_mask;
449
450 chunk *= conf->near_copies;
451 stripe = chunk;
452 dev = sector_div(stripe, conf->raid_disks);
453 if (conf->far_offset)
454 stripe *= conf->far_copies;
455
456 sector += stripe << conf->chunk_shift;
457
458 /* and calculate all the others */
459 for (n=0; n < conf->near_copies; n++) {
460 int d = dev;
461 sector_t s = sector;
462 r10bio->devs[slot].addr = sector;
463 r10bio->devs[slot].devnum = d;
464 slot++;
465
466 for (f = 1; f < conf->far_copies; f++) {
467 d += conf->near_copies;
468 if (d >= conf->raid_disks)
469 d -= conf->raid_disks;
470 s += conf->stride;
471 r10bio->devs[slot].devnum = d;
472 r10bio->devs[slot].addr = s;
473 slot++;
474 }
475 dev++;
476 if (dev >= conf->raid_disks) {
477 dev = 0;
478 sector += (conf->chunk_mask + 1);
479 }
480 }
481 BUG_ON(slot != conf->copies);
482}
483
484static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
485{
486 sector_t offset, chunk, vchunk;
487
488 offset = sector & conf->chunk_mask;
489 if (conf->far_offset) {
490 int fc;
491 chunk = sector >> conf->chunk_shift;
492 fc = sector_div(chunk, conf->far_copies);
493 dev -= fc * conf->near_copies;
494 if (dev < 0)
495 dev += conf->raid_disks;
496 } else {
497 while (sector >= conf->stride) {
498 sector -= conf->stride;
499 if (dev < conf->near_copies)
500 dev += conf->raid_disks - conf->near_copies;
501 else
502 dev -= conf->near_copies;
503 }
504 chunk = sector >> conf->chunk_shift;
505 }
506 vchunk = chunk * conf->raid_disks + dev;
507 sector_div(vchunk, conf->near_copies);
508 return (vchunk << conf->chunk_shift) + offset;
509}
510
511/**
512 * raid10_mergeable_bvec -- tell bio layer if two requests can be merged
513 * @q: request queue
514 * @bvm: properties of new bio
515 * @biovec: the request that could be merged to it.
516 *
517 * Return amount of bytes we can accept at this offset
518 * If near_copies == raid_disks, there are no striping issues,
519 * but in that case, the function isn't called at all.
520 */
521static int raid10_mergeable_bvec(struct request_queue *q,
522 struct bvec_merge_data *bvm,
523 struct bio_vec *biovec)
524{
525 mddev_t *mddev = q->queuedata;
526 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
527 int max;
528 unsigned int chunk_sectors = mddev->chunk_sectors;
529 unsigned int bio_sectors = bvm->bi_size >> 9;
530
531 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
532 if (max < 0) max = 0; /* bio_add cannot handle a negative return */
533 if (max <= biovec->bv_len && bio_sectors == 0)
534 return biovec->bv_len;
535 else
536 return max;
537}
538
539/*
540 * This routine returns the disk from which the requested read should
541 * be done. There is a per-array 'next expected sequential IO' sector
542 * number - if this matches on the next IO then we use the last disk.
543 * There is also a per-disk 'last known head position' sector that is
544 * maintained from IRQ contexts; both the normal and the resync IO
545 * completion handlers update this position correctly. If there is no
546 * perfect sequential match then we pick the disk whose head is closest.
547 *
548 * If there are 2 mirrors in the same 2 devices, performance degrades
549 * because position is mirror-based, not device-based.
550 *
551 * The rdev for the device selected will have nr_pending incremented.
552 */
553
554/*
555 * FIXME: possibly should rethink readbalancing and do it differently
556 * depending on near_copies / far_copies geometry.
557 */
558static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
559{
560 const sector_t this_sector = r10_bio->sector;
561 int disk, slot;
562 int sectors = r10_bio->sectors;
563 int best_good_sectors;
564 sector_t new_distance, best_dist;
565 mdk_rdev_t *rdev;
566 int do_balance;
567 int best_slot;
568
569 raid10_find_phys(conf, r10_bio);
570 rcu_read_lock();
571retry:
572 sectors = r10_bio->sectors;
573 best_slot = -1;
574 best_dist = MaxSector;
575 best_good_sectors = 0;
576 do_balance = 1;
577 /*
578 * Check if we can balance. We can balance on the whole
579 * device if no resync is going on (recovery is ok), or below
580 * the resync window. We take the first readable disk when
581 * above the resync window.
582 */
583 if (conf->mddev->recovery_cp < MaxSector
584 && (this_sector + sectors >= conf->next_resync))
585 do_balance = 0;
586
587 for (slot = 0; slot < conf->copies ; slot++) {
588 sector_t first_bad;
589 int bad_sectors;
590 sector_t dev_sector;
591
592 if (r10_bio->devs[slot].bio == IO_BLOCKED)
593 continue;
594 disk = r10_bio->devs[slot].devnum;
595 rdev = rcu_dereference(conf->mirrors[disk].rdev);
596 if (rdev == NULL)
597 continue;
598 if (!test_bit(In_sync, &rdev->flags))
599 continue;
600
601 dev_sector = r10_bio->devs[slot].addr;
602 if (is_badblock(rdev, dev_sector, sectors,
603 &first_bad, &bad_sectors)) {
604 if (best_dist < MaxSector)
605 /* Already have a better slot */
606 continue;
607 if (first_bad <= dev_sector) {
608 /* Cannot read here. If this is the
609 * 'primary' device, then we must not read
610 * beyond 'bad_sectors' from another device.
611 */
612 bad_sectors -= (dev_sector - first_bad);
613 if (!do_balance && sectors > bad_sectors)
614 sectors = bad_sectors;
615 if (best_good_sectors > sectors)
616 best_good_sectors = sectors;
617 } else {
618 sector_t good_sectors =
619 first_bad - dev_sector;
620 if (good_sectors > best_good_sectors) {
621 best_good_sectors = good_sectors;
622 best_slot = slot;
623 }
624 if (!do_balance)
625 /* Must read from here */
626 break;
627 }
628 continue;
629 } else
630 best_good_sectors = sectors;
631
632 if (!do_balance)
633 break;
634
635 /* This optimisation is debatable, and completely destroys
636 * sequential read speed for 'far copies' arrays. So only
637 * keep it for 'near' arrays, and review those later.
638 */
639 if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
640 break;
641
642 /* for far > 1 always use the lowest address */
643 if (conf->far_copies > 1)
644 new_distance = r10_bio->devs[slot].addr;
645 else
646 new_distance = abs(r10_bio->devs[slot].addr -
647 conf->mirrors[disk].head_position);
648 if (new_distance < best_dist) {
649 best_dist = new_distance;
650 best_slot = slot;
651 }
652 }
653 if (slot == conf->copies)
654 slot = best_slot;
655
656 if (slot >= 0) {
657 disk = r10_bio->devs[slot].devnum;
658 rdev = rcu_dereference(conf->mirrors[disk].rdev);
659 if (!rdev)
660 goto retry;
661 atomic_inc(&rdev->nr_pending);
662 if (test_bit(Faulty, &rdev->flags)) {
663 /* Cannot risk returning a device that failed
664 * before we inc'ed nr_pending
665 */
666 rdev_dec_pending(rdev, conf->mddev);
667 goto retry;
668 }
669 r10_bio->read_slot = slot;
670 } else
671 disk = -1;
672 rcu_read_unlock();
673 *max_sectors = best_good_sectors;
674
675 return disk;
676}
677
678static int raid10_congested(void *data, int bits)
679{
680 mddev_t *mddev = data;
681 conf_t *conf = mddev->private;
682 int i, ret = 0;
683
684 if (mddev_congested(mddev, bits))
685 return 1;
686 rcu_read_lock();
687 for (i = 0; i < conf->raid_disks && ret == 0; i++) {
688 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
689 if (rdev && !test_bit(Faulty, &rdev->flags)) {
690 struct request_queue *q = bdev_get_queue(rdev->bdev);
691
692 ret |= bdi_congested(&q->backing_dev_info, bits);
693 }
694 }
695 rcu_read_unlock();
696 return ret;
697}
698
699static void flush_pending_writes(conf_t *conf)
700{
701 /* Any writes that have been queued but are awaiting
702 * bitmap updates get flushed here.
703 */
704 spin_lock_irq(&conf->device_lock);
705
706 if (conf->pending_bio_list.head) {
707 struct bio *bio;
708 bio = bio_list_get(&conf->pending_bio_list);
709 spin_unlock_irq(&conf->device_lock);
710 /* flush any pending bitmap writes to disk
711 * before proceeding w/ I/O */
712 bitmap_unplug(conf->mddev->bitmap);
713
714 while (bio) { /* submit pending writes */
715 struct bio *next = bio->bi_next;
716 bio->bi_next = NULL;
717 generic_make_request(bio);
718 bio = next;
719 }
720 } else
721 spin_unlock_irq(&conf->device_lock);
722}
723
724/* Barriers....
725 * Sometimes we need to suspend IO while we do something else,
726 * either some resync/recovery, or reconfigure the array.
727 * To do this we raise a 'barrier'.
728 * The 'barrier' is a counter that can be raised multiple times
729 * to count how many activities are happening which preclude
730 * normal IO.
731 * We can only raise the barrier if there is no pending IO.
732 * i.e. if nr_pending == 0.
733 * We choose only to raise the barrier if no-one is waiting for the
734 * barrier to go down. This means that as soon as an IO request
735 * is ready, no other operations which require a barrier will start
736 * until the IO request has had a chance.
737 *
738 * So: regular IO calls 'wait_barrier'. When that returns there
739 * is no background IO happening. It must arrange to call
740 * allow_barrier when it has finished its IO.
741 * Background IO calls must call raise_barrier. Once that returns
742 * there is no normal IO happening. It must arrange to call
743 * lower_barrier when the particular background IO completes.
744 */
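/*
 * In short (summarising the description above), the pairing used below is:
 *
 *	regular IO:          wait_barrier(conf);  ... submit ...  allow_barrier(conf);
 *	resync/recovery IO:  raise_barrier(conf, ...);  ... sync IO ...  lower_barrier(conf);
 *
 * freeze_array()/unfreeze_array() additionally wait for in-flight requests
 * to complete or be queued for retry before error handling proceeds.
 */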
745
746static void raise_barrier(conf_t *conf, int force)
747{
748 BUG_ON(force && !conf->barrier);
749 spin_lock_irq(&conf->resync_lock);
750
751 /* Wait until no block IO is waiting (unless 'force') */
752 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
753 conf->resync_lock, );
754
755 /* block any new IO from starting */
756 conf->barrier++;
757
758 /* Now wait for all pending IO to complete */
759 wait_event_lock_irq(conf->wait_barrier,
760 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
761 conf->resync_lock, );
762
763 spin_unlock_irq(&conf->resync_lock);
764}
765
766static void lower_barrier(conf_t *conf)
767{
768 unsigned long flags;
769 spin_lock_irqsave(&conf->resync_lock, flags);
770 conf->barrier--;
771 spin_unlock_irqrestore(&conf->resync_lock, flags);
772 wake_up(&conf->wait_barrier);
773}
774
775static void wait_barrier(conf_t *conf)
776{
777 spin_lock_irq(&conf->resync_lock);
778 if (conf->barrier) {
779 conf->nr_waiting++;
780 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
781 conf->resync_lock,
782 );
783 conf->nr_waiting--;
784 }
785 conf->nr_pending++;
786 spin_unlock_irq(&conf->resync_lock);
787}
788
789static void allow_barrier(conf_t *conf)
790{
791 unsigned long flags;
792 spin_lock_irqsave(&conf->resync_lock, flags);
793 conf->nr_pending--;
794 spin_unlock_irqrestore(&conf->resync_lock, flags);
795 wake_up(&conf->wait_barrier);
796}
797
798static void freeze_array(conf_t *conf)
799{
800 /* stop syncio and normal IO and wait for everything to
801 * go quiet.
802 * We increment barrier and nr_waiting, and then
803 * wait until nr_pending matches nr_queued+1
804 * This is called in the context of one normal IO request
805 * that has failed. Thus any sync request that might be pending
806 * will be blocked by nr_pending, and we need to wait for
807 * pending IO requests to complete or be queued for re-try.
808 * Thus the number queued (nr_queued) plus this request (1)
809 * must match the number of pending IOs (nr_pending) before
810 * we continue.
811 */
812 spin_lock_irq(&conf->resync_lock);
813 conf->barrier++;
814 conf->nr_waiting++;
815 wait_event_lock_irq(conf->wait_barrier,
816 conf->nr_pending == conf->nr_queued+1,
817 conf->resync_lock,
818 flush_pending_writes(conf));
819
820 spin_unlock_irq(&conf->resync_lock);
821}
822
823static void unfreeze_array(conf_t *conf)
824{
825 /* reverse the effect of the freeze */
826 spin_lock_irq(&conf->resync_lock);
827 conf->barrier--;
828 conf->nr_waiting--;
829 wake_up(&conf->wait_barrier);
830 spin_unlock_irq(&conf->resync_lock);
831}
832
833static int make_request(mddev_t *mddev, struct bio * bio)
834{
835 conf_t *conf = mddev->private;
836 mirror_info_t *mirror;
837 r10bio_t *r10_bio;
838 struct bio *read_bio;
839 int i;
840 int chunk_sects = conf->chunk_mask + 1;
841 const int rw = bio_data_dir(bio);
842 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
843 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
844 unsigned long flags;
845 mdk_rdev_t *blocked_rdev;
846 int plugged;
847 int sectors_handled;
848 int max_sectors;
849
850 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
851 md_flush_request(mddev, bio);
852 return 0;
853 }
854
855 /* If this request crosses a chunk boundary, we need to
856 * split it. This will only happen for 1 PAGE (or less) requests.
857 */
858 if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
859 > chunk_sects &&
860 conf->near_copies < conf->raid_disks)) {
861 struct bio_pair *bp;
862 /* Sanity check -- queue functions should prevent this happening */
863 if (bio->bi_vcnt != 1 ||
864 bio->bi_idx != 0)
865 goto bad_map;
866 /* This is a one page bio that upper layers
867 * refuse to split for us, so we need to split it.
868 */
869 bp = bio_split(bio,
870 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
871
872 /* Each of these 'make_request' calls will call 'wait_barrier'.
873 * If the first succeeds but the second blocks due to the resync
874 * thread raising the barrier, we will deadlock because the
875 * IO to the underlying device will be queued in generic_make_request
876 * and will never complete, so will never reduce nr_pending.
877 * So increment nr_waiting here so no new raise_barriers will
878 * succeed, and so the second wait_barrier cannot block.
879 */
880 spin_lock_irq(&conf->resync_lock);
881 conf->nr_waiting++;
882 spin_unlock_irq(&conf->resync_lock);
883
884 if (make_request(mddev, &bp->bio1))
885 generic_make_request(&bp->bio1);
886 if (make_request(mddev, &bp->bio2))
887 generic_make_request(&bp->bio2);
888
889 spin_lock_irq(&conf->resync_lock);
890 conf->nr_waiting--;
891 wake_up(&conf->wait_barrier);
892 spin_unlock_irq(&conf->resync_lock);
893
894 bio_pair_release(bp);
895 return 0;
896 bad_map:
897 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
898 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
899 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
900
901 bio_io_error(bio);
902 return 0;
903 }
904
905 md_write_start(mddev, bio);
906
907 /*
908 * Register the new request and wait if the reconstruction
909 * thread has put up a bar for new requests.
910 * Continue immediately if no resync is active currently.
911 */
912 wait_barrier(conf);
913
914 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
915
916 r10_bio->master_bio = bio;
917 r10_bio->sectors = bio->bi_size >> 9;
918
919 r10_bio->mddev = mddev;
920 r10_bio->sector = bio->bi_sector;
921 r10_bio->state = 0;
922
923 /* We might need to issue multiple reads to different
924 * devices if there are bad blocks around, so we keep
925 * track of the number of reads in bio->bi_phys_segments.
926 * If this is 0, there is only one r10_bio and no locking
927 * will be needed when the request completes. If it is
928 * non-zero, then it is the number of not-completed requests.
929 */
930 bio->bi_phys_segments = 0;
931 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
932
933 if (rw == READ) {
934 /*
935 * read balancing logic:
936 */
937 int disk;
938 int slot;
939
940read_again:
941 disk = read_balance(conf, r10_bio, &max_sectors);
942 slot = r10_bio->read_slot;
943 if (disk < 0) {
944 raid_end_bio_io(r10_bio);
945 return 0;
946 }
947 mirror = conf->mirrors + disk;
948
949 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
950 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
951 max_sectors);
952
953 r10_bio->devs[slot].bio = read_bio;
954
955 read_bio->bi_sector = r10_bio->devs[slot].addr +
956 mirror->rdev->data_offset;
957 read_bio->bi_bdev = mirror->rdev->bdev;
958 read_bio->bi_end_io = raid10_end_read_request;
959 read_bio->bi_rw = READ | do_sync;
960 read_bio->bi_private = r10_bio;
961
962 if (max_sectors < r10_bio->sectors) {
963 /* Could not read all from this device, so we will
964 * need another r10_bio.
965 */
966 sectors_handled = (r10_bio->sector + max_sectors
967 - bio->bi_sector);
968 r10_bio->sectors = max_sectors;
969 spin_lock_irq(&conf->device_lock);
970 if (bio->bi_phys_segments == 0)
971 bio->bi_phys_segments = 2;
972 else
973 bio->bi_phys_segments++;
974 spin_unlock_irq(&conf->device_lock);
975 /* Cannot call generic_make_request directly
976 * as that will be queued in __generic_make_request
977 * and subsequent mempool_alloc might block
978 * waiting for it. so hand bio over to raid10d.
979 */
980 reschedule_retry(r10_bio);
981
982 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
983
984 r10_bio->master_bio = bio;
985 r10_bio->sectors = ((bio->bi_size >> 9)
986 - sectors_handled);
987 r10_bio->state = 0;
988 r10_bio->mddev = mddev;
989 r10_bio->sector = bio->bi_sector + sectors_handled;
990 goto read_again;
991 } else
992 generic_make_request(read_bio);
993 return 0;
994 }
995
996 /*
997 * WRITE:
998 */
999 /* first select target devices under rcu_lock and
1000 * inc refcount on their rdev. Record them by setting
1001 * bios[x] to bio
1002 * If there are known/acknowledged bad blocks on any device
1003 * on which we have seen a write error, we want to avoid
1004 * writing to those blocks. This potentially requires several
1005 * writes to write around the bad blocks. Each set of writes
1006 * gets its own r10_bio with a set of bios attached. The number
1007 * of r10_bios is recorded in bio->bi_phys_segments just as with
1008 * the read case.
1009 */
1010 plugged = mddev_check_plugged(mddev);
1011
1012 raid10_find_phys(conf, r10_bio);
1013retry_write:
1014 blocked_rdev = NULL;
1015 rcu_read_lock();
1016 max_sectors = r10_bio->sectors;
1017
1018 for (i = 0; i < conf->copies; i++) {
1019 int d = r10_bio->devs[i].devnum;
1020 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
1021 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1022 atomic_inc(&rdev->nr_pending);
1023 blocked_rdev = rdev;
1024 break;
1025 }
1026 r10_bio->devs[i].bio = NULL;
1027 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1028 set_bit(R10BIO_Degraded, &r10_bio->state);
1029 continue;
1030 }
1031 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1032 sector_t first_bad;
1033 sector_t dev_sector = r10_bio->devs[i].addr;
1034 int bad_sectors;
1035 int is_bad;
1036
1037 is_bad = is_badblock(rdev, dev_sector,
1038 max_sectors,
1039 &first_bad, &bad_sectors);
1040 if (is_bad < 0) {
1041 /* Mustn't write here until the bad block
1042 * is acknowledged
1043 */
1044 atomic_inc(&rdev->nr_pending);
1045 set_bit(BlockedBadBlocks, &rdev->flags);
1046 blocked_rdev = rdev;
1047 break;
1048 }
1049 if (is_bad && first_bad <= dev_sector) {
1050 /* Cannot write here at all */
1051 bad_sectors -= (dev_sector - first_bad);
1052 if (bad_sectors < max_sectors)
1053 /* Mustn't write more than bad_sectors
1054 * to other devices yet
1055 */
1056 max_sectors = bad_sectors;
1057 /* We don't set R10BIO_Degraded as that
1058 * only applies if the disk is missing,
1059 * so it might be re-added, and we want to
1060 * know to recover this chunk.
1061 * In this case the device is here, and the
1062 * fact that this chunk is not in-sync is
1063 * recorded in the bad block log.
1064 */
1065 continue;
1066 }
1067 if (is_bad) {
1068 int good_sectors = first_bad - dev_sector;
1069 if (good_sectors < max_sectors)
1070 max_sectors = good_sectors;
1071 }
1072 }
1073 r10_bio->devs[i].bio = bio;
1074 atomic_inc(&rdev->nr_pending);
1075 }
1076 rcu_read_unlock();
1077
1078 if (unlikely(blocked_rdev)) {
1079 /* Have to wait for this device to get unblocked, then retry */
1080 int j;
1081 int d;
1082
1083 for (j = 0; j < i; j++)
1084 if (r10_bio->devs[j].bio) {
1085 d = r10_bio->devs[j].devnum;
1086 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1087 }
1088 allow_barrier(conf);
1089 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1090 wait_barrier(conf);
1091 goto retry_write;
1092 }
1093
1094 if (max_sectors < r10_bio->sectors) {
1095 /* We are splitting this into multiple parts, so
1096 * we need to prepare for allocating another r10_bio.
1097 */
1098 r10_bio->sectors = max_sectors;
1099 spin_lock_irq(&conf->device_lock);
1100 if (bio->bi_phys_segments == 0)
1101 bio->bi_phys_segments = 2;
1102 else
1103 bio->bi_phys_segments++;
1104 spin_unlock_irq(&conf->device_lock);
1105 }
1106 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1107
1108 atomic_set(&r10_bio->remaining, 1);
1109 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1110
1111 for (i = 0; i < conf->copies; i++) {
1112 struct bio *mbio;
1113 int d = r10_bio->devs[i].devnum;
1114 if (!r10_bio->devs[i].bio)
1115 continue;
1116
1117 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1118 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1119 max_sectors);
1120 r10_bio->devs[i].bio = mbio;
1121
1122 mbio->bi_sector = (r10_bio->devs[i].addr+
1123 conf->mirrors[d].rdev->data_offset);
1124 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1125 mbio->bi_end_io = raid10_end_write_request;
1126 mbio->bi_rw = WRITE | do_sync | do_fua;
1127 mbio->bi_private = r10_bio;
1128
1129 atomic_inc(&r10_bio->remaining);
1130 spin_lock_irqsave(&conf->device_lock, flags);
1131 bio_list_add(&conf->pending_bio_list, mbio);
1132 spin_unlock_irqrestore(&conf->device_lock, flags);
1133 }
1134
1135 /* Don't remove the bias on 'remaining' (one_write_done) until
1136 * after checking if we need to go around again.
1137 */
1138
1139 if (sectors_handled < (bio->bi_size >> 9)) {
1140 one_write_done(r10_bio);
1141 /* We need another r10_bio. It has already been counted
1142 * in bio->bi_phys_segments.
1143 */
1144 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1145
1146 r10_bio->master_bio = bio;
1147 r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1148
1149 r10_bio->mddev = mddev;
1150 r10_bio->sector = bio->bi_sector + sectors_handled;
1151 r10_bio->state = 0;
1152 goto retry_write;
1153 }
1154 one_write_done(r10_bio);
1155
1156 /* In case raid10d snuck in to freeze_array */
1157 wake_up(&conf->wait_barrier);
1158
1159 if (do_sync || !mddev->bitmap || !plugged)
1160 md_wakeup_thread(mddev->thread);
1161 return 0;
1162}
1163
1164static void status(struct seq_file *seq, mddev_t *mddev)
1165{
1166 conf_t *conf = mddev->private;
1167 int i;
1168
1169 if (conf->near_copies < conf->raid_disks)
1170 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1171 if (conf->near_copies > 1)
1172 seq_printf(seq, " %d near-copies", conf->near_copies);
1173 if (conf->far_copies > 1) {
1174 if (conf->far_offset)
1175 seq_printf(seq, " %d offset-copies", conf->far_copies);
1176 else
1177 seq_printf(seq, " %d far-copies", conf->far_copies);
1178 }
1179 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1180 conf->raid_disks - mddev->degraded);
1181 for (i = 0; i < conf->raid_disks; i++)
1182 seq_printf(seq, "%s",
1183 conf->mirrors[i].rdev &&
1184 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1185 seq_printf(seq, "]");
1186}
1187
1188/* check if there are enough drives for
1189 * every block to appear on at least one.
1190 * Don't consider the device numbered 'ignore'
1191 * as we might be about to remove it.
1192 */
1193static int enough(conf_t *conf, int ignore)
1194{
1195 int first = 0;
1196
1197 do {
1198 int n = conf->copies;
1199 int cnt = 0;
1200 while (n--) {
1201 if (conf->mirrors[first].rdev &&
1202 first != ignore)
1203 cnt++;
1204 first = (first+1) % conf->raid_disks;
1205 }
1206 if (cnt == 0)
1207 return 0;
1208 } while (first != 0);
1209 return 1;
1210}
1211
1212static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1213{
1214 char b[BDEVNAME_SIZE];
1215 conf_t *conf = mddev->private;
1216
1217 /*
1218 * If it is not operational, then we have already marked it as dead
1219 * else if it is the last working disk, ignore the error, let the
1220 * next level up know.
1221 * else mark the drive as failed
1222 */
1223 if (test_bit(In_sync, &rdev->flags)
1224 && !enough(conf, rdev->raid_disk))
1225 /*
1226 * Don't fail the drive, just return an IO error.
1227 */
1228 return;
1229 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1230 unsigned long flags;
1231 spin_lock_irqsave(&conf->device_lock, flags);
1232 mddev->degraded++;
1233 spin_unlock_irqrestore(&conf->device_lock, flags);
1234 /*
1235 * if recovery is running, make sure it aborts.
1236 */
1237 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1238 }
1239 set_bit(Blocked, &rdev->flags);
1240 set_bit(Faulty, &rdev->flags);
1241 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1242 printk(KERN_ALERT
1243 "md/raid10:%s: Disk failure on %s, disabling device.\n"
1244 "md/raid10:%s: Operation continuing on %d devices.\n",
1245 mdname(mddev), bdevname(rdev->bdev, b),
1246 mdname(mddev), conf->raid_disks - mddev->degraded);
1247}
1248
1249static void print_conf(conf_t *conf)
1250{
1251 int i;
1252 mirror_info_t *tmp;
1253
1254 printk(KERN_DEBUG "RAID10 conf printout:\n");
1255 if (!conf) {
1256 printk(KERN_DEBUG "(!conf)\n");
1257 return;
1258 }
1259 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1260 conf->raid_disks);
1261
1262 for (i = 0; i < conf->raid_disks; i++) {
1263 char b[BDEVNAME_SIZE];
1264 tmp = conf->mirrors + i;
1265 if (tmp->rdev)
1266 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1267 i, !test_bit(In_sync, &tmp->rdev->flags),
1268 !test_bit(Faulty, &tmp->rdev->flags),
1269 bdevname(tmp->rdev->bdev,b));
1270 }
1271}
1272
1273static void close_sync(conf_t *conf)
1274{
1275 wait_barrier(conf);
1276 allow_barrier(conf);
1277
1278 mempool_destroy(conf->r10buf_pool);
1279 conf->r10buf_pool = NULL;
1280}
1281
1282static int raid10_spare_active(mddev_t *mddev)
1283{
1284 int i;
1285 conf_t *conf = mddev->private;
1286 mirror_info_t *tmp;
1287 int count = 0;
1288 unsigned long flags;
1289
1290 /*
1291 * Find all non-in_sync disks within the RAID10 configuration
1292 * and mark them in_sync
1293 */
1294 for (i = 0; i < conf->raid_disks; i++) {
1295 tmp = conf->mirrors + i;
1296 if (tmp->rdev
1297 && !test_bit(Faulty, &tmp->rdev->flags)
1298 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1299 count++;
1300 sysfs_notify_dirent(tmp->rdev->sysfs_state);
1301 }
1302 }
1303 spin_lock_irqsave(&conf->device_lock, flags);
1304 mddev->degraded -= count;
1305 spin_unlock_irqrestore(&conf->device_lock, flags);
1306
1307 print_conf(conf);
1308 return count;
1309}
1310
1311
1312static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1313{
1314 conf_t *conf = mddev->private;
1315 int err = -EEXIST;
1316 int mirror;
1317 int first = 0;
1318 int last = conf->raid_disks - 1;
1319
1320 if (mddev->recovery_cp < MaxSector)
1321 /* only hot-add to in-sync arrays, as recovery is
1322 * very different from resync
1323 */
1324 return -EBUSY;
1325 if (!enough(conf, -1))
1326 return -EINVAL;
1327
1328 if (rdev->raid_disk >= 0)
1329 first = last = rdev->raid_disk;
1330
1331 if (rdev->saved_raid_disk >= first &&
1332 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1333 mirror = rdev->saved_raid_disk;
1334 else
1335 mirror = first;
1336 for ( ; mirror <= last ; mirror++) {
1337 mirror_info_t *p = &conf->mirrors[mirror];
1338 if (p->recovery_disabled == mddev->recovery_disabled)
1339 continue;
1340 if (p->rdev)
1341 continue;
1342
1343 disk_stack_limits(mddev->gendisk, rdev->bdev,
1344 rdev->data_offset << 9);
1345 /* as we don't honour merge_bvec_fn, we must
1346 * never risk violating it, so limit
1347 * ->max_segments to one lying within a single
1348 * page, as a one page request is never in
1349 * violation.
1350 */
1351 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1352 blk_queue_max_segments(mddev->queue, 1);
1353 blk_queue_segment_boundary(mddev->queue,
1354 PAGE_CACHE_SIZE - 1);
1355 }
1356
1357 p->head_position = 0;
1358 rdev->raid_disk = mirror;
1359 err = 0;
1360 if (rdev->saved_raid_disk != mirror)
1361 conf->fullsync = 1;
1362 rcu_assign_pointer(p->rdev, rdev);
1363 break;
1364 }
1365
1366 md_integrity_add_rdev(rdev, mddev);
1367 print_conf(conf);
1368 return err;
1369}
1370
1371static int raid10_remove_disk(mddev_t *mddev, int number)
1372{
1373 conf_t *conf = mddev->private;
1374 int err = 0;
1375 mdk_rdev_t *rdev;
1376 mirror_info_t *p = conf->mirrors+ number;
1377
1378 print_conf(conf);
1379 rdev = p->rdev;
1380 if (rdev) {
1381 if (test_bit(In_sync, &rdev->flags) ||
1382 atomic_read(&rdev->nr_pending)) {
1383 err = -EBUSY;
1384 goto abort;
1385 }
1386 /* Only remove faulty devices if recovery
1387 * is not possible.
1388 */
1389 if (!test_bit(Faulty, &rdev->flags) &&
1390 mddev->recovery_disabled != p->recovery_disabled &&
1391 enough(conf, -1)) {
1392 err = -EBUSY;
1393 goto abort;
1394 }
1395 p->rdev = NULL;
1396 synchronize_rcu();
1397 if (atomic_read(&rdev->nr_pending)) {
1398 /* lost the race, try later */
1399 err = -EBUSY;
1400 p->rdev = rdev;
1401 goto abort;
1402 }
1403 err = md_integrity_register(mddev);
1404 }
1405abort:
1406
1407 print_conf(conf);
1408 return err;
1409}
1410
1411
1412static void end_sync_read(struct bio *bio, int error)
1413{
1414 r10bio_t *r10_bio = bio->bi_private;
1415 conf_t *conf = r10_bio->mddev->private;
1416 int d;
1417
1418 d = find_bio_disk(conf, r10_bio, bio, NULL);
1419
1420 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1421 set_bit(R10BIO_Uptodate, &r10_bio->state);
1422 else
1423 /* The write handler will notice the lack of
1424 * R10BIO_Uptodate and record any errors etc
1425 */
1426 atomic_add(r10_bio->sectors,
1427 &conf->mirrors[d].rdev->corrected_errors);
1428
1429 /* for reconstruct, we always reschedule after a read.
1430 * for resync, only after all reads
1431 */
1432 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1433 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1434 atomic_dec_and_test(&r10_bio->remaining)) {
1435 /* we have read all the blocks,
1436 * do the comparison in process context in raid10d
1437 */
1438 reschedule_retry(r10_bio);
1439 }
1440}
1441
1442static void end_sync_request(r10bio_t *r10_bio)
1443{
1444 mddev_t *mddev = r10_bio->mddev;
1445
1446 while (atomic_dec_and_test(&r10_bio->remaining)) {
1447 if (r10_bio->master_bio == NULL) {
1448 /* the primary of several recovery bios */
1449 sector_t s = r10_bio->sectors;
1450 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1451 test_bit(R10BIO_WriteError, &r10_bio->state))
1452 reschedule_retry(r10_bio);
1453 else
1454 put_buf(r10_bio);
1455 md_done_sync(mddev, s, 1);
1456 break;
1457 } else {
1458 r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1459 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1460 test_bit(R10BIO_WriteError, &r10_bio->state))
1461 reschedule_retry(r10_bio);
1462 else
1463 put_buf(r10_bio);
1464 r10_bio = r10_bio2;
1465 }
1466 }
1467}
1468
1469static void end_sync_write(struct bio *bio, int error)
1470{
1471 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1472 r10bio_t *r10_bio = bio->bi_private;
1473 mddev_t *mddev = r10_bio->mddev;
1474 conf_t *conf = mddev->private;
1475 int d;
1476 sector_t first_bad;
1477 int bad_sectors;
1478 int slot;
1479
1480 d = find_bio_disk(conf, r10_bio, bio, &slot);
1481
1482 if (!uptodate) {
1483 set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
1484 set_bit(R10BIO_WriteError, &r10_bio->state);
1485 } else if (is_badblock(conf->mirrors[d].rdev,
1486 r10_bio->devs[slot].addr,
1487 r10_bio->sectors,
1488 &first_bad, &bad_sectors))
1489 set_bit(R10BIO_MadeGood, &r10_bio->state);
1490
1491 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1492
1493 end_sync_request(r10_bio);
1494}
1495
1496/*
1497 * Note: sync and recover are handled very differently for raid10.
1498 * This code is for resync.
1499 * For resync, we read through virtual addresses and read all blocks.
1500 * If there is any error, we schedule a write. The lowest numbered
1501 * drive is authoritative.
1502 * However requests come for physical address, so we need to map.
1503 * For every physical address there are raid_disks/copies virtual addresses,
1504 * which is always at least one, but is not necessarily an integer.
1505 * This means that a physical address can span multiple chunks, so we may
1506 * have to submit multiple io requests for a single sync request.
1507 */
1508/*
1509 * We check if all blocks are in-sync and only write to blocks that
1510 * aren't in sync
1511 */
1512static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1513{
1514 conf_t *conf = mddev->private;
1515 int i, first;
1516 struct bio *tbio, *fbio;
1517
1518 atomic_set(&r10_bio->remaining, 1);
1519
1520 /* find the first device with a block */
1521 for (i=0; i<conf->copies; i++)
1522 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1523 break;
1524
1525 if (i == conf->copies)
1526 goto done;
1527
1528 first = i;
1529 fbio = r10_bio->devs[i].bio;
1530
1531 /* now find blocks with errors */
1532 for (i=0 ; i < conf->copies ; i++) {
1533 int j, d;
1534 int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1535
1536 tbio = r10_bio->devs[i].bio;
1537
1538 if (tbio->bi_end_io != end_sync_read)
1539 continue;
1540 if (i == first)
1541 continue;
1542 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1543 /* We know that the bi_io_vec layout is the same for
1544 * both 'first' and 'i', so we just compare them.
1545 * All vec entries are PAGE_SIZE;
1546 */
1547 for (j = 0; j < vcnt; j++)
1548 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1549 page_address(tbio->bi_io_vec[j].bv_page),
1550 PAGE_SIZE))
1551 break;
1552 if (j == vcnt)
1553 continue;
1554 mddev->resync_mismatches += r10_bio->sectors;
1555 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1556 /* Don't fix anything. */
1557 continue;
1558 }
1559 /* Ok, we need to write this bio, either to correct an
1560 * inconsistency or to correct an unreadable block.
1561 * First we need to fixup bv_offset, bv_len and
1562 * bi_vecs, as the read request might have corrupted these
1563 */
1564 tbio->bi_vcnt = vcnt;
1565 tbio->bi_size = r10_bio->sectors << 9;
1566 tbio->bi_idx = 0;
1567 tbio->bi_phys_segments = 0;
1568 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1569 tbio->bi_flags |= 1 << BIO_UPTODATE;
1570 tbio->bi_next = NULL;
1571 tbio->bi_rw = WRITE;
1572 tbio->bi_private = r10_bio;
1573 tbio->bi_sector = r10_bio->devs[i].addr;
1574
1575 for (j=0; j < vcnt ; j++) {
1576 tbio->bi_io_vec[j].bv_offset = 0;
1577 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1578
1579 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1580 page_address(fbio->bi_io_vec[j].bv_page),
1581 PAGE_SIZE);
1582 }
1583 tbio->bi_end_io = end_sync_write;
1584
1585 d = r10_bio->devs[i].devnum;
1586 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1587 atomic_inc(&r10_bio->remaining);
1588 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1589
1590 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1591 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1592 generic_make_request(tbio);
1593 }
1594
1595done:
1596 if (atomic_dec_and_test(&r10_bio->remaining)) {
1597 md_done_sync(mddev, r10_bio->sectors, 1);
1598 put_buf(r10_bio);
1599 }
1600}
1601
1602/*
1603 * Now for the recovery code.
1604 * Recovery happens across physical sectors.
1605 * We recover all non-in_sync drives by finding the virtual address of
1606 * each, and then choose a working drive that also has that virt address.
1607 * There is a separate r10_bio for each non-in_sync drive.
1608 * Only the first two slots are in use. The first for reading,
1609 * The second for writing.
1610 *
1611 */
1612static void fix_recovery_read_error(r10bio_t *r10_bio)
1613{
1614 /* We got a read error during recovery.
1615 * We repeat the read in smaller page-sized sections.
1616 * If a read succeeds, write it to the new device or record
1617 * a bad block if we cannot.
1618 * If a read fails, record a bad block on both old and
1619 * new devices.
1620 */
1621 mddev_t *mddev = r10_bio->mddev;
1622 conf_t *conf = mddev->private;
1623 struct bio *bio = r10_bio->devs[0].bio;
1624 sector_t sect = 0;
1625 int sectors = r10_bio->sectors;
1626 int idx = 0;
1627 int dr = r10_bio->devs[0].devnum;
1628 int dw = r10_bio->devs[1].devnum;
1629
1630 while (sectors) {
1631 int s = sectors;
1632 mdk_rdev_t *rdev;
1633 sector_t addr;
1634 int ok;
1635
1636 if (s > (PAGE_SIZE>>9))
1637 s = PAGE_SIZE >> 9;
1638
1639 rdev = conf->mirrors[dr].rdev;
1640 addr = r10_bio->devs[0].addr + sect;
1641 ok = sync_page_io(rdev,
1642 addr,
1643 s << 9,
1644 bio->bi_io_vec[idx].bv_page,
1645 READ, false);
1646 if (ok) {
1647 rdev = conf->mirrors[dw].rdev;
1648 addr = r10_bio->devs[1].addr + sect;
1649 ok = sync_page_io(rdev,
1650 addr,
1651 s << 9,
1652 bio->bi_io_vec[idx].bv_page,
1653 WRITE, false);
1654 if (!ok)
1655 set_bit(WriteErrorSeen, &rdev->flags);
1656 }
1657 if (!ok) {
1658 /* We don't worry if we cannot set a bad block -
1659 * it really is bad so there is no loss in not
1660 * recording it yet
1661 */
1662 rdev_set_badblocks(rdev, addr, s, 0);
1663
1664 if (rdev != conf->mirrors[dw].rdev) {
1665 /* need bad block on destination too */
1666 mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev;
1667 addr = r10_bio->devs[1].addr + sect;
1668 ok = rdev_set_badblocks(rdev2, addr, s, 0);
1669 if (!ok) {
1670 /* just abort the recovery */
1671 printk(KERN_NOTICE
1672 "md/raid10:%s: recovery aborted"
1673 " due to read error\n",
1674 mdname(mddev));
1675
1676 conf->mirrors[dw].recovery_disabled
1677 = mddev->recovery_disabled;
1678 set_bit(MD_RECOVERY_INTR,
1679 &mddev->recovery);
1680 break;
1681 }
1682 }
1683 }
1684
1685 sectors -= s;
1686 sect += s;
1687 idx++;
1688 }
1689}
1690
1691static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1692{
1693 conf_t *conf = mddev->private;
1694 int d;
1695 struct bio *wbio;
1696
1697 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
1698 fix_recovery_read_error(r10_bio);
1699 end_sync_request(r10_bio);
1700 return;
1701 }
1702
1703 /*
1704 * share the pages with the first bio
1705 * and submit the write request
1706 */
1707 wbio = r10_bio->devs[1].bio;
1708 d = r10_bio->devs[1].devnum;
1709
1710 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1711 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1712 generic_make_request(wbio);
1713}
1714
1715
1716/*
1717 * Used by fix_read_error() to decay the per rdev read_errors.
1718 * We halve the read error count for every hour that has elapsed
1719 * since the last recorded read error.
1720 *
1721 */
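/*
 * For example, three hours after the last recorded error a count of 40
 * decays to 40 >> 3 = 5; once the elapsed time reaches the bit width of
 * read_errors (32 hours for a 32-bit counter) the count is reset to 0.
 */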
1722static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
1723{
1724 struct timespec cur_time_mon;
1725 unsigned long hours_since_last;
1726 unsigned int read_errors = atomic_read(&rdev->read_errors);
1727
1728 ktime_get_ts(&cur_time_mon);
1729
1730 if (rdev->last_read_error.tv_sec == 0 &&
1731 rdev->last_read_error.tv_nsec == 0) {
1732 /* first time we've seen a read error */
1733 rdev->last_read_error = cur_time_mon;
1734 return;
1735 }
1736
1737 hours_since_last = (cur_time_mon.tv_sec -
1738 rdev->last_read_error.tv_sec) / 3600;
1739
1740 rdev->last_read_error = cur_time_mon;
1741
1742 /*
1743 * if hours_since_last is > the number of bits in read_errors
1744 * just set read errors to 0. We do this to avoid
1745 * overflowing the shift of read_errors by hours_since_last.
1746 */
1747 if (hours_since_last >= 8 * sizeof(read_errors))
1748 atomic_set(&rdev->read_errors, 0);
1749 else
1750 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
1751}
1752
1753static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
1754 int sectors, struct page *page, int rw)
1755{
1756 sector_t first_bad;
1757 int bad_sectors;
1758
1759 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
1760 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
1761 return -1;
1762 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1763 /* success */
1764 return 1;
1765 if (rw == WRITE)
1766 set_bit(WriteErrorSeen, &rdev->flags);
1767 /* need to record an error - either for the block or the device */
1768 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1769 md_error(rdev->mddev, rdev);
1770 return 0;
1771}
1772
1773/*
1774 * This is a kernel thread which:
1775 *
1776 * 1. Retries failed read operations on working mirrors.
1777 * 2. Updates the raid superblock when problems are encountered.
1778 * 3. Performs writes following reads for array synchronising.
1779 */
1780
1781static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1782{
1783 int sect = 0; /* Offset from r10_bio->sector */
1784 int sectors = r10_bio->sectors;
1785 mdk_rdev_t *rdev;
1786 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
1787 int d = r10_bio->devs[r10_bio->read_slot].devnum;
1788
1789 /* still own a reference to this rdev, so it cannot
1790 * have been cleared recently.
1791 */
1792 rdev = conf->mirrors[d].rdev;
1793
1794 if (test_bit(Faulty, &rdev->flags))
1795 /* drive has already been failed, just ignore any
1796 more fix_read_error() attempts */
1797 return;
1798
1799 check_decay_read_errors(mddev, rdev);
1800 atomic_inc(&rdev->read_errors);
1801 if (atomic_read(&rdev->read_errors) > max_read_errors) {
1802 char b[BDEVNAME_SIZE];
1803 bdevname(rdev->bdev, b);
1804
1805 printk(KERN_NOTICE
1806 "md/raid10:%s: %s: Raid device exceeded "
1807 "read_error threshold [cur %d:max %d]\n",
1808 mdname(mddev), b,
1809 atomic_read(&rdev->read_errors), max_read_errors);
1810 printk(KERN_NOTICE
1811 "md/raid10:%s: %s: Failing raid device\n",
1812 mdname(mddev), b);
1813 md_error(mddev, conf->mirrors[d].rdev);
1814 return;
1815 }
1816
1817 while(sectors) {
1818 int s = sectors;
1819 int sl = r10_bio->read_slot;
1820 int success = 0;
1821 int start;
1822
1823 if (s > (PAGE_SIZE>>9))
1824 s = PAGE_SIZE >> 9;
1825
1826 rcu_read_lock();
1827 do {
1828 sector_t first_bad;
1829 int bad_sectors;
1830
1831 d = r10_bio->devs[sl].devnum;
1832 rdev = rcu_dereference(conf->mirrors[d].rdev);
1833 if (rdev &&
1834 test_bit(In_sync, &rdev->flags) &&
1835 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
1836 &first_bad, &bad_sectors) == 0) {
1837 atomic_inc(&rdev->nr_pending);
1838 rcu_read_unlock();
1839 success = sync_page_io(rdev,
1840 r10_bio->devs[sl].addr +
1841 sect,
1842 s<<9,
1843 conf->tmppage, READ, false);
1844 rdev_dec_pending(rdev, mddev);
1845 rcu_read_lock();
1846 if (success)
1847 break;
1848 }
1849 sl++;
1850 if (sl == conf->copies)
1851 sl = 0;
1852 } while (!success && sl != r10_bio->read_slot);
1853 rcu_read_unlock();
1854
1855 if (!success) {
1856 /* Cannot read from anywhere, just mark the block
1857 * as bad on the first device to discourage future
1858 * reads.
1859 */
1860 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
1861 rdev = conf->mirrors[dn].rdev;
1862
1863 if (!rdev_set_badblocks(
1864 rdev,
1865 r10_bio->devs[r10_bio->read_slot].addr
1866 + sect,
1867 s, 0))
1868 md_error(mddev, rdev);
1869 break;
1870 }
1871
1872 start = sl;
1873 /* write it back and re-read */
1874 rcu_read_lock();
1875 while (sl != r10_bio->read_slot) {
1876 char b[BDEVNAME_SIZE];
1877
1878 if (sl==0)
1879 sl = conf->copies;
1880 sl--;
1881 d = r10_bio->devs[sl].devnum;
1882 rdev = rcu_dereference(conf->mirrors[d].rdev);
1883 if (!rdev ||
1884 !test_bit(In_sync, &rdev->flags))
1885 continue;
1886
1887 atomic_inc(&rdev->nr_pending);
1888 rcu_read_unlock();
1889 if (r10_sync_page_io(rdev,
1890 r10_bio->devs[sl].addr +
1891 sect,
1892 s<<9, conf->tmppage, WRITE)
1893 == 0) {
1894 /* Well, this device is dead */
1895 printk(KERN_NOTICE
1896 "md/raid10:%s: read correction "
1897 "write failed"
1898 " (%d sectors at %llu on %s)\n",
1899 mdname(mddev), s,
1900 (unsigned long long)(
1901 sect + rdev->data_offset),
1902 bdevname(rdev->bdev, b));
1903 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
1904 "drive\n",
1905 mdname(mddev),
1906 bdevname(rdev->bdev, b));
1907 }
1908 rdev_dec_pending(rdev, mddev);
1909 rcu_read_lock();
1910 }
1911 sl = start;
1912 while (sl != r10_bio->read_slot) {
1913 char b[BDEVNAME_SIZE];
1914
1915 if (sl==0)
1916 sl = conf->copies;
1917 sl--;
1918 d = r10_bio->devs[sl].devnum;
1919 rdev = rcu_dereference(conf->mirrors[d].rdev);
1920 if (!rdev ||
1921 !test_bit(In_sync, &rdev->flags))
1922 continue;
1923
1924 atomic_inc(&rdev->nr_pending);
1925 rcu_read_unlock();
1926 switch (r10_sync_page_io(rdev,
1927 r10_bio->devs[sl].addr +
1928 sect,
1929 s<<9, conf->tmppage,
1930 READ)) {
1931 case 0:
1932 /* Well, this device is dead */
1933 printk(KERN_NOTICE
1934 "md/raid10:%s: unable to read back "
1935 "corrected sectors"
1936 " (%d sectors at %llu on %s)\n",
1937 mdname(mddev), s,
1938 (unsigned long long)(
1939 sect + rdev->data_offset),
1940 bdevname(rdev->bdev, b));
1941 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
1942 "drive\n",
1943 mdname(mddev),
1944 bdevname(rdev->bdev, b));
1945 break;
1946 case 1:
1947 printk(KERN_INFO
1948 "md/raid10:%s: read error corrected"
1949 " (%d sectors at %llu on %s)\n",
1950 mdname(mddev), s,
1951 (unsigned long long)(
1952 sect + rdev->data_offset),
1953 bdevname(rdev->bdev, b));
1954 atomic_add(s, &rdev->corrected_errors);
1955 }
1956
1957 rdev_dec_pending(rdev, mddev);
1958 rcu_read_lock();
1959 }
1960 rcu_read_unlock();
1961
1962 sectors -= s;
1963 sect += s;
1964 }
1965}
1966
1967static void bi_complete(struct bio *bio, int error)
1968{
1969 complete((struct completion *)bio->bi_private);
1970}
1971
1972static int submit_bio_wait(int rw, struct bio *bio)
1973{
1974 struct completion event;
1975 rw |= REQ_SYNC;
1976
1977 init_completion(&event);
1978 bio->bi_private = &event;
1979 bio->bi_end_io = bi_complete;
1980 submit_bio(rw, bio);
1981 wait_for_completion(&event);
1982
1983 return test_bit(BIO_UPTODATE, &bio->bi_flags);
1984}
1985
1986static int narrow_write_error(r10bio_t *r10_bio, int i)
1987{
1988 struct bio *bio = r10_bio->master_bio;
1989 mddev_t *mddev = r10_bio->mddev;
1990 conf_t *conf = mddev->private;
1991 mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
1992 /* bio has the data to be written to slot 'i' where
1993 * we just recently had a write error.
1994 * We repeatedly clone the bio and trim down to one block,
1995 * then try the write. Where the write fails we record
1996 * a bad block.
1997 * It is conceivable that the bio doesn't exactly align with
1998 * blocks. We must handle this.
1999 *
2000 * We currently own a reference to the rdev.
2001 */
2002
2003 int block_sectors;
2004 sector_t sector;
2005 int sectors;
2006 int sect_to_write = r10_bio->sectors;
2007 int ok = 1;
2008
2009 if (rdev->badblocks.shift < 0)
2010 return 0;
2011
2012 block_sectors = 1 << rdev->badblocks.shift;
2013 sector = r10_bio->sector;
2014 sectors = ((r10_bio->sector + block_sectors)
2015 & ~(sector_t)(block_sectors - 1))
2016 - sector;
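	/*
	 * Worked example (hypothetical numbers, for illustration only):
	 * with badblocks.shift == 3, block_sectors is 8.  If the failed
	 * write began at sector 1003, the first chunk written below is
	 * ((1003 + 8) & ~7) - 1003 = 5 sectors, which reaches the 8-sector
	 * boundary at 1008; every later chunk is then a full block_sectors
	 * long, so any block that still fails can be recorded precisely.
	 */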
2017
2018 while (sect_to_write) {
2019 struct bio *wbio;
2020 if (sectors > sect_to_write)
2021 sectors = sect_to_write;
2022 /* Write at 'sector' for 'sectors' */
2023 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2024 md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2025 wbio->bi_sector = (r10_bio->devs[i].addr+
2026 rdev->data_offset+
2027 (sector - r10_bio->sector));
2028 wbio->bi_bdev = rdev->bdev;
2029 if (submit_bio_wait(WRITE, wbio) == 0)
2030 /* Failure! */
2031 ok = rdev_set_badblocks(rdev, sector,
2032 sectors, 0)
2033 && ok;
2034
2035 bio_put(wbio);
2036 sect_to_write -= sectors;
2037 sector += sectors;
2038 sectors = block_sectors;
2039 }
2040 return ok;
2041}
2042
2043static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
2044{
2045 int slot = r10_bio->read_slot;
2046 int mirror = r10_bio->devs[slot].devnum;
2047 struct bio *bio;
2048 conf_t *conf = mddev->private;
2049 mdk_rdev_t *rdev;
2050 char b[BDEVNAME_SIZE];
2051 unsigned long do_sync;
2052 int max_sectors;
2053
2054 /* we got a read error. Maybe the drive is bad. Maybe just
2055 * the block and we can fix it.
2056 * We freeze all other IO, and try reading the block from
2057 * other devices. When we find one, we re-write
2058 * and check if that fixes the read error.
2059 * This is all done synchronously while the array is
2060 * frozen.
2061 */
2062 if (mddev->ro == 0) {
2063 freeze_array(conf);
2064 fix_read_error(conf, mddev, r10_bio);
2065 unfreeze_array(conf);
2066 }
2067 rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
2068
2069 bio = r10_bio->devs[slot].bio;
2070 bdevname(bio->bi_bdev, b);
2071 r10_bio->devs[slot].bio =
2072 mddev->ro ? IO_BLOCKED : NULL;
2073read_more:
2074 mirror = read_balance(conf, r10_bio, &max_sectors);
2075 if (mirror == -1) {
2076 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2077 " read error for block %llu\n",
2078 mdname(mddev), b,
2079 (unsigned long long)r10_bio->sector);
2080 raid_end_bio_io(r10_bio);
2081 bio_put(bio);
2082 return;
2083 }
2084
2085 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2086 if (bio)
2087 bio_put(bio);
2088 slot = r10_bio->read_slot;
2089 rdev = conf->mirrors[mirror].rdev;
2090 printk_ratelimited(
2091 KERN_ERR
2092 "md/raid10:%s: %s: redirecting "
2093 "sector %llu to another mirror\n",
2094 mdname(mddev),
2095 bdevname(rdev->bdev, b),
2096 (unsigned long long)r10_bio->sector);
2097 bio = bio_clone_mddev(r10_bio->master_bio,
2098 GFP_NOIO, mddev);
2099 md_trim_bio(bio,
2100 r10_bio->sector - bio->bi_sector,
2101 max_sectors);
2102 r10_bio->devs[slot].bio = bio;
2103 bio->bi_sector = r10_bio->devs[slot].addr
2104 + rdev->data_offset;
2105 bio->bi_bdev = rdev->bdev;
2106 bio->bi_rw = READ | do_sync;
2107 bio->bi_private = r10_bio;
2108 bio->bi_end_io = raid10_end_read_request;
2109 if (max_sectors < r10_bio->sectors) {
2110 /* Drat - have to split this up more */
2111 struct bio *mbio = r10_bio->master_bio;
2112 int sectors_handled =
2113 r10_bio->sector + max_sectors
2114 - mbio->bi_sector;
2115 r10_bio->sectors = max_sectors;
2116 spin_lock_irq(&conf->device_lock);
2117 if (mbio->bi_phys_segments == 0)
2118 mbio->bi_phys_segments = 2;
2119 else
2120 mbio->bi_phys_segments++;
2121 spin_unlock_irq(&conf->device_lock);
2122 generic_make_request(bio);
2123 bio = NULL;
2124
2125 r10_bio = mempool_alloc(conf->r10bio_pool,
2126 GFP_NOIO);
2127 r10_bio->master_bio = mbio;
2128 r10_bio->sectors = (mbio->bi_size >> 9)
2129 - sectors_handled;
2130 r10_bio->state = 0;
2131 set_bit(R10BIO_ReadError,
2132 &r10_bio->state);
2133 r10_bio->mddev = mddev;
2134 r10_bio->sector = mbio->bi_sector
2135 + sectors_handled;
2136
2137 goto read_more;
2138 } else
2139 generic_make_request(bio);
2140}
2141
2142static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
2143{
2144 /* Some sort of write request has finished and it
2145 * succeeded in writing where we thought there was a
2146 * bad block. So forget the bad block.
2147 * Or possibly it failed and we need to record
2148 * a bad block.
2149 */
2150 int m;
2151 mdk_rdev_t *rdev;
2152
2153 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2154 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2155 for (m = 0; m < conf->copies; m++) {
2156 int dev = r10_bio->devs[m].devnum;
2157 rdev = conf->mirrors[dev].rdev;
2158 if (r10_bio->devs[m].bio == NULL)
2159 continue;
2160 if (test_bit(BIO_UPTODATE,
2161 &r10_bio->devs[m].bio->bi_flags)) {
2162 rdev_clear_badblocks(
2163 rdev,
2164 r10_bio->devs[m].addr,
2165 r10_bio->sectors);
2166 } else {
2167 if (!rdev_set_badblocks(
2168 rdev,
2169 r10_bio->devs[m].addr,
2170 r10_bio->sectors, 0))
2171 md_error(conf->mddev, rdev);
2172 }
2173 }
2174 put_buf(r10_bio);
2175 } else {
2176 for (m = 0; m < conf->copies; m++) {
2177 int dev = r10_bio->devs[m].devnum;
2178 struct bio *bio = r10_bio->devs[m].bio;
2179 rdev = conf->mirrors[dev].rdev;
2180 if (bio == IO_MADE_GOOD) {
2181 rdev_clear_badblocks(
2182 rdev,
2183 r10_bio->devs[m].addr,
2184 r10_bio->sectors);
2185 rdev_dec_pending(rdev, conf->mddev);
2186 } else if (bio != NULL &&
2187 !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2188 if (!narrow_write_error(r10_bio, m)) {
2189 md_error(conf->mddev, rdev);
2190 set_bit(R10BIO_Degraded,
2191 &r10_bio->state);
2192 }
2193 rdev_dec_pending(rdev, conf->mddev);
2194 }
2195 }
2196 if (test_bit(R10BIO_WriteError,
2197 &r10_bio->state))
2198 close_write(r10_bio);
2199 raid_end_bio_io(r10_bio);
2200 }
2201}
2202
2203static void raid10d(mddev_t *mddev)
2204{
2205 r10bio_t *r10_bio;
2206 unsigned long flags;
2207 conf_t *conf = mddev->private;
2208 struct list_head *head = &conf->retry_list;
2209 struct blk_plug plug;
2210
2211 md_check_recovery(mddev);
2212
2213 blk_start_plug(&plug);
2214 for (;;) {
2215
2216 flush_pending_writes(conf);
2217
2218 spin_lock_irqsave(&conf->device_lock, flags);
2219 if (list_empty(head)) {
2220 spin_unlock_irqrestore(&conf->device_lock, flags);
2221 break;
2222 }
2223 r10_bio = list_entry(head->prev, r10bio_t, retry_list);
2224 list_del(head->prev);
2225 conf->nr_queued--;
2226 spin_unlock_irqrestore(&conf->device_lock, flags);
2227
2228 mddev = r10_bio->mddev;
2229 conf = mddev->private;
2230 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2231 test_bit(R10BIO_WriteError, &r10_bio->state))
2232 handle_write_completed(conf, r10_bio);
2233 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2234 sync_request_write(mddev, r10_bio);
2235 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2236 recovery_request_write(mddev, r10_bio);
2237 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2238 handle_read_error(mddev, r10_bio);
2239 else {
2240 /* just a partial read to be scheduled from a
2241 * separate context
2242 */
2243 int slot = r10_bio->read_slot;
2244 generic_make_request(r10_bio->devs[slot].bio);
2245 }
2246
2247 cond_resched();
2248 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2249 md_check_recovery(mddev);
2250 }
2251 blk_finish_plug(&plug);
2252}
2253
2254
2255static int init_resync(conf_t *conf)
2256{
2257 int buffs;
2258
2259 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
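	/* With the RESYNC_WINDOW and RESYNC_BLOCK_SIZE values defined near
	 * the top of this file (1MB window, 64KB blocks) this works out to
	 * 16 preallocated r10buf entries. */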
2260 BUG_ON(conf->r10buf_pool);
2261 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2262 if (!conf->r10buf_pool)
2263 return -ENOMEM;
2264 conf->next_resync = 0;
2265 return 0;
2266}
2267
2268/*
2269 * perform a "sync" on one "block"
2270 *
2271 * We need to make sure that no normal I/O request - particularly write
2272 * requests - conflict with active sync requests.
2273 *
2274 * This is achieved by tracking pending requests and a 'barrier' concept
2275 * that can be installed to exclude normal IO requests.
2276 *
2277 * Resync and recovery are handled very differently.
2278 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2279 *
2280 * For resync, we iterate over virtual addresses, read all copies,
2281 * and update if there are differences. If only one copy is live,
2282 * skip it.
2283 * For recovery, we iterate over physical addresses, read a good
2284 * value for each non-in_sync drive, and over-write.
2285 *
2286 * So, for recovery we may have several outstanding complex requests for a
2287 * given address, one for each out-of-sync device. We model this by allocating
2288 * a number of r10_bio structures, one for each out-of-sync device.
2289 * As we setup these structures, we collect all bio's together into a list
2290 * which we then process collectively to add pages, and then process again
2291 * to pass to generic_make_request.
2292 *
2293 * The r10_bio structures are linked using a borrowed master_bio pointer.
2294 * This link is counted in ->remaining. When the r10_bio that points to NULL
2295 * has its remaining count decremented to 0, the whole complex operation
2296 * is complete.
2297 *
2298 */
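/*
 * A small illustration of that chain (hypothetical, two devices being
 * recovered at the same virtual address):
 *
 *   r10_bio B ->master_bio-> r10_bio A ->master_bio-> NULL
 *
 * Allocating B increments A->remaining, so A (the r10_bio that points to
 * NULL) only completes, and the whole operation with it, once B and A's
 * own bios have all finished.
 */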
2299
2300static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
2301 int *skipped, int go_faster)
2302{
2303 conf_t *conf = mddev->private;
2304 r10bio_t *r10_bio;
2305 struct bio *biolist = NULL, *bio;
2306 sector_t max_sector, nr_sectors;
2307 int i;
2308 int max_sync;
2309 sector_t sync_blocks;
2310 sector_t sectors_skipped = 0;
2311 int chunks_skipped = 0;
2312
2313 if (!conf->r10buf_pool)
2314 if (init_resync(conf))
2315 return 0;
2316
2317 skipped:
2318 max_sector = mddev->dev_sectors;
2319 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2320 max_sector = mddev->resync_max_sectors;
2321 if (sector_nr >= max_sector) {
2322 /* If we aborted, we need to abort the
2323 * sync on the 'current' bitmap chunks (there can
2324 * be several when recovering multiple devices),
2325 * as we may have started syncing them but not finished.
2326 * We can find the current address in
2327 * mddev->curr_resync, but for recovery,
2328 * we need to convert that to several
2329 * virtual addresses.
2330 */
2331 if (mddev->curr_resync < max_sector) { /* aborted */
2332 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2333 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2334 &sync_blocks, 1);
2335 else for (i=0; i<conf->raid_disks; i++) {
2336 sector_t sect =
2337 raid10_find_virt(conf, mddev->curr_resync, i);
2338 bitmap_end_sync(mddev->bitmap, sect,
2339 &sync_blocks, 1);
2340 }
2341 } else /* completed sync */
2342 conf->fullsync = 0;
2343
2344 bitmap_close_sync(mddev->bitmap);
2345 close_sync(conf);
2346 *skipped = 1;
2347 return sectors_skipped;
2348 }
2349 if (chunks_skipped >= conf->raid_disks) {
2350 /* if there has been nothing to do on any drive,
2351 * then there is nothing to do at all.
2352 */
2353 *skipped = 1;
2354 return (max_sector - sector_nr) + sectors_skipped;
2355 }
2356
2357 if (max_sector > mddev->resync_max)
2358 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2359
2360 /* make sure whole request will fit in a chunk - if chunks
2361 * are meaningful
2362 */
2363 if (conf->near_copies < conf->raid_disks &&
2364 max_sector > (sector_nr | conf->chunk_mask))
2365 max_sector = (sector_nr | conf->chunk_mask) + 1;
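	/* Worked example (hypothetical values): with 256-sector chunks
	 * (chunk_mask == 255) and sector_nr == 1000, (1000 | 255) + 1 ==
	 * 1024, so the request is clipped at the next chunk boundary. */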
2366 /*
2367 * If there is non-resync activity waiting for us then
2368 * put in a delay to throttle resync.
2369 */
2370 if (!go_faster && conf->nr_waiting)
2371 msleep_interruptible(1000);
2372
2373 /* Again, very different code for resync and recovery.
2374 * Both must result in an r10bio with a list of bios that
2375 * have bi_end_io, bi_sector, bi_bdev set,
2376 * and bi_private set to the r10bio.
2377 * For recovery, we may actually create several r10bios
2378 * with 2 bios in each, that correspond to the bios in the main one.
2379 * In this case, the subordinate r10bios link back through a
2380 * borrowed master_bio pointer, and the counter in the master
2381 * includes a ref from each subordinate.
2382 */
2383 /* First, we decide what to do and set ->bi_end_io
2384 * To end_sync_read if we want to read, and
2385 * end_sync_write if we will want to write.
2386 */
2387
2388 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
2389 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2390 /* recovery... the complicated one */
2391 int j;
2392 r10_bio = NULL;
2393
2394 for (i=0 ; i<conf->raid_disks; i++) {
2395 int still_degraded;
2396 r10bio_t *rb2;
2397 sector_t sect;
2398 int must_sync;
2399 int any_working;
2400
2401 if (conf->mirrors[i].rdev == NULL ||
2402 test_bit(In_sync, &conf->mirrors[i].rdev->flags))
2403 continue;
2404
2405 still_degraded = 0;
2406 /* want to reconstruct this device */
2407 rb2 = r10_bio;
2408 sect = raid10_find_virt(conf, sector_nr, i);
2409 /* Unless we are doing a full sync, we only need
2410 * to recover the block if it is set in the bitmap
2411 */
2412 must_sync = bitmap_start_sync(mddev->bitmap, sect,
2413 &sync_blocks, 1);
2414 if (sync_blocks < max_sync)
2415 max_sync = sync_blocks;
2416 if (!must_sync &&
2417 !conf->fullsync) {
2418 /* yep, skip the sync_blocks here, but don't assume
2419 * that there will never be anything to do here
2420 */
2421 chunks_skipped = -1;
2422 continue;
2423 }
2424
2425 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2426 raise_barrier(conf, rb2 != NULL);
2427 atomic_set(&r10_bio->remaining, 0);
2428
2429 r10_bio->master_bio = (struct bio*)rb2;
2430 if (rb2)
2431 atomic_inc(&rb2->remaining);
2432 r10_bio->mddev = mddev;
2433 set_bit(R10BIO_IsRecover, &r10_bio->state);
2434 r10_bio->sector = sect;
2435
2436 raid10_find_phys(conf, r10_bio);
2437
2438 /* Need to check if the array will still be
2439 * degraded
2440 */
2441 for (j=0; j<conf->raid_disks; j++)
2442 if (conf->mirrors[j].rdev == NULL ||
2443 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
2444 still_degraded = 1;
2445 break;
2446 }
2447
2448 must_sync = bitmap_start_sync(mddev->bitmap, sect,
2449 &sync_blocks, still_degraded);
2450
2451 any_working = 0;
2452 for (j=0; j<conf->copies;j++) {
2453 int k;
2454 int d = r10_bio->devs[j].devnum;
2455 sector_t from_addr, to_addr;
2456 mdk_rdev_t *rdev;
2457 sector_t sector, first_bad;
2458 int bad_sectors;
2459 if (!conf->mirrors[d].rdev ||
2460 !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
2461 continue;
2462 /* This is where we read from */
2463 any_working = 1;
2464 rdev = conf->mirrors[d].rdev;
2465 sector = r10_bio->devs[j].addr;
2466
2467 if (is_badblock(rdev, sector, max_sync,
2468 &first_bad, &bad_sectors)) {
2469 if (first_bad > sector)
2470 max_sync = first_bad - sector;
2471 else {
2472 bad_sectors -= (sector
2473 - first_bad);
2474 if (max_sync > bad_sectors)
2475 max_sync = bad_sectors;
2476 continue;
2477 }
2478 }
2479 bio = r10_bio->devs[0].bio;
2480 bio->bi_next = biolist;
2481 biolist = bio;
2482 bio->bi_private = r10_bio;
2483 bio->bi_end_io = end_sync_read;
2484 bio->bi_rw = READ;
2485 from_addr = r10_bio->devs[j].addr;
2486 bio->bi_sector = from_addr +
2487 conf->mirrors[d].rdev->data_offset;
2488 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
2489 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2490 atomic_inc(&r10_bio->remaining);
2491 /* and we write to 'i' */
2492
2493 for (k=0; k<conf->copies; k++)
2494 if (r10_bio->devs[k].devnum == i)
2495 break;
2496 BUG_ON(k == conf->copies);
2497 bio = r10_bio->devs[1].bio;
2498 bio->bi_next = biolist;
2499 biolist = bio;
2500 bio->bi_private = r10_bio;
2501 bio->bi_end_io = end_sync_write;
2502 bio->bi_rw = WRITE;
2503 to_addr = r10_bio->devs[k].addr;
2504 bio->bi_sector = to_addr +
2505 conf->mirrors[i].rdev->data_offset;
2506 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
2507
2508 r10_bio->devs[0].devnum = d;
2509 r10_bio->devs[0].addr = from_addr;
2510 r10_bio->devs[1].devnum = i;
2511 r10_bio->devs[1].addr = to_addr;
2512
2513 break;
2514 }
2515 if (j == conf->copies) {
2516 /* Cannot recover, so abort the recovery or
2517 * record a bad block */
2518 put_buf(r10_bio);
2519 if (rb2)
2520 atomic_dec(&rb2->remaining);
2521 r10_bio = rb2;
2522 if (any_working) {
2523 /* problem is that there are bad blocks
2524 * on other device(s)
2525 */
2526 int k;
2527 for (k = 0; k < conf->copies; k++)
2528 if (r10_bio->devs[k].devnum == i)
2529 break;
2530 if (!rdev_set_badblocks(
2531 conf->mirrors[i].rdev,
2532 r10_bio->devs[k].addr,
2533 max_sync, 0))
2534 any_working = 0;
2535 }
2536 if (!any_working) {
2537 if (!test_and_set_bit(MD_RECOVERY_INTR,
2538 &mddev->recovery))
2539 printk(KERN_INFO "md/raid10:%s: insufficient "
2540 "working devices for recovery.\n",
2541 mdname(mddev));
2542 conf->mirrors[i].recovery_disabled
2543 = mddev->recovery_disabled;
2544 }
2545 break;
2546 }
2547 }
2548 if (biolist == NULL) {
2549 while (r10_bio) {
2550 r10bio_t *rb2 = r10_bio;
2551 r10_bio = (r10bio_t*) rb2->master_bio;
2552 rb2->master_bio = NULL;
2553 put_buf(rb2);
2554 }
2555 goto giveup;
2556 }
2557 } else {
2558 /* resync. Schedule a read for every block at this virt offset */
2559 int count = 0;
2560
2561 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2562
2563 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2564 &sync_blocks, mddev->degraded) &&
2565 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
2566 &mddev->recovery)) {
2567 /* We can skip this block */
2568 *skipped = 1;
2569 return sync_blocks + sectors_skipped;
2570 }
2571 if (sync_blocks < max_sync)
2572 max_sync = sync_blocks;
2573 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2574
2575 r10_bio->mddev = mddev;
2576 atomic_set(&r10_bio->remaining, 0);
2577 raise_barrier(conf, 0);
2578 conf->next_resync = sector_nr;
2579
2580 r10_bio->master_bio = NULL;
2581 r10_bio->sector = sector_nr;
2582 set_bit(R10BIO_IsSync, &r10_bio->state);
2583 raid10_find_phys(conf, r10_bio);
2584 r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
2585
2586 for (i=0; i<conf->copies; i++) {
2587 int d = r10_bio->devs[i].devnum;
2588 sector_t first_bad, sector;
2589 int bad_sectors;
2590
2591 bio = r10_bio->devs[i].bio;
2592 bio->bi_end_io = NULL;
2593 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2594 if (conf->mirrors[d].rdev == NULL ||
2595 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
2596 continue;
2597 sector = r10_bio->devs[i].addr;
2598 if (is_badblock(conf->mirrors[d].rdev,
2599 sector, max_sync,
2600 &first_bad, &bad_sectors)) {
2601 if (first_bad > sector)
2602 max_sync = first_bad - sector;
2603 else {
2604 bad_sectors -= (sector - first_bad);
2605 if (max_sync > bad_sectors)
2606 max_sync = bad_sectors;
2607 continue;
2608 }
2609 }
2610 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2611 atomic_inc(&r10_bio->remaining);
2612 bio->bi_next = biolist;
2613 biolist = bio;
2614 bio->bi_private = r10_bio;
2615 bio->bi_end_io = end_sync_read;
2616 bio->bi_rw = READ;
2617 bio->bi_sector = sector +
2618 conf->mirrors[d].rdev->data_offset;
2619 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
2620 count++;
2621 }
2622
2623 if (count < 2) {
2624 for (i=0; i<conf->copies; i++) {
2625 int d = r10_bio->devs[i].devnum;
2626 if (r10_bio->devs[i].bio->bi_end_io)
2627 rdev_dec_pending(conf->mirrors[d].rdev,
2628 mddev);
2629 }
2630 put_buf(r10_bio);
2631 biolist = NULL;
2632 goto giveup;
2633 }
2634 }
2635
2636 for (bio = biolist; bio ; bio=bio->bi_next) {
2637
2638 bio->bi_flags &= ~(BIO_POOL_MASK - 1);
2639 if (bio->bi_end_io)
2640 bio->bi_flags |= 1 << BIO_UPTODATE;
2641 bio->bi_vcnt = 0;
2642 bio->bi_idx = 0;
2643 bio->bi_phys_segments = 0;
2644 bio->bi_size = 0;
2645 }
2646
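	/* The loop below fills every bio on 'biolist' in lockstep, one page
	 * at a time.  If any bio refuses a page (bio_add_page() returns 0),
	 * that page is taken back from the bios already extended in this
	 * round and we stop at the sectors gathered so far ("bio_full"),
	 * so all copies describe exactly the same sector range. */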
2647 nr_sectors = 0;
2648 if (sector_nr + max_sync < max_sector)
2649 max_sector = sector_nr + max_sync;
2650 do {
2651 struct page *page;
2652 int len = PAGE_SIZE;
2653 if (sector_nr + (len>>9) > max_sector)
2654 len = (max_sector - sector_nr) << 9;
2655 if (len == 0)
2656 break;
2657 for (bio= biolist ; bio ; bio=bio->bi_next) {
2658 struct bio *bio2;
2659 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2660 if (bio_add_page(bio, page, len, 0))
2661 continue;
2662
2663 /* stop here */
2664 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2665 for (bio2 = biolist;
2666 bio2 && bio2 != bio;
2667 bio2 = bio2->bi_next) {
2668 /* remove last page from this bio */
2669 bio2->bi_vcnt--;
2670 bio2->bi_size -= len;
2671 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
2672 }
2673 goto bio_full;
2674 }
2675 nr_sectors += len>>9;
2676 sector_nr += len>>9;
2677 } while (biolist->bi_vcnt < RESYNC_PAGES);
2678 bio_full:
2679 r10_bio->sectors = nr_sectors;
2680
2681 while (biolist) {
2682 bio = biolist;
2683 biolist = biolist->bi_next;
2684
2685 bio->bi_next = NULL;
2686 r10_bio = bio->bi_private;
2687 r10_bio->sectors = nr_sectors;
2688
2689 if (bio->bi_end_io == end_sync_read) {
2690 md_sync_acct(bio->bi_bdev, nr_sectors);
2691 generic_make_request(bio);
2692 }
2693 }
2694
2695 if (sectors_skipped)
2696 /* pretend they weren't skipped, it makes
2697 * no important difference in this case
2698 */
2699 md_done_sync(mddev, sectors_skipped, 1);
2700
2701 return sectors_skipped + nr_sectors;
2702 giveup:
2703 /* There is nowhere to write, so all non-sync
2704 * drives must be failed or in resync, all drives
2705 * have a bad block, so try the next chunk...
2706 */
2707 if (sector_nr + max_sync < max_sector)
2708 max_sector = sector_nr + max_sync;
2709
2710 sectors_skipped += (max_sector - sector_nr);
2711 chunks_skipped ++;
2712 sector_nr = max_sector;
2713 goto skipped;
2714}
2715
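/*
 * raid10_size() reports how many sectors the array exposes for a given
 * device size and disk count: per-device sectors are reduced to whole
 * chunks, divided across far_copies, multiplied by the number of disks
 * and divided by near_copies.  Rough example (hypothetical numbers):
 * 4 disks of 1000 chunks each with near_copies=2, far_copies=1 yield
 * 1000 * 4 / 2 = 2000 data chunks.
 */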
2716static sector_t
2717raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2718{
2719 sector_t size;
2720 conf_t *conf = mddev->private;
2721
2722 if (!raid_disks)
2723 raid_disks = conf->raid_disks;
2724 if (!sectors)
2725 sectors = conf->dev_sectors;
2726
2727 size = sectors >> conf->chunk_shift;
2728 sector_div(size, conf->far_copies);
2729 size = size * raid_disks;
2730 sector_div(size, conf->near_copies);
2731
2732 return size << conf->chunk_shift;
2733}
2734
2735
2736static conf_t *setup_conf(mddev_t *mddev)
2737{
2738 conf_t *conf = NULL;
2739 int nc, fc, fo;
2740 sector_t stride, size;
2741 int err = -EINVAL;
2742
2743 if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
2744 !is_power_of_2(mddev->new_chunk_sectors)) {
2745 printk(KERN_ERR "md/raid10:%s: chunk size must be "
2746 "at least PAGE_SIZE(%ld) and be a power of 2.\n",
2747 mdname(mddev), PAGE_SIZE);
2748 goto out;
2749 }
2750
2751 nc = mddev->new_layout & 255;
2752 fc = (mddev->new_layout >> 8) & 255;
2753 fo = mddev->new_layout & (1<<16);
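	/* For example, the common 'n2' layout used by the raid0 takeover
	 * below is 0x102: nc = 2, fc = 1, fo = 0.  (Purely illustrative;
	 * the field layout matches the comment at the top of this file.) */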
2754
2755 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
2756 (mddev->new_layout >> 17)) {
2757 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
2758 mdname(mddev), mddev->new_layout);
2759 goto out;
2760 }
2761
2762 err = -ENOMEM;
2763 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
2764 if (!conf)
2765 goto out;
2766
2767 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2768 GFP_KERNEL);
2769 if (!conf->mirrors)
2770 goto out;
2771
2772 conf->tmppage = alloc_page(GFP_KERNEL);
2773 if (!conf->tmppage)
2774 goto out;
2775
2776
2777 conf->raid_disks = mddev->raid_disks;
2778 conf->near_copies = nc;
2779 conf->far_copies = fc;
2780 conf->copies = nc*fc;
2781 conf->far_offset = fo;
2782 conf->chunk_mask = mddev->new_chunk_sectors - 1;
2783 conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
2784
2785 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
2786 r10bio_pool_free, conf);
2787 if (!conf->r10bio_pool)
2788 goto out;
2789
2790 size = mddev->dev_sectors >> conf->chunk_shift;
2791 sector_div(size, fc);
2792 size = size * conf->raid_disks;
2793 sector_div(size, nc);
2794 /* 'size' is now the number of chunks in the array */
2795 /* calculate "used chunks per device" in 'stride' */
2796 stride = size * conf->copies;
2797
2798 /* We need to round up when dividing by raid_disks to
2799 * get the stride size.
2800 */
2801 stride += conf->raid_disks - 1;
2802 sector_div(stride, conf->raid_disks);
2803
2804 conf->dev_sectors = stride << conf->chunk_shift;
2805
2806 if (fo)
2807 stride = 1;
2808 else
2809 sector_div(stride, fc);
2810 conf->stride = stride << conf->chunk_shift;
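	/* Worked example (hypothetical numbers): 4 disks, nc=2, fc=1 and
	 * 1000 chunks per device give size = 1000 * 4 / 2 = 2000 data
	 * chunks, stride = 2000 * 2 / 4 = 1000 chunks actually used on
	 * each device; dev_sectors and stride are converted back to
	 * sectors by shifting by chunk_shift. */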
2811
2812
2813 spin_lock_init(&conf->device_lock);
2814 INIT_LIST_HEAD(&conf->retry_list);
2815
2816 spin_lock_init(&conf->resync_lock);
2817 init_waitqueue_head(&conf->wait_barrier);
2818
2819 conf->thread = md_register_thread(raid10d, mddev, NULL);
2820 if (!conf->thread)
2821 goto out;
2822
2823 conf->mddev = mddev;
2824 return conf;
2825
2826 out:
2827 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
2828 mdname(mddev));
2829 if (conf) {
2830 if (conf->r10bio_pool)
2831 mempool_destroy(conf->r10bio_pool);
2832 kfree(conf->mirrors);
2833 safe_put_page(conf->tmppage);
2834 kfree(conf);
2835 }
2836 return ERR_PTR(err);
2837}
2838
2839static int run(mddev_t *mddev)
2840{
2841 conf_t *conf;
2842 int i, disk_idx, chunk_size;
2843 mirror_info_t *disk;
2844 mdk_rdev_t *rdev;
2845 sector_t size;
2846
2847 /*
2848 * copy the already verified devices into our private RAID10
2849 * bookkeeping area. [whatever we allocate in run(),
2850 * should be freed in stop()]
2851 */
2852
2853 if (mddev->private == NULL) {
2854 conf = setup_conf(mddev);
2855 if (IS_ERR(conf))
2856 return PTR_ERR(conf);
2857 mddev->private = conf;
2858 }
2859 conf = mddev->private;
2860 if (!conf)
2861 goto out;
2862
2863 mddev->thread = conf->thread;
2864 conf->thread = NULL;
2865
2866 chunk_size = mddev->chunk_sectors << 9;
2867 blk_queue_io_min(mddev->queue, chunk_size);
2868 if (conf->raid_disks % conf->near_copies)
2869 blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2870 else
2871 blk_queue_io_opt(mddev->queue, chunk_size *
2872 (conf->raid_disks / conf->near_copies));
2873
2874 list_for_each_entry(rdev, &mddev->disks, same_set) {
2875
2876 disk_idx = rdev->raid_disk;
2877 if (disk_idx >= conf->raid_disks
2878 || disk_idx < 0)
2879 continue;
2880 disk = conf->mirrors + disk_idx;
2881
2882 disk->rdev = rdev;
2883 disk_stack_limits(mddev->gendisk, rdev->bdev,
2884 rdev->data_offset << 9);
2885 /* as we don't honour merge_bvec_fn, we must never risk
2886 * violating it, so limit max_segments to 1 lying
2887 * within a single page.
2888 */
2889 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2890 blk_queue_max_segments(mddev->queue, 1);
2891 blk_queue_segment_boundary(mddev->queue,
2892 PAGE_CACHE_SIZE - 1);
2893 }
2894
2895 disk->head_position = 0;
2896 }
2897 /* need to check that every block has at least one working mirror */
2898 if (!enough(conf, -1)) {
2899 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
2900 mdname(mddev));
2901 goto out_free_conf;
2902 }
2903
2904 mddev->degraded = 0;
2905 for (i = 0; i < conf->raid_disks; i++) {
2906
2907 disk = conf->mirrors + i;
2908
2909 if (!disk->rdev ||
2910 !test_bit(In_sync, &disk->rdev->flags)) {
2911 disk->head_position = 0;
2912 mddev->degraded++;
2913 if (disk->rdev)
2914 conf->fullsync = 1;
2915 }
2916 }
2917
2918 if (mddev->recovery_cp != MaxSector)
2919 printk(KERN_NOTICE "md/raid10:%s: not clean"
2920 " -- starting background reconstruction\n",
2921 mdname(mddev));
2922 printk(KERN_INFO
2923 "md/raid10:%s: active with %d out of %d devices\n",
2924 mdname(mddev), conf->raid_disks - mddev->degraded,
2925 conf->raid_disks);
2926 /*
2927 * Ok, everything is just fine now
2928 */
2929 mddev->dev_sectors = conf->dev_sectors;
2930 size = raid10_size(mddev, 0, 0);
2931 md_set_array_sectors(mddev, size);
2932 mddev->resync_max_sectors = size;
2933
2934 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2935 mddev->queue->backing_dev_info.congested_data = mddev;
2936
2937 /* Calculate max read-ahead size.
2938 * We need to readahead at least twice a whole stripe....
2939 * maybe...
2940 */
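	/* Example with assumed values (not taken from this array): 4 disks,
	 * 512KB chunks, 4KB pages and near_copies == 2 give a stripe of
	 * 4 * (512KB / 4KB) / 2 = 256 pages, so ra_pages is raised to at
	 * least 512 pages (2MB). */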
2941 {
2942 int stripe = conf->raid_disks *
2943 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
2944 stripe /= conf->near_copies;
2945 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2946 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2947 }
2948
2949 if (conf->near_copies < conf->raid_disks)
2950 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2951
2952 if (md_integrity_register(mddev))
2953 goto out_free_conf;
2954
2955 return 0;
2956
2957out_free_conf:
2958 md_unregister_thread(&mddev->thread);
2959 if (conf->r10bio_pool)
2960 mempool_destroy(conf->r10bio_pool);
2961 safe_put_page(conf->tmppage);
2962 kfree(conf->mirrors);
2963 kfree(conf);
2964 mddev->private = NULL;
2965out:
2966 return -EIO;
2967}
2968
2969static int stop(mddev_t *mddev)
2970{
2971 conf_t *conf = mddev->private;
2972
2973 raise_barrier(conf, 0);
2974 lower_barrier(conf);
2975
2976 md_unregister_thread(&mddev->thread);
2977 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2978 if (conf->r10bio_pool)
2979 mempool_destroy(conf->r10bio_pool);
2980 kfree(conf->mirrors);
2981 kfree(conf);
2982 mddev->private = NULL;
2983 return 0;
2984}
2985
2986static void raid10_quiesce(mddev_t *mddev, int state)
2987{
2988 conf_t *conf = mddev->private;
2989
2990 switch(state) {
2991 case 1:
2992 raise_barrier(conf, 0);
2993 break;
2994 case 0:
2995 lower_barrier(conf);
2996 break;
2997 }
2998}
2999
3000static void *raid10_takeover_raid0(mddev_t *mddev)
3001{
3002 mdk_rdev_t *rdev;
3003 conf_t *conf;
3004
3005 if (mddev->degraded > 0) {
3006 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3007 mdname(mddev));
3008 return ERR_PTR(-EINVAL);
3009 }
3010
3011 /* Set new parameters */
3012 mddev->new_level = 10;
3013 /* new layout: far_copies = 1, near_copies = 2 */
3014 mddev->new_layout = (1<<8) + 2;
3015 mddev->new_chunk_sectors = mddev->chunk_sectors;
3016 mddev->delta_disks = mddev->raid_disks;
3017 mddev->raid_disks *= 2;
3018 /* make sure it will not be marked as dirty */
3019 mddev->recovery_cp = MaxSector;
3020
3021 conf = setup_conf(mddev);
3022 if (!IS_ERR(conf)) {
3023 list_for_each_entry(rdev, &mddev->disks, same_set)
3024 if (rdev->raid_disk >= 0)
3025 rdev->new_raid_disk = rdev->raid_disk * 2;
3026 conf->barrier = 1;
3027 }
3028
3029 return conf;
3030}
3031
3032static void *raid10_takeover(mddev_t *mddev)
3033{
3034 struct raid0_private_data *raid0_priv;
3035
3036 /* raid10 can take over:
3037 * raid0 - providing it has only two drives
3038 */
3039 if (mddev->level == 0) {
3040 /* for raid0 takeover only one zone is supported */
3041 raid0_priv = mddev->private;
3042 if (raid0_priv->nr_strip_zones > 1) {
3043 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3044 " with more than one zone.\n",
3045 mdname(mddev));
3046 return ERR_PTR(-EINVAL);
3047 }
3048 return raid10_takeover_raid0(mddev);
3049 }
3050 return ERR_PTR(-EINVAL);
3051}
3052
3053static struct mdk_personality raid10_personality =
3054{
3055 .name = "raid10",
3056 .level = 10,
3057 .owner = THIS_MODULE,
3058 .make_request = make_request,
3059 .run = run,
3060 .stop = stop,
3061 .status = status,
3062 .error_handler = error,
3063 .hot_add_disk = raid10_add_disk,
3064 .hot_remove_disk= raid10_remove_disk,
3065 .spare_active = raid10_spare_active,
3066 .sync_request = sync_request,
3067 .quiesce = raid10_quiesce,
3068 .size = raid10_size,
3069 .takeover = raid10_takeover,
3070};
3071
3072static int __init raid_init(void)
3073{
3074 return register_md_personality(&raid10_personality);
3075}
3076
3077static void raid_exit(void)
3078{
3079 unregister_md_personality(&raid10_personality);
3080}
3081
3082module_init(raid_init);
3083module_exit(raid_exit);
3084MODULE_LICENSE("GPL");
3085MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
3086MODULE_ALIAS("md-personality-9"); /* RAID10 */
3087MODULE_ALIAS("md-raid10");
3088MODULE_ALIAS("md-level-10");
1/*
2 * raid10.c : Multiple Devices driver for Linux
3 *
4 * Copyright (C) 2000-2004 Neil Brown
5 *
6 * RAID-10 support for md.
7 *
8 * Base on code in raid1.c. See raid1.c for further copyright information.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/blkdev.h>
24#include <linux/module.h>
25#include <linux/seq_file.h>
26#include <linux/ratelimit.h>
27#include <linux/kthread.h>
28#include "md.h"
29#include "raid10.h"
30#include "raid0.h"
31#include "bitmap.h"
32
33/*
34 * RAID10 provides a combination of RAID0 and RAID1 functionality.
35 * The layout of data is defined by
36 * chunk_size
37 * raid_disks
38 * near_copies (stored in low byte of layout)
39 * far_copies (stored in second byte of layout)
40 * far_offset (stored in bit 16 of layout )
41 *
42 * The data to be stored is divided into chunks using chunksize.
43 * Each device is divided into far_copies sections.
44 * In each section, chunks are laid out in a style similar to raid0, but
45 * near_copies copies of each chunk is stored (each on a different drive).
46 * The starting device for each section is offset near_copies from the starting
47 * device of the previous section.
48 * Thus they are (near_copies*far_copies) of each chunk, and each is on a different
49 * drive.
50 * near_copies and far_copies must be at least one, and their product is at most
51 * raid_disks.
52 *
53 * If far_offset is true, then the far_copies are handled a bit differently.
54 * The copies are still in different stripes, but instead of be very far apart
55 * on disk, there are adjacent stripes.
56 */
57
58/*
59 * Number of guaranteed r10bios in case of extreme VM load:
60 */
61#define NR_RAID10_BIOS 256
62
63/* When there are this many requests queue to be written by
64 * the raid10 thread, we become 'congested' to provide back-pressure
65 * for writeback.
66 */
67static int max_queued_requests = 1024;
68
69static void allow_barrier(struct r10conf *conf);
70static void lower_barrier(struct r10conf *conf);
71static int enough(struct r10conf *conf, int ignore);
72static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
73 int *skipped);
74static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
75static void end_reshape_write(struct bio *bio, int error);
76static void end_reshape(struct r10conf *conf);
77
78static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
79{
80 struct r10conf *conf = data;
81 int size = offsetof(struct r10bio, devs[conf->copies]);
82
83 /* allocate a r10bio with room for raid_disks entries in the
84 * bios array */
85 return kzalloc(size, gfp_flags);
86}
87
88static void r10bio_pool_free(void *r10_bio, void *data)
89{
90 kfree(r10_bio);
91}
92
93/* Maximum size of each resync request */
94#define RESYNC_BLOCK_SIZE (64*1024)
95#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
96/* amount of memory to reserve for resync requests */
97#define RESYNC_WINDOW (1024*1024)
98/* maximum number of concurrent requests, memory permitting */
99#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
100
101/*
102 * When performing a resync, we need to read and compare, so
103 * we need as many pages are there are copies.
104 * When performing a recovery, we need 2 bios, one for read,
105 * one for write (we recover only one drive per r10buf)
106 *
107 */
108static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
109{
110 struct r10conf *conf = data;
111 struct page *page;
112 struct r10bio *r10_bio;
113 struct bio *bio;
114 int i, j;
115 int nalloc;
116
117 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
118 if (!r10_bio)
119 return NULL;
120
121 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
122 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
123 nalloc = conf->copies; /* resync */
124 else
125 nalloc = 2; /* recovery */
126
127 /*
128 * Allocate bios.
129 */
130 for (j = nalloc ; j-- ; ) {
131 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
132 if (!bio)
133 goto out_free_bio;
134 r10_bio->devs[j].bio = bio;
135 if (!conf->have_replacement)
136 continue;
137 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
138 if (!bio)
139 goto out_free_bio;
140 r10_bio->devs[j].repl_bio = bio;
141 }
142 /*
143 * Allocate RESYNC_PAGES data pages and attach them
144 * where needed.
145 */
146 for (j = 0 ; j < nalloc; j++) {
147 struct bio *rbio = r10_bio->devs[j].repl_bio;
148 bio = r10_bio->devs[j].bio;
149 for (i = 0; i < RESYNC_PAGES; i++) {
150 if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
151 &conf->mddev->recovery)) {
152 /* we can share bv_page's during recovery
153 * and reshape */
154 struct bio *rbio = r10_bio->devs[0].bio;
155 page = rbio->bi_io_vec[i].bv_page;
156 get_page(page);
157 } else
158 page = alloc_page(gfp_flags);
159 if (unlikely(!page))
160 goto out_free_pages;
161
162 bio->bi_io_vec[i].bv_page = page;
163 if (rbio)
164 rbio->bi_io_vec[i].bv_page = page;
165 }
166 }
167
168 return r10_bio;
169
170out_free_pages:
171 for ( ; i > 0 ; i--)
172 safe_put_page(bio->bi_io_vec[i-1].bv_page);
173 while (j--)
174 for (i = 0; i < RESYNC_PAGES ; i++)
175 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
176 j = 0;
177out_free_bio:
178 for ( ; j < nalloc; j++) {
179 if (r10_bio->devs[j].bio)
180 bio_put(r10_bio->devs[j].bio);
181 if (r10_bio->devs[j].repl_bio)
182 bio_put(r10_bio->devs[j].repl_bio);
183 }
184 r10bio_pool_free(r10_bio, conf);
185 return NULL;
186}
187
188static void r10buf_pool_free(void *__r10_bio, void *data)
189{
190 int i;
191 struct r10conf *conf = data;
192 struct r10bio *r10bio = __r10_bio;
193 int j;
194
195 for (j=0; j < conf->copies; j++) {
196 struct bio *bio = r10bio->devs[j].bio;
197 if (bio) {
198 for (i = 0; i < RESYNC_PAGES; i++) {
199 safe_put_page(bio->bi_io_vec[i].bv_page);
200 bio->bi_io_vec[i].bv_page = NULL;
201 }
202 bio_put(bio);
203 }
204 bio = r10bio->devs[j].repl_bio;
205 if (bio)
206 bio_put(bio);
207 }
208 r10bio_pool_free(r10bio, conf);
209}
210
211static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
212{
213 int i;
214
215 for (i = 0; i < conf->copies; i++) {
216 struct bio **bio = & r10_bio->devs[i].bio;
217 if (!BIO_SPECIAL(*bio))
218 bio_put(*bio);
219 *bio = NULL;
220 bio = &r10_bio->devs[i].repl_bio;
221 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
222 bio_put(*bio);
223 *bio = NULL;
224 }
225}
226
227static void free_r10bio(struct r10bio *r10_bio)
228{
229 struct r10conf *conf = r10_bio->mddev->private;
230
231 put_all_bios(conf, r10_bio);
232 mempool_free(r10_bio, conf->r10bio_pool);
233}
234
235static void put_buf(struct r10bio *r10_bio)
236{
237 struct r10conf *conf = r10_bio->mddev->private;
238
239 mempool_free(r10_bio, conf->r10buf_pool);
240
241 lower_barrier(conf);
242}
243
244static void reschedule_retry(struct r10bio *r10_bio)
245{
246 unsigned long flags;
247 struct mddev *mddev = r10_bio->mddev;
248 struct r10conf *conf = mddev->private;
249
250 spin_lock_irqsave(&conf->device_lock, flags);
251 list_add(&r10_bio->retry_list, &conf->retry_list);
252 conf->nr_queued ++;
253 spin_unlock_irqrestore(&conf->device_lock, flags);
254
255 /* wake up frozen array... */
256 wake_up(&conf->wait_barrier);
257
258 md_wakeup_thread(mddev->thread);
259}
260
261/*
262 * raid_end_bio_io() is called when we have finished servicing a mirrored
263 * operation and are ready to return a success/failure code to the buffer
264 * cache layer.
265 */
266static void raid_end_bio_io(struct r10bio *r10_bio)
267{
268 struct bio *bio = r10_bio->master_bio;
269 int done;
270 struct r10conf *conf = r10_bio->mddev->private;
271
272 if (bio->bi_phys_segments) {
273 unsigned long flags;
274 spin_lock_irqsave(&conf->device_lock, flags);
275 bio->bi_phys_segments--;
276 done = (bio->bi_phys_segments == 0);
277 spin_unlock_irqrestore(&conf->device_lock, flags);
278 } else
279 done = 1;
280 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
281 clear_bit(BIO_UPTODATE, &bio->bi_flags);
282 if (done) {
283 bio_endio(bio, 0);
284 /*
285 * Wake up any possible resync thread that waits for the device
286 * to go idle.
287 */
288 allow_barrier(conf);
289 }
290 free_r10bio(r10_bio);
291}
292
293/*
294 * Update disk head position estimator based on IRQ completion info.
295 */
296static inline void update_head_pos(int slot, struct r10bio *r10_bio)
297{
298 struct r10conf *conf = r10_bio->mddev->private;
299
300 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
301 r10_bio->devs[slot].addr + (r10_bio->sectors);
302}
303
304/*
305 * Find the disk number which triggered given bio
306 */
307static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
308 struct bio *bio, int *slotp, int *replp)
309{
310 int slot;
311 int repl = 0;
312
313 for (slot = 0; slot < conf->copies; slot++) {
314 if (r10_bio->devs[slot].bio == bio)
315 break;
316 if (r10_bio->devs[slot].repl_bio == bio) {
317 repl = 1;
318 break;
319 }
320 }
321
322 BUG_ON(slot == conf->copies);
323 update_head_pos(slot, r10_bio);
324
325 if (slotp)
326 *slotp = slot;
327 if (replp)
328 *replp = repl;
329 return r10_bio->devs[slot].devnum;
330}
331
332static void raid10_end_read_request(struct bio *bio, int error)
333{
334 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
335 struct r10bio *r10_bio = bio->bi_private;
336 int slot, dev;
337 struct md_rdev *rdev;
338 struct r10conf *conf = r10_bio->mddev->private;
339
340
341 slot = r10_bio->read_slot;
342 dev = r10_bio->devs[slot].devnum;
343 rdev = r10_bio->devs[slot].rdev;
344 /*
345 * this branch is our 'one mirror IO has finished' event handler:
346 */
347 update_head_pos(slot, r10_bio);
348
349 if (uptodate) {
350 /*
351 * Set R10BIO_Uptodate in our master bio, so that
352 * we will return a good error code to the higher
353 * levels even if IO on some other mirrored buffer fails.
354 *
355 * The 'master' represents the composite IO operation to
356 * user-side. So if something waits for IO, then it will
357 * wait for the 'master' bio.
358 */
359 set_bit(R10BIO_Uptodate, &r10_bio->state);
360 } else {
361 /* If all other devices that store this block have
362 * failed, we want to return the error upwards rather
363 * than fail the last device. Here we redefine
364 * "uptodate" to mean "Don't want to retry"
365 */
366 unsigned long flags;
367 spin_lock_irqsave(&conf->device_lock, flags);
368 if (!enough(conf, rdev->raid_disk))
369 uptodate = 1;
370 spin_unlock_irqrestore(&conf->device_lock, flags);
371 }
372 if (uptodate) {
373 raid_end_bio_io(r10_bio);
374 rdev_dec_pending(rdev, conf->mddev);
375 } else {
376 /*
377 * oops, read error - keep the refcount on the rdev
378 */
379 char b[BDEVNAME_SIZE];
380 printk_ratelimited(KERN_ERR
381 "md/raid10:%s: %s: rescheduling sector %llu\n",
382 mdname(conf->mddev),
383 bdevname(rdev->bdev, b),
384 (unsigned long long)r10_bio->sector);
385 set_bit(R10BIO_ReadError, &r10_bio->state);
386 reschedule_retry(r10_bio);
387 }
388}
389
390static void close_write(struct r10bio *r10_bio)
391{
392 /* clear the bitmap if all writes complete successfully */
393 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
394 r10_bio->sectors,
395 !test_bit(R10BIO_Degraded, &r10_bio->state),
396 0);
397 md_write_end(r10_bio->mddev);
398}
399
400static void one_write_done(struct r10bio *r10_bio)
401{
402 if (atomic_dec_and_test(&r10_bio->remaining)) {
403 if (test_bit(R10BIO_WriteError, &r10_bio->state))
404 reschedule_retry(r10_bio);
405 else {
406 close_write(r10_bio);
407 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
408 reschedule_retry(r10_bio);
409 else
410 raid_end_bio_io(r10_bio);
411 }
412 }
413}
414
415static void raid10_end_write_request(struct bio *bio, int error)
416{
417 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
418 struct r10bio *r10_bio = bio->bi_private;
419 int dev;
420 int dec_rdev = 1;
421 struct r10conf *conf = r10_bio->mddev->private;
422 int slot, repl;
423 struct md_rdev *rdev = NULL;
424
425 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
426
427 if (repl)
428 rdev = conf->mirrors[dev].replacement;
429 if (!rdev) {
430 smp_rmb();
431 repl = 0;
432 rdev = conf->mirrors[dev].rdev;
433 }
434 /*
435 * this branch is our 'one mirror IO has finished' event handler:
436 */
437 if (!uptodate) {
438 if (repl)
439 /* Never record new bad blocks to replacement,
440 * just fail it.
441 */
442 md_error(rdev->mddev, rdev);
443 else {
444 set_bit(WriteErrorSeen, &rdev->flags);
445 if (!test_and_set_bit(WantReplacement, &rdev->flags))
446 set_bit(MD_RECOVERY_NEEDED,
447 &rdev->mddev->recovery);
448 set_bit(R10BIO_WriteError, &r10_bio->state);
449 dec_rdev = 0;
450 }
451 } else {
452 /*
453 * Set R10BIO_Uptodate in our master bio, so that
454 * we will return a good error code for to the higher
455 * levels even if IO on some other mirrored buffer fails.
456 *
457 * The 'master' represents the composite IO operation to
458 * user-side. So if something waits for IO, then it will
459 * wait for the 'master' bio.
460 */
461 sector_t first_bad;
462 int bad_sectors;
463
464 set_bit(R10BIO_Uptodate, &r10_bio->state);
465
466 /* Maybe we can clear some bad blocks. */
467 if (is_badblock(rdev,
468 r10_bio->devs[slot].addr,
469 r10_bio->sectors,
470 &first_bad, &bad_sectors)) {
471 bio_put(bio);
472 if (repl)
473 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
474 else
475 r10_bio->devs[slot].bio = IO_MADE_GOOD;
476 dec_rdev = 0;
477 set_bit(R10BIO_MadeGood, &r10_bio->state);
478 }
479 }
480
481 /*
482 *
483 * Let's see if all mirrored write operations have finished
484 * already.
485 */
486 one_write_done(r10_bio);
487 if (dec_rdev)
488 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
489}
490
491/*
492 * RAID10 layout manager
493 * As well as the chunksize and raid_disks count, there are two
494 * parameters: near_copies and far_copies.
495 * near_copies * far_copies must be <= raid_disks.
496 * Normally one of these will be 1.
497 * If both are 1, we get raid0.
498 * If near_copies == raid_disks, we get raid1.
499 *
500 * Chunks are laid out in raid0 style with near_copies copies of the
501 * first chunk, followed by near_copies copies of the next chunk and
502 * so on.
503 * If far_copies > 1, then after 1/far_copies of the array has been assigned
504 * as described above, we start again with a device offset of near_copies.
505 * So we effectively have another copy of the whole array further down all
506 * the drives, but with blocks on different drives.
507 * With this layout, and block is never stored twice on the one device.
508 *
509 * raid10_find_phys finds the sector offset of a given virtual sector
510 * on each device that it is on.
511 *
512 * raid10_find_virt does the reverse mapping, from a device and a
513 * sector offset to a virtual address
514 */
515
516static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
517{
518 int n,f;
519 sector_t sector;
520 sector_t chunk;
521 sector_t stripe;
522 int dev;
523 int slot = 0;
524
525 /* now calculate first sector/dev */
526 chunk = r10bio->sector >> geo->chunk_shift;
527 sector = r10bio->sector & geo->chunk_mask;
528
529 chunk *= geo->near_copies;
530 stripe = chunk;
531 dev = sector_div(stripe, geo->raid_disks);
532 if (geo->far_offset)
533 stripe *= geo->far_copies;
534
535 sector += stripe << geo->chunk_shift;
536
537 /* and calculate all the others */
538 for (n = 0; n < geo->near_copies; n++) {
539 int d = dev;
540 sector_t s = sector;
541 r10bio->devs[slot].addr = sector;
542 r10bio->devs[slot].devnum = d;
543 slot++;
544
545 for (f = 1; f < geo->far_copies; f++) {
546 d += geo->near_copies;
547 if (d >= geo->raid_disks)
548 d -= geo->raid_disks;
549 s += geo->stride;
550 r10bio->devs[slot].devnum = d;
551 r10bio->devs[slot].addr = s;
552 slot++;
553 }
554 dev++;
555 if (dev >= geo->raid_disks) {
556 dev = 0;
557 sector += (geo->chunk_mask + 1);
558 }
559 }
560}
561
562static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
563{
564 struct geom *geo = &conf->geo;
565
566 if (conf->reshape_progress != MaxSector &&
567 ((r10bio->sector >= conf->reshape_progress) !=
568 conf->mddev->reshape_backwards)) {
569 set_bit(R10BIO_Previous, &r10bio->state);
570 geo = &conf->prev;
571 } else
572 clear_bit(R10BIO_Previous, &r10bio->state);
573
574 __raid10_find_phys(geo, r10bio);
575}
576
577static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
578{
579 sector_t offset, chunk, vchunk;
580 /* Never use conf->prev as this is only called during resync
581 * or recovery, so reshape isn't happening
582 */
583 struct geom *geo = &conf->geo;
584
585 offset = sector & geo->chunk_mask;
586 if (geo->far_offset) {
587 int fc;
588 chunk = sector >> geo->chunk_shift;
589 fc = sector_div(chunk, geo->far_copies);
590 dev -= fc * geo->near_copies;
591 if (dev < 0)
592 dev += geo->raid_disks;
593 } else {
594 while (sector >= geo->stride) {
595 sector -= geo->stride;
596 if (dev < geo->near_copies)
597 dev += geo->raid_disks - geo->near_copies;
598 else
599 dev -= geo->near_copies;
600 }
601 chunk = sector >> geo->chunk_shift;
602 }
603 vchunk = chunk * geo->raid_disks + dev;
604 sector_div(vchunk, geo->near_copies);
605 return (vchunk << geo->chunk_shift) + offset;
606}
607
608/**
609 * raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
610 * @q: request queue
611 * @bvm: properties of new bio
612 * @biovec: the request that could be merged to it.
613 *
614 * Return amount of bytes we can accept at this offset
615 * This requires checking for end-of-chunk if near_copies != raid_disks,
616 * and for subordinate merge_bvec_fns if merge_check_needed.
617 */
618static int raid10_mergeable_bvec(struct request_queue *q,
619 struct bvec_merge_data *bvm,
620 struct bio_vec *biovec)
621{
622 struct mddev *mddev = q->queuedata;
623 struct r10conf *conf = mddev->private;
624 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
625 int max;
626 unsigned int chunk_sectors;
627 unsigned int bio_sectors = bvm->bi_size >> 9;
628 struct geom *geo = &conf->geo;
629
630 chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
631 if (conf->reshape_progress != MaxSector &&
632 ((sector >= conf->reshape_progress) !=
633 conf->mddev->reshape_backwards))
634 geo = &conf->prev;
635
636 if (geo->near_copies < geo->raid_disks) {
637 max = (chunk_sectors - ((sector & (chunk_sectors - 1))
638 + bio_sectors)) << 9;
639 if (max < 0)
640 /* bio_add cannot handle a negative return */
641 max = 0;
642 if (max <= biovec->bv_len && bio_sectors == 0)
643 return biovec->bv_len;
644 } else
645 max = biovec->bv_len;
646
647 if (mddev->merge_check_needed) {
648 struct {
649 struct r10bio r10_bio;
650 struct r10dev devs[conf->copies];
651 } on_stack;
652 struct r10bio *r10_bio = &on_stack.r10_bio;
653 int s;
654 if (conf->reshape_progress != MaxSector) {
655 /* Cannot give any guidance during reshape */
656 if (max <= biovec->bv_len && bio_sectors == 0)
657 return biovec->bv_len;
658 return 0;
659 }
660 r10_bio->sector = sector;
661 raid10_find_phys(conf, r10_bio);
662 rcu_read_lock();
663 for (s = 0; s < conf->copies; s++) {
664 int disk = r10_bio->devs[s].devnum;
665 struct md_rdev *rdev = rcu_dereference(
666 conf->mirrors[disk].rdev);
667 if (rdev && !test_bit(Faulty, &rdev->flags)) {
668 struct request_queue *q =
669 bdev_get_queue(rdev->bdev);
670 if (q->merge_bvec_fn) {
671 bvm->bi_sector = r10_bio->devs[s].addr
672 + rdev->data_offset;
673 bvm->bi_bdev = rdev->bdev;
674 max = min(max, q->merge_bvec_fn(
675 q, bvm, biovec));
676 }
677 }
678 rdev = rcu_dereference(conf->mirrors[disk].replacement);
679 if (rdev && !test_bit(Faulty, &rdev->flags)) {
680 struct request_queue *q =
681 bdev_get_queue(rdev->bdev);
682 if (q->merge_bvec_fn) {
683 bvm->bi_sector = r10_bio->devs[s].addr
684 + rdev->data_offset;
685 bvm->bi_bdev = rdev->bdev;
686 max = min(max, q->merge_bvec_fn(
687 q, bvm, biovec));
688 }
689 }
690 }
691 rcu_read_unlock();
692 }
693 return max;
694}
695
696/*
697 * This routine returns the disk from which the requested read should
698 * be done. There is a per-array 'next expected sequential IO' sector
699 * number - if this matches on the next IO then we use the last disk.
700 * There is also a per-disk 'last know head position' sector that is
701 * maintained from IRQ contexts, both the normal and the resync IO
702 * completion handlers update this position correctly. If there is no
703 * perfect sequential match then we pick the disk whose head is closest.
704 *
705 * If there are 2 mirrors in the same 2 devices, performance degrades
706 * because position is mirror, not device based.
707 *
708 * The rdev for the device selected will have nr_pending incremented.
709 */
710
711/*
712 * FIXME: possibly should rethink readbalancing and do it differently
713 * depending on near_copies / far_copies geometry.
714 */
715static struct md_rdev *read_balance(struct r10conf *conf,
716 struct r10bio *r10_bio,
717 int *max_sectors)
718{
719 const sector_t this_sector = r10_bio->sector;
720 int disk, slot;
721 int sectors = r10_bio->sectors;
722 int best_good_sectors;
723 sector_t new_distance, best_dist;
724 struct md_rdev *rdev, *best_rdev;
725 int do_balance;
726 int best_slot;
727 struct geom *geo = &conf->geo;
728
729 raid10_find_phys(conf, r10_bio);
730 rcu_read_lock();
731retry:
732 sectors = r10_bio->sectors;
733 best_slot = -1;
734 best_rdev = NULL;
735 best_dist = MaxSector;
736 best_good_sectors = 0;
737 do_balance = 1;
738 /*
739 * Check if we can balance. We can balance on the whole
740 * device if no resync is going on (recovery is ok), or below
741 * the resync window. We take the first readable disk when
742 * above the resync window.
743 */
744 if (conf->mddev->recovery_cp < MaxSector
745 && (this_sector + sectors >= conf->next_resync))
746 do_balance = 0;
747
748 for (slot = 0; slot < conf->copies ; slot++) {
749 sector_t first_bad;
750 int bad_sectors;
751 sector_t dev_sector;
752
753 if (r10_bio->devs[slot].bio == IO_BLOCKED)
754 continue;
755 disk = r10_bio->devs[slot].devnum;
756 rdev = rcu_dereference(conf->mirrors[disk].replacement);
757 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
758 test_bit(Unmerged, &rdev->flags) ||
759 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
760 rdev = rcu_dereference(conf->mirrors[disk].rdev);
761 if (rdev == NULL ||
762 test_bit(Faulty, &rdev->flags) ||
763 test_bit(Unmerged, &rdev->flags))
764 continue;
765 if (!test_bit(In_sync, &rdev->flags) &&
766 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
767 continue;
768
769 dev_sector = r10_bio->devs[slot].addr;
770 if (is_badblock(rdev, dev_sector, sectors,
771 &first_bad, &bad_sectors)) {
772 if (best_dist < MaxSector)
773 /* Already have a better slot */
774 continue;
775 if (first_bad <= dev_sector) {
776 /* Cannot read here. If this is the
777 * 'primary' device, then we must not read
778 * beyond 'bad_sectors' from another device.
779 */
780 bad_sectors -= (dev_sector - first_bad);
781 if (!do_balance && sectors > bad_sectors)
782 sectors = bad_sectors;
783 if (best_good_sectors > sectors)
784 best_good_sectors = sectors;
785 } else {
786 sector_t good_sectors =
787 first_bad - dev_sector;
788 if (good_sectors > best_good_sectors) {
789 best_good_sectors = good_sectors;
790 best_slot = slot;
791 best_rdev = rdev;
792 }
793 if (!do_balance)
794 /* Must read from here */
795 break;
796 }
797 continue;
798 } else
799 best_good_sectors = sectors;
800
801 if (!do_balance)
802 break;
803
804 /* This optimisation is debatable, and completely destroys
805 * sequential read speed for 'far copies' arrays. So only
806 * keep it for 'near' arrays, and review those later.
807 */
808 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
809 break;
810
811 /* for far > 1 always use the lowest address */
812 if (geo->far_copies > 1)
813 new_distance = r10_bio->devs[slot].addr;
814 else
815 new_distance = abs(r10_bio->devs[slot].addr -
816 conf->mirrors[disk].head_position);
817 if (new_distance < best_dist) {
818 best_dist = new_distance;
819 best_slot = slot;
820 best_rdev = rdev;
821 }
822 }
823 if (slot >= conf->copies) {
824 slot = best_slot;
825 rdev = best_rdev;
826 }
827
828 if (slot >= 0) {
829 atomic_inc(&rdev->nr_pending);
830 if (test_bit(Faulty, &rdev->flags)) {
831 /* Cannot risk returning a device that failed
832 * before we inc'ed nr_pending
833 */
834 rdev_dec_pending(rdev, conf->mddev);
835 goto retry;
836 }
837 r10_bio->read_slot = slot;
838 } else
839 rdev = NULL;
840 rcu_read_unlock();
841 *max_sectors = best_good_sectors;
842
843 return rdev;
844}
845
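/* Congestion callback for the block layer: report the array as congested
 * when too many write requests are already queued, or when any member
 * device's backing queue reports itself congested.
 */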
846static int raid10_congested(void *data, int bits)
847{
848 struct mddev *mddev = data;
849 struct r10conf *conf = mddev->private;
850 int i, ret = 0;
851
852 if ((bits & (1 << BDI_async_congested)) &&
853 conf->pending_count >= max_queued_requests)
854 return 1;
855
856 if (mddev_congested(mddev, bits))
857 return 1;
858 rcu_read_lock();
859 for (i = 0;
860 (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
861 && ret == 0;
862 i++) {
863 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
864 if (rdev && !test_bit(Faulty, &rdev->flags)) {
865 struct request_queue *q = bdev_get_queue(rdev->bdev);
866
867 ret |= bdi_congested(&q->backing_dev_info, bits);
868 }
869 }
870 rcu_read_unlock();
871 return ret;
872}
873
874static void flush_pending_writes(struct r10conf *conf)
875{
876 /* Any writes that have been queued but are awaiting
877 * bitmap updates get flushed here.
878 */
879 spin_lock_irq(&conf->device_lock);
880
881 if (conf->pending_bio_list.head) {
882 struct bio *bio;
883 bio = bio_list_get(&conf->pending_bio_list);
884 conf->pending_count = 0;
885 spin_unlock_irq(&conf->device_lock);
886 /* flush any pending bitmap writes to disk
887 * before proceeding w/ I/O */
888 bitmap_unplug(conf->mddev->bitmap);
889 wake_up(&conf->wait_barrier);
890
891 while (bio) { /* submit pending writes */
892 struct bio *next = bio->bi_next;
893 bio->bi_next = NULL;
894 generic_make_request(bio);
895 bio = next;
896 }
897 } else
898 spin_unlock_irq(&conf->device_lock);
899}
900
901/* Barriers....
902 * Sometimes we need to suspend IO while we do something else,
903 * either some resync/recovery, or reconfigure the array.
904 * To do this we raise a 'barrier'.
905 * The 'barrier' is a counter that can be raised multiple times
906 * to count how many activities are happening which preclude
907 * normal IO.
908 * We can only raise the barrier if there is no pending IO.
909 * i.e. if nr_pending == 0.
910 * We choose only to raise the barrier if no-one is waiting for the
911 * barrier to go down. This means that as soon as an IO request
912 * is ready, no other operations which require a barrier will start
913 * until the IO request has had a chance.
914 *
915 * So: regular IO calls 'wait_barrier'. When that returns there
916 * is no background IO happening. It must arrange to call
917 * allow_barrier when it has finished its IO.
918 * Background IO must call raise_barrier. Once that returns
919 * there is no normal IO happening. It must arrange to call
920 * lower_barrier when the particular background IO completes.
921 */
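/*
 * Typical pairing (illustrative):
 *
 *   normal IO path:                  background (resync/recovery) path:
 *     wait_barrier(conf);              raise_barrier(conf, 0);
 *     ...submit the request...         ...issue sync/recovery IO...
 *     allow_barrier(conf);             lower_barrier(conf);
 */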
922
923static void raise_barrier(struct r10conf *conf, int force)
924{
925 BUG_ON(force && !conf->barrier);
926 spin_lock_irq(&conf->resync_lock);
927
928 /* Wait until no block IO is waiting (unless 'force') */
929 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
930 conf->resync_lock, );
931
932 /* block any new IO from starting */
933 conf->barrier++;
934
935 /* Now wait for all pending IO to complete */
936 wait_event_lock_irq(conf->wait_barrier,
937 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
938 conf->resync_lock, );
939
940 spin_unlock_irq(&conf->resync_lock);
941}
942
943static void lower_barrier(struct r10conf *conf)
944{
945 unsigned long flags;
946 spin_lock_irqsave(&conf->resync_lock, flags);
947 conf->barrier--;
948 spin_unlock_irqrestore(&conf->resync_lock, flags);
949 wake_up(&conf->wait_barrier);
950}
951
952static void wait_barrier(struct r10conf *conf)
953{
954 spin_lock_irq(&conf->resync_lock);
955 if (conf->barrier) {
956 conf->nr_waiting++;
957 /* Wait for the barrier to drop.
958 * However if there are already pending
959 * requests (preventing the barrier from
960 * rising completely), and the
961 * pre-process bio queue isn't empty,
962 * then don't wait, as we need to empty
963 * that queue to get the nr_pending
964 * count down.
965 */
966 wait_event_lock_irq(conf->wait_barrier,
967 !conf->barrier ||
968 (conf->nr_pending &&
969 current->bio_list &&
970 !bio_list_empty(current->bio_list)),
971 conf->resync_lock,
972 );
973 conf->nr_waiting--;
974 }
975 conf->nr_pending++;
976 spin_unlock_irq(&conf->resync_lock);
977}
978
979static void allow_barrier(struct r10conf *conf)
980{
981 unsigned long flags;
982 spin_lock_irqsave(&conf->resync_lock, flags);
983 conf->nr_pending--;
984 spin_unlock_irqrestore(&conf->resync_lock, flags);
985 wake_up(&conf->wait_barrier);
986}
987
988static void freeze_array(struct r10conf *conf)
989{
990 /* stop sync IO and normal IO and wait for everything to
991 * go quiet.
992 * We increment barrier and nr_waiting, and then
993 * wait until nr_pending matches nr_queued+1.
994 * This is called in the context of one normal IO request
995 * that has failed. Thus any sync request that might be pending
996 * will be blocked by nr_pending, and we need to wait for
997 * pending IO requests to complete or be queued for re-try.
998 * Thus the number queued (nr_queued) plus this request (1)
999 * must match the number of pending IOs (nr_pending) before
1000 * we continue.
1001 */
1002 spin_lock_irq(&conf->resync_lock);
1003 conf->barrier++;
1004 conf->nr_waiting++;
1005 wait_event_lock_irq(conf->wait_barrier,
1006 conf->nr_pending == conf->nr_queued+1,
1007 conf->resync_lock,
1008 flush_pending_writes(conf));
1009
1010 spin_unlock_irq(&conf->resync_lock);
1011}
1012
1013static void unfreeze_array(struct r10conf *conf)
1014{
1015 /* reverse the effect of the freeze */
1016 spin_lock_irq(&conf->resync_lock);
1017 conf->barrier--;
1018 conf->nr_waiting--;
1019 wake_up(&conf->wait_barrier);
1020 spin_unlock_irq(&conf->resync_lock);
1021}
1022
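/* Pick which data offset applies for this r10_bio on 'rdev': when no
 * reshape is in progress, or when the bio is marked as using the previous
 * geometry, blocks live at data_offset; otherwise (mid-reshape, new
 * geometry) they live at new_data_offset.
 */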
1023static sector_t choose_data_offset(struct r10bio *r10_bio,
1024 struct md_rdev *rdev)
1025{
1026 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1027 test_bit(R10BIO_Previous, &r10_bio->state))
1028 return rdev->data_offset;
1029 else
1030 return rdev->new_data_offset;
1031}
1032
1033static void make_request(struct mddev *mddev, struct bio * bio)
1034{
1035 struct r10conf *conf = mddev->private;
1036 struct r10bio *r10_bio;
1037 struct bio *read_bio;
1038 int i;
1039 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1040 int chunk_sects = chunk_mask + 1;
1041 const int rw = bio_data_dir(bio);
1042 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1043 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1044 unsigned long flags;
1045 struct md_rdev *blocked_rdev;
1046 int sectors_handled;
1047 int max_sectors;
1048 int sectors;
1049
1050 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1051 md_flush_request(mddev, bio);
1052 return;
1053 }
1054
1055 /* If this request crosses a chunk boundary, we need to
1056 * split it. This will only happen for 1 PAGE (or less) requests.
1057 */
1058 if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
1059 > chunk_sects
1060 && (conf->geo.near_copies < conf->geo.raid_disks
1061 || conf->prev.near_copies < conf->prev.raid_disks))) {
1062 struct bio_pair *bp;
1063 /* Sanity check -- queue functions should prevent this happening */
1064 if (bio->bi_vcnt != 1 ||
1065 bio->bi_idx != 0)
1066 goto bad_map;
1067 /* This is a one page bio that upper layers
1068 * refuse to split for us, so we need to split it.
1069 */
1070 bp = bio_split(bio,
1071 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
1072
1073 /* Each of these 'make_request' calls will call 'wait_barrier'.
1074 * If the first succeeds but the second blocks due to the resync
1075 * thread raising the barrier, we will deadlock because the
1076 * IO to the underlying device will be queued in generic_make_request
1077 * and will never complete, so will never reduce nr_pending.
1078 * So increment nr_waiting here so no new raise_barriers will
1079 * succeed, and so the second wait_barrier cannot block.
1080 */
1081 spin_lock_irq(&conf->resync_lock);
1082 conf->nr_waiting++;
1083 spin_unlock_irq(&conf->resync_lock);
1084
1085 make_request(mddev, &bp->bio1);
1086 make_request(mddev, &bp->bio2);
1087
1088 spin_lock_irq(&conf->resync_lock);
1089 conf->nr_waiting--;
1090 wake_up(&conf->wait_barrier);
1091 spin_unlock_irq(&conf->resync_lock);
1092
1093 bio_pair_release(bp);
1094 return;
1095 bad_map:
1096 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1097 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1098 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
1099
1100 bio_io_error(bio);
1101 return;
1102 }
1103
1104 md_write_start(mddev, bio);
1105
1106 /*
1107 * Register the new request and wait if the reconstruction
1108 * thread has put up a bar for new requests.
1109 * Continue immediately if no resync is active currently.
1110 */
1111 wait_barrier(conf);
1112
1113 sectors = bio->bi_size >> 9;
1114 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1115 bio->bi_sector < conf->reshape_progress &&
1116 bio->bi_sector + sectors > conf->reshape_progress) {
1117 /* IO spans the reshape position. Need to wait for
1118 * reshape to pass
1119 */
1120 allow_barrier(conf);
1121 wait_event(conf->wait_barrier,
1122 conf->reshape_progress <= bio->bi_sector ||
1123 conf->reshape_progress >= bio->bi_sector + sectors);
1124 wait_barrier(conf);
1125 }
1126 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1127 bio_data_dir(bio) == WRITE &&
1128 (mddev->reshape_backwards
1129 ? (bio->bi_sector < conf->reshape_safe &&
1130 bio->bi_sector + sectors > conf->reshape_progress)
1131 : (bio->bi_sector + sectors > conf->reshape_safe &&
1132 bio->bi_sector < conf->reshape_progress))) {
1133 /* Need to update reshape_position in metadata */
1134 mddev->reshape_position = conf->reshape_progress;
1135 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1136 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1137 md_wakeup_thread(mddev->thread);
1138 wait_event(mddev->sb_wait,
1139 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1140
1141 conf->reshape_safe = mddev->reshape_position;
1142 }
1143
1144 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1145
1146 r10_bio->master_bio = bio;
1147 r10_bio->sectors = sectors;
1148
1149 r10_bio->mddev = mddev;
1150 r10_bio->sector = bio->bi_sector;
1151 r10_bio->state = 0;
1152
1153 /* We might need to issue multiple reads to different
1154 * devices if there are bad blocks around, so we keep
1155 * track of the number of reads in bio->bi_phys_segments.
1156 * If this is 0, there is only one r10_bio and no locking
1157 * will be needed when the request completes. If it is
1158 * non-zero, then it is the number of not-completed requests.
1159 */
1160 bio->bi_phys_segments = 0;
1161 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1162
1163 if (rw == READ) {
1164 /*
1165 * read balancing logic:
1166 */
1167 struct md_rdev *rdev;
1168 int slot;
1169
1170read_again:
1171 rdev = read_balance(conf, r10_bio, &max_sectors);
1172 if (!rdev) {
1173 raid_end_bio_io(r10_bio);
1174 return;
1175 }
1176 slot = r10_bio->read_slot;
1177
1178 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1179 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
1180 max_sectors);
1181
1182 r10_bio->devs[slot].bio = read_bio;
1183 r10_bio->devs[slot].rdev = rdev;
1184
1185 read_bio->bi_sector = r10_bio->devs[slot].addr +
1186 choose_data_offset(r10_bio, rdev);
1187 read_bio->bi_bdev = rdev->bdev;
1188 read_bio->bi_end_io = raid10_end_read_request;
1189 read_bio->bi_rw = READ | do_sync;
1190 read_bio->bi_private = r10_bio;
1191
1192 if (max_sectors < r10_bio->sectors) {
1193 /* Could not read all from this device, so we will
1194 * need another r10_bio.
1195 */
1196 sectors_handled = (r10_bio->sector + max_sectors
1197 - bio->bi_sector);
1198 r10_bio->sectors = max_sectors;
1199 spin_lock_irq(&conf->device_lock);
1200 if (bio->bi_phys_segments == 0)
1201 bio->bi_phys_segments = 2;
1202 else
1203 bio->bi_phys_segments++;
1204 spin_unlock_irq(&conf->device_lock);
1205 /* Cannot call generic_make_request directly
1206 * as that will be queued in __generic_make_request
1207 * and subsequent mempool_alloc might block
1208 * waiting for it. so hand bio over to raid10d.
1209 */
1210 reschedule_retry(r10_bio);
1211
1212 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1213
1214 r10_bio->master_bio = bio;
1215 r10_bio->sectors = ((bio->bi_size >> 9)
1216 - sectors_handled);
1217 r10_bio->state = 0;
1218 r10_bio->mddev = mddev;
1219 r10_bio->sector = bio->bi_sector + sectors_handled;
1220 goto read_again;
1221 } else
1222 generic_make_request(read_bio);
1223 return;
1224 }
1225
1226 /*
1227 * WRITE:
1228 */
1229 if (conf->pending_count >= max_queued_requests) {
1230 md_wakeup_thread(mddev->thread);
1231 wait_event(conf->wait_barrier,
1232 conf->pending_count < max_queued_requests);
1233 }
1234 /* first select target devices under rcu_lock and
1235 * inc refcount on their rdev. Record them by setting
1236 * bios[x] to bio
1237 * If there are known/acknowledged bad blocks on any device
1238 * on which we have seen a write error, we want to avoid
1239 * writing to those blocks. This potentially requires several
1240 * writes to write around the bad blocks. Each set of writes
1241 * gets its own r10_bio with a set of bios attached. The number
1242 * of r10_bios is recorded in bio->bi_phys_segments just as with
1243 * the read case.
1244 */
1245
1246 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1247 raid10_find_phys(conf, r10_bio);
1248retry_write:
1249 blocked_rdev = NULL;
1250 rcu_read_lock();
1251 max_sectors = r10_bio->sectors;
1252
1253 for (i = 0; i < conf->copies; i++) {
1254 int d = r10_bio->devs[i].devnum;
1255 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1256 struct md_rdev *rrdev = rcu_dereference(
1257 conf->mirrors[d].replacement);
1258 if (rdev == rrdev)
1259 rrdev = NULL;
1260 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1261 atomic_inc(&rdev->nr_pending);
1262 blocked_rdev = rdev;
1263 break;
1264 }
1265 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1266 atomic_inc(&rrdev->nr_pending);
1267 blocked_rdev = rrdev;
1268 break;
1269 }
1270 if (rrdev && (test_bit(Faulty, &rrdev->flags)
1271 || test_bit(Unmerged, &rrdev->flags)))
1272 rrdev = NULL;
1273
1274 r10_bio->devs[i].bio = NULL;
1275 r10_bio->devs[i].repl_bio = NULL;
1276 if (!rdev || test_bit(Faulty, &rdev->flags) ||
1277 test_bit(Unmerged, &rdev->flags)) {
1278 set_bit(R10BIO_Degraded, &r10_bio->state);
1279 continue;
1280 }
1281 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1282 sector_t first_bad;
1283 sector_t dev_sector = r10_bio->devs[i].addr;
1284 int bad_sectors;
1285 int is_bad;
1286
1287 is_bad = is_badblock(rdev, dev_sector,
1288 max_sectors,
1289 &first_bad, &bad_sectors);
1290 if (is_bad < 0) {
1291 /* Mustn't write here until the bad block
1292 * is acknowledged
1293 */
1294 atomic_inc(&rdev->nr_pending);
1295 set_bit(BlockedBadBlocks, &rdev->flags);
1296 blocked_rdev = rdev;
1297 break;
1298 }
1299 if (is_bad && first_bad <= dev_sector) {
1300 /* Cannot write here at all */
1301 bad_sectors -= (dev_sector - first_bad);
1302 if (bad_sectors < max_sectors)
1303 /* Mustn't write more than bad_sectors
1304 * to other devices yet
1305 */
1306 max_sectors = bad_sectors;
1307 /* We don't set R10BIO_Degraded as that
1308 * only applies if the disk is missing,
1309 * so it might be re-added, and we want to
1310 * know to recover this chunk.
1311 * In this case the device is here, and the
1312 * fact that this chunk is not in-sync is
1313 * recorded in the bad block log.
1314 */
1315 continue;
1316 }
1317 if (is_bad) {
1318 int good_sectors = first_bad - dev_sector;
1319 if (good_sectors < max_sectors)
1320 max_sectors = good_sectors;
1321 }
1322 }
1323 r10_bio->devs[i].bio = bio;
1324 atomic_inc(&rdev->nr_pending);
1325 if (rrdev) {
1326 r10_bio->devs[i].repl_bio = bio;
1327 atomic_inc(&rrdev->nr_pending);
1328 }
1329 }
1330 rcu_read_unlock();
1331
1332 if (unlikely(blocked_rdev)) {
1333 /* Have to wait for this device to get unblocked, then retry */
1334 int j;
1335 int d;
1336
1337 for (j = 0; j < i; j++) {
1338 if (r10_bio->devs[j].bio) {
1339 d = r10_bio->devs[j].devnum;
1340 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1341 }
1342 if (r10_bio->devs[j].repl_bio) {
1343 struct md_rdev *rdev;
1344 d = r10_bio->devs[j].devnum;
1345 rdev = conf->mirrors[d].replacement;
1346 if (!rdev) {
1347 /* Race with remove_disk */
1348 smp_mb();
1349 rdev = conf->mirrors[d].rdev;
1350 }
1351 rdev_dec_pending(rdev, mddev);
1352 }
1353 }
1354 allow_barrier(conf);
1355 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1356 wait_barrier(conf);
1357 goto retry_write;
1358 }
1359
1360 if (max_sectors < r10_bio->sectors) {
1361 /* We are splitting this into multiple parts, so
1362 * we need to prepare for allocating another r10_bio.
1363 */
1364 r10_bio->sectors = max_sectors;
1365 spin_lock_irq(&conf->device_lock);
1366 if (bio->bi_phys_segments == 0)
1367 bio->bi_phys_segments = 2;
1368 else
1369 bio->bi_phys_segments++;
1370 spin_unlock_irq(&conf->device_lock);
1371 }
1372 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1373
1374 atomic_set(&r10_bio->remaining, 1);
1375 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1376
1377 for (i = 0; i < conf->copies; i++) {
1378 struct bio *mbio;
1379 int d = r10_bio->devs[i].devnum;
1380 if (!r10_bio->devs[i].bio)
1381 continue;
1382
1383 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1384 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1385 max_sectors);
1386 r10_bio->devs[i].bio = mbio;
1387
1388 mbio->bi_sector = (r10_bio->devs[i].addr+
1389 choose_data_offset(r10_bio,
1390 conf->mirrors[d].rdev));
1391 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1392 mbio->bi_end_io = raid10_end_write_request;
1393 mbio->bi_rw = WRITE | do_sync | do_fua;
1394 mbio->bi_private = r10_bio;
1395
1396 atomic_inc(&r10_bio->remaining);
1397 spin_lock_irqsave(&conf->device_lock, flags);
1398 bio_list_add(&conf->pending_bio_list, mbio);
1399 conf->pending_count++;
1400 spin_unlock_irqrestore(&conf->device_lock, flags);
1401 if (!mddev_check_plugged(mddev))
1402 md_wakeup_thread(mddev->thread);
1403
1404 if (!r10_bio->devs[i].repl_bio)
1405 continue;
1406
1407 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1408 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1409 max_sectors);
1410 r10_bio->devs[i].repl_bio = mbio;
1411
1412 /* We are actively writing to the original device
1413 * so it cannot disappear, so the replacement cannot
1414 * become NULL here
1415 */
1416 mbio->bi_sector = (r10_bio->devs[i].addr +
1417 choose_data_offset(
1418 r10_bio,
1419 conf->mirrors[d].replacement));
1420 mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
1421 mbio->bi_end_io = raid10_end_write_request;
1422 mbio->bi_rw = WRITE | do_sync | do_fua;
1423 mbio->bi_private = r10_bio;
1424
1425 atomic_inc(&r10_bio->remaining);
1426 spin_lock_irqsave(&conf->device_lock, flags);
1427 bio_list_add(&conf->pending_bio_list, mbio);
1428 conf->pending_count++;
1429 spin_unlock_irqrestore(&conf->device_lock, flags);
1430 if (!mddev_check_plugged(mddev))
1431 md_wakeup_thread(mddev->thread);
1432 }
1433
1434 /* Don't remove the bias on 'remaining' (one_write_done) until
1435 * after checking if we need to go around again.
1436 */
1437
1438 if (sectors_handled < (bio->bi_size >> 9)) {
1439 one_write_done(r10_bio);
1440 /* We need another r10_bio. It has already been counted
1441 * in bio->bi_phys_segments.
1442 */
1443 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1444
1445 r10_bio->master_bio = bio;
1446 r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1447
1448 r10_bio->mddev = mddev;
1449 r10_bio->sector = bio->bi_sector + sectors_handled;
1450 r10_bio->state = 0;
1451 goto retry_write;
1452 }
1453 one_write_done(r10_bio);
1454
1455 /* In case raid10d snuck in to freeze_array */
1456 wake_up(&conf->wait_barrier);
1457}
1458
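/* Report geometry and per-device state for /proc/mdstat, e.g.
 * " 64K chunks 2 near-copies [4/3] [UU_U]" where 'U' marks an in-sync
 * device and '_' one that is missing or not in sync.
 */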
1459static void status(struct seq_file *seq, struct mddev *mddev)
1460{
1461 struct r10conf *conf = mddev->private;
1462 int i;
1463
1464 if (conf->geo.near_copies < conf->geo.raid_disks)
1465 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1466 if (conf->geo.near_copies > 1)
1467 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1468 if (conf->geo.far_copies > 1) {
1469 if (conf->geo.far_offset)
1470 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1471 else
1472 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1473 }
1474 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1475 conf->geo.raid_disks - mddev->degraded);
1476 for (i = 0; i < conf->geo.raid_disks; i++)
1477 seq_printf(seq, "%s",
1478 conf->mirrors[i].rdev &&
1479 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1480 seq_printf(seq, "]");
1481}
1482
1483/* check if there are enough drives for
1484 * every block to appear on at least one.
1485 * Don't consider the device numbered 'ignore'
1486 * as we might be about to remove it.
1487 */
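/* For example, with raid_disks=4, near_copies=2 and far_copies=1 the sets
 * of devices holding copies of the same blocks are {0,1} and {2,3}; the
 * array is "enough" as long as each set still has a working member other
 * than 'ignore'.
 */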
1488static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
1489{
1490 int first = 0;
1491
1492 do {
1493 int n = conf->copies;
1494 int cnt = 0;
1495 int this = first;
1496 while (n--) {
1497 if (conf->mirrors[this].rdev &&
1498 this != ignore)
1499 cnt++;
1500 this = (this+1) % geo->raid_disks;
1501 }
1502 if (cnt == 0)
1503 return 0;
1504 first = (first + geo->near_copies) % geo->raid_disks;
1505 } while (first != 0);
1506 return 1;
1507}
1508
1509static int enough(struct r10conf *conf, int ignore)
1510{
1511 return _enough(conf, &conf->geo, ignore) &&
1512 _enough(conf, &conf->prev, ignore);
1513}
1514
1515static void error(struct mddev *mddev, struct md_rdev *rdev)
1516{
1517 char b[BDEVNAME_SIZE];
1518 struct r10conf *conf = mddev->private;
1519
1520 /*
1521 * If it is not operational, then we have already marked it as dead;
1522 * else if it is the last working disk, ignore the error and let the
1523 * next level up know;
1524 * else mark the drive as failed.
1525 */
1526 if (test_bit(In_sync, &rdev->flags)
1527 && !enough(conf, rdev->raid_disk))
1528 /*
1529 * Don't fail the drive, just return an IO error.
1530 */
1531 return;
1532 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1533 unsigned long flags;
1534 spin_lock_irqsave(&conf->device_lock, flags);
1535 mddev->degraded++;
1536 spin_unlock_irqrestore(&conf->device_lock, flags);
1537 /*
1538 * if recovery is running, make sure it aborts.
1539 */
1540 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1541 }
1542 set_bit(Blocked, &rdev->flags);
1543 set_bit(Faulty, &rdev->flags);
1544 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1545 printk(KERN_ALERT
1546 "md/raid10:%s: Disk failure on %s, disabling device.\n"
1547 "md/raid10:%s: Operation continuing on %d devices.\n",
1548 mdname(mddev), bdevname(rdev->bdev, b),
1549 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1550}
1551
1552static void print_conf(struct r10conf *conf)
1553{
1554 int i;
1555 struct mirror_info *tmp;
1556
1557 printk(KERN_DEBUG "RAID10 conf printout:\n");
1558 if (!conf) {
1559 printk(KERN_DEBUG "(!conf)\n");
1560 return;
1561 }
1562 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1563 conf->geo.raid_disks);
1564
1565 for (i = 0; i < conf->geo.raid_disks; i++) {
1566 char b[BDEVNAME_SIZE];
1567 tmp = conf->mirrors + i;
1568 if (tmp->rdev)
1569 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1570 i, !test_bit(In_sync, &tmp->rdev->flags),
1571 !test_bit(Faulty, &tmp->rdev->flags),
1572 bdevname(tmp->rdev->bdev,b));
1573 }
1574}
1575
1576static void close_sync(struct r10conf *conf)
1577{
1578 wait_barrier(conf);
1579 allow_barrier(conf);
1580
1581 mempool_destroy(conf->r10buf_pool);
1582 conf->r10buf_pool = NULL;
1583}
1584
1585static int raid10_spare_active(struct mddev *mddev)
1586{
1587 int i;
1588 struct r10conf *conf = mddev->private;
1589 struct mirror_info *tmp;
1590 int count = 0;
1591 unsigned long flags;
1592
1593 /*
1594 * Find all non-in_sync disks within the RAID10 configuration
1595 * and mark them in_sync
1596 */
1597 for (i = 0; i < conf->geo.raid_disks; i++) {
1598 tmp = conf->mirrors + i;
1599 if (tmp->replacement
1600 && tmp->replacement->recovery_offset == MaxSector
1601 && !test_bit(Faulty, &tmp->replacement->flags)
1602 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1603 /* Replacement has just become active */
1604 if (!tmp->rdev
1605 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1606 count++;
1607 if (tmp->rdev) {
1608 /* Replaced device not technically faulty,
1609 * but we need to be sure it gets removed
1610 * and never re-added.
1611 */
1612 set_bit(Faulty, &tmp->rdev->flags);
1613 sysfs_notify_dirent_safe(
1614 tmp->rdev->sysfs_state);
1615 }
1616 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1617 } else if (tmp->rdev
1618 && !test_bit(Faulty, &tmp->rdev->flags)
1619 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1620 count++;
1621 sysfs_notify_dirent(tmp->rdev->sysfs_state);
1622 }
1623 }
1624 spin_lock_irqsave(&conf->device_lock, flags);
1625 mddev->degraded -= count;
1626 spin_unlock_irqrestore(&conf->device_lock, flags);
1627
1628 print_conf(conf);
1629 return count;
1630}
1631
1632
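/* Hot-add 'rdev' to the array: prefer its previous slot if that slot is
 * still empty, otherwise take the first suitable slot, either as a
 * replacement for a device that wants one or as a fresh member to be
 * recovered.
 */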
1633static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1634{
1635 struct r10conf *conf = mddev->private;
1636 int err = -EEXIST;
1637 int mirror;
1638 int first = 0;
1639 int last = conf->geo.raid_disks - 1;
1640 struct request_queue *q = bdev_get_queue(rdev->bdev);
1641
1642 if (mddev->recovery_cp < MaxSector)
1643 /* only hot-add to in-sync arrays, as recovery is
1644 * very different from resync
1645 */
1646 return -EBUSY;
1647 if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
1648 return -EINVAL;
1649
1650 if (rdev->raid_disk >= 0)
1651 first = last = rdev->raid_disk;
1652
1653 if (q->merge_bvec_fn) {
1654 set_bit(Unmerged, &rdev->flags);
1655 mddev->merge_check_needed = 1;
1656 }
1657
1658 if (rdev->saved_raid_disk >= first &&
1659 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1660 mirror = rdev->saved_raid_disk;
1661 else
1662 mirror = first;
1663 for ( ; mirror <= last ; mirror++) {
1664 struct mirror_info *p = &conf->mirrors[mirror];
1665 if (p->recovery_disabled == mddev->recovery_disabled)
1666 continue;
1667 if (p->rdev) {
1668 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1669 p->replacement != NULL)
1670 continue;
1671 clear_bit(In_sync, &rdev->flags);
1672 set_bit(Replacement, &rdev->flags);
1673 rdev->raid_disk = mirror;
1674 err = 0;
1675 disk_stack_limits(mddev->gendisk, rdev->bdev,
1676 rdev->data_offset << 9);
1677 conf->fullsync = 1;
1678 rcu_assign_pointer(p->replacement, rdev);
1679 break;
1680 }
1681
1682 disk_stack_limits(mddev->gendisk, rdev->bdev,
1683 rdev->data_offset << 9);
1684
1685 p->head_position = 0;
1686 p->recovery_disabled = mddev->recovery_disabled - 1;
1687 rdev->raid_disk = mirror;
1688 err = 0;
1689 if (rdev->saved_raid_disk != mirror)
1690 conf->fullsync = 1;
1691 rcu_assign_pointer(p->rdev, rdev);
1692 break;
1693 }
1694 if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1695 /* Some requests might not have seen this new
1696 * merge_bvec_fn. We must wait for them to complete
1697 * before merging the device fully.
1698 * First we make sure any code which has tested
1699 * our function has submitted the request, then
1700 * we wait for all outstanding requests to complete.
1701 */
1702 synchronize_sched();
1703 raise_barrier(conf, 0);
1704 lower_barrier(conf);
1705 clear_bit(Unmerged, &rdev->flags);
1706 }
1707 md_integrity_add_rdev(rdev, mddev);
1708 print_conf(conf);
1709 return err;
1710}
1711
1712static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1713{
1714 struct r10conf *conf = mddev->private;
1715 int err = 0;
1716 int number = rdev->raid_disk;
1717 struct md_rdev **rdevp;
1718 struct mirror_info *p = conf->mirrors + number;
1719
1720 print_conf(conf);
1721 if (rdev == p->rdev)
1722 rdevp = &p->rdev;
1723 else if (rdev == p->replacement)
1724 rdevp = &p->replacement;
1725 else
1726 return 0;
1727
1728 if (test_bit(In_sync, &rdev->flags) ||
1729 atomic_read(&rdev->nr_pending)) {
1730 err = -EBUSY;
1731 goto abort;
1732 }
1733 /* Only remove non-faulty devices if recovery
1734 * is not possible.
1735 */
1736 if (!test_bit(Faulty, &rdev->flags) &&
1737 mddev->recovery_disabled != p->recovery_disabled &&
1738 (!p->replacement || p->replacement == rdev) &&
1739 number < conf->geo.raid_disks &&
1740 enough(conf, -1)) {
1741 err = -EBUSY;
1742 goto abort;
1743 }
1744 *rdevp = NULL;
1745 synchronize_rcu();
1746 if (atomic_read(&rdev->nr_pending)) {
1747 /* lost the race, try later */
1748 err = -EBUSY;
1749 *rdevp = rdev;
1750 goto abort;
1751 } else if (p->replacement) {
1752 /* We must have just cleared 'rdev' */
1753 p->rdev = p->replacement;
1754 clear_bit(Replacement, &p->replacement->flags);
1755 smp_mb(); /* Make sure other CPUs may see both as identical
1756 * but will never see neither -- if they are careful.
1757 */
1758 p->replacement = NULL;
1759 clear_bit(WantReplacement, &rdev->flags);
1760 } else
1761 /* We might have just removed the Replacement as faulty.
1762 * Clear the flag just in case
1763 */
1764 clear_bit(WantReplacement, &rdev->flags);
1765
1766 err = md_integrity_register(mddev);
1767
1768abort:
1769
1770 print_conf(conf);
1771 return err;
1772}
1773
1774
1775static void end_sync_read(struct bio *bio, int error)
1776{
1777 struct r10bio *r10_bio = bio->bi_private;
1778 struct r10conf *conf = r10_bio->mddev->private;
1779 int d;
1780
1781 if (bio == r10_bio->master_bio) {
1782 /* this is a reshape read */
1783 d = r10_bio->read_slot; /* really the read dev */
1784 } else
1785 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1786
1787 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1788 set_bit(R10BIO_Uptodate, &r10_bio->state);
1789 else
1790 /* The write handler will notice the lack of
1791 * R10BIO_Uptodate and record any errors etc
1792 */
1793 atomic_add(r10_bio->sectors,
1794 &conf->mirrors[d].rdev->corrected_errors);
1795
1796 /* for reconstruct, we always reschedule after a read.
1797 * for resync, only after all reads
1798 */
1799 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1800 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1801 atomic_dec_and_test(&r10_bio->remaining)) {
1802 /* we have read all the blocks,
1803 * do the comparison in process context in raid10d
1804 */
1805 reschedule_retry(r10_bio);
1806 }
1807}
1808
1809static void end_sync_request(struct r10bio *r10_bio)
1810{
1811 struct mddev *mddev = r10_bio->mddev;
1812
1813 while (atomic_dec_and_test(&r10_bio->remaining)) {
1814 if (r10_bio->master_bio == NULL) {
1815 /* the primary of several recovery bios */
1816 sector_t s = r10_bio->sectors;
1817 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1818 test_bit(R10BIO_WriteError, &r10_bio->state))
1819 reschedule_retry(r10_bio);
1820 else
1821 put_buf(r10_bio);
1822 md_done_sync(mddev, s, 1);
1823 break;
1824 } else {
1825 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1826 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1827 test_bit(R10BIO_WriteError, &r10_bio->state))
1828 reschedule_retry(r10_bio);
1829 else
1830 put_buf(r10_bio);
1831 r10_bio = r10_bio2;
1832 }
1833 }
1834}
1835
1836static void end_sync_write(struct bio *bio, int error)
1837{
1838 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1839 struct r10bio *r10_bio = bio->bi_private;
1840 struct mddev *mddev = r10_bio->mddev;
1841 struct r10conf *conf = mddev->private;
1842 int d;
1843 sector_t first_bad;
1844 int bad_sectors;
1845 int slot;
1846 int repl;
1847 struct md_rdev *rdev = NULL;
1848
1849 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1850 if (repl)
1851 rdev = conf->mirrors[d].replacement;
1852 else
1853 rdev = conf->mirrors[d].rdev;
1854
1855 if (!uptodate) {
1856 if (repl)
1857 md_error(mddev, rdev);
1858 else {
1859 set_bit(WriteErrorSeen, &rdev->flags);
1860 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1861 set_bit(MD_RECOVERY_NEEDED,
1862 &rdev->mddev->recovery);
1863 set_bit(R10BIO_WriteError, &r10_bio->state);
1864 }
1865 } else if (is_badblock(rdev,
1866 r10_bio->devs[slot].addr,
1867 r10_bio->sectors,
1868 &first_bad, &bad_sectors))
1869 set_bit(R10BIO_MadeGood, &r10_bio->state);
1870
1871 rdev_dec_pending(rdev, mddev);
1872
1873 end_sync_request(r10_bio);
1874}
1875
1876/*
1877 * Note: sync and recovery are handled very differently for raid10.
1878 * This code is for resync.
1879 * For resync, we read through virtual addresses and read all blocks.
1880 * If there is any error, we schedule a write. The lowest numbered
1881 * drive is authoritative.
1882 * However requests come in for physical addresses, so we need to map.
1883 * For every physical address there are raid_disks/copies virtual addresses,
1884 * which is always at least one, but is not necessarily an integer.
1885 * This means that a physical address can span multiple chunks, so we may
1886 * have to submit multiple io requests for a single sync request.
1887 */
1888/*
1889 * We check if all blocks are in-sync and only write to blocks that
1890 * aren't in sync
1891 */
1892static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1893{
1894 struct r10conf *conf = mddev->private;
1895 int i, first;
1896 struct bio *tbio, *fbio;
1897 int vcnt;
1898
1899 atomic_set(&r10_bio->remaining, 1);
1900
1901 /* find the first device with a block */
1902 for (i=0; i<conf->copies; i++)
1903 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1904 break;
1905
1906 if (i == conf->copies)
1907 goto done;
1908
1909 first = i;
1910 fbio = r10_bio->devs[i].bio;
1911
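	/* number of page-sized vec entries needed to cover the resync range;
	 * e.g. 128 sectors (64K) is 16 entries with 4K pages
	 */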
1912 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
1913 /* now find blocks with errors */
1914 for (i=0 ; i < conf->copies ; i++) {
1915 int j, d;
1916
1917 tbio = r10_bio->devs[i].bio;
1918
1919 if (tbio->bi_end_io != end_sync_read)
1920 continue;
1921 if (i == first)
1922 continue;
1923 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1924 /* We know that the bi_io_vec layout is the same for
1925 * both 'first' and 'i', so we just compare them.
1926 * All vec entries are PAGE_SIZE;
1927 */
1928 for (j = 0; j < vcnt; j++)
1929 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1930 page_address(tbio->bi_io_vec[j].bv_page),
1931 fbio->bi_io_vec[j].bv_len))
1932 break;
1933 if (j == vcnt)
1934 continue;
1935 mddev->resync_mismatches += r10_bio->sectors;
1936 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1937 /* Don't fix anything. */
1938 continue;
1939 }
1940 /* Ok, we need to write this bio, either to correct an
1941 * inconsistency or to correct an unreadable block.
1942 * First we need to fixup bv_offset, bv_len and
1943 * bi_vecs, as the read request might have corrupted these
1944 */
1945 tbio->bi_vcnt = vcnt;
1946 tbio->bi_size = r10_bio->sectors << 9;
1947 tbio->bi_idx = 0;
1948 tbio->bi_phys_segments = 0;
1949 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1950 tbio->bi_flags |= 1 << BIO_UPTODATE;
1951 tbio->bi_next = NULL;
1952 tbio->bi_rw = WRITE;
1953 tbio->bi_private = r10_bio;
1954 tbio->bi_sector = r10_bio->devs[i].addr;
1955
1956 for (j=0; j < vcnt ; j++) {
1957 tbio->bi_io_vec[j].bv_offset = 0;
1958 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1959
1960 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1961 page_address(fbio->bi_io_vec[j].bv_page),
1962 PAGE_SIZE);
1963 }
1964 tbio->bi_end_io = end_sync_write;
1965
1966 d = r10_bio->devs[i].devnum;
1967 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1968 atomic_inc(&r10_bio->remaining);
1969 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1970
1971 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1972 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1973 generic_make_request(tbio);
1974 }
1975
1976 /* Now write out to any replacement devices
1977 * that are active
1978 */
1979 for (i = 0; i < conf->copies; i++) {
1980 int j, d;
1981
1982 tbio = r10_bio->devs[i].repl_bio;
1983 if (!tbio || !tbio->bi_end_io)
1984 continue;
1985 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
1986 && r10_bio->devs[i].bio != fbio)
1987 for (j = 0; j < vcnt; j++)
1988 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1989 page_address(fbio->bi_io_vec[j].bv_page),
1990 PAGE_SIZE);
1991 d = r10_bio->devs[i].devnum;
1992 atomic_inc(&r10_bio->remaining);
1993 md_sync_acct(conf->mirrors[d].replacement->bdev,
1994 tbio->bi_size >> 9);
1995 generic_make_request(tbio);
1996 }
1997
1998done:
1999 if (atomic_dec_and_test(&r10_bio->remaining)) {
2000 md_done_sync(mddev, r10_bio->sectors, 1);
2001 put_buf(r10_bio);
2002 }
2003}
2004
2005/*
2006 * Now for the recovery code.
2007 * Recovery happens across physical sectors.
2008 * We recover all non-in_sync drives by finding the virtual address of
2009 * each, and then choosing a working drive that also has that virtual address.
2010 * There is a separate r10_bio for each non-in_sync drive.
2011 * Only the first two slots are in use: the first for reading,
2012 * the second for writing.
2013 *
2014 */
2015static void fix_recovery_read_error(struct r10bio *r10_bio)
2016{
2017 /* We got a read error during recovery.
2018 * We repeat the read in smaller page-sized sections.
2019 * If a read succeeds, write it to the new device or record
2020 * a bad block if we cannot.
2021 * If a read fails, record a bad block on both old and
2022 * new devices.
2023 */
2024 struct mddev *mddev = r10_bio->mddev;
2025 struct r10conf *conf = mddev->private;
2026 struct bio *bio = r10_bio->devs[0].bio;
2027 sector_t sect = 0;
2028 int sectors = r10_bio->sectors;
2029 int idx = 0;
2030 int dr = r10_bio->devs[0].devnum;
2031 int dw = r10_bio->devs[1].devnum;
2032
2033 while (sectors) {
2034 int s = sectors;
2035 struct md_rdev *rdev;
2036 sector_t addr;
2037 int ok;
2038
2039 if (s > (PAGE_SIZE>>9))
2040 s = PAGE_SIZE >> 9;
2041
2042 rdev = conf->mirrors[dr].rdev;
2043 addr = r10_bio->devs[0].addr + sect;
2044 ok = sync_page_io(rdev,
2045 addr,
2046 s << 9,
2047 bio->bi_io_vec[idx].bv_page,
2048 READ, false);
2049 if (ok) {
2050 rdev = conf->mirrors[dw].rdev;
2051 addr = r10_bio->devs[1].addr + sect;
2052 ok = sync_page_io(rdev,
2053 addr,
2054 s << 9,
2055 bio->bi_io_vec[idx].bv_page,
2056 WRITE, false);
2057 if (!ok) {
2058 set_bit(WriteErrorSeen, &rdev->flags);
2059 if (!test_and_set_bit(WantReplacement,
2060 &rdev->flags))
2061 set_bit(MD_RECOVERY_NEEDED,
2062 &rdev->mddev->recovery);
2063 }
2064 }
2065 if (!ok) {
2066 /* We don't worry if we cannot set a bad block -
2067 * it really is bad so there is no loss in not
2068 * recording it yet
2069 */
2070 rdev_set_badblocks(rdev, addr, s, 0);
2071
2072 if (rdev != conf->mirrors[dw].rdev) {
2073 /* need bad block on destination too */
2074 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2075 addr = r10_bio->devs[1].addr + sect;
2076 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2077 if (!ok) {
2078 /* just abort the recovery */
2079 printk(KERN_NOTICE
2080 "md/raid10:%s: recovery aborted"
2081 " due to read error\n",
2082 mdname(mddev));
2083
2084 conf->mirrors[dw].recovery_disabled
2085 = mddev->recovery_disabled;
2086 set_bit(MD_RECOVERY_INTR,
2087 &mddev->recovery);
2088 break;
2089 }
2090 }
2091 }
2092
2093 sectors -= s;
2094 sect += s;
2095 idx++;
2096 }
2097}
2098
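/* The read half of a recovery r10_bio has completed.  If the read failed,
 * fall back to page-at-a-time repair; otherwise submit the queued write to
 * the device being recovered and, if present, to its replacement.
 */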
2099static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2100{
2101 struct r10conf *conf = mddev->private;
2102 int d;
2103 struct bio *wbio, *wbio2;
2104
2105 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2106 fix_recovery_read_error(r10_bio);
2107 end_sync_request(r10_bio);
2108 return;
2109 }
2110
2111 /*
2112 * share the pages with the first bio
2113 * and submit the write request
2114 */
2115 d = r10_bio->devs[1].devnum;
2116 wbio = r10_bio->devs[1].bio;
2117 wbio2 = r10_bio->devs[1].repl_bio;
2118 if (wbio->bi_end_io) {
2119 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2120 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
2121 generic_make_request(wbio);
2122 }
2123 if (wbio2 && wbio2->bi_end_io) {
2124 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2125 md_sync_acct(conf->mirrors[d].replacement->bdev,
2126 wbio2->bi_size >> 9);
2127 generic_make_request(wbio2);
2128 }
2129}
2130
2131
2132/*
2133 * Used by fix_read_error() to decay the per rdev read_errors.
2134 * We halve the read error count for every hour that has elapsed
2135 * since the last recorded read error.
2136 *
2137 */
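/* For example, a count of 12 with two hours since the last recorded error
 * decays to 12 >> 2 == 3 before the new error is counted.
 */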
2138static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2139{
2140 struct timespec cur_time_mon;
2141 unsigned long hours_since_last;
2142 unsigned int read_errors = atomic_read(&rdev->read_errors);
2143
2144 ktime_get_ts(&cur_time_mon);
2145
2146 if (rdev->last_read_error.tv_sec == 0 &&
2147 rdev->last_read_error.tv_nsec == 0) {
2148 /* first time we've seen a read error */
2149 rdev->last_read_error = cur_time_mon;
2150 return;
2151 }
2152
2153 hours_since_last = (cur_time_mon.tv_sec -
2154 rdev->last_read_error.tv_sec) / 3600;
2155
2156 rdev->last_read_error = cur_time_mon;
2157
2158 /*
2159 * if hours_since_last is > the number of bits in read_errors
2160 * just set read errors to 0. We do this to avoid
2161 * overflowing the shift of read_errors by hours_since_last.
2162 */
2163 if (hours_since_last >= 8 * sizeof(read_errors))
2164 atomic_set(&rdev->read_errors, 0);
2165 else
2166 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2167}
2168
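/* Synchronously read or write 'sectors' at 'sector' on 'rdev' using 'page'.
 * Returns 1 on success; -1 if the range overlaps a known bad block (for a
 * write, only when a write error has already been seen on the device); and
 * 0 on IO failure, in which case a bad block is recorded or, failing that,
 * the device is failed.
 */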
2169static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2170 int sectors, struct page *page, int rw)
2171{
2172 sector_t first_bad;
2173 int bad_sectors;
2174
2175 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2176 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2177 return -1;
2178 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2179 /* success */
2180 return 1;
2181 if (rw == WRITE) {
2182 set_bit(WriteErrorSeen, &rdev->flags);
2183 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2184 set_bit(MD_RECOVERY_NEEDED,
2185 &rdev->mddev->recovery);
2186 }
2187 /* need to record an error - either for the block or the device */
2188 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2189 md_error(rdev->mddev, rdev);
2190 return 0;
2191}
2192
2193/*
2194 * This is a kernel thread which:
2195 *
2196 * 1. Retries failed read operations on working mirrors.
2197 * 2. Updates the raid superblock when problems are encountered.
2198 * 3. Performs writes following reads for array synchronising.
2199 */
2200
2201static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2202{
2203 int sect = 0; /* Offset from r10_bio->sector */
2204 int sectors = r10_bio->sectors;
2205 struct md_rdev *rdev;
2206 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2207 int d = r10_bio->devs[r10_bio->read_slot].devnum;
2208
2209 /* still own a reference to this rdev, so it cannot
2210 * have been cleared recently.
2211 */
2212 rdev = conf->mirrors[d].rdev;
2213
2214 if (test_bit(Faulty, &rdev->flags))
2215 /* drive has already been failed, just ignore any
2216 more fix_read_error() attempts */
2217 return;
2218
2219 check_decay_read_errors(mddev, rdev);
2220 atomic_inc(&rdev->read_errors);
2221 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2222 char b[BDEVNAME_SIZE];
2223 bdevname(rdev->bdev, b);
2224
2225 printk(KERN_NOTICE
2226 "md/raid10:%s: %s: Raid device exceeded "
2227 "read_error threshold [cur %d:max %d]\n",
2228 mdname(mddev), b,
2229 atomic_read(&rdev->read_errors), max_read_errors);
2230 printk(KERN_NOTICE
2231 "md/raid10:%s: %s: Failing raid device\n",
2232 mdname(mddev), b);
2233 md_error(mddev, conf->mirrors[d].rdev);
2234 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2235 return;
2236 }
2237
2238 while(sectors) {
2239 int s = sectors;
2240 int sl = r10_bio->read_slot;
2241 int success = 0;
2242 int start;
2243
2244 if (s > (PAGE_SIZE>>9))
2245 s = PAGE_SIZE >> 9;
2246
2247 rcu_read_lock();
2248 do {
2249 sector_t first_bad;
2250 int bad_sectors;
2251
2252 d = r10_bio->devs[sl].devnum;
2253 rdev = rcu_dereference(conf->mirrors[d].rdev);
2254 if (rdev &&
2255 !test_bit(Unmerged, &rdev->flags) &&
2256 test_bit(In_sync, &rdev->flags) &&
2257 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2258 &first_bad, &bad_sectors) == 0) {
2259 atomic_inc(&rdev->nr_pending);
2260 rcu_read_unlock();
2261 success = sync_page_io(rdev,
2262 r10_bio->devs[sl].addr +
2263 sect,
2264 s<<9,
2265 conf->tmppage, READ, false);
2266 rdev_dec_pending(rdev, mddev);
2267 rcu_read_lock();
2268 if (success)
2269 break;
2270 }
2271 sl++;
2272 if (sl == conf->copies)
2273 sl = 0;
2274 } while (!success && sl != r10_bio->read_slot);
2275 rcu_read_unlock();
2276
2277 if (!success) {
2278 /* Cannot read from anywhere, just mark the block
2279 * as bad on the first device to discourage future
2280 * reads.
2281 */
2282 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2283 rdev = conf->mirrors[dn].rdev;
2284
2285 if (!rdev_set_badblocks(
2286 rdev,
2287 r10_bio->devs[r10_bio->read_slot].addr
2288 + sect,
2289 s, 0)) {
2290 md_error(mddev, rdev);
2291 r10_bio->devs[r10_bio->read_slot].bio
2292 = IO_BLOCKED;
2293 }
2294 break;
2295 }
2296
2297 start = sl;
2298 /* write it back and re-read */
2299 rcu_read_lock();
2300 while (sl != r10_bio->read_slot) {
2301 char b[BDEVNAME_SIZE];
2302
2303 if (sl==0)
2304 sl = conf->copies;
2305 sl--;
2306 d = r10_bio->devs[sl].devnum;
2307 rdev = rcu_dereference(conf->mirrors[d].rdev);
2308 if (!rdev ||
2309 test_bit(Unmerged, &rdev->flags) ||
2310 !test_bit(In_sync, &rdev->flags))
2311 continue;
2312
2313 atomic_inc(&rdev->nr_pending);
2314 rcu_read_unlock();
2315 if (r10_sync_page_io(rdev,
2316 r10_bio->devs[sl].addr +
2317 sect,
2318 s, conf->tmppage, WRITE)
2319 == 0) {
2320 /* Well, this device is dead */
2321 printk(KERN_NOTICE
2322 "md/raid10:%s: read correction "
2323 "write failed"
2324 " (%d sectors at %llu on %s)\n",
2325 mdname(mddev), s,
2326 (unsigned long long)(
2327 sect +
2328 choose_data_offset(r10_bio,
2329 rdev)),
2330 bdevname(rdev->bdev, b));
2331 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2332 "drive\n",
2333 mdname(mddev),
2334 bdevname(rdev->bdev, b));
2335 }
2336 rdev_dec_pending(rdev, mddev);
2337 rcu_read_lock();
2338 }
2339 sl = start;
2340 while (sl != r10_bio->read_slot) {
2341 char b[BDEVNAME_SIZE];
2342
2343 if (sl==0)
2344 sl = conf->copies;
2345 sl--;
2346 d = r10_bio->devs[sl].devnum;
2347 rdev = rcu_dereference(conf->mirrors[d].rdev);
2348 if (!rdev ||
2349 !test_bit(In_sync, &rdev->flags))
2350 continue;
2351
2352 atomic_inc(&rdev->nr_pending);
2353 rcu_read_unlock();
2354 switch (r10_sync_page_io(rdev,
2355 r10_bio->devs[sl].addr +
2356 sect,
2357 s, conf->tmppage,
2358 READ)) {
2359 case 0:
2360 /* Well, this device is dead */
2361 printk(KERN_NOTICE
2362 "md/raid10:%s: unable to read back "
2363 "corrected sectors"
2364 " (%d sectors at %llu on %s)\n",
2365 mdname(mddev), s,
2366 (unsigned long long)(
2367 sect +
2368 choose_data_offset(r10_bio, rdev)),
2369 bdevname(rdev->bdev, b));
2370 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2371 "drive\n",
2372 mdname(mddev),
2373 bdevname(rdev->bdev, b));
2374 break;
2375 case 1:
2376 printk(KERN_INFO
2377 "md/raid10:%s: read error corrected"
2378 " (%d sectors at %llu on %s)\n",
2379 mdname(mddev), s,
2380 (unsigned long long)(
2381 sect +
2382 choose_data_offset(r10_bio, rdev)),
2383 bdevname(rdev->bdev, b));
2384 atomic_add(s, &rdev->corrected_errors);
2385 }
2386
2387 rdev_dec_pending(rdev, mddev);
2388 rcu_read_lock();
2389 }
2390 rcu_read_unlock();
2391
2392 sectors -= s;
2393 sect += s;
2394 }
2395}
2396
2397static void bi_complete(struct bio *bio, int error)
2398{
2399 complete((struct completion *)bio->bi_private);
2400}
2401
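/* Submit a bio and wait for it to complete; returns non-zero iff the bio
 * completed successfully.
 */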
2402static int submit_bio_wait(int rw, struct bio *bio)
2403{
2404 struct completion event;
2405 rw |= REQ_SYNC;
2406
2407 init_completion(&event);
2408 bio->bi_private = &event;
2409 bio->bi_end_io = bi_complete;
2410 submit_bio(rw, bio);
2411 wait_for_completion(&event);
2412
2413 return test_bit(BIO_UPTODATE, &bio->bi_flags);
2414}
2415
2416static int narrow_write_error(struct r10bio *r10_bio, int i)
2417{
2418 struct bio *bio = r10_bio->master_bio;
2419 struct mddev *mddev = r10_bio->mddev;
2420 struct r10conf *conf = mddev->private;
2421 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2422 /* bio has the data to be written to slot 'i' where
2423 * we just recently had a write error.
2424 * We repeatedly clone the bio and trim down to one block,
2425 * then try the write. Where the write fails we record
2426 * a bad block.
2427 * It is conceivable that the bio doesn't exactly align with
2428 * blocks. We must handle this.
2429 *
2430 * We currently own a reference to the rdev.
2431 */
2432
2433 int block_sectors;
2434 sector_t sector;
2435 int sectors;
2436 int sect_to_write = r10_bio->sectors;
2437 int ok = 1;
2438
2439 if (rdev->badblocks.shift < 0)
2440 return 0;
2441
2442 block_sectors = 1 << rdev->badblocks.shift;
2443 sector = r10_bio->sector;
2444 sectors = ((r10_bio->sector + block_sectors)
2445 & ~(sector_t)(block_sectors - 1))
2446 - sector;
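	/* e.g. with badblocks.shift == 3 (8-sector blocks) and a starting
	 * sector of 21, the first write covers 3 sectors (21..23), after
	 * which each write starts on an 8-sector boundary.
	 */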
2447
2448 while (sect_to_write) {
2449 struct bio *wbio;
2450 if (sectors > sect_to_write)
2451 sectors = sect_to_write;
2452 /* Write at 'sector' for 'sectors' */
2453 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2454 md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2455 wbio->bi_sector = (r10_bio->devs[i].addr+
2456 choose_data_offset(r10_bio, rdev) +
2457 (sector - r10_bio->sector));
2458 wbio->bi_bdev = rdev->bdev;
2459 if (submit_bio_wait(WRITE, wbio) == 0)
2460 /* Failure! */
2461 ok = rdev_set_badblocks(rdev, sector,
2462 sectors, 0)
2463 && ok;
2464
2465 bio_put(wbio);
2466 sect_to_write -= sectors;
2467 sector += sectors;
2468 sectors = block_sectors;
2469 }
2470 return ok;
2471}
2472
2473static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2474{
2475 int slot = r10_bio->read_slot;
2476 struct bio *bio;
2477 struct r10conf *conf = mddev->private;
2478 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2479 char b[BDEVNAME_SIZE];
2480 unsigned long do_sync;
2481 int max_sectors;
2482
2483 /* we got a read error. Maybe the drive is bad. Maybe just
2484 * the block and we can fix it.
2485 * We freeze all other IO, and try reading the block from
2486 * other devices. When we find one, we re-write
2487 * and check whether that fixes the read error.
2488 * This is all done synchronously while the array is
2489 * frozen.
2490 */
2491 bio = r10_bio->devs[slot].bio;
2492 bdevname(bio->bi_bdev, b);
2493 bio_put(bio);
2494 r10_bio->devs[slot].bio = NULL;
2495
2496 if (mddev->ro == 0) {
2497 freeze_array(conf);
2498 fix_read_error(conf, mddev, r10_bio);
2499 unfreeze_array(conf);
2500 } else
2501 r10_bio->devs[slot].bio = IO_BLOCKED;
2502
2503 rdev_dec_pending(rdev, mddev);
2504
2505read_more:
2506 rdev = read_balance(conf, r10_bio, &max_sectors);
2507 if (rdev == NULL) {
2508 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2509 " read error for block %llu\n",
2510 mdname(mddev), b,
2511 (unsigned long long)r10_bio->sector);
2512 raid_end_bio_io(r10_bio);
2513 return;
2514 }
2515
2516 do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2517 slot = r10_bio->read_slot;
2518 printk_ratelimited(
2519 KERN_ERR
2520 "md/raid10:%s: %s: redirecting "
2521 "sector %llu to another mirror\n",
2522 mdname(mddev),
2523 bdevname(rdev->bdev, b),
2524 (unsigned long long)r10_bio->sector);
2525 bio = bio_clone_mddev(r10_bio->master_bio,
2526 GFP_NOIO, mddev);
2527 md_trim_bio(bio,
2528 r10_bio->sector - bio->bi_sector,
2529 max_sectors);
2530 r10_bio->devs[slot].bio = bio;
2531 r10_bio->devs[slot].rdev = rdev;
2532 bio->bi_sector = r10_bio->devs[slot].addr
2533 + choose_data_offset(r10_bio, rdev);
2534 bio->bi_bdev = rdev->bdev;
2535 bio->bi_rw = READ | do_sync;
2536 bio->bi_private = r10_bio;
2537 bio->bi_end_io = raid10_end_read_request;
2538 if (max_sectors < r10_bio->sectors) {
2539 /* Drat - have to split this up more */
2540 struct bio *mbio = r10_bio->master_bio;
2541 int sectors_handled =
2542 r10_bio->sector + max_sectors
2543 - mbio->bi_sector;
2544 r10_bio->sectors = max_sectors;
2545 spin_lock_irq(&conf->device_lock);
2546 if (mbio->bi_phys_segments == 0)
2547 mbio->bi_phys_segments = 2;
2548 else
2549 mbio->bi_phys_segments++;
2550 spin_unlock_irq(&conf->device_lock);
2551 generic_make_request(bio);
2552
2553 r10_bio = mempool_alloc(conf->r10bio_pool,
2554 GFP_NOIO);
2555 r10_bio->master_bio = mbio;
2556 r10_bio->sectors = (mbio->bi_size >> 9)
2557 - sectors_handled;
2558 r10_bio->state = 0;
2559 set_bit(R10BIO_ReadError,
2560 &r10_bio->state);
2561 r10_bio->mddev = mddev;
2562 r10_bio->sector = mbio->bi_sector
2563 + sectors_handled;
2564
2565 goto read_more;
2566 } else
2567 generic_make_request(bio);
2568}
2569
2570static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2571{
2572 /* Some sort of write request has finished and it
2573 * succeeded in writing where we thought there was a
2574 * bad block. So forget the bad block.
2575 * Or possibly it failed and we need to record
2576 * a bad block.
2577 */
2578 int m;
2579 struct md_rdev *rdev;
2580
2581 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2582 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2583 for (m = 0; m < conf->copies; m++) {
2584 int dev = r10_bio->devs[m].devnum;
2585 rdev = conf->mirrors[dev].rdev;
2586 if (r10_bio->devs[m].bio == NULL)
2587 continue;
2588 if (test_bit(BIO_UPTODATE,
2589 &r10_bio->devs[m].bio->bi_flags)) {
2590 rdev_clear_badblocks(
2591 rdev,
2592 r10_bio->devs[m].addr,
2593 r10_bio->sectors, 0);
2594 } else {
2595 if (!rdev_set_badblocks(
2596 rdev,
2597 r10_bio->devs[m].addr,
2598 r10_bio->sectors, 0))
2599 md_error(conf->mddev, rdev);
2600 }
2601 rdev = conf->mirrors[dev].replacement;
2602 if (r10_bio->devs[m].repl_bio == NULL)
2603 continue;
2604 if (test_bit(BIO_UPTODATE,
2605 &r10_bio->devs[m].repl_bio->bi_flags)) {
2606 rdev_clear_badblocks(
2607 rdev,
2608 r10_bio->devs[m].addr,
2609 r10_bio->sectors, 0);
2610 } else {
2611 if (!rdev_set_badblocks(
2612 rdev,
2613 r10_bio->devs[m].addr,
2614 r10_bio->sectors, 0))
2615 md_error(conf->mddev, rdev);
2616 }
2617 }
2618 put_buf(r10_bio);
2619 } else {
2620 for (m = 0; m < conf->copies; m++) {
2621 int dev = r10_bio->devs[m].devnum;
2622 struct bio *bio = r10_bio->devs[m].bio;
2623 rdev = conf->mirrors[dev].rdev;
2624 if (bio == IO_MADE_GOOD) {
2625 rdev_clear_badblocks(
2626 rdev,
2627 r10_bio->devs[m].addr,
2628 r10_bio->sectors, 0);
2629 rdev_dec_pending(rdev, conf->mddev);
2630 } else if (bio != NULL &&
2631 !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2632 if (!narrow_write_error(r10_bio, m)) {
2633 md_error(conf->mddev, rdev);
2634 set_bit(R10BIO_Degraded,
2635 &r10_bio->state);
2636 }
2637 rdev_dec_pending(rdev, conf->mddev);
2638 }
2639 bio = r10_bio->devs[m].repl_bio;
2640 rdev = conf->mirrors[dev].replacement;
2641 if (rdev && bio == IO_MADE_GOOD) {
2642 rdev_clear_badblocks(
2643 rdev,
2644 r10_bio->devs[m].addr,
2645 r10_bio->sectors, 0);
2646 rdev_dec_pending(rdev, conf->mddev);
2647 }
2648 }
2649 if (test_bit(R10BIO_WriteError,
2650 &r10_bio->state))
2651 close_write(r10_bio);
2652 raid_end_bio_io(r10_bio);
2653 }
2654}
2655
2656static void raid10d(struct mddev *mddev)
2657{
2658 struct r10bio *r10_bio;
2659 unsigned long flags;
2660 struct r10conf *conf = mddev->private;
2661 struct list_head *head = &conf->retry_list;
2662 struct blk_plug plug;
2663
2664 md_check_recovery(mddev);
2665
2666 blk_start_plug(&plug);
2667 for (;;) {
2668
2669 if (atomic_read(&mddev->plug_cnt) == 0)
2670 flush_pending_writes(conf);
2671
2672 spin_lock_irqsave(&conf->device_lock, flags);
2673 if (list_empty(head)) {
2674 spin_unlock_irqrestore(&conf->device_lock, flags);
2675 break;
2676 }
2677 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2678 list_del(head->prev);
2679 conf->nr_queued--;
2680 spin_unlock_irqrestore(&conf->device_lock, flags);
2681
2682 mddev = r10_bio->mddev;
2683 conf = mddev->private;
2684 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2685 test_bit(R10BIO_WriteError, &r10_bio->state))
2686 handle_write_completed(conf, r10_bio);
2687 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2688 reshape_request_write(mddev, r10_bio);
2689 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2690 sync_request_write(mddev, r10_bio);
2691 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2692 recovery_request_write(mddev, r10_bio);
2693 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2694 handle_read_error(mddev, r10_bio);
2695 else {
2696 /* just a partial read to be scheduled from a
2697 * separate context
2698 */
2699 int slot = r10_bio->read_slot;
2700 generic_make_request(r10_bio->devs[slot].bio);
2701 }
2702
2703 cond_resched();
2704 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2705 md_check_recovery(mddev);
2706 }
2707 blk_finish_plug(&plug);
2708}
2709
2710
2711static int init_resync(struct r10conf *conf)
2712{
2713 int buffs;
2714 int i;
2715
2716 buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
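 /* e.g. with the defaults above: a 1MB window of 64KB blocks = 16 buffers */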
2717 BUG_ON(conf->r10buf_pool);
2718 conf->have_replacement = 0;
2719 for (i = 0; i < conf->geo.raid_disks; i++)
2720 if (conf->mirrors[i].replacement)
2721 conf->have_replacement = 1;
2722 conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2723 if (!conf->r10buf_pool)
2724 return -ENOMEM;
2725 conf->next_resync = 0;
2726 return 0;
2727}
2728
2729/*
2730 * perform a "sync" on one "block"
2731 *
2732 * We need to make sure that no normal I/O request - particularly write
2733 * requests - conflict with active sync requests.
2734 *
2735 * This is achieved by tracking pending requests and a 'barrier' concept
2736 * that can be installed to exclude normal IO requests.
2737 *
2738 * Resync and recovery are handled very differently.
2739 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2740 *
2741 * For resync, we iterate over virtual addresses, read all copies,
2742 * and update if there are differences. If only one copy is live,
2743 * skip it.
2744 * For recovery, we iterate over physical addresses, read a good
2745 * value for each non-in_sync drive, and over-write.
2746 *
2747 * So, for recovery we may have several outstanding complex requests for a
2748 * given address, one for each out-of-sync device. We model this by allocating
2749 * a number of r10_bio structures, one for each out-of-sync device.
2750 * As we setup these structures, we collect all bio's together into a list
2751 * which we then process collectively to add pages, and then process again
2752 * to pass to generic_make_request.
2753 *
2754 * The r10_bio structures are linked using a borrowed master_bio pointer.
2755 * This link is counted in ->remaining. When the r10_bio that points to NULL
2756 * has its remaining count decremented to 0, the whole complex operation
2757 * is complete.
2758 *
2759 */
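/*
 * Illustrative sketch (not normative): if devices A and B both need
 * recovery at the same virtual address, the r10_bios are chained
 * through the borrowed master_bio pointer:
 *
 *   r10_bio(B) --master_bio--> r10_bio(A) --master_bio--> NULL
 *
 * and creating each link bumps ->remaining on the r10_bio it points
 * to, so the complex operation only completes when the r10_bio that
 * points to NULL finally drops to zero.
 */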
2760
2761static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
2762 int *skipped, int go_faster)
2763{
2764 struct r10conf *conf = mddev->private;
2765 struct r10bio *r10_bio;
2766 struct bio *biolist = NULL, *bio;
2767 sector_t max_sector, nr_sectors;
2768 int i;
2769 int max_sync;
2770 sector_t sync_blocks;
2771 sector_t sectors_skipped = 0;
2772 int chunks_skipped = 0;
2773 sector_t chunk_mask = conf->geo.chunk_mask;
2774
2775 if (!conf->r10buf_pool)
2776 if (init_resync(conf))
2777 return 0;
2778
2779 skipped:
2780 max_sector = mddev->dev_sectors;
2781 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2782 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2783 max_sector = mddev->resync_max_sectors;
2784 if (sector_nr >= max_sector) {
2785 /* If we aborted, we need to abort the
2786 * sync on the 'current' bitmap chunks (there can
2787 * be several when recovering multiple devices),
2788 * as we may have started syncing them but not finished.
2789 * We can find the current address in
2790 * mddev->curr_resync, but for recovery,
2791 * we need to convert that to several
2792 * virtual addresses.
2793 */
2794 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2795 end_reshape(conf);
2796 return 0;
2797 }
2798
2799 if (mddev->curr_resync < max_sector) { /* aborted */
2800 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2801 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2802 &sync_blocks, 1);
2803 else for (i = 0; i < conf->geo.raid_disks; i++) {
2804 sector_t sect =
2805 raid10_find_virt(conf, mddev->curr_resync, i);
2806 bitmap_end_sync(mddev->bitmap, sect,
2807 &sync_blocks, 1);
2808 }
2809 } else {
2810 /* completed sync */
2811 if ((!mddev->bitmap || conf->fullsync)
2812 && conf->have_replacement
2813 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2814 /* Completed a full sync so the replacements
2815 * are now fully recovered.
2816 */
2817 for (i = 0; i < conf->geo.raid_disks; i++)
2818 if (conf->mirrors[i].replacement)
2819 conf->mirrors[i].replacement
2820 ->recovery_offset
2821 = MaxSector;
2822 }
2823 conf->fullsync = 0;
2824 }
2825 bitmap_close_sync(mddev->bitmap);
2826 close_sync(conf);
2827 *skipped = 1;
2828 return sectors_skipped;
2829 }
2830
2831 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2832 return reshape_request(mddev, sector_nr, skipped);
2833
2834 if (chunks_skipped >= conf->geo.raid_disks) {
2835 /* if there has been nothing to do on any drive,
2836 * then there is nothing to do at all..
2837 */
2838 *skipped = 1;
2839 return (max_sector - sector_nr) + sectors_skipped;
2840 }
2841
2842 if (max_sector > mddev->resync_max)
2843 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2844
2845 /* make sure whole request will fit in a chunk - if chunks
2846 * are meaningful
2847 */
2848 if (conf->geo.near_copies < conf->geo.raid_disks &&
2849 max_sector > (sector_nr | chunk_mask))
2850 max_sector = (sector_nr | chunk_mask) + 1;
2851 /*
2852 * If there is non-resync activity waiting for us then
2853 * put in a delay to throttle resync.
2854 */
2855 if (!go_faster && conf->nr_waiting)
2856 msleep_interruptible(1000);
2857
2858 /* Again, very different code for resync and recovery.
2859 * Both must result in an r10bio with a list of bios that
2860 * have bi_end_io, bi_sector, bi_bdev set,
2861 * and bi_private set to the r10bio.
2862 * For recovery, we may actually create several r10bios
2863 * with 2 bios in each, that correspond to the bios in the main one.
2864 * In this case, the subordinate r10bios link back through a
2865 * borrowed master_bio pointer, and the counter in the master
2866 * includes a ref from each subordinate.
2867 */
2868 /* First, we decide what to do and set ->bi_end_io:
2869 * to end_sync_read if we want to read, and
2870 * end_sync_write if we will want to write.
2871 */
2872
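 /* e.g. with 4KB pages: 16 resync pages -> at most 128 sectors (64KB) */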
2873 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
2874 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2875 /* recovery... the complicated one */
2876 int j;
2877 r10_bio = NULL;
2878
2879 for (i = 0 ; i < conf->geo.raid_disks; i++) {
2880 int still_degraded;
2881 struct r10bio *rb2;
2882 sector_t sect;
2883 int must_sync;
2884 int any_working;
2885 struct mirror_info *mirror = &conf->mirrors[i];
2886
2887 if ((mirror->rdev == NULL ||
2888 test_bit(In_sync, &mirror->rdev->flags))
2889 &&
2890 (mirror->replacement == NULL ||
2891 test_bit(Faulty,
2892 &mirror->replacement->flags)))
2893 continue;
2894
2895 still_degraded = 0;
2896 /* want to reconstruct this device */
2897 rb2 = r10_bio;
2898 sect = raid10_find_virt(conf, sector_nr, i);
2899 if (sect >= mddev->resync_max_sectors) {
2900 /* last stripe is not complete - don't
2901 * try to recover this sector.
2902 */
2903 continue;
2904 }
2905 /* Unless we are doing a full sync, or a replacement
2906 * we only need to recover the block if it is set in
2907 * the bitmap
2908 */
2909 must_sync = bitmap_start_sync(mddev->bitmap, sect,
2910 &sync_blocks, 1);
2911 if (sync_blocks < max_sync)
2912 max_sync = sync_blocks;
2913 if (!must_sync &&
2914 mirror->replacement == NULL &&
2915 !conf->fullsync) {
2916 /* yep, skip the sync_blocks here, but don't assume
2917 * that there will never be anything to do here
2918 */
2919 chunks_skipped = -1;
2920 continue;
2921 }
2922
2923 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2924 raise_barrier(conf, rb2 != NULL);
2925 atomic_set(&r10_bio->remaining, 0);
2926
2927 r10_bio->master_bio = (struct bio*)rb2;
2928 if (rb2)
2929 atomic_inc(&rb2->remaining);
2930 r10_bio->mddev = mddev;
2931 set_bit(R10BIO_IsRecover, &r10_bio->state);
2932 r10_bio->sector = sect;
2933
2934 raid10_find_phys(conf, r10_bio);
2935
2936 /* Need to check if the array will still be
2937 * degraded
2938 */
2939 for (j = 0; j < conf->geo.raid_disks; j++)
2940 if (conf->mirrors[j].rdev == NULL ||
2941 test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
2942 still_degraded = 1;
2943 break;
2944 }
2945
2946 must_sync = bitmap_start_sync(mddev->bitmap, sect,
2947 &sync_blocks, still_degraded);
2948
2949 any_working = 0;
2950 for (j = 0; j < conf->copies; j++) {
2951 int k;
2952 int d = r10_bio->devs[j].devnum;
2953 sector_t from_addr, to_addr;
2954 struct md_rdev *rdev;
2955 sector_t sector, first_bad;
2956 int bad_sectors;
2957 if (!conf->mirrors[d].rdev ||
2958 !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
2959 continue;
2960 /* This is where we read from */
2961 any_working = 1;
2962 rdev = conf->mirrors[d].rdev;
2963 sector = r10_bio->devs[j].addr;
2964
2965 if (is_badblock(rdev, sector, max_sync,
2966 &first_bad, &bad_sectors)) {
2967 if (first_bad > sector)
2968 max_sync = first_bad - sector;
2969 else {
2970 bad_sectors -= (sector
2971 - first_bad);
2972 if (max_sync > bad_sectors)
2973 max_sync = bad_sectors;
2974 continue;
2975 }
2976 }
2977 bio = r10_bio->devs[0].bio;
2978 bio->bi_next = biolist;
2979 biolist = bio;
2980 bio->bi_private = r10_bio;
2981 bio->bi_end_io = end_sync_read;
2982 bio->bi_rw = READ;
2983 from_addr = r10_bio->devs[j].addr;
2984 bio->bi_sector = from_addr + rdev->data_offset;
2985 bio->bi_bdev = rdev->bdev;
2986 atomic_inc(&rdev->nr_pending);
2987 /* and we write to 'i' (if not in_sync) */
2988
2989 for (k=0; k<conf->copies; k++)
2990 if (r10_bio->devs[k].devnum == i)
2991 break;
2992 BUG_ON(k == conf->copies);
2993 to_addr = r10_bio->devs[k].addr;
2994 r10_bio->devs[0].devnum = d;
2995 r10_bio->devs[0].addr = from_addr;
2996 r10_bio->devs[1].devnum = i;
2997 r10_bio->devs[1].addr = to_addr;
2998
2999 rdev = mirror->rdev;
3000 if (!test_bit(In_sync, &rdev->flags)) {
3001 bio = r10_bio->devs[1].bio;
3002 bio->bi_next = biolist;
3003 biolist = bio;
3004 bio->bi_private = r10_bio;
3005 bio->bi_end_io = end_sync_write;
3006 bio->bi_rw = WRITE;
3007 bio->bi_sector = to_addr
3008 + rdev->data_offset;
3009 bio->bi_bdev = rdev->bdev;
3010 atomic_inc(&r10_bio->remaining);
3011 } else
3012 r10_bio->devs[1].bio->bi_end_io = NULL;
3013
3014 /* and maybe write to replacement */
3015 bio = r10_bio->devs[1].repl_bio;
3016 if (bio)
3017 bio->bi_end_io = NULL;
3018 rdev = mirror->replacement;
3019 /* Note: if rdev != NULL, then bio
3020 * cannot be NULL as r10buf_pool_alloc will
3021 * have allocated it.
3022 * So the second test here is pointless.
3023 * But it keeps semantic-checkers happy, and
3024 * this comment keeps human reviewers
3025 * happy.
3026 */
3027 if (rdev == NULL || bio == NULL ||
3028 test_bit(Faulty, &rdev->flags))
3029 break;
3030 bio->bi_next = biolist;
3031 biolist = bio;
3032 bio->bi_private = r10_bio;
3033 bio->bi_end_io = end_sync_write;
3034 bio->bi_rw = WRITE;
3035 bio->bi_sector = to_addr + rdev->data_offset;
3036 bio->bi_bdev = rdev->bdev;
3037 atomic_inc(&r10_bio->remaining);
3038 break;
3039 }
3040 if (j == conf->copies) {
3041 /* Cannot recover, so abort the recovery or
3042 * record a bad block */
3043 put_buf(r10_bio);
3044 if (rb2)
3045 atomic_dec(&rb2->remaining);
3046 r10_bio = rb2;
3047 if (any_working) {
3048 /* problem is that there are bad blocks
3049 * on other device(s)
3050 */
3051 int k;
3052 for (k = 0; k < conf->copies; k++)
3053 if (r10_bio->devs[k].devnum == i)
3054 break;
3055 if (!test_bit(In_sync,
3056 &mirror->rdev->flags)
3057 && !rdev_set_badblocks(
3058 mirror->rdev,
3059 r10_bio->devs[k].addr,
3060 max_sync, 0))
3061 any_working = 0;
3062 if (mirror->replacement &&
3063 !rdev_set_badblocks(
3064 mirror->replacement,
3065 r10_bio->devs[k].addr,
3066 max_sync, 0))
3067 any_working = 0;
3068 }
3069 if (!any_working) {
3070 if (!test_and_set_bit(MD_RECOVERY_INTR,
3071 &mddev->recovery))
3072 printk(KERN_INFO "md/raid10:%s: insufficient "
3073 "working devices for recovery.\n",
3074 mdname(mddev));
3075 mirror->recovery_disabled
3076 = mddev->recovery_disabled;
3077 }
3078 break;
3079 }
3080 }
3081 if (biolist == NULL) {
3082 while (r10_bio) {
3083 struct r10bio *rb2 = r10_bio;
3084 r10_bio = (struct r10bio*) rb2->master_bio;
3085 rb2->master_bio = NULL;
3086 put_buf(rb2);
3087 }
3088 goto giveup;
3089 }
3090 } else {
3091 /* resync. Schedule a read for every block at this virt offset */
3092 int count = 0;
3093
3094 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3095
3096 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3097 &sync_blocks, mddev->degraded) &&
3098 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3099 &mddev->recovery)) {
3100 /* We can skip this block */
3101 *skipped = 1;
3102 return sync_blocks + sectors_skipped;
3103 }
3104 if (sync_blocks < max_sync)
3105 max_sync = sync_blocks;
3106 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3107
3108 r10_bio->mddev = mddev;
3109 atomic_set(&r10_bio->remaining, 0);
3110 raise_barrier(conf, 0);
3111 conf->next_resync = sector_nr;
3112
3113 r10_bio->master_bio = NULL;
3114 r10_bio->sector = sector_nr;
3115 set_bit(R10BIO_IsSync, &r10_bio->state);
3116 raid10_find_phys(conf, r10_bio);
3117 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3118
3119 for (i = 0; i < conf->copies; i++) {
3120 int d = r10_bio->devs[i].devnum;
3121 sector_t first_bad, sector;
3122 int bad_sectors;
3123
3124 if (r10_bio->devs[i].repl_bio)
3125 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3126
3127 bio = r10_bio->devs[i].bio;
3128 bio->bi_end_io = NULL;
3129 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3130 if (conf->mirrors[d].rdev == NULL ||
3131 test_bit(Faulty, &conf->mirrors[d].rdev->flags))
3132 continue;
3133 sector = r10_bio->devs[i].addr;
3134 if (is_badblock(conf->mirrors[d].rdev,
3135 sector, max_sync,
3136 &first_bad, &bad_sectors)) {
3137 if (first_bad > sector)
3138 max_sync = first_bad - sector;
3139 else {
3140 bad_sectors -= (sector - first_bad);
3141 if (max_sync > bad_sectors)
3142 max_sync = bad_sectors;
3143 continue;
3144 }
3145 }
3146 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3147 atomic_inc(&r10_bio->remaining);
3148 bio->bi_next = biolist;
3149 biolist = bio;
3150 bio->bi_private = r10_bio;
3151 bio->bi_end_io = end_sync_read;
3152 bio->bi_rw = READ;
3153 bio->bi_sector = sector +
3154 conf->mirrors[d].rdev->data_offset;
3155 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3156 count++;
3157
3158 if (conf->mirrors[d].replacement == NULL ||
3159 test_bit(Faulty,
3160 &conf->mirrors[d].replacement->flags))
3161 continue;
3162
3163 /* Need to set up for writing to the replacement */
3164 bio = r10_bio->devs[i].repl_bio;
3165 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3166
3167 sector = r10_bio->devs[i].addr;
3168 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3169 bio->bi_next = biolist;
3170 biolist = bio;
3171 bio->bi_private = r10_bio;
3172 bio->bi_end_io = end_sync_write;
3173 bio->bi_rw = WRITE;
3174 bio->bi_sector = sector +
3175 conf->mirrors[d].replacement->data_offset;
3176 bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3177 count++;
3178 }
3179
3180 if (count < 2) {
3181 for (i=0; i<conf->copies; i++) {
3182 int d = r10_bio->devs[i].devnum;
3183 if (r10_bio->devs[i].bio->bi_end_io)
3184 rdev_dec_pending(conf->mirrors[d].rdev,
3185 mddev);
3186 if (r10_bio->devs[i].repl_bio &&
3187 r10_bio->devs[i].repl_bio->bi_end_io)
3188 rdev_dec_pending(
3189 conf->mirrors[d].replacement,
3190 mddev);
3191 }
3192 put_buf(r10_bio);
3193 biolist = NULL;
3194 goto giveup;
3195 }
3196 }
3197
3198 for (bio = biolist; bio ; bio=bio->bi_next) {
3199
3200 bio->bi_flags &= ~(BIO_POOL_MASK - 1);
3201 if (bio->bi_end_io)
3202 bio->bi_flags |= 1 << BIO_UPTODATE;
3203 bio->bi_vcnt = 0;
3204 bio->bi_idx = 0;
3205 bio->bi_phys_segments = 0;
3206 bio->bi_size = 0;
3207 }
3208
3209 nr_sectors = 0;
3210 if (sector_nr + max_sync < max_sector)
3211 max_sector = sector_nr + max_sync;
3212 do {
3213 struct page *page;
3214 int len = PAGE_SIZE;
3215 if (sector_nr + (len>>9) > max_sector)
3216 len = (max_sector - sector_nr) << 9;
3217 if (len == 0)
3218 break;
3219 for (bio = biolist; bio; bio = bio->bi_next) {
3220 struct bio *bio2;
3221 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
3222 if (bio_add_page(bio, page, len, 0))
3223 continue;
3224
3225 /* stop here */
3226 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3227 for (bio2 = biolist;
3228 bio2 && bio2 != bio;
3229 bio2 = bio2->bi_next) {
3230 /* remove last page from this bio */
3231 bio2->bi_vcnt--;
3232 bio2->bi_size -= len;
3233 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
3234 }
3235 goto bio_full;
3236 }
3237 nr_sectors += len>>9;
3238 sector_nr += len>>9;
3239 } while (biolist->bi_vcnt < RESYNC_PAGES);
3240 bio_full:
3241 r10_bio->sectors = nr_sectors;
3242
3243 while (biolist) {
3244 bio = biolist;
3245 biolist = biolist->bi_next;
3246
3247 bio->bi_next = NULL;
3248 r10_bio = bio->bi_private;
3249 r10_bio->sectors = nr_sectors;
3250
3251 if (bio->bi_end_io == end_sync_read) {
3252 md_sync_acct(bio->bi_bdev, nr_sectors);
3253 generic_make_request(bio);
3254 }
3255 }
3256
3257 if (sectors_skipped)
3258 /* pretend they weren't skipped, it makes
3259 * no important difference in this case
3260 */
3261 md_done_sync(mddev, sectors_skipped, 1);
3262
3263 return sectors_skipped + nr_sectors;
3264 giveup:
3265 /* There is nowhere to write, so all non-sync
3266 * drives must be failed or in resync, or all drives
3267 * have a bad block, so try the next chunk...
3268 */
3269 if (sector_nr + max_sync < max_sector)
3270 max_sector = sector_nr + max_sync;
3271
3272 sectors_skipped += (max_sector - sector_nr);
3273 chunks_skipped ++;
3274 sector_nr = max_sector;
3275 goto skipped;
3276}
3277
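/*
 * Worked example for the calculation below (illustrative geometry,
 * not a requirement): 4 disks, near_copies = 2, far_copies = 1,
 * chunk_shift = 9 (512-sector chunks), 1000000 sectors per device:
 *   1000000 >> 9 = 1953 chunks; / 1 * 4 / 2 = 3906 chunks;
 *   3906 << 9 = 1999872 usable array sectors.
 */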
3278static sector_t
3279raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3280{
3281 sector_t size;
3282 struct r10conf *conf = mddev->private;
3283
3284 if (!raid_disks)
3285 raid_disks = min(conf->geo.raid_disks,
3286 conf->prev.raid_disks);
3287 if (!sectors)
3288 sectors = conf->dev_sectors;
3289
3290 size = sectors >> conf->geo.chunk_shift;
3291 sector_div(size, conf->geo.far_copies);
3292 size = size * raid_disks;
3293 sector_div(size, conf->geo.near_copies);
3294
3295 return size << conf->geo.chunk_shift;
3296}
3297
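/*
 * Note (illustrative): per the code below, when far_offset is set the
 * stride collapses to a single chunk; otherwise it is the used device
 * size divided by far_copies.
 */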
3298static void calc_sectors(struct r10conf *conf, sector_t size)
3299{
3300 /* Calculate the number of sectors-per-device that will
3301 * actually be used, and set conf->dev_sectors and
3302 * conf->stride
3303 */
3304
3305 size = size >> conf->geo.chunk_shift;
3306 sector_div(size, conf->geo.far_copies);
3307 size = size * conf->geo.raid_disks;
3308 sector_div(size, conf->geo.near_copies);
3309 /* 'size' is now the number of chunks in the array */
3310 /* calculate "used chunks per device" */
3311 size = size * conf->copies;
3312
3313 /* We need to round up when dividing by raid_disks to
3314 * get the stride size.
3315 */
3316 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3317
3318 conf->dev_sectors = size << conf->geo.chunk_shift;
3319
3320 if (conf->geo.far_offset)
3321 conf->geo.stride = 1 << conf->geo.chunk_shift;
3322 else {
3323 sector_div(size, conf->geo.far_copies);
3324 conf->geo.stride = size << conf->geo.chunk_shift;
3325 }
3326}
3327
3328enum geo_type {geo_new, geo_old, geo_start};
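/*
 * Layout decoding example (illustrative values): a typical "near=2"
 * layout is 0x102 -> near_copies = 2, far_copies = 1, far_offset
 * clear; 0x10201 would decode to near_copies = 1, far_copies = 2
 * with far_offset set (bit 16).
 */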
3329static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3330{
3331 int nc, fc, fo;
3332 int layout, chunk, disks;
3333 switch (new) {
3334 case geo_old:
3335 layout = mddev->layout;
3336 chunk = mddev->chunk_sectors;
3337 disks = mddev->raid_disks - mddev->delta_disks;
3338 break;
3339 case geo_new:
3340 layout = mddev->new_layout;
3341 chunk = mddev->new_chunk_sectors;
3342 disks = mddev->raid_disks;
3343 break;
3344 default: /* avoid 'may be unused' warnings */
3345 case geo_start: /* new when starting reshape - raid_disks not
3346 * updated yet. */
3347 layout = mddev->new_layout;
3348 chunk = mddev->new_chunk_sectors;
3349 disks = mddev->raid_disks + mddev->delta_disks;
3350 break;
3351 }
3352 if (layout >> 17)
3353 return -1;
3354 if (chunk < (PAGE_SIZE >> 9) ||
3355 !is_power_of_2(chunk))
3356 return -2;
3357 nc = layout & 255;
3358 fc = (layout >> 8) & 255;
3359 fo = layout & (1<<16);
3360 geo->raid_disks = disks;
3361 geo->near_copies = nc;
3362 geo->far_copies = fc;
3363 geo->far_offset = fo;
3364 geo->chunk_mask = chunk - 1;
3365 geo->chunk_shift = ffz(~chunk);
3366 return nc*fc;
3367}
3368
3369static struct r10conf *setup_conf(struct mddev *mddev)
3370{
3371 struct r10conf *conf = NULL;
3372 int err = -EINVAL;
3373 struct geom geo;
3374 int copies;
3375
3376 copies = setup_geo(&geo, mddev, geo_new);
3377
3378 if (copies == -2) {
3379 printk(KERN_ERR "md/raid10:%s: chunk size must be "
3380 "at least PAGE_SIZE(%ld) and be a power of 2.\n",
3381 mdname(mddev), PAGE_SIZE);
3382 goto out;
3383 }
3384
3385 if (copies < 2 || copies > mddev->raid_disks) {
3386 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3387 mdname(mddev), mddev->new_layout);
3388 goto out;
3389 }
3390
3391 err = -ENOMEM;
3392 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3393 if (!conf)
3394 goto out;
3395
3396 /* FIXME calc properly */
3397 conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks +
3398 max(0,mddev->delta_disks)),
3399 GFP_KERNEL);
3400 if (!conf->mirrors)
3401 goto out;
3402
3403 conf->tmppage = alloc_page(GFP_KERNEL);
3404 if (!conf->tmppage)
3405 goto out;
3406
3407 conf->geo = geo;
3408 conf->copies = copies;
3409 conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3410 r10bio_pool_free, conf);
3411 if (!conf->r10bio_pool)
3412 goto out;
3413
3414 calc_sectors(conf, mddev->dev_sectors);
3415 if (mddev->reshape_position == MaxSector) {
3416 conf->prev = conf->geo;
3417 conf->reshape_progress = MaxSector;
3418 } else {
3419 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3420 err = -EINVAL;
3421 goto out;
3422 }
3423 conf->reshape_progress = mddev->reshape_position;
3424 if (conf->prev.far_offset)
3425 conf->prev.stride = 1 << conf->prev.chunk_shift;
3426 else
3427 /* far_copies must be 1 */
3428 conf->prev.stride = conf->dev_sectors;
3429 }
3430 spin_lock_init(&conf->device_lock);
3431 INIT_LIST_HEAD(&conf->retry_list);
3432
3433 spin_lock_init(&conf->resync_lock);
3434 init_waitqueue_head(&conf->wait_barrier);
3435
3436 conf->thread = md_register_thread(raid10d, mddev, "raid10");
3437 if (!conf->thread)
3438 goto out;
3439
3440 conf->mddev = mddev;
3441 return conf;
3442
3443 out:
3444 if (err == -ENOMEM)
3445 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3446 mdname(mddev));
3447 if (conf) {
3448 if (conf->r10bio_pool)
3449 mempool_destroy(conf->r10bio_pool);
3450 kfree(conf->mirrors);
3451 safe_put_page(conf->tmppage);
3452 kfree(conf);
3453 }
3454 return ERR_PTR(err);
3455}
3456
3457static int run(struct mddev *mddev)
3458{
3459 struct r10conf *conf;
3460 int i, disk_idx, chunk_size;
3461 struct mirror_info *disk;
3462 struct md_rdev *rdev;
3463 sector_t size;
3464 sector_t min_offset_diff = 0;
3465 int first = 1;
3466
3467 if (mddev->private == NULL) {
3468 conf = setup_conf(mddev);
3469 if (IS_ERR(conf))
3470 return PTR_ERR(conf);
3471 mddev->private = conf;
3472 }
3473 conf = mddev->private;
3474 if (!conf)
3475 goto out;
3476
3477 mddev->thread = conf->thread;
3478 conf->thread = NULL;
3479
3480 chunk_size = mddev->chunk_sectors << 9;
3481 blk_queue_io_min(mddev->queue, chunk_size);
3482 if (conf->geo.raid_disks % conf->geo.near_copies)
3483 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3484 else
3485 blk_queue_io_opt(mddev->queue, chunk_size *
3486 (conf->geo.raid_disks / conf->geo.near_copies));
3487
3488 rdev_for_each(rdev, mddev) {
3489 long long diff;
3490 struct request_queue *q;
3491
3492 disk_idx = rdev->raid_disk;
3493 if (disk_idx < 0)
3494 continue;
3495 if (disk_idx >= conf->geo.raid_disks &&
3496 disk_idx >= conf->prev.raid_disks)
3497 continue;
3498 disk = conf->mirrors + disk_idx;
3499
3500 if (test_bit(Replacement, &rdev->flags)) {
3501 if (disk->replacement)
3502 goto out_free_conf;
3503 disk->replacement = rdev;
3504 } else {
3505 if (disk->rdev)
3506 goto out_free_conf;
3507 disk->rdev = rdev;
3508 }
3509 q = bdev_get_queue(rdev->bdev);
3510 if (q->merge_bvec_fn)
3511 mddev->merge_check_needed = 1;
3512 diff = (rdev->new_data_offset - rdev->data_offset);
3513 if (!mddev->reshape_backwards)
3514 diff = -diff;
3515 if (diff < 0)
3516 diff = 0;
3517 if (first || diff < min_offset_diff)
3518 min_offset_diff = diff;
3519
3520 disk_stack_limits(mddev->gendisk, rdev->bdev,
3521 rdev->data_offset << 9);
3522
3523 disk->head_position = 0;
3524 }
3525
3526 /* need to check that every block has at least one working mirror */
3527 if (!enough(conf, -1)) {
3528 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
3529 mdname(mddev));
3530 goto out_free_conf;
3531 }
3532
3533 if (conf->reshape_progress != MaxSector) {
3534 /* must ensure that shape change is supported */
3535 if (conf->geo.far_copies != 1 &&
3536 conf->geo.far_offset == 0)
3537 goto out_free_conf;
3538 if (conf->prev.far_copies != 1 &&
3539 conf->prev.far_offset == 0)
3540 goto out_free_conf;
3541 }
3542
3543 mddev->degraded = 0;
3544 for (i = 0;
3545 i < conf->geo.raid_disks
3546 || i < conf->prev.raid_disks;
3547 i++) {
3548
3549 disk = conf->mirrors + i;
3550
3551 if (!disk->rdev && disk->replacement) {
3552 /* The replacement is all we have - use it */
3553 disk->rdev = disk->replacement;
3554 disk->replacement = NULL;
3555 clear_bit(Replacement, &disk->rdev->flags);
3556 }
3557
3558 if (!disk->rdev ||
3559 !test_bit(In_sync, &disk->rdev->flags)) {
3560 disk->head_position = 0;
3561 mddev->degraded++;
3562 if (disk->rdev)
3563 conf->fullsync = 1;
3564 }
3565 disk->recovery_disabled = mddev->recovery_disabled - 1;
3566 }
3567
3568 if (mddev->recovery_cp != MaxSector)
3569 printk(KERN_NOTICE "md/raid10:%s: not clean"
3570 " -- starting background reconstruction\n",
3571 mdname(mddev));
3572 printk(KERN_INFO
3573 "md/raid10:%s: active with %d out of %d devices\n",
3574 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3575 conf->geo.raid_disks);
3576 /*
3577 * Ok, everything is just fine now
3578 */
3579 mddev->dev_sectors = conf->dev_sectors;
3580 size = raid10_size(mddev, 0, 0);
3581 md_set_array_sectors(mddev, size);
3582 mddev->resync_max_sectors = size;
3583
3584 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
3585 mddev->queue->backing_dev_info.congested_data = mddev;
3586
3587 /* Calculate max read-ahead size.
3588 * We need to readahead at least twice a whole stripe....
3589 * maybe...
3590 */
3591 {
3592 int stripe = conf->geo.raid_disks *
3593 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
3594 stripe /= conf->geo.near_copies;
3595 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3596 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3597 }
3598
3599 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
3600
3601 if (md_integrity_register(mddev))
3602 goto out_free_conf;
3603
3604 if (conf->reshape_progress != MaxSector) {
3605 unsigned long before_length, after_length;
3606
3607 before_length = ((1 << conf->prev.chunk_shift) *
3608 conf->prev.far_copies);
3609 after_length = ((1 << conf->geo.chunk_shift) *
3610 conf->geo.far_copies);
3611
3612 if (max(before_length, after_length) > min_offset_diff) {
3613 /* This cannot work */
3614 printk("md/raid10: offset difference not enough to continue reshape\n");
3615 goto out_free_conf;
3616 }
3617 conf->offset_diff = min_offset_diff;
3618
3619 conf->reshape_safe = conf->reshape_progress;
3620 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3621 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3622 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3623 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3624 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3625 "reshape");
3626 }
3627
3628 return 0;
3629
3630out_free_conf:
3631 md_unregister_thread(&mddev->thread);
3632 if (conf->r10bio_pool)
3633 mempool_destroy(conf->r10bio_pool);
3634 safe_put_page(conf->tmppage);
3635 kfree(conf->mirrors);
3636 kfree(conf);
3637 mddev->private = NULL;
3638out:
3639 return -EIO;
3640}
3641
3642static int stop(struct mddev *mddev)
3643{
3644 struct r10conf *conf = mddev->private;
3645
3646 raise_barrier(conf, 0);
3647 lower_barrier(conf);
3648
3649 md_unregister_thread(&mddev->thread);
3650 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
3651 if (conf->r10bio_pool)
3652 mempool_destroy(conf->r10bio_pool);
3653 kfree(conf->mirrors);
3654 kfree(conf);
3655 mddev->private = NULL;
3656 return 0;
3657}
3658
3659static void raid10_quiesce(struct mddev *mddev, int state)
3660{
3661 struct r10conf *conf = mddev->private;
3662
3663 switch(state) {
3664 case 1:
3665 raise_barrier(conf, 0);
3666 break;
3667 case 0:
3668 lower_barrier(conf);
3669 break;
3670 }
3671}
3672
3673static int raid10_resize(struct mddev *mddev, sector_t sectors)
3674{
3675 /* Resize of 'far' arrays is not supported.
3676 * For 'near' and 'offset' arrays we can set the
3677 * number of sectors used to be an appropriate multiple
3678 * of the chunk size.
3679 * For 'offset', this is far_copies*chunksize.
3680 * For 'near' the multiplier is the LCM of
3681 * near_copies and raid_disks.
3682 * So if far_copies > 1 && !far_offset, fail.
3683 * Else find LCM(raid_disks, near_copies)*far_copies and
3684 * multiply by chunk_size. Then round to this number.
3685 * This is mostly done by raid10_size()
3686 */
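 /* Illustrative example: 4 disks, near_copies = 2, far_copies = 1,
  * 512-sector chunks -> LCM(4, 2) * 1 * 512 = 2048, so the size is
  * rounded to a multiple of 2048 sectors.
  */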
3687 struct r10conf *conf = mddev->private;
3688 sector_t oldsize, size;
3689
3690 if (mddev->reshape_position != MaxSector)
3691 return -EBUSY;
3692
3693 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3694 return -EINVAL;
3695
3696 oldsize = raid10_size(mddev, 0, 0);
3697 size = raid10_size(mddev, sectors, 0);
3698 if (mddev->external_size &&
3699 mddev->array_sectors > size)
3700 return -EINVAL;
3701 if (mddev->bitmap) {
3702 int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3703 if (ret)
3704 return ret;
3705 }
3706 md_set_array_sectors(mddev, size);
3707 set_capacity(mddev->gendisk, mddev->array_sectors);
3708 revalidate_disk(mddev->gendisk);
3709 if (sectors > mddev->dev_sectors &&
3710 mddev->recovery_cp > oldsize) {
3711 mddev->recovery_cp = oldsize;
3712 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3713 }
3714 calc_sectors(conf, sectors);
3715 mddev->dev_sectors = conf->dev_sectors;
3716 mddev->resync_max_sectors = size;
3717 return 0;
3718}
3719
3720static void *raid10_takeover_raid0(struct mddev *mddev)
3721{
3722 struct md_rdev *rdev;
3723 struct r10conf *conf;
3724
3725 if (mddev->degraded > 0) {
3726 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3727 mdname(mddev));
3728 return ERR_PTR(-EINVAL);
3729 }
3730
3731 /* Set new parameters */
3732 mddev->new_level = 10;
3733 /* new layout: far_copies = 1, near_copies = 2 */
3734 mddev->new_layout = (1<<8) + 2;
3735 mddev->new_chunk_sectors = mddev->chunk_sectors;
3736 mddev->delta_disks = mddev->raid_disks;
3737 mddev->raid_disks *= 2;
3738 /* make sure it will be not marked as dirty */
3739 mddev->recovery_cp = MaxSector;
3740
3741 conf = setup_conf(mddev);
3742 if (!IS_ERR(conf)) {
3743 rdev_for_each(rdev, mddev)
3744 if (rdev->raid_disk >= 0)
3745 rdev->new_raid_disk = rdev->raid_disk * 2;
3746 conf->barrier = 1;
3747 }
3748
3749 return conf;
3750}
3751
3752static void *raid10_takeover(struct mddev *mddev)
3753{
3754 struct r0conf *raid0_conf;
3755
3756 /* raid10 can take over:
3757 * raid0 - providing it has only two drives
3758 */
3759 if (mddev->level == 0) {
3760 /* for raid0 takeover only one zone is supported */
3761 raid0_conf = mddev->private;
3762 if (raid0_conf->nr_strip_zones > 1) {
3763 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3764 " with more than one zone.\n",
3765 mdname(mddev));
3766 return ERR_PTR(-EINVAL);
3767 }
3768 return raid10_takeover_raid0(mddev);
3769 }
3770 return ERR_PTR(-EINVAL);
3771}
3772
3773static int raid10_check_reshape(struct mddev *mddev)
3774{
3775 /* Called when there is a request to change
3776 * - layout (to ->new_layout)
3777 * - chunk size (to ->new_chunk_sectors)
3778 * - raid_disks (by delta_disks)
3779 * or when trying to restart a reshape that was ongoing.
3780 *
3781 * We need to validate the request and possibly allocate
3782 * space if that might be an issue later.
3783 *
3784 * Currently we reject any reshape of a 'far' mode array,
3785 * allow chunk size to change if new is generally acceptable,
3786 * allow raid_disks to increase, and allow
3787 * a switch between 'near' mode and 'offset' mode.
3788 */
3789 struct r10conf *conf = mddev->private;
3790 struct geom geo;
3791
3792 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3793 return -EINVAL;
3794
3795 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3796 /* mustn't change number of copies */
3797 return -EINVAL;
3798 if (geo.far_copies > 1 && !geo.far_offset)
3799 /* Cannot switch to 'far' mode */
3800 return -EINVAL;
3801
3802 if (mddev->array_sectors & geo.chunk_mask)
3803 /* not factor of array size */
3804 return -EINVAL;
3805
3806 if (!enough(conf, -1))
3807 return -EINVAL;
3808
3809 kfree(conf->mirrors_new);
3810 conf->mirrors_new = NULL;
3811 if (mddev->delta_disks > 0) {
3812 /* allocate new 'mirrors' list */
3813 conf->mirrors_new = kzalloc(
3814 sizeof(struct mirror_info)
3815 *(mddev->raid_disks +
3816 mddev->delta_disks),
3817 GFP_KERNEL);
3818 if (!conf->mirrors_new)
3819 return -ENOMEM;
3820 }
3821 return 0;
3822}
3823
3824/*
3825 * Need to check if array has failed when deciding whether to:
3826 * - start an array
3827 * - remove non-faulty devices
3828 * - add a spare
3829 * - allow a reshape
3830 * This determination is simple when no reshape is happening.
3831 * However if there is a reshape, we need to carefully check
3832 * both the before and after sections.
3833 * This is because some failed devices may only affect one
3834 * of the two sections, and some non-in_sync devices may
3835 * be insync in the section most affected by failed devices.
3836 */
3837static int calc_degraded(struct r10conf *conf)
3838{
3839 int degraded, degraded2;
3840 int i;
3841
3842 rcu_read_lock();
3843 degraded = 0;
3844 /* 'prev' section first */
3845 for (i = 0; i < conf->prev.raid_disks; i++) {
3846 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3847 if (!rdev || test_bit(Faulty, &rdev->flags))
3848 degraded++;
3849 else if (!test_bit(In_sync, &rdev->flags))
3850 /* When we can reduce the number of devices in
3851 * an array, this might not contribute to
3852 * 'degraded'. It does now.
3853 */
3854 degraded++;
3855 }
3856 rcu_read_unlock();
3857 if (conf->geo.raid_disks == conf->prev.raid_disks)
3858 return degraded;
3859 rcu_read_lock();
3860 degraded2 = 0;
3861 for (i = 0; i < conf->geo.raid_disks; i++) {
3862 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3863 if (!rdev || test_bit(Faulty, &rdev->flags))
3864 degraded2++;
3865 else if (!test_bit(In_sync, &rdev->flags)) {
3866 /* If reshape is increasing the number of devices,
3867 * this section has already been recovered, so
3868 * it doesn't contribute to degraded.
3869 * else it does.
3870 */
3871 if (conf->geo.raid_disks <= conf->prev.raid_disks)
3872 degraded2++;
3873 }
3874 }
3875 rcu_read_unlock();
3876 if (degraded2 > degraded)
3877 return degraded2;
3878 return degraded;
3879}
3880
3881static int raid10_start_reshape(struct mddev *mddev)
3882{
3883 /* A 'reshape' has been requested. This commits
3884 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
3885 * This also checks if there are enough spares and adds them
3886 * to the array.
3887 * We currently require enough spares to make the final
3888 * array non-degraded. We also require that the difference
3889 * between old and new data_offset - on each device - is
3890 * enough that we never risk over-writing.
3891 */
3892
3893 unsigned long before_length, after_length;
3894 sector_t min_offset_diff = 0;
3895 int first = 1;
3896 struct geom new;
3897 struct r10conf *conf = mddev->private;
3898 struct md_rdev *rdev;
3899 int spares = 0;
3900 int ret;
3901
3902 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3903 return -EBUSY;
3904
3905 if (setup_geo(&new, mddev, geo_start) != conf->copies)
3906 return -EINVAL;
3907
3908 before_length = ((1 << conf->prev.chunk_shift) *
3909 conf->prev.far_copies);
3910 after_length = ((1 << conf->geo.chunk_shift) *
3911 conf->geo.far_copies);
3912
3913 rdev_for_each(rdev, mddev) {
3914 if (!test_bit(In_sync, &rdev->flags)
3915 && !test_bit(Faulty, &rdev->flags))
3916 spares++;
3917 if (rdev->raid_disk >= 0) {
3918 long long diff = (rdev->new_data_offset
3919 - rdev->data_offset);
3920 if (!mddev->reshape_backwards)
3921 diff = -diff;
3922 if (diff < 0)
3923 diff = 0;
3924 if (first || diff < min_offset_diff)
3925 min_offset_diff = diff;
3926 }
3927 }
3928
3929 if (max(before_length, after_length) > min_offset_diff)
3930 return -EINVAL;
3931
3932 if (spares < mddev->delta_disks)
3933 return -EINVAL;
3934
3935 conf->offset_diff = min_offset_diff;
3936 spin_lock_irq(&conf->device_lock);
3937 if (conf->mirrors_new) {
3938 memcpy(conf->mirrors_new, conf->mirrors,
3939 sizeof(struct mirror_info)*conf->prev.raid_disks);
3940 smp_mb();
3941 kfree(conf->mirrors_old); /* FIXME and elsewhere */
3942 conf->mirrors_old = conf->mirrors;
3943 conf->mirrors = conf->mirrors_new;
3944 conf->mirrors_new = NULL;
3945 }
3946 setup_geo(&conf->geo, mddev, geo_start);
3947 smp_mb();
3948 if (mddev->reshape_backwards) {
3949 sector_t size = raid10_size(mddev, 0, 0);
3950 if (size < mddev->array_sectors) {
3951 spin_unlock_irq(&conf->device_lock);
3952 printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
3953 mdname(mddev));
3954 return -EINVAL;
3955 }
3956 mddev->resync_max_sectors = size;
3957 conf->reshape_progress = size;
3958 } else
3959 conf->reshape_progress = 0;
3960 spin_unlock_irq(&conf->device_lock);
3961
3962 if (mddev->delta_disks && mddev->bitmap) {
3963 ret = bitmap_resize(mddev->bitmap,
3964 raid10_size(mddev, 0,
3965 conf->geo.raid_disks),
3966 0, 0);
3967 if (ret)
3968 goto abort;
3969 }
3970 if (mddev->delta_disks > 0) {
3971 rdev_for_each(rdev, mddev)
3972 if (rdev->raid_disk < 0 &&
3973 !test_bit(Faulty, &rdev->flags)) {
3974 if (raid10_add_disk(mddev, rdev) == 0) {
3975 if (rdev->raid_disk >=
3976 conf->prev.raid_disks)
3977 set_bit(In_sync, &rdev->flags);
3978 else
3979 rdev->recovery_offset = 0;
3980
3981 if (sysfs_link_rdev(mddev, rdev))
3982 /* Failure here is OK */;
3983 }
3984 } else if (rdev->raid_disk >= conf->prev.raid_disks
3985 && !test_bit(Faulty, &rdev->flags)) {
3986 /* This is a spare that was manually added */
3987 set_bit(In_sync, &rdev->flags);
3988 }
3989 }
3990 /* When a reshape changes the number of devices,
3991 * ->degraded is measured against the larger of the
3992 * pre and post numbers.
3993 */
3994 spin_lock_irq(&conf->device_lock);
3995 mddev->degraded = calc_degraded(conf);
3996 spin_unlock_irq(&conf->device_lock);
3997 mddev->raid_disks = conf->geo.raid_disks;
3998 mddev->reshape_position = conf->reshape_progress;
3999 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4000
4001 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4002 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4003 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4004 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4005
4006 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4007 "reshape");
4008 if (!mddev->sync_thread) {
4009 ret = -EAGAIN;
4010 goto abort;
4011 }
4012 conf->reshape_checkpoint = jiffies;
4013 md_wakeup_thread(mddev->sync_thread);
4014 md_new_event(mddev);
4015 return 0;
4016
4017abort:
4018 mddev->recovery = 0;
4019 spin_lock_irq(&conf->device_lock);
4020 conf->geo = conf->prev;
4021 mddev->raid_disks = conf->geo.raid_disks;
4022 rdev_for_each(rdev, mddev)
4023 rdev->new_data_offset = rdev->data_offset;
4024 smp_wmb();
4025 conf->reshape_progress = MaxSector;
4026 mddev->reshape_position = MaxSector;
4027 spin_unlock_irq(&conf->device_lock);
4028 return ret;
4029}
4030
4031/* Calculate the last device-address that could contain
4032 * any block from the chunk that includes the array-address 's'
4033 * and report the next address.
4034 * i.e. the address returned will be chunk-aligned and after
4035 * any data that is in the chunk containing 's'.
4036 */
4037static sector_t last_dev_address(sector_t s, struct geom *geo)
4038{
4039 s = (s | geo->chunk_mask) + 1;
4040 s >>= geo->chunk_shift;
4041 s *= geo->near_copies;
4042 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4043 s *= geo->far_copies;
4044 s <<= geo->chunk_shift;
4045 return s;
4046}
4047
4048/* Calculate the first device-address that could contain
4049 * any block from the chunk that includes the array-address 's'.
4050 * This too will be the start of a chunk
4051 */
4052static sector_t first_dev_address(sector_t s, struct geom *geo)
4053{
4054 s >>= geo->chunk_shift;
4055 s *= geo->near_copies;
4056 sector_div(s, geo->raid_disks);
4057 s *= geo->far_copies;
4058 s <<= geo->chunk_shift;
4059 return s;
4060}
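
/*
 * Worked example (hypothetical geometry): raid_disks = 4,
 * near_copies = 2, far_copies = 1, chunk_mask = 127 (chunk_shift = 7).
 * Array sector 1000 lies in array chunk 7, which sits at device
 * chunk 3, so:
 *   first_dev_address(1000, geo) = 3 << 7 = 384
 *   last_dev_address(1000, geo)  = 4 << 7 = 512
 * i.e. every device block that chunk can touch lies in [384, 512).
 */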
4061
4062static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4063 int *skipped)
4064{
4065 /* We simply copy at most one chunk (smallest of old and new)
4066 * at a time, possibly less if that exceeds RESYNC_PAGES,
4067 * or we hit a bad block or something.
4068 * This might mean we pause for normal IO in the middle of
4069 * a chunk, but that is not a problem as mddev->reshape_position
4070 * can record any location.
4071 *
4072 * If we will want to write to a location that isn't
4073 * yet recorded as 'safe' (i.e. in metadata on disk) then
4074 * we need to flush all reshape requests and update the metadata.
4075 *
4076 * When reshaping forwards (e.g. to more devices), we interpret
4077 * 'safe' as the earliest block which might not have been copied
4078 * down yet. We divide this by previous stripe size and multiply
4079 * by previous stripe length to get lowest device offset that we
4080 * cannot write to yet.
4081 * We interpret 'sector_nr' as an address that we want to write to.
4082 * From this we use last_dev_address() to find where we might
4083 * write to, and first_dev_address() on the 'safe' position.
4084 * If this 'next' write position is after the 'safe' position,
4085 * we must update the metadata to increase the 'safe' position.
4086 *
4087 * When reshaping backwards, we round in the opposite direction
4088 * and perform the reverse test: next write position must not be
4089 * less than current safe position.
4090 *
4091 * In all this the minimum difference in data offsets
4092 * (conf->offset_diff - always positive) allows a bit of slack,
4093 * so next can be after 'safe', but not by more than offset_diff.
4094 *
4095 * We need to prepare all the bios here before we start any IO
4096 * to ensure the size we choose is acceptable to all devices.
4097 * That means one for each copy for write-out and an extra one for
4098 * read-in.
4099 * We store the read-in bio in ->master_bio and the others in
4100 * ->devs[x].bio and ->devs[x].repl_bio.
4101 */
4102 struct r10conf *conf = mddev->private;
4103 struct r10bio *r10_bio;
4104 sector_t next, safe, last;
4105 int max_sectors;
4106 int nr_sectors;
4107 int s;
4108 struct md_rdev *rdev;
4109 int need_flush = 0;
4110 struct bio *blist;
4111 struct bio *bio, *read_bio;
4112 int sectors_done = 0;
4113
4114 if (sector_nr == 0) {
4115 /* If restarting in the middle, skip the initial sectors */
4116 if (mddev->reshape_backwards &&
4117 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4118 sector_nr = (raid10_size(mddev, 0, 0)
4119 - conf->reshape_progress);
4120 } else if (!mddev->reshape_backwards &&
4121 conf->reshape_progress > 0)
4122 sector_nr = conf->reshape_progress;
4123 if (sector_nr) {
4124 mddev->curr_resync_completed = sector_nr;
4125 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4126 *skipped = 1;
4127 return sector_nr;
4128 }
4129 }
4130
4131 /* We don't use sector_nr to track where we are up to
4132 * as that doesn't work well for ->reshape_backwards.
4133 * So just use ->reshape_progress.
4134 */
4135 if (mddev->reshape_backwards) {
4136 /* 'next' is the earliest device address that we might
4137 * write to for this chunk in the new layout
4138 */
4139 next = first_dev_address(conf->reshape_progress - 1,
4140 &conf->geo);
4141
4142 /* 'safe' is the last device address that we might read from
4143 * in the old layout after a restart
4144 */
4145 safe = last_dev_address(conf->reshape_safe - 1,
4146 &conf->prev);
4147
4148 if (next + conf->offset_diff < safe)
4149 need_flush = 1;
4150
4151 last = conf->reshape_progress - 1;
4152 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4153 & conf->prev.chunk_mask);
4154 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4155 sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4156 } else {
4157 /* 'next' is after the last device address that we
4158 * might write to for this chunk in the new layout
4159 */
4160 next = last_dev_address(conf->reshape_progress, &conf->geo);
4161
4162 /* 'safe' is the earliest device address that we might
4163 * read from in the old layout after a restart
4164 */
4165 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4166
4167 /* Need to update metadata if 'next' might be beyond 'safe'
4168 * as that would possibly corrupt data
4169 */
4170 if (next > safe + conf->offset_diff)
4171 need_flush = 1;
4172
4173 sector_nr = conf->reshape_progress;
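 /* ANDing the two chunk masks (each 2^n - 1) gives the mask of the
  * smaller chunk, matching "at most one chunk (smallest of old and
  * new)" above.
  */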
4174 last = sector_nr | (conf->geo.chunk_mask
4175 & conf->prev.chunk_mask);
4176
4177 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4178 last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4179 }
4180
4181 if (need_flush ||
4182 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4183 /* Need to update reshape_position in metadata */
4184 wait_barrier(conf);
4185 mddev->reshape_position = conf->reshape_progress;
4186 if (mddev->reshape_backwards)
4187 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4188 - conf->reshape_progress;
4189 else
4190 mddev->curr_resync_completed = conf->reshape_progress;
4191 conf->reshape_checkpoint = jiffies;
4192 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4193 md_wakeup_thread(mddev->thread);
4194 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4195 kthread_should_stop());
4196 conf->reshape_safe = mddev->reshape_position;
4197 allow_barrier(conf);
4198 }
4199
4200read_more:
4201 /* Now schedule reads for blocks from sector_nr to last */
4202 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4203 raise_barrier(conf, sectors_done != 0);
4204 atomic_set(&r10_bio->remaining, 0);
4205 r10_bio->mddev = mddev;
4206 r10_bio->sector = sector_nr;
4207 set_bit(R10BIO_IsReshape, &r10_bio->state);
4208 r10_bio->sectors = last - sector_nr + 1;
4209 rdev = read_balance(conf, r10_bio, &max_sectors);
4210 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4211
4212 if (!rdev) {
4213 /* Cannot read from here, so need to record bad blocks
4214 * on all the target devices.
4215 */
4216 // FIXME
4217 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4218 return sectors_done;
4219 }
4220
4221 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4222
4223 read_bio->bi_bdev = rdev->bdev;
4224 read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4225 + rdev->data_offset);
4226 read_bio->bi_private = r10_bio;
4227 read_bio->bi_end_io = end_sync_read;
4228 read_bio->bi_rw = READ;
4229 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
4230 read_bio->bi_flags |= 1 << BIO_UPTODATE;
4231 read_bio->bi_vcnt = 0;
4232 read_bio->bi_idx = 0;
4233 read_bio->bi_size = 0;
4234 r10_bio->master_bio = read_bio;
4235 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4236
4237 /* Now find the locations in the new layout */
4238 __raid10_find_phys(&conf->geo, r10_bio);
4239
4240 blist = read_bio;
4241 read_bio->bi_next = NULL;
4242
4243 for (s = 0; s < conf->copies*2; s++) {
4244 struct bio *b;
4245 int d = r10_bio->devs[s/2].devnum;
4246 struct md_rdev *rdev2;
4247 if (s&1) {
4248 rdev2 = conf->mirrors[d].replacement;
4249 b = r10_bio->devs[s/2].repl_bio;
4250 } else {
4251 rdev2 = conf->mirrors[d].rdev;
4252 b = r10_bio->devs[s/2].bio;
4253 }
4254 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4255 continue;
4256 b->bi_bdev = rdev2->bdev;
4257 b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
4258 b->bi_private = r10_bio;
4259 b->bi_end_io = end_reshape_write;
4260 b->bi_rw = WRITE;
4261 b->bi_flags &= ~(BIO_POOL_MASK - 1);
4262 b->bi_flags |= 1 << BIO_UPTODATE;
4263 b->bi_next = blist;
4264 b->bi_vcnt = 0;
4265 b->bi_idx = 0;
4266 b->bi_size = 0;
4267 blist = b;
4268 }
4269
4270 /* Now add as many pages as possible to all of these bios. */
4271
4272 nr_sectors = 0;
4273 for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4274 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4275 int len = (max_sectors - s) << 9;
4276 if (len > PAGE_SIZE)
4277 len = PAGE_SIZE;
4278 for (bio = blist; bio ; bio = bio->bi_next) {
4279 struct bio *bio2;
4280 if (bio_add_page(bio, page, len, 0))
4281 continue;
4282
4283 /* Didn't fit, must stop */
4284 for (bio2 = blist;
4285 bio2 && bio2 != bio;
4286 bio2 = bio2->bi_next) {
4287 /* Remove last page from this bio */
4288 bio2->bi_vcnt--;
4289 bio2->bi_size -= len;
4290 bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
4291 }
4292 goto bio_full;
4293 }
4294 sector_nr += len >> 9;
4295 nr_sectors += len >> 9;
4296 }
4297bio_full:
4298 r10_bio->sectors = nr_sectors;
4299
4300 /* Now submit the read */
4301 md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4302 atomic_inc(&r10_bio->remaining);
4303 read_bio->bi_next = NULL;
4304 generic_make_request(read_bio);
4305 sector_nr += nr_sectors;
4306 sectors_done += nr_sectors;
4307 if (sector_nr <= last)
4308 goto read_more;
4309
4310 /* Now that we have done the whole section we can
4311 * update reshape_progress
4312 */
4313 if (mddev->reshape_backwards)
4314 conf->reshape_progress -= sectors_done;
4315 else
4316 conf->reshape_progress += sectors_done;
4317
4318 return sectors_done;
4319}
4320
4321static void end_reshape_request(struct r10bio *r10_bio);
4322static int handle_reshape_read_error(struct mddev *mddev,
4323 struct r10bio *r10_bio);
4324static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4325{
4326 /* Reshape read completed. Hopefully we have a block
4327 * to write out.
4328 * If we got a read error then we do sync 1-page reads from
4329 * elsewhere until we find the data - or give up.
4330 */
4331 struct r10conf *conf = mddev->private;
4332 int s;
4333
4334 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4335 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4336 /* Reshape has been aborted */
4337 md_done_sync(mddev, r10_bio->sectors, 0);
4338 return;
4339 }
4340
4341 /* We definitely have the data in the pages, schedule the
4342 * writes.
4343 */
4344 atomic_set(&r10_bio->remaining, 1);
4345 for (s = 0; s < conf->copies*2; s++) {
4346 struct bio *b;
4347 int d = r10_bio->devs[s/2].devnum;
4348 struct md_rdev *rdev;
4349 if (s&1) {
4350 rdev = conf->mirrors[d].replacement;
4351 b = r10_bio->devs[s/2].repl_bio;
4352 } else {
4353 rdev = conf->mirrors[d].rdev;
4354 b = r10_bio->devs[s/2].bio;
4355 }
4356 if (!rdev || test_bit(Faulty, &rdev->flags))
4357 continue;
4358 atomic_inc(&rdev->nr_pending);
4359 md_sync_acct(b->bi_bdev, r10_bio->sectors);
4360 atomic_inc(&r10_bio->remaining);
4361 b->bi_next = NULL;
4362 generic_make_request(b);
4363 }
4364 end_reshape_request(r10_bio);
4365}
4366
4367static void end_reshape(struct r10conf *conf)
4368{
4369 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4370 return;
4371
4372 spin_lock_irq(&conf->device_lock);
4373 conf->prev = conf->geo;
4374 md_finish_reshape(conf->mddev);
4375 smp_wmb();
4376 conf->reshape_progress = MaxSector;
4377 spin_unlock_irq(&conf->device_lock);
4378
4379 /* read-ahead size must cover two whole stripes, which is
4380 * 2 * (number of data disks) * chunk size
4381 */
4382 if (conf->mddev->queue) {
4383 int stripe = conf->geo.raid_disks *
4384 ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4385 stripe /= conf->geo.near_copies;
4386 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4387 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4388 }
4389 conf->fullsync = 0;
4390}
4391
4392
4393static int handle_reshape_read_error(struct mddev *mddev,
4394 struct r10bio *r10_bio)
4395{
4396 /* Use sync reads to get the blocks from somewhere else */
4397 int sectors = r10_bio->sectors;
4398 struct r10conf *conf = mddev->private;
4399 struct {
4400 struct r10bio r10_bio;
4401 struct r10dev devs[conf->copies];
4402 } on_stack;
4403 struct r10bio *r10b = &on_stack.r10_bio;
4404 int slot = 0;
4405 int idx = 0;
4406 struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4407
4408 r10b->sector = r10_bio->sector;
4409 __raid10_find_phys(&conf->prev, r10b);
4410
4411 while (sectors) {
4412 int s = sectors;
4413 int success = 0;
4414 int first_slot = slot;
4415
4416 if (s > (PAGE_SIZE >> 9))
4417 s = PAGE_SIZE >> 9;
4418
4419 while (!success) {
4420 int d = r10b->devs[slot].devnum;
4421 struct md_rdev *rdev = conf->mirrors[d].rdev;
4422 sector_t addr;
4423 if (rdev == NULL ||
4424 test_bit(Faulty, &rdev->flags) ||
4425 !test_bit(In_sync, &rdev->flags))
4426 goto failed;
4427
4428 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4429 success = sync_page_io(rdev,
4430 addr,
4431 s << 9,
4432 bvec[idx].bv_page,
4433 READ, false);
4434 if (success)
4435 break;
4436 failed:
4437 slot++;
4438 if (slot >= conf->copies)
4439 slot = 0;
4440 if (slot == first_slot)
4441 break;
4442 }
4443 if (!success) {
4444 /* couldn't read this block, must give up */
4445 set_bit(MD_RECOVERY_INTR,
4446 &mddev->recovery);
4447 return -EIO;
4448 }
4449 sectors -= s;
4450 idx++;
4451 }
4452 return 0;
4453}
4454
4455static void end_reshape_write(struct bio *bio, int error)
4456{
4457 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4458 struct r10bio *r10_bio = bio->bi_private;
4459 struct mddev *mddev = r10_bio->mddev;
4460 struct r10conf *conf = mddev->private;
4461 int d;
4462 int slot;
4463 int repl;
4464 struct md_rdev *rdev = NULL;
4465
4466 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4467 if (repl)
4468 rdev = conf->mirrors[d].replacement;
4469 if (!rdev) {
4470 smp_mb();
4471 rdev = conf->mirrors[d].rdev;
4472 }
4473
4474 if (!uptodate) {
4475 /* FIXME should record badblock */
4476 md_error(mddev, rdev);
4477 }
4478
4479 rdev_dec_pending(rdev, mddev);
4480 end_reshape_request(r10_bio);
4481}
4482
4483static void end_reshape_request(struct r10bio *r10_bio)
4484{
4485 if (!atomic_dec_and_test(&r10_bio->remaining))
4486 return;
4487 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4488 bio_put(r10_bio->master_bio);
4489 put_buf(r10_bio);
4490}
4491
4492static void raid10_finish_reshape(struct mddev *mddev)
4493{
4494 struct r10conf *conf = mddev->private;
4495
4496 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4497 return;
4498
4499 if (mddev->delta_disks > 0) {
4500 sector_t size = raid10_size(mddev, 0, 0);
4501 md_set_array_sectors(mddev, size);
4502 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4503 mddev->recovery_cp = mddev->resync_max_sectors;
4504 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4505 }
4506 mddev->resync_max_sectors = size;
4507 set_capacity(mddev->gendisk, mddev->array_sectors);
4508 revalidate_disk(mddev->gendisk);
4509 } else {
4510 int d;
4511 for (d = conf->geo.raid_disks ;
4512 d < conf->geo.raid_disks - mddev->delta_disks;
4513 d++) {
4514 struct md_rdev *rdev = conf->mirrors[d].rdev;
4515 if (rdev)
4516 clear_bit(In_sync, &rdev->flags);
4517 rdev = conf->mirrors[d].replacement;
4518 if (rdev)
4519 clear_bit(In_sync, &rdev->flags);
4520 }
4521 }
4522 mddev->layout = mddev->new_layout;
4523 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4524 mddev->reshape_position = MaxSector;
4525 mddev->delta_disks = 0;
4526 mddev->reshape_backwards = 0;
4527}
4528
4529static struct md_personality raid10_personality =
4530{
4531 .name = "raid10",
4532 .level = 10,
4533 .owner = THIS_MODULE,
4534 .make_request = make_request,
4535 .run = run,
4536 .stop = stop,
4537 .status = status,
4538 .error_handler = error,
4539 .hot_add_disk = raid10_add_disk,
4540 .hot_remove_disk= raid10_remove_disk,
4541 .spare_active = raid10_spare_active,
4542 .sync_request = sync_request,
4543 .quiesce = raid10_quiesce,
4544 .size = raid10_size,
4545 .resize = raid10_resize,
4546 .takeover = raid10_takeover,
4547 .check_reshape = raid10_check_reshape,
4548 .start_reshape = raid10_start_reshape,
4549 .finish_reshape = raid10_finish_reshape,
4550};
4551
4552static int __init raid_init(void)
4553{
4554 return register_md_personality(&raid10_personality);
4555}
4556
4557static void raid_exit(void)
4558{
4559 unregister_md_personality(&raid10_personality);
4560}
4561
4562module_init(raid_init);
4563module_exit(raid_exit);
4564MODULE_LICENSE("GPL");
4565MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4566MODULE_ALIAS("md-personality-9"); /* RAID10 */
4567MODULE_ALIAS("md-raid10");
4568MODULE_ALIAS("md-level-10");
4569
4570module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);