/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "multipath.h"

#define MAX_WORK_PER_DISK 128

#define NR_RESERVED_BUFS 32

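/*
 * multipath_map() picks the first path whose rdev is present and In_sync,
 * takes a reference on it by bumping ->nr_pending, and returns its index.
 * Returns -1 when no operational path is left.
 */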
static int multipath_map (struct mpconf *conf)
{
	int i, disks = conf->raid_disks;

	/*
	 * Later we do read balancing on the read side
	 * now we use the first available disk.
	 */

	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && test_bit(In_sync, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
	return (-1);
}

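/*
 * Queue a failed request on the retry list and wake the md thread
 * (multipathd) so the request can be resubmitted down another path.
 */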
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	struct mddev *mddev = mp_bh->mddev;
	struct mpconf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}

/*
 * multipath_end_bh_io() is called when we have finished servicing a multipathed
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
	struct bio *bio = mp_bh->master_bio;
	struct mpconf *conf = mp_bh->mddev->private;

	bio->bi_error = err;
	bio_endio(bio);
	mempool_free(mp_bh, conf->pool);
}

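/*
 * Completion handler for the cloned per-path bio: on success the master bio
 * is completed; on an IO error (other than readahead) the path is failed via
 * md_error() and the request is queued for retry on another path.
 */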
static void multipath_end_request(struct bio *bio)
{
	struct multipath_bh *mp_bh = bio->bi_private;
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

	if (!bio->bi_error)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_rw & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
		       bdevname(rdev->bdev,b),
		       (unsigned long long)bio->bi_iter.bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, bio->bi_error);
	rdev_dec_pending(rdev, conf->mddev);
}

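/*
 * Main request entry point: flushes are handed straight to md_flush_request();
 * everything else is cloned into a per-request multipath_bh, remapped by the
 * chosen path's data_offset, and submitted to that path's block device.
 */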
static void multipath_make_request(struct mddev *mddev, struct bio * bio)
{
	struct mpconf *conf = mddev->private;
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_io_error(bio);
		mempool_free(mp_bh, conf->pool);
		return;
	}
	multipath = conf->multipaths + mp_bh->path;

	bio_init(&mp_bh->bio);
	__bio_clone_fast(&mp_bh->bio, bio);

	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return;
}

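/*
 * Report array state ("[n/m] [UU_...]") for the md status output,
 * e.g. the array line in /proc/mdstat.
 */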
static void multipath_status (struct seq_file *seq, struct mddev *mddev)
{
	struct mpconf *conf = mddev->private;
	int i;

	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		    conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			    conf->multipaths[i].rdev &&
			    test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
	seq_printf (seq, "]");
}

static int multipath_congested(struct mddev *mddev, int bits)
{
	struct mpconf *conf = mddev->private;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks ; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device
			 */
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	char b[BDEVNAME_SIZE];

	if (conf->raid_disks - mddev->degraded <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		printk(KERN_ALERT
		       "multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
		return;
	}
	/*
	 * Mark disk as unusable
	 */
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "multipath: IO failure on %s,"
	       " disabling IO path.\n"
	       "multipath: Operation continuing"
	       " on %d IO paths.\n",
	       bdevname(rdev->bdev, b),
	       conf->raid_disks - mddev->degraded);
}

static void print_multipath_conf (struct mpconf *conf)
{
	int i;
	struct multipath_info *tmp;

	printk("MULTIPATH conf printout:\n");
	if (!conf) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
	       conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->multipaths + i;
		if (tmp->rdev)
			printk(" disk%d, o:%d, dev:%s\n",
			       i,!test_bit(Faulty, &tmp->rdev->flags),
			       bdevname(tmp->rdev->bdev,b));
	}
}

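/*
 * Hot-add a spare as a new path: find a free slot, stack the device's queue
 * limits, register its integrity profile, mark it In_sync and publish it via
 * rcu_assign_pointer().
 */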
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	struct request_queue *q;
	int err = -EEXIST;
	int path;
	struct multipath_info *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	print_multipath_conf(conf);

	for (path = first; path <= last; path++)
		if ((p=conf->multipaths+path)->rdev == NULL) {
			q = rdev->bdev->bd_disk->queue;
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			err = md_integrity_add_rdev(rdev, mddev);
			if (err)
				break;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded--;
			rdev->raid_disk = path;
			set_bit(In_sync, &rdev->flags);
			spin_unlock_irq(&conf->device_lock);
			rcu_assign_pointer(p->rdev, rdev);
			err = 0;
			break;
		}

	print_multipath_conf(conf);

	return err;
}

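/*
 * Hot-remove a path: clear the slot, then synchronize_rcu() so that any
 * concurrent multipath_map() caller has finished with the rdev before we
 * re-check ->nr_pending; if the path is still in use, restore the slot and
 * fail with -EBUSY.
 */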
static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct multipath_info *p = conf->multipaths + number;

	print_multipath_conf(conf);

	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			printk(KERN_ERR "hot-remove-disk, slot %d is identified"
			       " but is still operational!\n", number);
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		}
		err = md_integrity_register(mddev);
	}
abort:

	print_multipath_conf(conf);
	return err;
}

/*
 * This is a kernel thread which:
 *
 * 1. Retries failed read operations on working multipaths.
 * 2. Updates the raid superblock when problems are encountered.
 * 3. Performs writes following reads for array synchronising.
 */

static void multipathd(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	struct mpconf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;

		if ((mp_bh->path = multipath_map (conf))<0) {
			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
			       " error for block %llu\n",
			       bdevname(bio->bi_bdev,b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			multipath_end_bh_io(mp_bh, -EIO);
		} else {
			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
			       " to another IO path\n",
			       bdevname(bio->bi_bdev,b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			*bio = *(mp_bh->master_bio);
			bio->bi_iter.bi_sector +=
				conf->multipaths[mp_bh->path].rdev->data_offset;
			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	return mddev->dev_sectors;
}

static int multipath_run (struct mddev *mddev)
{
	struct mpconf *conf;
	int disk_idx;
	struct multipath_info *disk;
	struct md_rdev *rdev;
	int working_disks;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (mddev->level != LEVEL_MULTIPATH) {
		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	/*
	 * copy the already verified devices into our private MULTIPATH
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_free()]
	 */

	conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
	mddev->private = conf;
	if (!conf) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out;
	}

	conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
				   GFP_KERNEL);
	if (!conf->multipaths) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	working_disks = 0;
	rdev_for_each(rdev, mddev) {
		disk_idx = rdev->raid_disk;
		if (disk_idx < 0 ||
		    disk_idx >= mddev->raid_disks)
			continue;

		disk = conf->multipaths + disk_idx;
		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		if (!test_bit(Faulty, &rdev->flags))
			working_disks++;
	}

	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	if (!working_disks) {
		printk(KERN_ERR "multipath: no operational IO paths for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}
	mddev->degraded = conf->raid_disks - working_disks;

	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
						 sizeof(struct multipath_bh));
	if (conf->pool == NULL) {
		printk(KERN_ERR
		       "multipath: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	{
		mddev->thread = md_register_thread(multipathd, mddev,
						   "multipath");
		if (!mddev->thread) {
			printk(KERN_ERR "multipath: couldn't allocate thread"
			       " for %s\n", mdname(mddev));
			goto out_free_conf;
		}
	}

	printk(KERN_INFO
	       "multipath: array %s active with %d out of %d IO paths\n",
	       mdname(mddev), conf->raid_disks - mddev->degraded,
	       mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static void multipath_free(struct mddev *mddev, void *priv)
{
	struct mpconf *conf = priv;

	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
}

static struct md_personality multipath_personality =
{
	.name		= "multipath",
	.level		= LEVEL_MULTIPATH,
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.free		= multipath_free,
	.status		= multipath_status,
	.error_handler	= multipath_error,
	.hot_add_disk	= multipath_add_disk,
	.hot_remove_disk= multipath_remove_disk,
	.size		= multipath_size,
	.congested	= multipath_congested,
};

static int __init multipath_init (void)
{
	return register_md_personality (&multipath_personality);
}

static void __exit multipath_exit (void)
{
	unregister_md_personality (&multipath_personality);
}

module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");