1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2003 Sistina Software Limited.
4 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
5 *
6 * This file is released under the GPL.
7 */
8
9#include <linux/device-mapper.h>
10
11#include "dm-rq.h"
12#include "dm-bio-record.h"
13#include "dm-path-selector.h"
14#include "dm-uevent.h"
15
16#include <linux/blkdev.h>
17#include <linux/ctype.h>
18#include <linux/init.h>
19#include <linux/mempool.h>
20#include <linux/module.h>
21#include <linux/pagemap.h>
22#include <linux/slab.h>
23#include <linux/time.h>
24#include <linux/timer.h>
25#include <linux/workqueue.h>
26#include <linux/delay.h>
27#include <scsi/scsi_dh.h>
28#include <linux/atomic.h>
29#include <linux/blk-mq.h>
30
31static struct workqueue_struct *dm_mpath_wq;
32
33#define DM_MSG_PREFIX "multipath"
34#define DM_PG_INIT_DELAY_MSECS 2000
35#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)	/* "unset": fall back to DM_PG_INIT_DELAY_MSECS */
36#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
37
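/*
 * When non-zero, a table that is queueing I/O because it has no usable paths
 * gives up after this many seconds and fails the queued I/O; see
 * enable_nopath_timeout() and queue_if_no_path_timeout_work() below.
 * The default (0) means "queue indefinitely".
 */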
38static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
39
40/* Path properties */
41struct pgpath {
42 struct list_head list;
43
44 struct priority_group *pg; /* Owning PG */
45 unsigned int fail_count; /* Cumulative failure count */
46
47 struct dm_path path;
48 struct delayed_work activate_path;
49
50 bool is_active:1; /* Path status */
51};
52
53#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
54
55/*
56 * Paths are grouped into Priority Groups and numbered from 1 upwards.
57 * Each has a path selector which controls which path gets used.
58 */
59struct priority_group {
60 struct list_head list;
61
62 struct multipath *m; /* Owning multipath instance */
63 struct path_selector ps;
64
65 unsigned int pg_num; /* Reference number */
66 unsigned int nr_pgpaths; /* Number of paths in PG */
67 struct list_head pgpaths;
68
69 bool bypassed:1; /* Temporarily bypass this PG? */
70};
71
72/* Multipath context */
73struct multipath {
74 unsigned long flags; /* Multipath state flags */
75
76 spinlock_t lock;
77 enum dm_queue_mode queue_mode;
78
79 struct pgpath *current_pgpath;
80 struct priority_group *current_pg;
81 struct priority_group *next_pg; /* Switch to this PG if set */
82
83 atomic_t nr_valid_paths; /* Total number of usable paths */
84 unsigned int nr_priority_groups;
85 struct list_head priority_groups;
86
87 const char *hw_handler_name;
88 char *hw_handler_params;
89 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
90 unsigned int pg_init_retries; /* Number of times to retry pg_init */
91 unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */
92 atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
93 atomic_t pg_init_count; /* Number of times pg_init called */
94
95 struct mutex work_mutex;
96 struct work_struct trigger_event;
97 struct dm_target *ti;
98
99 struct work_struct process_queued_bios;
100 struct bio_list queued_bios;
101
102 struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
103};
104
105/*
106 * Context information attached to each io we process.
107 */
108struct dm_mpath_io {
109 struct pgpath *pgpath;
110 size_t nr_bytes;
111 u64 start_time_ns;
112};
113
114typedef int (*action_fn) (struct pgpath *pgpath);
115
116static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
117static void trigger_event(struct work_struct *work);
118static void activate_or_offline_path(struct pgpath *pgpath);
119static void activate_path_work(struct work_struct *work);
120static void process_queued_bios(struct work_struct *work);
121static void queue_if_no_path_timeout_work(struct timer_list *t);
122
123/*
124 *-----------------------------------------------
125 * Multipath state flags.
126 *-----------------------------------------------
127 */
128#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
129#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
130#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
131#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
132#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
133#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
134#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
135
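/*
 * Optimistic check of one of the MPATHF_* bits above: test it locklessly
 * first and only take m->lock to re-check when it appears to be set, so the
 * common "bit clear" case avoids the spinlock entirely.
 */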
136static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
137{
138 bool r = test_bit(MPATHF_bit, &m->flags);
139
140 if (r) {
141 unsigned long flags;
142
143 spin_lock_irqsave(&m->lock, flags);
144 r = test_bit(MPATHF_bit, &m->flags);
145 spin_unlock_irqrestore(&m->lock, flags);
146 }
147
148 return r;
149}
150
151/*
152 *-----------------------------------------------
153 * Allocation routines
154 *-----------------------------------------------
155 */
156static struct pgpath *alloc_pgpath(void)
157{
158 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
159
160 if (!pgpath)
161 return NULL;
162
163 pgpath->is_active = true;
164
165 return pgpath;
166}
167
168static void free_pgpath(struct pgpath *pgpath)
169{
170 kfree(pgpath);
171}
172
173static struct priority_group *alloc_priority_group(void)
174{
175 struct priority_group *pg;
176
177 pg = kzalloc(sizeof(*pg), GFP_KERNEL);
178
179 if (pg)
180 INIT_LIST_HEAD(&pg->pgpaths);
181
182 return pg;
183}
184
185static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
186{
187 struct pgpath *pgpath, *tmp;
188
189 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
190 list_del(&pgpath->list);
191 dm_put_device(ti, pgpath->path.dev);
192 free_pgpath(pgpath);
193 }
194}
195
196static void free_priority_group(struct priority_group *pg,
197 struct dm_target *ti)
198{
199 struct path_selector *ps = &pg->ps;
200
201 if (ps->type) {
202 ps->type->destroy(ps);
203 dm_put_path_selector(ps->type);
204 }
205
206 free_pgpaths(&pg->pgpaths, ti);
207 kfree(pg);
208}
209
210static struct multipath *alloc_multipath(struct dm_target *ti)
211{
212 struct multipath *m;
213
214 m = kzalloc(sizeof(*m), GFP_KERNEL);
215 if (m) {
216 INIT_LIST_HEAD(&m->priority_groups);
217 spin_lock_init(&m->lock);
218 atomic_set(&m->nr_valid_paths, 0);
219 INIT_WORK(&m->trigger_event, trigger_event);
220 mutex_init(&m->work_mutex);
221
222 m->queue_mode = DM_TYPE_NONE;
223
224 m->ti = ti;
225 ti->private = m;
226
227 timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
228 }
229
230 return m;
231}
232
233static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
234{
235 if (m->queue_mode == DM_TYPE_NONE) {
236 m->queue_mode = DM_TYPE_REQUEST_BASED;
237 } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
238 INIT_WORK(&m->process_queued_bios, process_queued_bios);
239 /*
240 * bio-based doesn't support any direct scsi_dh management;
241 * it just discovers if a scsi_dh is attached.
242 */
243 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
244 }
245
246 dm_table_set_type(ti->table, m->queue_mode);
247
248 /*
249 * Init fields that are only used when a scsi_dh is attached
250 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
251 */
252 set_bit(MPATHF_QUEUE_IO, &m->flags);
253 atomic_set(&m->pg_init_in_progress, 0);
254 atomic_set(&m->pg_init_count, 0);
255 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
256 init_waitqueue_head(&m->pg_init_wait);
257
258 return 0;
259}
260
261static void free_multipath(struct multipath *m)
262{
263 struct priority_group *pg, *tmp;
264
265 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
266 list_del(&pg->list);
267 free_priority_group(pg, m->ti);
268 }
269
270 kfree(m->hw_handler_name);
271 kfree(m->hw_handler_params);
272 mutex_destroy(&m->work_mutex);
273 kfree(m);
274}
275
276static struct dm_mpath_io *get_mpio(union map_info *info)
277{
278 return info->ptr;
279}
280
281static size_t multipath_per_bio_data_size(void)
282{
283 return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
284}
285
286static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
287{
288 return dm_per_bio_data(bio, multipath_per_bio_data_size());
289}
290
291static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
292{
293 /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
294 void *bio_details = mpio + 1;
295 return bio_details;
296}
297
298static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
299{
300 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
301 struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
302
303 mpio->nr_bytes = bio->bi_iter.bi_size;
304 mpio->pgpath = NULL;
305 mpio->start_time_ns = 0;
306 *mpio_p = mpio;
307
308 dm_bio_record(bio_details, bio);
309}
310
311/*
312 *-----------------------------------------------
313 * Path selection
314 *-----------------------------------------------
315 */
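/*
 * "pg_init" means asking the SCSI device handler (e.g. ALUA) to activate the
 * usable paths of the current priority group before I/O is sent to them;
 * MPATHF_QUEUE_IO keeps I/O queued while that is in flight.
 */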
316static int __pg_init_all_paths(struct multipath *m)
317{
318 struct pgpath *pgpath;
319 unsigned long pg_init_delay = 0;
320
321 lockdep_assert_held(&m->lock);
322
323 if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
324 return 0;
325
326 atomic_inc(&m->pg_init_count);
327 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
328
329 /* Check here to reset pg_init_required */
330 if (!m->current_pg)
331 return 0;
332
333 if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
334 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
335 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
336 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
337 /* Skip failed paths */
338 if (!pgpath->is_active)
339 continue;
340 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
341 pg_init_delay))
342 atomic_inc(&m->pg_init_in_progress);
343 }
344 return atomic_read(&m->pg_init_in_progress);
345}
346
347static int pg_init_all_paths(struct multipath *m)
348{
349 int ret;
350 unsigned long flags;
351
352 spin_lock_irqsave(&m->lock, flags);
353 ret = __pg_init_all_paths(m);
354 spin_unlock_irqrestore(&m->lock, flags);
355
356 return ret;
357}
358
359static void __switch_pg(struct multipath *m, struct priority_group *pg)
360{
361 lockdep_assert_held(&m->lock);
362
363 m->current_pg = pg;
364
365 /* Must we initialise the PG first, and queue I/O till it's ready? */
366 if (m->hw_handler_name) {
367 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
368 set_bit(MPATHF_QUEUE_IO, &m->flags);
369 } else {
370 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
371 clear_bit(MPATHF_QUEUE_IO, &m->flags);
372 }
373
374 atomic_set(&m->pg_init_count, 0);
375}
376
377static struct pgpath *choose_path_in_pg(struct multipath *m,
378 struct priority_group *pg,
379 size_t nr_bytes)
380{
381 unsigned long flags;
382 struct dm_path *path;
383 struct pgpath *pgpath;
384
385 path = pg->ps.type->select_path(&pg->ps, nr_bytes);
386 if (!path)
387 return ERR_PTR(-ENXIO);
388
389 pgpath = path_to_pgpath(path);
390
391 if (unlikely(READ_ONCE(m->current_pg) != pg)) {
392 /* Only update current_pgpath if pg changed */
393 spin_lock_irqsave(&m->lock, flags);
394 m->current_pgpath = pgpath;
395 __switch_pg(m, pg);
396 spin_unlock_irqrestore(&m->lock, flags);
397 }
398
399 return pgpath;
400}
401
402static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
403{
404 unsigned long flags;
405 struct priority_group *pg;
406 struct pgpath *pgpath;
407 unsigned int bypassed = 1;
408
409 if (!atomic_read(&m->nr_valid_paths)) {
410 spin_lock_irqsave(&m->lock, flags);
411 clear_bit(MPATHF_QUEUE_IO, &m->flags);
412 spin_unlock_irqrestore(&m->lock, flags);
413 goto failed;
414 }
415
416 /* Were we instructed to switch PG? */
417 if (READ_ONCE(m->next_pg)) {
418 spin_lock_irqsave(&m->lock, flags);
419 pg = m->next_pg;
420 if (!pg) {
421 spin_unlock_irqrestore(&m->lock, flags);
422 goto check_current_pg;
423 }
424 m->next_pg = NULL;
425 spin_unlock_irqrestore(&m->lock, flags);
426 pgpath = choose_path_in_pg(m, pg, nr_bytes);
427 if (!IS_ERR_OR_NULL(pgpath))
428 return pgpath;
429 }
430
431 /* Don't change PG until it has no remaining paths */
432check_current_pg:
433 pg = READ_ONCE(m->current_pg);
434 if (pg) {
435 pgpath = choose_path_in_pg(m, pg, nr_bytes);
436 if (!IS_ERR_OR_NULL(pgpath))
437 return pgpath;
438 }
439
440 /*
441 * Loop through priority groups until we find a valid path.
442 * First time we skip PGs marked 'bypassed'.
443 * Second time we only try the ones we skipped, but set
444 * pg_init_delay_retry so we do not hammer controllers.
445 */
446 do {
447 list_for_each_entry(pg, &m->priority_groups, list) {
448 if (pg->bypassed == !!bypassed)
449 continue;
450 pgpath = choose_path_in_pg(m, pg, nr_bytes);
451 if (!IS_ERR_OR_NULL(pgpath)) {
452 if (!bypassed) {
453 spin_lock_irqsave(&m->lock, flags);
454 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
455 spin_unlock_irqrestore(&m->lock, flags);
456 }
457 return pgpath;
458 }
459 }
460 } while (bypassed--);
461
462failed:
463 spin_lock_irqsave(&m->lock, flags);
464 m->current_pgpath = NULL;
465 m->current_pg = NULL;
466 spin_unlock_irqrestore(&m->lock, flags);
467
468 return NULL;
469}
470
471/*
 472 * dm_report_EIO() is a macro rather than a function so that
 473 * pr_debug_ratelimited() reports the function name and line number
 474 * of the caller that invoked it.
475 */
476#define dm_report_EIO(m) \
477 DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
478 dm_table_device_name((m)->ti->table), \
479 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
480 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
481 dm_noflush_suspending((m)->ti))
482
483/*
484 * Check whether bios must be queued in the device-mapper core rather
485 * than here in the target.
486 */
487static bool __must_push_back(struct multipath *m)
488{
489 return dm_noflush_suspending(m->ti);
490}
491
492static bool must_push_back_rq(struct multipath *m)
493{
494 unsigned long flags;
495 bool ret;
496
497 spin_lock_irqsave(&m->lock, flags);
498 ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
499 spin_unlock_irqrestore(&m->lock, flags);
500
501 return ret;
502}
503
504/*
505 * Map cloned requests (request-based multipath)
506 */
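/*
 * Return value contract (acted on by dm core / dm-rq): DM_MAPIO_REMAPPED
 * means dispatch the allocated clone to the chosen path, DM_MAPIO_REQUEUE
 * and DM_MAPIO_DELAY_REQUEUE mean requeue the original request (immediately
 * or after a delay), and DM_MAPIO_KILL fails it with an error.
 */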
507static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
508 union map_info *map_context,
509 struct request **__clone)
510{
511 struct multipath *m = ti->private;
512 size_t nr_bytes = blk_rq_bytes(rq);
513 struct pgpath *pgpath;
514 struct block_device *bdev;
515 struct dm_mpath_io *mpio = get_mpio(map_context);
516 struct request_queue *q;
517 struct request *clone;
518
519 /* Do we need to select a new pgpath? */
520 pgpath = READ_ONCE(m->current_pgpath);
521 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
522 pgpath = choose_pgpath(m, nr_bytes);
523
524 if (!pgpath) {
525 if (must_push_back_rq(m))
526 return DM_MAPIO_DELAY_REQUEUE;
527 dm_report_EIO(m); /* Failed */
528 return DM_MAPIO_KILL;
529 } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
530 mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
531 pg_init_all_paths(m);
532 return DM_MAPIO_DELAY_REQUEUE;
533 }
534
535 mpio->pgpath = pgpath;
536 mpio->nr_bytes = nr_bytes;
537
538 bdev = pgpath->path.dev->bdev;
539 q = bdev_get_queue(bdev);
540 clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
541 BLK_MQ_REQ_NOWAIT);
542 if (IS_ERR(clone)) {
543 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
544 if (blk_queue_dying(q)) {
545 atomic_inc(&m->pg_init_in_progress);
546 activate_or_offline_path(pgpath);
547 return DM_MAPIO_DELAY_REQUEUE;
548 }
549
550 /*
551 * blk-mq's SCHED_RESTART can cover this requeue, so we
552 * needn't deal with it by DELAY_REQUEUE. More importantly,
553 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
554 * get the queue busy feedback (via BLK_STS_RESOURCE),
555 * otherwise I/O merging can suffer.
556 */
557 return DM_MAPIO_REQUEUE;
558 }
559 clone->bio = clone->biotail = NULL;
560 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
561 *__clone = clone;
562
563 if (pgpath->pg->ps.type->start_io)
564 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
565 &pgpath->path,
566 nr_bytes);
567 return DM_MAPIO_REMAPPED;
568}
569
570static void multipath_release_clone(struct request *clone,
571 union map_info *map_context)
572{
573 if (unlikely(map_context)) {
574 /*
575 * non-NULL map_context means caller is still map
576 * method; must undo multipath_clone_and_map()
577 */
578 struct dm_mpath_io *mpio = get_mpio(map_context);
579 struct pgpath *pgpath = mpio->pgpath;
580
581 if (pgpath && pgpath->pg->ps.type->end_io)
582 pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
583 &pgpath->path,
584 mpio->nr_bytes,
585 clone->io_start_time_ns);
586 }
587
588 blk_mq_free_request(clone);
589}
590
591/*
592 * Map cloned bios (bio-based multipath)
593 */
594
595static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
596{
597 /* Queue for the daemon to resubmit */
598 bio_list_add(&m->queued_bios, bio);
599 if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
600 queue_work(kmultipathd, &m->process_queued_bios);
601}
602
603static void multipath_queue_bio(struct multipath *m, struct bio *bio)
604{
605 unsigned long flags;
606
607 spin_lock_irqsave(&m->lock, flags);
608 __multipath_queue_bio(m, bio);
609 spin_unlock_irqrestore(&m->lock, flags);
610}
611
612static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
613{
614 struct pgpath *pgpath;
615 unsigned long flags;
616
617 /* Do we need to select a new pgpath? */
618 pgpath = READ_ONCE(m->current_pgpath);
619 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
620 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
621
622 if (!pgpath) {
623 spin_lock_irqsave(&m->lock, flags);
624 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
625 __multipath_queue_bio(m, bio);
626 pgpath = ERR_PTR(-EAGAIN);
627 }
628 spin_unlock_irqrestore(&m->lock, flags);
629
630 } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
631 mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
632 multipath_queue_bio(m, bio);
633 pg_init_all_paths(m);
634 return ERR_PTR(-EAGAIN);
635 }
636
637 return pgpath;
638}
639
640static int __multipath_map_bio(struct multipath *m, struct bio *bio,
641 struct dm_mpath_io *mpio)
642{
643 struct pgpath *pgpath = __map_bio(m, bio);
644
645 if (IS_ERR(pgpath))
646 return DM_MAPIO_SUBMITTED;
647
648 if (!pgpath) {
649 if (__must_push_back(m))
650 return DM_MAPIO_REQUEUE;
651 dm_report_EIO(m);
652 return DM_MAPIO_KILL;
653 }
654
655 mpio->pgpath = pgpath;
656
657 if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
658 mpio->start_time_ns = ktime_get_ns();
659
660 bio->bi_status = 0;
661 bio_set_dev(bio, pgpath->path.dev->bdev);
662 bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
663
664 if (pgpath->pg->ps.type->start_io)
665 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
666 &pgpath->path,
667 mpio->nr_bytes);
668 return DM_MAPIO_REMAPPED;
669}
670
671static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
672{
673 struct multipath *m = ti->private;
674 struct dm_mpath_io *mpio = NULL;
675
676 multipath_init_per_bio_data(bio, &mpio);
677 return __multipath_map_bio(m, bio, mpio);
678}
679
680static void process_queued_io_list(struct multipath *m)
681{
682 if (m->queue_mode == DM_TYPE_REQUEST_BASED)
683 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
684 else if (m->queue_mode == DM_TYPE_BIO_BASED)
685 queue_work(kmultipathd, &m->process_queued_bios);
686}
687
688static void process_queued_bios(struct work_struct *work)
689{
690 int r;
691 unsigned long flags;
692 struct bio *bio;
693 struct bio_list bios;
694 struct blk_plug plug;
695 struct multipath *m =
696 container_of(work, struct multipath, process_queued_bios);
697
698 bio_list_init(&bios);
699
700 spin_lock_irqsave(&m->lock, flags);
701
702 if (bio_list_empty(&m->queued_bios)) {
703 spin_unlock_irqrestore(&m->lock, flags);
704 return;
705 }
706
707 bio_list_merge_init(&bios, &m->queued_bios);
708
709 spin_unlock_irqrestore(&m->lock, flags);
710
711 blk_start_plug(&plug);
712 while ((bio = bio_list_pop(&bios))) {
713 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
714
715 dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
716 r = __multipath_map_bio(m, bio, mpio);
717 switch (r) {
718 case DM_MAPIO_KILL:
719 bio->bi_status = BLK_STS_IOERR;
720 bio_endio(bio);
721 break;
722 case DM_MAPIO_REQUEUE:
723 bio->bi_status = BLK_STS_DM_REQUEUE;
724 bio_endio(bio);
725 break;
726 case DM_MAPIO_REMAPPED:
727 submit_bio_noacct(bio);
728 break;
729 case DM_MAPIO_SUBMITTED:
730 break;
731 default:
732 WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
733 }
734 }
735 blk_finish_plug(&plug);
736}
737
738/*
739 * If we run out of usable paths, should we queue I/O or error it?
740 */
741static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
742 bool save_old_value, const char *caller)
743{
744 unsigned long flags;
745 bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
746 const char *dm_dev_name = dm_table_device_name(m->ti->table);
747
748 DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
749 dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);
750
751 spin_lock_irqsave(&m->lock, flags);
752
753 queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
754 saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
755
756 if (save_old_value) {
757 if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
758 DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
759 dm_dev_name);
760 } else
761 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
762 } else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
763		/* "fail_if_no_path" message: drop the saved state too so resume won't restore queueing. */
764 clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
765 }
766 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);
767
768 DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
769 dm_dev_name, __func__,
770 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
771 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
772 dm_noflush_suspending(m->ti));
773
774 spin_unlock_irqrestore(&m->lock, flags);
775
776 if (!f_queue_if_no_path) {
777 dm_table_run_md_queue_async(m->ti->table);
778 process_queued_io_list(m);
779 }
780
781 return 0;
782}
783
784/*
785 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
786 * process any queued I/O.
787 */
788static void queue_if_no_path_timeout_work(struct timer_list *t)
789{
790 struct multipath *m = from_timer(m, t, nopath_timer);
791
792 DMWARN("queue_if_no_path timeout on %s, failing queued IO",
793 dm_table_device_name(m->ti->table));
794 queue_if_no_path(m, false, false, __func__);
795}
796
797/*
798 * Enable the queue_if_no_path timeout if necessary.
799 * Called with m->lock held.
800 */
801static void enable_nopath_timeout(struct multipath *m)
802{
803 unsigned long queue_if_no_path_timeout =
804 READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
805
806 lockdep_assert_held(&m->lock);
807
808 if (queue_if_no_path_timeout > 0 &&
809 atomic_read(&m->nr_valid_paths) == 0 &&
810 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
811 mod_timer(&m->nopath_timer,
812 jiffies + queue_if_no_path_timeout);
813 }
814}
815
816static void disable_nopath_timeout(struct multipath *m)
817{
818 del_timer_sync(&m->nopath_timer);
819}
820
821/*
822 * An event is triggered whenever a path is taken out of use.
823 * Includes path failure and PG bypass.
824 */
825static void trigger_event(struct work_struct *work)
826{
827 struct multipath *m =
828 container_of(work, struct multipath, trigger_event);
829
830 dm_table_event(m->ti->table);
831}
832
833/*
834 *---------------------------------------------------------------
835 * Constructor/argument parsing:
836 * <#multipath feature args> [<arg>]*
837 * <#hw_handler args> [hw_handler [<arg>]*]
838 * <#priority groups>
839 * <initial priority group>
840 * [<selector> <#selector args> [<arg>]*
841 * <#paths> <#per-path selector args>
842 * [<path> [<arg>]* ]+ ]+
843 *---------------------------------------------------------------
844 */
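/*
 * Purely illustrative example of the format above (device numbers and sizes
 * are made up): one feature arg, no hardware handler, one priority group of
 * two paths using the "round-robin" selector with one per-path arg (the
 * repeat count):
 *
 *   0 2097152 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 */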
845static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
846 struct dm_target *ti)
847{
848 int r;
849 struct path_selector_type *pst;
850 unsigned int ps_argc;
851
852 static const struct dm_arg _args[] = {
853 {0, 1024, "invalid number of path selector args"},
854 };
855
856 pst = dm_get_path_selector(dm_shift_arg(as));
857 if (!pst) {
858 ti->error = "unknown path selector type";
859 return -EINVAL;
860 }
861
862 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
863 if (r) {
864 dm_put_path_selector(pst);
865 return -EINVAL;
866 }
867
868 r = pst->create(&pg->ps, ps_argc, as->argv);
869 if (r) {
870 dm_put_path_selector(pst);
871 ti->error = "path selector constructor failed";
872 return r;
873 }
874
875 pg->ps.type = pst;
876 dm_consume_args(as, ps_argc);
877
878 return 0;
879}
880
881static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
882 const char **attached_handler_name, char **error)
883{
884 struct request_queue *q = bdev_get_queue(bdev);
885 int r;
886
887 if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
888retain:
889 if (*attached_handler_name) {
890 /*
891 * Clear any hw_handler_params associated with a
892 * handler that isn't already attached.
893 */
894 if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
895 kfree(m->hw_handler_params);
896 m->hw_handler_params = NULL;
897 }
898
899 /*
900 * Reset hw_handler_name to match the attached handler
901 *
902 * NB. This modifies the table line to show the actual
903 * handler instead of the original table passed in.
904 */
905 kfree(m->hw_handler_name);
906 m->hw_handler_name = *attached_handler_name;
907 *attached_handler_name = NULL;
908 }
909 }
910
911 if (m->hw_handler_name) {
912 r = scsi_dh_attach(q, m->hw_handler_name);
913 if (r == -EBUSY) {
914 DMINFO("retaining handler on device %pg", bdev);
915 goto retain;
916 }
917 if (r < 0) {
918 *error = "error attaching hardware handler";
919 return r;
920 }
921
922 if (m->hw_handler_params) {
923 r = scsi_dh_set_params(q, m->hw_handler_params);
924 if (r < 0) {
925 *error = "unable to set hardware handler parameters";
926 return r;
927 }
928 }
929 }
930
931 return 0;
932}
933
934static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
935 struct dm_target *ti)
936{
937 int r;
938 struct pgpath *p;
939 struct multipath *m = ti->private;
940 struct request_queue *q;
941 const char *attached_handler_name = NULL;
942
943 /* we need at least a path arg */
944 if (as->argc < 1) {
945 ti->error = "no device given";
946 return ERR_PTR(-EINVAL);
947 }
948
949 p = alloc_pgpath();
950 if (!p)
951 return ERR_PTR(-ENOMEM);
952
953 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
954 &p->path.dev);
955 if (r) {
956 ti->error = "error getting device";
957 goto bad;
958 }
959
960 q = bdev_get_queue(p->path.dev->bdev);
961 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
962 if (attached_handler_name || m->hw_handler_name) {
963 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
964 r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
965 kfree(attached_handler_name);
966 if (r) {
967 dm_put_device(ti, p->path.dev);
968 goto bad;
969 }
970 }
971
972 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
973 if (r) {
974 dm_put_device(ti, p->path.dev);
975 goto bad;
976 }
977
978 return p;
979 bad:
980 free_pgpath(p);
981 return ERR_PTR(r);
982}
983
984static struct priority_group *parse_priority_group(struct dm_arg_set *as,
985 struct multipath *m)
986{
987 static const struct dm_arg _args[] = {
988 {1, 1024, "invalid number of paths"},
989 {0, 1024, "invalid number of selector args"}
990 };
991
992 int r;
993 unsigned int i, nr_selector_args, nr_args;
994 struct priority_group *pg;
995 struct dm_target *ti = m->ti;
996
997 if (as->argc < 2) {
998 as->argc = 0;
999 ti->error = "not enough priority group arguments";
1000 return ERR_PTR(-EINVAL);
1001 }
1002
1003 pg = alloc_priority_group();
1004 if (!pg) {
1005 ti->error = "couldn't allocate priority group";
1006 return ERR_PTR(-ENOMEM);
1007 }
1008 pg->m = m;
1009
1010 r = parse_path_selector(as, pg, ti);
1011 if (r)
1012 goto bad;
1013
1014 /*
1015 * read the paths
1016 */
1017 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
1018 if (r)
1019 goto bad;
1020
1021 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
1022 if (r)
1023 goto bad;
1024
1025 nr_args = 1 + nr_selector_args;
1026 for (i = 0; i < pg->nr_pgpaths; i++) {
1027 struct pgpath *pgpath;
1028 struct dm_arg_set path_args;
1029
1030 if (as->argc < nr_args) {
1031 ti->error = "not enough path parameters";
1032 r = -EINVAL;
1033 goto bad;
1034 }
1035
1036 path_args.argc = nr_args;
1037 path_args.argv = as->argv;
1038
1039 pgpath = parse_path(&path_args, &pg->ps, ti);
1040 if (IS_ERR(pgpath)) {
1041 r = PTR_ERR(pgpath);
1042 goto bad;
1043 }
1044
1045 pgpath->pg = pg;
1046 list_add_tail(&pgpath->list, &pg->pgpaths);
1047 dm_consume_args(as, nr_args);
1048 }
1049
1050 return pg;
1051
1052 bad:
1053 free_priority_group(pg, ti);
1054 return ERR_PTR(r);
1055}
1056
1057static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
1058{
1059 unsigned int hw_argc;
1060 int ret;
1061 struct dm_target *ti = m->ti;
1062
1063 static const struct dm_arg _args[] = {
1064 {0, 1024, "invalid number of hardware handler args"},
1065 };
1066
1067 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
1068 return -EINVAL;
1069
1070 if (!hw_argc)
1071 return 0;
1072
1073 if (m->queue_mode == DM_TYPE_BIO_BASED) {
1074 dm_consume_args(as, hw_argc);
1075 DMERR("bio-based multipath doesn't allow hardware handler args");
1076 return 0;
1077 }
1078
1079 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1080 if (!m->hw_handler_name)
1081 return -EINVAL;
1082
1083 if (hw_argc > 1) {
1084 char *p;
1085 int i, j, len = 4;
1086
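		/*
		 * Build the parameter string later handed to
		 * scsi_dh_set_params(): the argument count followed by each
		 * argument, all separated by '\0' ("<#args>\0<arg1>\0<arg2>...").
		 */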
1087 for (i = 0; i <= hw_argc - 2; i++)
1088 len += strlen(as->argv[i]) + 1;
1089 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1090 if (!p) {
1091 ti->error = "memory allocation failed";
1092 ret = -ENOMEM;
1093 goto fail;
1094 }
1095 j = sprintf(p, "%d", hw_argc - 1);
1096 for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
1097 j = sprintf(p, "%s", as->argv[i]);
1098 }
1099 dm_consume_args(as, hw_argc - 1);
1100
1101 return 0;
1102fail:
1103 kfree(m->hw_handler_name);
1104 m->hw_handler_name = NULL;
1105 return ret;
1106}
1107
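/*
 * Feature arguments arrive as "<count> [<arg>]*".  Illustrative examples
 * (not taken from a real table):
 *   "0"                                    - no features
 *   "2 pg_init_retries 3"                  - retry pg_init up to 3 times
 *   "3 queue_if_no_path queue_mode bio"    - queue on path loss, bio-based
 */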
1108static int parse_features(struct dm_arg_set *as, struct multipath *m)
1109{
1110 int r;
1111 unsigned int argc;
1112 struct dm_target *ti = m->ti;
1113 const char *arg_name;
1114
1115 static const struct dm_arg _args[] = {
1116 {0, 8, "invalid number of feature args"},
1117 {1, 50, "pg_init_retries must be between 1 and 50"},
1118 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1119 };
1120
1121 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1122 if (r)
1123 return -EINVAL;
1124
1125 if (!argc)
1126 return 0;
1127
1128 do {
1129 arg_name = dm_shift_arg(as);
1130 argc--;
1131
1132 if (!strcasecmp(arg_name, "queue_if_no_path")) {
1133 r = queue_if_no_path(m, true, false, __func__);
1134 continue;
1135 }
1136
1137 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1138 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1139 continue;
1140 }
1141
1142 if (!strcasecmp(arg_name, "pg_init_retries") &&
1143 (argc >= 1)) {
1144 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1145 argc--;
1146 continue;
1147 }
1148
1149 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1150 (argc >= 1)) {
1151 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1152 argc--;
1153 continue;
1154 }
1155
1156 if (!strcasecmp(arg_name, "queue_mode") &&
1157 (argc >= 1)) {
1158 const char *queue_mode_name = dm_shift_arg(as);
1159
1160 if (!strcasecmp(queue_mode_name, "bio"))
1161 m->queue_mode = DM_TYPE_BIO_BASED;
1162 else if (!strcasecmp(queue_mode_name, "rq") ||
1163 !strcasecmp(queue_mode_name, "mq"))
1164 m->queue_mode = DM_TYPE_REQUEST_BASED;
1165 else {
1166 ti->error = "Unknown 'queue_mode' requested";
1167 r = -EINVAL;
1168 }
1169 argc--;
1170 continue;
1171 }
1172
1173 ti->error = "Unrecognised multipath feature request";
1174 r = -EINVAL;
1175 } while (argc && !r);
1176
1177 return r;
1178}
1179
1180static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1181{
1182 /* target arguments */
1183 static const struct dm_arg _args[] = {
1184 {0, 1024, "invalid number of priority groups"},
1185 {0, 1024, "invalid initial priority group number"},
1186 };
1187
1188 int r;
1189 struct multipath *m;
1190 struct dm_arg_set as;
1191 unsigned int pg_count = 0;
1192 unsigned int next_pg_num;
1193 unsigned long flags;
1194
1195 as.argc = argc;
1196 as.argv = argv;
1197
1198 m = alloc_multipath(ti);
1199 if (!m) {
1200 ti->error = "can't allocate multipath";
1201 return -EINVAL;
1202 }
1203
1204 r = parse_features(&as, m);
1205 if (r)
1206 goto bad;
1207
1208 r = alloc_multipath_stage2(ti, m);
1209 if (r)
1210 goto bad;
1211
1212 r = parse_hw_handler(&as, m);
1213 if (r)
1214 goto bad;
1215
1216 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1217 if (r)
1218 goto bad;
1219
1220 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1221 if (r)
1222 goto bad;
1223
1224 if ((!m->nr_priority_groups && next_pg_num) ||
1225 (m->nr_priority_groups && !next_pg_num)) {
1226 ti->error = "invalid initial priority group";
1227 r = -EINVAL;
1228 goto bad;
1229 }
1230
1231 /* parse the priority groups */
1232 while (as.argc) {
1233 struct priority_group *pg;
1234 unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
1235
1236 pg = parse_priority_group(&as, m);
1237 if (IS_ERR(pg)) {
1238 r = PTR_ERR(pg);
1239 goto bad;
1240 }
1241
1242 nr_valid_paths += pg->nr_pgpaths;
1243 atomic_set(&m->nr_valid_paths, nr_valid_paths);
1244
1245 list_add_tail(&pg->list, &m->priority_groups);
1246 pg_count++;
1247 pg->pg_num = pg_count;
1248 if (!--next_pg_num)
1249 m->next_pg = pg;
1250 }
1251
1252 if (pg_count != m->nr_priority_groups) {
1253 ti->error = "priority group count mismatch";
1254 r = -EINVAL;
1255 goto bad;
1256 }
1257
1258 spin_lock_irqsave(&m->lock, flags);
1259 enable_nopath_timeout(m);
1260 spin_unlock_irqrestore(&m->lock, flags);
1261
1262 ti->num_flush_bios = 1;
1263 ti->num_discard_bios = 1;
1264 ti->num_write_zeroes_bios = 1;
1265 if (m->queue_mode == DM_TYPE_BIO_BASED)
1266 ti->per_io_data_size = multipath_per_bio_data_size();
1267 else
1268 ti->per_io_data_size = sizeof(struct dm_mpath_io);
1269
1270 return 0;
1271
1272 bad:
1273 free_multipath(m);
1274 return r;
1275}
1276
1277static void multipath_wait_for_pg_init_completion(struct multipath *m)
1278{
1279 DEFINE_WAIT(wait);
1280
1281 while (1) {
1282 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1283
1284 if (!atomic_read(&m->pg_init_in_progress))
1285 break;
1286
1287 io_schedule();
1288 }
1289 finish_wait(&m->pg_init_wait, &wait);
1290}
1291
1292static void flush_multipath_work(struct multipath *m)
1293{
1294 if (m->hw_handler_name) {
1295 unsigned long flags;
1296
1297 if (!atomic_read(&m->pg_init_in_progress))
1298 goto skip;
1299
1300 spin_lock_irqsave(&m->lock, flags);
1301 if (atomic_read(&m->pg_init_in_progress) &&
1302 !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
1303 spin_unlock_irqrestore(&m->lock, flags);
1304
1305 flush_workqueue(kmpath_handlerd);
1306 multipath_wait_for_pg_init_completion(m);
1307
1308 spin_lock_irqsave(&m->lock, flags);
1309 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1310 }
1311 spin_unlock_irqrestore(&m->lock, flags);
1312 }
1313skip:
1314 if (m->queue_mode == DM_TYPE_BIO_BASED)
1315 flush_work(&m->process_queued_bios);
1316 flush_work(&m->trigger_event);
1317}
1318
1319static void multipath_dtr(struct dm_target *ti)
1320{
1321 struct multipath *m = ti->private;
1322
1323 disable_nopath_timeout(m);
1324 flush_multipath_work(m);
1325 free_multipath(m);
1326}
1327
1328/*
1329 * Take a path out of use.
1330 */
1331static int fail_path(struct pgpath *pgpath)
1332{
1333 unsigned long flags;
1334 struct multipath *m = pgpath->pg->m;
1335
1336 spin_lock_irqsave(&m->lock, flags);
1337
1338 if (!pgpath->is_active)
1339 goto out;
1340
1341 DMWARN("%s: Failing path %s.",
1342 dm_table_device_name(m->ti->table),
1343 pgpath->path.dev->name);
1344
1345 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1346 pgpath->is_active = false;
1347 pgpath->fail_count++;
1348
1349 atomic_dec(&m->nr_valid_paths);
1350
1351 if (pgpath == m->current_pgpath)
1352 m->current_pgpath = NULL;
1353
1354 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1355 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1356
1357 queue_work(dm_mpath_wq, &m->trigger_event);
1358
1359 enable_nopath_timeout(m);
1360
1361out:
1362 spin_unlock_irqrestore(&m->lock, flags);
1363
1364 return 0;
1365}
1366
1367/*
1368 * Reinstate a previously-failed path
1369 */
1370static int reinstate_path(struct pgpath *pgpath)
1371{
1372 int r = 0, run_queue = 0;
1373 unsigned long flags;
1374 struct multipath *m = pgpath->pg->m;
1375 unsigned int nr_valid_paths;
1376
1377 spin_lock_irqsave(&m->lock, flags);
1378
1379 if (pgpath->is_active)
1380 goto out;
1381
1382 DMWARN("%s: Reinstating path %s.",
1383 dm_table_device_name(m->ti->table),
1384 pgpath->path.dev->name);
1385
1386 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1387 if (r)
1388 goto out;
1389
1390 pgpath->is_active = true;
1391
1392 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1393 if (nr_valid_paths == 1) {
1394 m->current_pgpath = NULL;
1395 run_queue = 1;
1396 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1397 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1398 atomic_inc(&m->pg_init_in_progress);
1399 }
1400
1401 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1402 pgpath->path.dev->name, nr_valid_paths);
1403
1404 schedule_work(&m->trigger_event);
1405
1406out:
1407 spin_unlock_irqrestore(&m->lock, flags);
1408 if (run_queue) {
1409 dm_table_run_md_queue_async(m->ti->table);
1410 process_queued_io_list(m);
1411 }
1412
1413 if (pgpath->is_active)
1414 disable_nopath_timeout(m);
1415
1416 return r;
1417}
1418
1419/*
1420 * Fail or reinstate all paths that match the provided struct dm_dev.
1421 */
1422static int action_dev(struct multipath *m, dev_t dev, action_fn action)
1423{
1424 int r = -EINVAL;
1425 struct pgpath *pgpath;
1426 struct priority_group *pg;
1427
1428 list_for_each_entry(pg, &m->priority_groups, list) {
1429 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1430 if (pgpath->path.dev->bdev->bd_dev == dev)
1431 r = action(pgpath);
1432 }
1433 }
1434
1435 return r;
1436}
1437
1438/*
1439 * Temporarily try to avoid having to use the specified PG
1440 */
1441static void bypass_pg(struct multipath *m, struct priority_group *pg,
1442 bool bypassed)
1443{
1444 unsigned long flags;
1445
1446 spin_lock_irqsave(&m->lock, flags);
1447
1448 pg->bypassed = bypassed;
1449 m->current_pgpath = NULL;
1450 m->current_pg = NULL;
1451
1452 spin_unlock_irqrestore(&m->lock, flags);
1453
1454 schedule_work(&m->trigger_event);
1455}
1456
1457/*
1458 * Switch to using the specified PG from the next I/O that gets mapped
1459 */
1460static int switch_pg_num(struct multipath *m, const char *pgstr)
1461{
1462 struct priority_group *pg;
1463 unsigned int pgnum;
1464 unsigned long flags;
1465 char dummy;
1466
1467 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1468 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1469 DMWARN("invalid PG number supplied to %s", __func__);
1470 return -EINVAL;
1471 }
1472
1473 spin_lock_irqsave(&m->lock, flags);
1474 list_for_each_entry(pg, &m->priority_groups, list) {
1475 pg->bypassed = false;
1476 if (--pgnum)
1477 continue;
1478
1479 m->current_pgpath = NULL;
1480 m->current_pg = NULL;
1481 m->next_pg = pg;
1482 }
1483 spin_unlock_irqrestore(&m->lock, flags);
1484
1485 schedule_work(&m->trigger_event);
1486 return 0;
1487}
1488
1489/*
1490 * Set/clear bypassed status of a PG.
1491 * PGs are numbered upwards from 1 in the order they were declared.
1492 */
1493static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1494{
1495 struct priority_group *pg;
1496 unsigned int pgnum;
1497 char dummy;
1498
1499 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1500 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1501 DMWARN("invalid PG number supplied to bypass_pg");
1502 return -EINVAL;
1503 }
1504
1505 list_for_each_entry(pg, &m->priority_groups, list) {
1506 if (!--pgnum)
1507 break;
1508 }
1509
1510 bypass_pg(m, pg, bypassed);
1511 return 0;
1512}
1513
1514/*
1515 * Should we retry pg_init immediately?
1516 */
1517static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1518{
1519 unsigned long flags;
1520 bool limit_reached = false;
1521
1522 spin_lock_irqsave(&m->lock, flags);
1523
1524 if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1525 !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1526 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1527 else
1528 limit_reached = true;
1529
1530 spin_unlock_irqrestore(&m->lock, flags);
1531
1532 return limit_reached;
1533}
1534
1535static void pg_init_done(void *data, int errors)
1536{
1537 struct pgpath *pgpath = data;
1538 struct priority_group *pg = pgpath->pg;
1539 struct multipath *m = pg->m;
1540 unsigned long flags;
1541 bool delay_retry = false;
1542
1543 /* device or driver problems */
1544 switch (errors) {
1545 case SCSI_DH_OK:
1546 break;
1547 case SCSI_DH_NOSYS:
1548 if (!m->hw_handler_name) {
1549 errors = 0;
1550 break;
1551 }
1552 DMERR("Could not failover the device: Handler scsi_dh_%s "
1553 "Error %d.", m->hw_handler_name, errors);
1554 /*
1555 * Fail path for now, so we do not ping pong
1556 */
1557 fail_path(pgpath);
1558 break;
1559 case SCSI_DH_DEV_TEMP_BUSY:
1560 /*
1561 * Probably doing something like FW upgrade on the
1562 * controller so try the other pg.
1563 */
1564 bypass_pg(m, pg, true);
1565 break;
1566 case SCSI_DH_RETRY:
1567 /* Wait before retrying. */
1568 delay_retry = true;
1569 fallthrough;
1570 case SCSI_DH_IMM_RETRY:
1571 case SCSI_DH_RES_TEMP_UNAVAIL:
1572 if (pg_init_limit_reached(m, pgpath))
1573 fail_path(pgpath);
1574 errors = 0;
1575 break;
1576 case SCSI_DH_DEV_OFFLINED:
1577 default:
1578 /*
1579 * We probably do not want to fail the path for a device
1580 * error, but this is what the old dm did. In future
1581 * patches we can do more advanced handling.
1582 */
1583 fail_path(pgpath);
1584 }
1585
1586 spin_lock_irqsave(&m->lock, flags);
1587 if (errors) {
1588 if (pgpath == m->current_pgpath) {
1589 DMERR("Could not failover device. Error %d.", errors);
1590 m->current_pgpath = NULL;
1591 m->current_pg = NULL;
1592 }
1593 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1594 pg->bypassed = false;
1595
1596 if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1597		/* Activations of other paths are still ongoing */
1598 goto out;
1599
1600 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1601 if (delay_retry)
1602 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1603 else
1604 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1605
1606 if (__pg_init_all_paths(m))
1607 goto out;
1608 }
1609 clear_bit(MPATHF_QUEUE_IO, &m->flags);
1610
1611 process_queued_io_list(m);
1612
1613 /*
1614 * Wake up any thread waiting to suspend.
1615 */
1616 wake_up(&m->pg_init_wait);
1617
1618out:
1619 spin_unlock_irqrestore(&m->lock, flags);
1620}
1621
1622static void activate_or_offline_path(struct pgpath *pgpath)
1623{
1624 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1625
1626 if (pgpath->is_active && !blk_queue_dying(q))
1627 scsi_dh_activate(q, pg_init_done, pgpath);
1628 else
1629 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1630}
1631
1632static void activate_path_work(struct work_struct *work)
1633{
1634 struct pgpath *pgpath =
1635 container_of(work, struct pgpath, activate_path.work);
1636
1637 activate_or_offline_path(pgpath);
1638}
1639
1640static int multipath_end_io(struct dm_target *ti, struct request *clone,
1641 blk_status_t error, union map_info *map_context)
1642{
1643 struct dm_mpath_io *mpio = get_mpio(map_context);
1644 struct pgpath *pgpath = mpio->pgpath;
1645 int r = DM_ENDIO_DONE;
1646
1647 /*
1648 * We don't queue any clone request inside the multipath target
1649 * during end I/O handling, since those clone requests don't have
1650 * bio clones. If we queue them inside the multipath target,
1651 * we need to make bio clones, that requires memory allocation.
1652 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1653 * don't have bio clones.)
1654 * Instead of queueing the clone request here, we queue the original
1655 * request into dm core, which will remake a clone request and
1656 * clone bios for it and resubmit it later.
1657 */
1658 if (error && blk_path_error(error)) {
1659 struct multipath *m = ti->private;
1660
1661 if (error == BLK_STS_RESOURCE)
1662 r = DM_ENDIO_DELAY_REQUEUE;
1663 else
1664 r = DM_ENDIO_REQUEUE;
1665
1666 if (pgpath)
1667 fail_path(pgpath);
1668
1669 if (!atomic_read(&m->nr_valid_paths) &&
1670 !must_push_back_rq(m)) {
1671 if (error == BLK_STS_IOERR)
1672 dm_report_EIO(m);
1673 /* complete with the original error */
1674 r = DM_ENDIO_DONE;
1675 }
1676 }
1677
1678 if (pgpath) {
1679 struct path_selector *ps = &pgpath->pg->ps;
1680
1681 if (ps->type->end_io)
1682 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1683 clone->io_start_time_ns);
1684 }
1685
1686 return r;
1687}
1688
1689static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1690 blk_status_t *error)
1691{
1692 struct multipath *m = ti->private;
1693 struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1694 struct pgpath *pgpath = mpio->pgpath;
1695 unsigned long flags;
1696 int r = DM_ENDIO_DONE;
1697
1698 if (!*error || !blk_path_error(*error))
1699 goto done;
1700
1701 if (pgpath)
1702 fail_path(pgpath);
1703
1704 if (!atomic_read(&m->nr_valid_paths)) {
1705 spin_lock_irqsave(&m->lock, flags);
1706 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1707 if (__must_push_back(m)) {
1708 r = DM_ENDIO_REQUEUE;
1709 } else {
1710 dm_report_EIO(m);
1711 *error = BLK_STS_IOERR;
1712 }
1713 spin_unlock_irqrestore(&m->lock, flags);
1714 goto done;
1715 }
1716 spin_unlock_irqrestore(&m->lock, flags);
1717 }
1718
1719 multipath_queue_bio(m, clone);
1720 r = DM_ENDIO_INCOMPLETE;
1721done:
1722 if (pgpath) {
1723 struct path_selector *ps = &pgpath->pg->ps;
1724
1725 if (ps->type->end_io)
1726 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1727 (mpio->start_time_ns ?:
1728 dm_start_time_ns_from_clone(clone)));
1729 }
1730
1731 return r;
1732}
1733
1734/*
1735 * Suspend with flush can't complete until all the I/O is processed
1736 * so if the last path fails we must error any remaining I/O.
1737 * - Note that if the freeze_bdev fails while suspending, the
1738 * queue_if_no_path state is lost - userspace should reset it.
1739 * Otherwise, during noflush suspend, queue_if_no_path will not change.
1740 */
1741static void multipath_presuspend(struct dm_target *ti)
1742{
1743 struct multipath *m = ti->private;
1744
1745 /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
1746 if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
1747 queue_if_no_path(m, false, true, __func__);
1748}
1749
1750static void multipath_postsuspend(struct dm_target *ti)
1751{
1752 struct multipath *m = ti->private;
1753
1754 mutex_lock(&m->work_mutex);
1755 flush_multipath_work(m);
1756 mutex_unlock(&m->work_mutex);
1757}
1758
1759/*
1760 * Restore the queue_if_no_path setting.
1761 */
1762static void multipath_resume(struct dm_target *ti)
1763{
1764 struct multipath *m = ti->private;
1765 unsigned long flags;
1766
1767 spin_lock_irqsave(&m->lock, flags);
1768 if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
1769 set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1770 clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
1771 }
1772
1773 DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
1774 dm_table_device_name(m->ti->table), __func__,
1775 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
1776 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1777
1778 spin_unlock_irqrestore(&m->lock, flags);
1779}
1780
1781/*
1782 * Info output has the following format:
1783 * num_multipath_feature_args [multipath_feature_args]*
1784 * num_handler_status_args [handler_status_args]*
1785 * num_groups init_group_number
1786 * [A|D|E num_ps_status_args [ps_status_args]*
1787 * num_paths num_selector_args
1788 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1789 *
1790 * Table output has the following format (identical to the constructor string):
1791 * num_feature_args [features_args]*
1792 * num_handler_args hw_handler [hw_handler_args]*
1793 * num_groups init_group_number
1794 * [priority selector-name num_ps_args [ps_args]*
1795 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1796 */
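/*
 * Rough INFO example for a map with one PG of two healthy paths (the fields
 * emitted by the path selector vary, so this is illustrative rather than
 * captured from a real device):
 *
 *   2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * i.e. two feature args (queue_io 0, pg_init_count 0), no handler status
 * args, one group with initial group 1, group state 'A', then per path
 * "<dev> A|F <fail_count>" plus any selector status args.
 */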
1797static void multipath_status(struct dm_target *ti, status_type_t type,
1798 unsigned int status_flags, char *result, unsigned int maxlen)
1799{
1800 int sz = 0, pg_counter, pgpath_counter;
1801 unsigned long flags;
1802 struct multipath *m = ti->private;
1803 struct priority_group *pg;
1804 struct pgpath *p;
1805 unsigned int pg_num;
1806 char state;
1807
1808 spin_lock_irqsave(&m->lock, flags);
1809
1810 /* Features */
1811 if (type == STATUSTYPE_INFO)
1812 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1813 atomic_read(&m->pg_init_count));
1814 else {
1815 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1816 (m->pg_init_retries > 0) * 2 +
1817 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1818 test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1819 (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1820
1821 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1822 DMEMIT("queue_if_no_path ");
1823 if (m->pg_init_retries)
1824 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1825 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1826 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1827 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1828 DMEMIT("retain_attached_hw_handler ");
1829 if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1830 switch (m->queue_mode) {
1831 case DM_TYPE_BIO_BASED:
1832 DMEMIT("queue_mode bio ");
1833 break;
1834 default:
1835 WARN_ON_ONCE(true);
1836 break;
1837 }
1838 }
1839 }
1840
1841 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1842 DMEMIT("0 ");
1843 else
1844 DMEMIT("1 %s ", m->hw_handler_name);
1845
1846 DMEMIT("%u ", m->nr_priority_groups);
1847
1848 if (m->next_pg)
1849 pg_num = m->next_pg->pg_num;
1850 else if (m->current_pg)
1851 pg_num = m->current_pg->pg_num;
1852 else
1853 pg_num = (m->nr_priority_groups ? 1 : 0);
1854
1855 DMEMIT("%u ", pg_num);
1856
1857 switch (type) {
1858 case STATUSTYPE_INFO:
1859 list_for_each_entry(pg, &m->priority_groups, list) {
1860 if (pg->bypassed)
1861 state = 'D'; /* Disabled */
1862 else if (pg == m->current_pg)
1863 state = 'A'; /* Currently Active */
1864 else
1865 state = 'E'; /* Enabled */
1866
1867 DMEMIT("%c ", state);
1868
1869 if (pg->ps.type->status)
1870 sz += pg->ps.type->status(&pg->ps, NULL, type,
1871 result + sz,
1872 maxlen - sz);
1873 else
1874 DMEMIT("0 ");
1875
1876 DMEMIT("%u %u ", pg->nr_pgpaths,
1877 pg->ps.type->info_args);
1878
1879 list_for_each_entry(p, &pg->pgpaths, list) {
1880 DMEMIT("%s %s %u ", p->path.dev->name,
1881 p->is_active ? "A" : "F",
1882 p->fail_count);
1883 if (pg->ps.type->status)
1884 sz += pg->ps.type->status(&pg->ps,
1885 &p->path, type, result + sz,
1886 maxlen - sz);
1887 }
1888 }
1889 break;
1890
1891 case STATUSTYPE_TABLE:
1892 list_for_each_entry(pg, &m->priority_groups, list) {
1893 DMEMIT("%s ", pg->ps.type->name);
1894
1895 if (pg->ps.type->status)
1896 sz += pg->ps.type->status(&pg->ps, NULL, type,
1897 result + sz,
1898 maxlen - sz);
1899 else
1900 DMEMIT("0 ");
1901
1902 DMEMIT("%u %u ", pg->nr_pgpaths,
1903 pg->ps.type->table_args);
1904
1905 list_for_each_entry(p, &pg->pgpaths, list) {
1906 DMEMIT("%s ", p->path.dev->name);
1907 if (pg->ps.type->status)
1908 sz += pg->ps.type->status(&pg->ps,
1909 &p->path, type, result + sz,
1910 maxlen - sz);
1911 }
1912 }
1913 break;
1914
1915 case STATUSTYPE_IMA:
1916 sz = 0; /*reset the result pointer*/
1917
1918 DMEMIT_TARGET_NAME_VERSION(ti->type);
1919 DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);
1920
1921 pg_counter = 0;
1922 list_for_each_entry(pg, &m->priority_groups, list) {
1923 if (pg->bypassed)
1924 state = 'D'; /* Disabled */
1925 else if (pg == m->current_pg)
1926 state = 'A'; /* Currently Active */
1927 else
1928 state = 'E'; /* Enabled */
1929 DMEMIT(",pg_state_%d=%c", pg_counter, state);
1930 DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
1931 DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);
1932
1933 pgpath_counter = 0;
1934 list_for_each_entry(p, &pg->pgpaths, list) {
1935 DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
1936 pg_counter, pgpath_counter, p->path.dev->name,
1937 pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
1938 pg_counter, pgpath_counter, p->fail_count);
1939 if (pg->ps.type->status) {
1940 DMEMIT(",path_selector_status_%d_%d=",
1941 pg_counter, pgpath_counter);
1942 sz += pg->ps.type->status(&pg->ps, &p->path,
1943 type, result + sz,
1944 maxlen - sz);
1945 }
1946 pgpath_counter++;
1947 }
1948 pg_counter++;
1949 }
1950 DMEMIT(";");
1951 break;
1952 }
1953
1954 spin_unlock_irqrestore(&m->lock, flags);
1955}
1956
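/*
 * Messages accepted below; the dmsetup invocations are illustrative and the
 * map/device names are made up:
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 *   dmsetup message mpatha 0 fail_if_no_path
 *   dmsetup message mpatha 0 disable_group 2     (also enable_group, switch_group)
 *   dmsetup message mpatha 0 fail_path /dev/sdc  (also reinstate_path)
 */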
1957static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
1958 char *result, unsigned int maxlen)
1959{
1960 int r = -EINVAL;
1961 dev_t dev;
1962 struct multipath *m = ti->private;
1963 action_fn action;
1964 unsigned long flags;
1965
1966 mutex_lock(&m->work_mutex);
1967
1968 if (dm_suspended(ti)) {
1969 r = -EBUSY;
1970 goto out;
1971 }
1972
1973 if (argc == 1) {
1974 if (!strcasecmp(argv[0], "queue_if_no_path")) {
1975 r = queue_if_no_path(m, true, false, __func__);
1976 spin_lock_irqsave(&m->lock, flags);
1977 enable_nopath_timeout(m);
1978 spin_unlock_irqrestore(&m->lock, flags);
1979 goto out;
1980 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1981 r = queue_if_no_path(m, false, false, __func__);
1982 disable_nopath_timeout(m);
1983 goto out;
1984 }
1985 }
1986
1987 if (argc != 2) {
1988 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1989 goto out;
1990 }
1991
1992 if (!strcasecmp(argv[0], "disable_group")) {
1993 r = bypass_pg_num(m, argv[1], true);
1994 goto out;
1995 } else if (!strcasecmp(argv[0], "enable_group")) {
1996 r = bypass_pg_num(m, argv[1], false);
1997 goto out;
1998 } else if (!strcasecmp(argv[0], "switch_group")) {
1999 r = switch_pg_num(m, argv[1]);
2000 goto out;
2001 } else if (!strcasecmp(argv[0], "reinstate_path"))
2002 action = reinstate_path;
2003 else if (!strcasecmp(argv[0], "fail_path"))
2004 action = fail_path;
2005 else {
2006 DMWARN("Unrecognised multipath message received: %s", argv[0]);
2007 goto out;
2008 }
2009
2010 r = dm_devt_from_path(argv[1], &dev);
2011 if (r) {
2012 DMWARN("message: error getting device %s",
2013 argv[1]);
2014 goto out;
2015 }
2016
2017 r = action_dev(m, dev, action);
2018
2019out:
2020 mutex_unlock(&m->work_mutex);
2021 return r;
2022}
2023
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev)
{
	struct multipath *m = ti->private;
	struct pgpath *pgpath;
	unsigned long flags;
	int r;

	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, 0);

	if (pgpath) {
		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
			*bdev = pgpath->path.dev->bdev;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		r = -EIO;
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		spin_unlock_irqrestore(&m->lock, flags);
	}

	if (r == -ENOTCONN) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			(void) __pg_init_all_paths(m);
		spin_unlock_irqrestore(&m->lock, flags);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != bdev_nr_sectors((*bdev)))
		return 1;
	return r;
}

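/*
 * Report every underlying path device to dm core; used when validating
 * the table and when stacking queue limits across all paths.
 */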
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

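/* A path counts as busy when its low-level driver reports itself busy. */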
static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we mapped the I/Os now, they would just wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy"; otherwise dm core
 * would not hand us the I/Os and we couldn't do what we want with them.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths)) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);
			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	/* Guess which priority_group will be used at next mapping time */
	pg = READ_ONCE(m->current_pg);
	next_pg = READ_ONCE(m->next_pg);
	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call choose_pgpath() here to avoid triggering
		 * pg_init just by busy checking.
		 * So we don't know whether the underlying devices we will be
		 * using at next mapping time are busy or not. Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to attempt the mapping to find out which pg that is.
		 */
		busy = false;
	}

	return busy;
}

/*
 *---------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------
 */
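/*
 * Both request-based (clone_and_map_rq/release_clone_rq/rq_end_io) and
 * bio-based (map/end_io) hooks are registered below; dm core uses the
 * set that matches the queue_mode chosen in the constructor.
 */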
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 14, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
		    DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r = -ENOMEM;

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * so that the existing kmultipathd workqueue is not overloaded;
	 * overloading it would also create a bottleneck in the
	 * storage hardware activation path.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		goto bad_alloc_kmpath_handlerd;
	}

	dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
	if (!dm_mpath_wq) {
		DMERR("failed to create workqueue dm_mpath_wq");
		goto bad_alloc_dm_mpath_wq;
	}

	r = dm_register_target(&multipath_target);
	if (r < 0)
		goto bad_register_target;

	return 0;

bad_register_target:
	destroy_workqueue(dm_mpath_wq);
bad_alloc_dm_mpath_wq:
	destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(dm_mpath_wq);
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

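/*
 * queue_if_no_path_timeout_secs bounds how long I/O may stay queued while
 * no paths are available; 0 (the default) disables the timeout.  At
 * runtime it is typically tuned through sysfs, e.g. (path assumed):
 *   echo 60 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 */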
module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "Timeout (in seconds) for queueing I/O when no paths are available");

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include <linux/device-mapper.h>
9
10#include "dm-rq.h"
11#include "dm-bio-record.h"
12#include "dm-path-selector.h"
13#include "dm-uevent.h"
14
15#include <linux/blkdev.h>
16#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/mempool.h>
19#include <linux/module.h>
20#include <linux/pagemap.h>
21#include <linux/slab.h>
22#include <linux/time.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <scsi/scsi_dh.h>
26#include <linux/atomic.h>
27#include <linux/blk-mq.h>
28
29#define DM_MSG_PREFIX "multipath"
30#define DM_PG_INIT_DELAY_MSECS 2000
31#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
32
33/* Path properties */
34struct pgpath {
35 struct list_head list;
36
37 struct priority_group *pg; /* Owning PG */
38 unsigned fail_count; /* Cumulative failure count */
39
40 struct dm_path path;
41 struct delayed_work activate_path;
42
43 bool is_active:1; /* Path status */
44};
45
46#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
47
48/*
49 * Paths are grouped into Priority Groups and numbered from 1 upwards.
50 * Each has a path selector which controls which path gets used.
51 */
52struct priority_group {
53 struct list_head list;
54
55 struct multipath *m; /* Owning multipath instance */
56 struct path_selector ps;
57
58 unsigned pg_num; /* Reference number */
59 unsigned nr_pgpaths; /* Number of paths in PG */
60 struct list_head pgpaths;
61
62 bool bypassed:1; /* Temporarily bypass this PG? */
63};
64
65/* Multipath context */
66struct multipath {
67 unsigned long flags; /* Multipath state flags */
68
69 spinlock_t lock;
70 enum dm_queue_mode queue_mode;
71
72 struct pgpath *current_pgpath;
73 struct priority_group *current_pg;
74 struct priority_group *next_pg; /* Switch to this PG if set */
75
76 atomic_t nr_valid_paths; /* Total number of usable paths */
77 unsigned nr_priority_groups;
78 struct list_head priority_groups;
79
80 const char *hw_handler_name;
81 char *hw_handler_params;
82 wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
83 unsigned pg_init_retries; /* Number of times to retry pg_init */
84 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
85 atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
86 atomic_t pg_init_count; /* Number of times pg_init called */
87
88 struct mutex work_mutex;
89 struct work_struct trigger_event;
90 struct dm_target *ti;
91
92 struct work_struct process_queued_bios;
93 struct bio_list queued_bios;
94};
95
96/*
97 * Context information attached to each io we process.
98 */
99struct dm_mpath_io {
100 struct pgpath *pgpath;
101 size_t nr_bytes;
102};
103
104typedef int (*action_fn) (struct pgpath *pgpath);
105
106static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
107static void trigger_event(struct work_struct *work);
108static void activate_or_offline_path(struct pgpath *pgpath);
109static void activate_path_work(struct work_struct *work);
110static void process_queued_bios(struct work_struct *work);
111
112/*-----------------------------------------------
113 * Multipath state flags.
114 *-----------------------------------------------*/
115
116#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
117#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
118#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
119#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
120#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
121#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
122#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
123
124/*-----------------------------------------------
125 * Allocation routines
126 *-----------------------------------------------*/
127
128static struct pgpath *alloc_pgpath(void)
129{
130 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
131
132 if (!pgpath)
133 return NULL;
134
135 pgpath->is_active = true;
136
137 return pgpath;
138}
139
140static void free_pgpath(struct pgpath *pgpath)
141{
142 kfree(pgpath);
143}
144
145static struct priority_group *alloc_priority_group(void)
146{
147 struct priority_group *pg;
148
149 pg = kzalloc(sizeof(*pg), GFP_KERNEL);
150
151 if (pg)
152 INIT_LIST_HEAD(&pg->pgpaths);
153
154 return pg;
155}
156
157static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
158{
159 struct pgpath *pgpath, *tmp;
160
161 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
162 list_del(&pgpath->list);
163 dm_put_device(ti, pgpath->path.dev);
164 free_pgpath(pgpath);
165 }
166}
167
168static void free_priority_group(struct priority_group *pg,
169 struct dm_target *ti)
170{
171 struct path_selector *ps = &pg->ps;
172
173 if (ps->type) {
174 ps->type->destroy(ps);
175 dm_put_path_selector(ps->type);
176 }
177
178 free_pgpaths(&pg->pgpaths, ti);
179 kfree(pg);
180}
181
182static struct multipath *alloc_multipath(struct dm_target *ti)
183{
184 struct multipath *m;
185
186 m = kzalloc(sizeof(*m), GFP_KERNEL);
187 if (m) {
188 INIT_LIST_HEAD(&m->priority_groups);
189 spin_lock_init(&m->lock);
190 atomic_set(&m->nr_valid_paths, 0);
191 INIT_WORK(&m->trigger_event, trigger_event);
192 mutex_init(&m->work_mutex);
193
194 m->queue_mode = DM_TYPE_NONE;
195
196 m->ti = ti;
197 ti->private = m;
198 }
199
200 return m;
201}
202
203static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
204{
205 if (m->queue_mode == DM_TYPE_NONE) {
206 /*
207 * Default to request-based.
208 */
209 if (dm_use_blk_mq(dm_table_get_md(ti->table)))
210 m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
211 else
212 m->queue_mode = DM_TYPE_REQUEST_BASED;
213
214 } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
215 INIT_WORK(&m->process_queued_bios, process_queued_bios);
216 /*
217 * bio-based doesn't support any direct scsi_dh management;
218 * it just discovers if a scsi_dh is attached.
219 */
220 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
221 }
222
223 dm_table_set_type(ti->table, m->queue_mode);
224
225 /*
226 * Init fields that are only used when a scsi_dh is attached
227 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
228 */
229 set_bit(MPATHF_QUEUE_IO, &m->flags);
230 atomic_set(&m->pg_init_in_progress, 0);
231 atomic_set(&m->pg_init_count, 0);
232 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
233 init_waitqueue_head(&m->pg_init_wait);
234
235 return 0;
236}
237
238static void free_multipath(struct multipath *m)
239{
240 struct priority_group *pg, *tmp;
241
242 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
243 list_del(&pg->list);
244 free_priority_group(pg, m->ti);
245 }
246
247 kfree(m->hw_handler_name);
248 kfree(m->hw_handler_params);
249 mutex_destroy(&m->work_mutex);
250 kfree(m);
251}
252
253static struct dm_mpath_io *get_mpio(union map_info *info)
254{
255 return info->ptr;
256}
257
258static size_t multipath_per_bio_data_size(void)
259{
260 return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
261}
262
263static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
264{
265 return dm_per_bio_data(bio, multipath_per_bio_data_size());
266}
267
268static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
269{
270 /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
271 void *bio_details = mpio + 1;
272 return bio_details;
273}
274
275static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
276{
277 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
278 struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
279
280 mpio->nr_bytes = bio->bi_iter.bi_size;
281 mpio->pgpath = NULL;
282 *mpio_p = mpio;
283
284 dm_bio_record(bio_details, bio);
285}
286
287/*-----------------------------------------------
288 * Path selection
289 *-----------------------------------------------*/
290
291static int __pg_init_all_paths(struct multipath *m)
292{
293 struct pgpath *pgpath;
294 unsigned long pg_init_delay = 0;
295
296 lockdep_assert_held(&m->lock);
297
298 if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
299 return 0;
300
301 atomic_inc(&m->pg_init_count);
302 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
303
304 /* Check here to reset pg_init_required */
305 if (!m->current_pg)
306 return 0;
307
308 if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
309 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
310 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
311 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
312 /* Skip failed paths */
313 if (!pgpath->is_active)
314 continue;
315 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
316 pg_init_delay))
317 atomic_inc(&m->pg_init_in_progress);
318 }
319 return atomic_read(&m->pg_init_in_progress);
320}
321
322static int pg_init_all_paths(struct multipath *m)
323{
324 int ret;
325 unsigned long flags;
326
327 spin_lock_irqsave(&m->lock, flags);
328 ret = __pg_init_all_paths(m);
329 spin_unlock_irqrestore(&m->lock, flags);
330
331 return ret;
332}
333
334static void __switch_pg(struct multipath *m, struct priority_group *pg)
335{
336 m->current_pg = pg;
337
338 /* Must we initialise the PG first, and queue I/O till it's ready? */
339 if (m->hw_handler_name) {
340 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
341 set_bit(MPATHF_QUEUE_IO, &m->flags);
342 } else {
343 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
344 clear_bit(MPATHF_QUEUE_IO, &m->flags);
345 }
346
347 atomic_set(&m->pg_init_count, 0);
348}
349
350static struct pgpath *choose_path_in_pg(struct multipath *m,
351 struct priority_group *pg,
352 size_t nr_bytes)
353{
354 unsigned long flags;
355 struct dm_path *path;
356 struct pgpath *pgpath;
357
358 path = pg->ps.type->select_path(&pg->ps, nr_bytes);
359 if (!path)
360 return ERR_PTR(-ENXIO);
361
362 pgpath = path_to_pgpath(path);
363
364 if (unlikely(READ_ONCE(m->current_pg) != pg)) {
365 /* Only update current_pgpath if pg changed */
366 spin_lock_irqsave(&m->lock, flags);
367 m->current_pgpath = pgpath;
368 __switch_pg(m, pg);
369 spin_unlock_irqrestore(&m->lock, flags);
370 }
371
372 return pgpath;
373}
374
375static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
376{
377 unsigned long flags;
378 struct priority_group *pg;
379 struct pgpath *pgpath;
380 unsigned bypassed = 1;
381
382 if (!atomic_read(&m->nr_valid_paths)) {
383 clear_bit(MPATHF_QUEUE_IO, &m->flags);
384 goto failed;
385 }
386
387 /* Were we instructed to switch PG? */
388 if (READ_ONCE(m->next_pg)) {
389 spin_lock_irqsave(&m->lock, flags);
390 pg = m->next_pg;
391 if (!pg) {
392 spin_unlock_irqrestore(&m->lock, flags);
393 goto check_current_pg;
394 }
395 m->next_pg = NULL;
396 spin_unlock_irqrestore(&m->lock, flags);
397 pgpath = choose_path_in_pg(m, pg, nr_bytes);
398 if (!IS_ERR_OR_NULL(pgpath))
399 return pgpath;
400 }
401
402 /* Don't change PG until it has no remaining paths */
403check_current_pg:
404 pg = READ_ONCE(m->current_pg);
405 if (pg) {
406 pgpath = choose_path_in_pg(m, pg, nr_bytes);
407 if (!IS_ERR_OR_NULL(pgpath))
408 return pgpath;
409 }
410
411 /*
412 * Loop through priority groups until we find a valid path.
413 * First time we skip PGs marked 'bypassed'.
414 * Second time we only try the ones we skipped, but set
415 * pg_init_delay_retry so we do not hammer controllers.
416 */
417 do {
418 list_for_each_entry(pg, &m->priority_groups, list) {
419 if (pg->bypassed == !!bypassed)
420 continue;
421 pgpath = choose_path_in_pg(m, pg, nr_bytes);
422 if (!IS_ERR_OR_NULL(pgpath)) {
423 if (!bypassed)
424 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
425 return pgpath;
426 }
427 }
428 } while (bypassed--);
429
430failed:
431 spin_lock_irqsave(&m->lock, flags);
432 m->current_pgpath = NULL;
433 m->current_pg = NULL;
434 spin_unlock_irqrestore(&m->lock, flags);
435
436 return NULL;
437}
438
439/*
440 * dm_report_EIO() is a macro instead of a function to make pr_debug()
441 * report the function name and line number of the function from which
442 * it has been invoked.
443 */
444#define dm_report_EIO(m) \
445do { \
446 struct mapped_device *md = dm_table_get_md((m)->ti->table); \
447 \
448 pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
449 dm_device_name(md), \
450 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
451 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
452 dm_noflush_suspending((m)->ti)); \
453} while (0)
454
455/*
456 * Check whether bios must be queued in the device-mapper core rather
457 * than here in the target.
458 *
459 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
460 * the same value then we are not between multipath_presuspend()
461 * and multipath_resume() calls and we have no need to check
462 * for the DMF_NOFLUSH_SUSPENDING flag.
463 */
464static bool __must_push_back(struct multipath *m, unsigned long flags)
465{
466 return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
467 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
468 dm_noflush_suspending(m->ti));
469}
470
471/*
472 * Following functions use READ_ONCE to get atomic access to
473 * all m->flags to avoid taking spinlock
474 */
475static bool must_push_back_rq(struct multipath *m)
476{
477 unsigned long flags = READ_ONCE(m->flags);
478 return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
479}
480
481static bool must_push_back_bio(struct multipath *m)
482{
483 unsigned long flags = READ_ONCE(m->flags);
484 return __must_push_back(m, flags);
485}
486
487/*
488 * Map cloned requests (request-based multipath)
489 */
490static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
491 union map_info *map_context,
492 struct request **__clone)
493{
494 struct multipath *m = ti->private;
495 size_t nr_bytes = blk_rq_bytes(rq);
496 struct pgpath *pgpath;
497 struct block_device *bdev;
498 struct dm_mpath_io *mpio = get_mpio(map_context);
499 struct request_queue *q;
500 struct request *clone;
501
502 /* Do we need to select a new pgpath? */
503 pgpath = READ_ONCE(m->current_pgpath);
504 if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
505 pgpath = choose_pgpath(m, nr_bytes);
506
507 if (!pgpath) {
508 if (must_push_back_rq(m))
509 return DM_MAPIO_DELAY_REQUEUE;
510 dm_report_EIO(m); /* Failed */
511 return DM_MAPIO_KILL;
512 } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
513 test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
514 pg_init_all_paths(m);
515 return DM_MAPIO_DELAY_REQUEUE;
516 }
517
518 mpio->pgpath = pgpath;
519 mpio->nr_bytes = nr_bytes;
520
521 bdev = pgpath->path.dev->bdev;
522 q = bdev_get_queue(bdev);
523 clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
524 if (IS_ERR(clone)) {
525 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
526 if (blk_queue_dying(q)) {
527 atomic_inc(&m->pg_init_in_progress);
528 activate_or_offline_path(pgpath);
529 return DM_MAPIO_DELAY_REQUEUE;
530 }
531
532 /*
533 * blk-mq's SCHED_RESTART can cover this requeue, so we
534 * needn't deal with it by DELAY_REQUEUE. More importantly,
535 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
536 * get the queue busy feedback (via BLK_STS_RESOURCE),
537 * otherwise I/O merging can suffer.
538 */
539 if (q->mq_ops)
540 return DM_MAPIO_REQUEUE;
541 else
542 return DM_MAPIO_DELAY_REQUEUE;
543 }
544 clone->bio = clone->biotail = NULL;
545 clone->rq_disk = bdev->bd_disk;
546 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
547 *__clone = clone;
548
549 if (pgpath->pg->ps.type->start_io)
550 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
551 &pgpath->path,
552 nr_bytes);
553 return DM_MAPIO_REMAPPED;
554}
555
556static void multipath_release_clone(struct request *clone)
557{
558 blk_put_request(clone);
559}
560
561/*
562 * Map cloned bios (bio-based multipath)
563 */
564
565static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
566{
567 struct pgpath *pgpath;
568 unsigned long flags;
569 bool queue_io;
570
571 /* Do we need to select a new pgpath? */
572 pgpath = READ_ONCE(m->current_pgpath);
573 queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
574 if (!pgpath || !queue_io)
575 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
576
577 if ((pgpath && queue_io) ||
578 (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
579 /* Queue for the daemon to resubmit */
580 spin_lock_irqsave(&m->lock, flags);
581 bio_list_add(&m->queued_bios, bio);
582 spin_unlock_irqrestore(&m->lock, flags);
583
584 /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
585 if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
586 pg_init_all_paths(m);
587 else if (!queue_io)
588 queue_work(kmultipathd, &m->process_queued_bios);
589
590 return ERR_PTR(-EAGAIN);
591 }
592
593 return pgpath;
594}
595
596static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
597{
598 struct pgpath *pgpath;
599 unsigned long flags;
600
601 /* Do we need to select a new pgpath? */
602 /*
603 * FIXME: currently only switching path if no path (due to failure, etc)
604 * - which negates the point of using a path selector
605 */
606 pgpath = READ_ONCE(m->current_pgpath);
607 if (!pgpath)
608 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
609
610 if (!pgpath) {
611 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
612 /* Queue for the daemon to resubmit */
613 spin_lock_irqsave(&m->lock, flags);
614 bio_list_add(&m->queued_bios, bio);
615 spin_unlock_irqrestore(&m->lock, flags);
616 queue_work(kmultipathd, &m->process_queued_bios);
617
618 return ERR_PTR(-EAGAIN);
619 }
620 return NULL;
621 }
622
623 return pgpath;
624}
625
626static int __multipath_map_bio(struct multipath *m, struct bio *bio,
627 struct dm_mpath_io *mpio)
628{
629 struct pgpath *pgpath;
630
631 if (!m->hw_handler_name)
632 pgpath = __map_bio_fast(m, bio);
633 else
634 pgpath = __map_bio(m, bio);
635
636 if (IS_ERR(pgpath))
637 return DM_MAPIO_SUBMITTED;
638
639 if (!pgpath) {
640 if (must_push_back_bio(m))
641 return DM_MAPIO_REQUEUE;
642 dm_report_EIO(m);
643 return DM_MAPIO_KILL;
644 }
645
646 mpio->pgpath = pgpath;
647
648 bio->bi_status = 0;
649 bio_set_dev(bio, pgpath->path.dev->bdev);
650 bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
651
652 if (pgpath->pg->ps.type->start_io)
653 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
654 &pgpath->path,
655 mpio->nr_bytes);
656 return DM_MAPIO_REMAPPED;
657}
658
659static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
660{
661 struct multipath *m = ti->private;
662 struct dm_mpath_io *mpio = NULL;
663
664 multipath_init_per_bio_data(bio, &mpio);
665 return __multipath_map_bio(m, bio, mpio);
666}
667
668static void process_queued_io_list(struct multipath *m)
669{
670 if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
671 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
672 else if (m->queue_mode == DM_TYPE_BIO_BASED)
673 queue_work(kmultipathd, &m->process_queued_bios);
674}
675
676static void process_queued_bios(struct work_struct *work)
677{
678 int r;
679 unsigned long flags;
680 struct bio *bio;
681 struct bio_list bios;
682 struct blk_plug plug;
683 struct multipath *m =
684 container_of(work, struct multipath, process_queued_bios);
685
686 bio_list_init(&bios);
687
688 spin_lock_irqsave(&m->lock, flags);
689
690 if (bio_list_empty(&m->queued_bios)) {
691 spin_unlock_irqrestore(&m->lock, flags);
692 return;
693 }
694
695 bio_list_merge(&bios, &m->queued_bios);
696 bio_list_init(&m->queued_bios);
697
698 spin_unlock_irqrestore(&m->lock, flags);
699
700 blk_start_plug(&plug);
701 while ((bio = bio_list_pop(&bios))) {
702 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
703 dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
704 r = __multipath_map_bio(m, bio, mpio);
705 switch (r) {
706 case DM_MAPIO_KILL:
707 bio->bi_status = BLK_STS_IOERR;
708 bio_endio(bio);
709 break;
710 case DM_MAPIO_REQUEUE:
711 bio->bi_status = BLK_STS_DM_REQUEUE;
712 bio_endio(bio);
713 break;
714 case DM_MAPIO_REMAPPED:
715 generic_make_request(bio);
716 break;
717 case DM_MAPIO_SUBMITTED:
718 break;
719 default:
720 WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
721 }
722 }
723 blk_finish_plug(&plug);
724}
725
726/*
727 * If we run out of usable paths, should we queue I/O or error it?
728 */
729static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
730 bool save_old_value)
731{
732 unsigned long flags;
733
734 spin_lock_irqsave(&m->lock, flags);
735 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
736 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
737 (!save_old_value && queue_if_no_path));
738 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
739 spin_unlock_irqrestore(&m->lock, flags);
740
741 if (!queue_if_no_path) {
742 dm_table_run_md_queue_async(m->ti->table);
743 process_queued_io_list(m);
744 }
745
746 return 0;
747}
748
749/*
750 * An event is triggered whenever a path is taken out of use.
751 * Includes path failure and PG bypass.
752 */
753static void trigger_event(struct work_struct *work)
754{
755 struct multipath *m =
756 container_of(work, struct multipath, trigger_event);
757
758 dm_table_event(m->ti->table);
759}
760
761/*-----------------------------------------------------------------
762 * Constructor/argument parsing:
763 * <#multipath feature args> [<arg>]*
764 * <#hw_handler args> [hw_handler [<arg>]*]
765 * <#priority groups>
766 * <initial priority group>
767 * [<selector> <#selector args> [<arg>]*
768 * <#paths> <#per-path selector args>
769 * [<path> [<arg>]* ]+ ]+
770 *---------------------------------------------------------------*/
771static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
772 struct dm_target *ti)
773{
774 int r;
775 struct path_selector_type *pst;
776 unsigned ps_argc;
777
778 static const struct dm_arg _args[] = {
779 {0, 1024, "invalid number of path selector args"},
780 };
781
782 pst = dm_get_path_selector(dm_shift_arg(as));
783 if (!pst) {
784 ti->error = "unknown path selector type";
785 return -EINVAL;
786 }
787
788 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
789 if (r) {
790 dm_put_path_selector(pst);
791 return -EINVAL;
792 }
793
794 r = pst->create(&pg->ps, ps_argc, as->argv);
795 if (r) {
796 dm_put_path_selector(pst);
797 ti->error = "path selector constructor failed";
798 return r;
799 }
800
801 pg->ps.type = pst;
802 dm_consume_args(as, ps_argc);
803
804 return 0;
805}
806
807static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
808 const char *attached_handler_name, char **error)
809{
810 struct request_queue *q = bdev_get_queue(bdev);
811 int r;
812
813 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
814retain:
815 if (attached_handler_name) {
816 /*
817 * Clear any hw_handler_params associated with a
818 * handler that isn't already attached.
819 */
820 if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
821 kfree(m->hw_handler_params);
822 m->hw_handler_params = NULL;
823 }
824
825 /*
826 * Reset hw_handler_name to match the attached handler
827 *
828 * NB. This modifies the table line to show the actual
829 * handler instead of the original table passed in.
830 */
831 kfree(m->hw_handler_name);
832 m->hw_handler_name = attached_handler_name;
833 }
834 }
835
836 if (m->hw_handler_name) {
837 r = scsi_dh_attach(q, m->hw_handler_name);
838 if (r == -EBUSY) {
839 char b[BDEVNAME_SIZE];
840
841 printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
842 bdevname(bdev, b));
843 goto retain;
844 }
845 if (r < 0) {
846 *error = "error attaching hardware handler";
847 return r;
848 }
849
850 if (m->hw_handler_params) {
851 r = scsi_dh_set_params(q, m->hw_handler_params);
852 if (r < 0) {
853 *error = "unable to set hardware handler parameters";
854 return r;
855 }
856 }
857 }
858
859 return 0;
860}
861
862static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
863 struct dm_target *ti)
864{
865 int r;
866 struct pgpath *p;
867 struct multipath *m = ti->private;
868 struct request_queue *q;
869 const char *attached_handler_name;
870
871 /* we need at least a path arg */
872 if (as->argc < 1) {
873 ti->error = "no device given";
874 return ERR_PTR(-EINVAL);
875 }
876
877 p = alloc_pgpath();
878 if (!p)
879 return ERR_PTR(-ENOMEM);
880
881 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
882 &p->path.dev);
883 if (r) {
884 ti->error = "error getting device";
885 goto bad;
886 }
887
888 q = bdev_get_queue(p->path.dev->bdev);
889 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
890 if (attached_handler_name || m->hw_handler_name) {
891 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
892 r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
893 if (r) {
894 dm_put_device(ti, p->path.dev);
895 goto bad;
896 }
897 }
898
899 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
900 if (r) {
901 dm_put_device(ti, p->path.dev);
902 goto bad;
903 }
904
905 return p;
906 bad:
907 free_pgpath(p);
908 return ERR_PTR(r);
909}
910
911static struct priority_group *parse_priority_group(struct dm_arg_set *as,
912 struct multipath *m)
913{
914 static const struct dm_arg _args[] = {
915 {1, 1024, "invalid number of paths"},
916 {0, 1024, "invalid number of selector args"}
917 };
918
919 int r;
920 unsigned i, nr_selector_args, nr_args;
921 struct priority_group *pg;
922 struct dm_target *ti = m->ti;
923
924 if (as->argc < 2) {
925 as->argc = 0;
926 ti->error = "not enough priority group arguments";
927 return ERR_PTR(-EINVAL);
928 }
929
930 pg = alloc_priority_group();
931 if (!pg) {
932 ti->error = "couldn't allocate priority group";
933 return ERR_PTR(-ENOMEM);
934 }
935 pg->m = m;
936
937 r = parse_path_selector(as, pg, ti);
938 if (r)
939 goto bad;
940
941 /*
942 * read the paths
943 */
944 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
945 if (r)
946 goto bad;
947
948 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
949 if (r)
950 goto bad;
951
952 nr_args = 1 + nr_selector_args;
953 for (i = 0; i < pg->nr_pgpaths; i++) {
954 struct pgpath *pgpath;
955 struct dm_arg_set path_args;
956
957 if (as->argc < nr_args) {
958 ti->error = "not enough path parameters";
959 r = -EINVAL;
960 goto bad;
961 }
962
963 path_args.argc = nr_args;
964 path_args.argv = as->argv;
965
966 pgpath = parse_path(&path_args, &pg->ps, ti);
967 if (IS_ERR(pgpath)) {
968 r = PTR_ERR(pgpath);
969 goto bad;
970 }
971
972 pgpath->pg = pg;
973 list_add_tail(&pgpath->list, &pg->pgpaths);
974 dm_consume_args(as, nr_args);
975 }
976
977 return pg;
978
979 bad:
980 free_priority_group(pg, ti);
981 return ERR_PTR(r);
982}
983
984static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
985{
986 unsigned hw_argc;
987 int ret;
988 struct dm_target *ti = m->ti;
989
990 static const struct dm_arg _args[] = {
991 {0, 1024, "invalid number of hardware handler args"},
992 };
993
994 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
995 return -EINVAL;
996
997 if (!hw_argc)
998 return 0;
999
1000 if (m->queue_mode == DM_TYPE_BIO_BASED) {
1001 dm_consume_args(as, hw_argc);
1002 DMERR("bio-based multipath doesn't allow hardware handler args");
1003 return 0;
1004 }
1005
1006 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1007 if (!m->hw_handler_name)
1008 return -EINVAL;
1009
1010 if (hw_argc > 1) {
1011 char *p;
1012 int i, j, len = 4;
1013
1014 for (i = 0; i <= hw_argc - 2; i++)
1015 len += strlen(as->argv[i]) + 1;
1016 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1017 if (!p) {
1018 ti->error = "memory allocation failed";
1019 ret = -ENOMEM;
1020 goto fail;
1021 }
1022 j = sprintf(p, "%d", hw_argc - 1);
1023 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
1024 j = sprintf(p, "%s", as->argv[i]);
1025 }
1026 dm_consume_args(as, hw_argc - 1);
1027
1028 return 0;
1029fail:
1030 kfree(m->hw_handler_name);
1031 m->hw_handler_name = NULL;
1032 return ret;
1033}
1034
1035static int parse_features(struct dm_arg_set *as, struct multipath *m)
1036{
1037 int r;
1038 unsigned argc;
1039 struct dm_target *ti = m->ti;
1040 const char *arg_name;
1041
1042 static const struct dm_arg _args[] = {
1043 {0, 8, "invalid number of feature args"},
1044 {1, 50, "pg_init_retries must be between 1 and 50"},
1045 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1046 };
1047
1048 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1049 if (r)
1050 return -EINVAL;
1051
1052 if (!argc)
1053 return 0;
1054
1055 do {
1056 arg_name = dm_shift_arg(as);
1057 argc--;
1058
1059 if (!strcasecmp(arg_name, "queue_if_no_path")) {
1060 r = queue_if_no_path(m, true, false);
1061 continue;
1062 }
1063
1064 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1065 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1066 continue;
1067 }
1068
1069 if (!strcasecmp(arg_name, "pg_init_retries") &&
1070 (argc >= 1)) {
1071 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1072 argc--;
1073 continue;
1074 }
1075
1076 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1077 (argc >= 1)) {
1078 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1079 argc--;
1080 continue;
1081 }
1082
1083 if (!strcasecmp(arg_name, "queue_mode") &&
1084 (argc >= 1)) {
1085 const char *queue_mode_name = dm_shift_arg(as);
1086
1087 if (!strcasecmp(queue_mode_name, "bio"))
1088 m->queue_mode = DM_TYPE_BIO_BASED;
1089 else if (!strcasecmp(queue_mode_name, "rq"))
1090 m->queue_mode = DM_TYPE_REQUEST_BASED;
1091 else if (!strcasecmp(queue_mode_name, "mq"))
1092 m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
1093 else {
1094 ti->error = "Unknown 'queue_mode' requested";
1095 r = -EINVAL;
1096 }
1097 argc--;
1098 continue;
1099 }
1100
1101 ti->error = "Unrecognised multipath feature request";
1102 r = -EINVAL;
1103 } while (argc && !r);
1104
1105 return r;
1106}
1107
1108static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1109{
1110 /* target arguments */
1111 static const struct dm_arg _args[] = {
1112 {0, 1024, "invalid number of priority groups"},
1113 {0, 1024, "invalid initial priority group number"},
1114 };
1115
1116 int r;
1117 struct multipath *m;
1118 struct dm_arg_set as;
1119 unsigned pg_count = 0;
1120 unsigned next_pg_num;
1121
1122 as.argc = argc;
1123 as.argv = argv;
1124
1125 m = alloc_multipath(ti);
1126 if (!m) {
1127 ti->error = "can't allocate multipath";
1128 return -EINVAL;
1129 }
1130
1131 r = parse_features(&as, m);
1132 if (r)
1133 goto bad;
1134
1135 r = alloc_multipath_stage2(ti, m);
1136 if (r)
1137 goto bad;
1138
1139 r = parse_hw_handler(&as, m);
1140 if (r)
1141 goto bad;
1142
1143 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1144 if (r)
1145 goto bad;
1146
1147 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1148 if (r)
1149 goto bad;
1150
1151 if ((!m->nr_priority_groups && next_pg_num) ||
1152 (m->nr_priority_groups && !next_pg_num)) {
1153 ti->error = "invalid initial priority group";
1154 r = -EINVAL;
1155 goto bad;
1156 }
1157
1158 /* parse the priority groups */
1159 while (as.argc) {
1160 struct priority_group *pg;
1161 unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1162
1163 pg = parse_priority_group(&as, m);
1164 if (IS_ERR(pg)) {
1165 r = PTR_ERR(pg);
1166 goto bad;
1167 }
1168
1169 nr_valid_paths += pg->nr_pgpaths;
1170 atomic_set(&m->nr_valid_paths, nr_valid_paths);
1171
1172 list_add_tail(&pg->list, &m->priority_groups);
1173 pg_count++;
1174 pg->pg_num = pg_count;
1175 if (!--next_pg_num)
1176 m->next_pg = pg;
1177 }
1178
1179 if (pg_count != m->nr_priority_groups) {
1180 ti->error = "priority group count mismatch";
1181 r = -EINVAL;
1182 goto bad;
1183 }
1184
1185 ti->num_flush_bios = 1;
1186 ti->num_discard_bios = 1;
1187 ti->num_write_same_bios = 1;
1188 ti->num_write_zeroes_bios = 1;
1189 if (m->queue_mode == DM_TYPE_BIO_BASED)
1190 ti->per_io_data_size = multipath_per_bio_data_size();
1191 else
1192 ti->per_io_data_size = sizeof(struct dm_mpath_io);
1193
1194 return 0;
1195
1196 bad:
1197 free_multipath(m);
1198 return r;
1199}
1200
1201static void multipath_wait_for_pg_init_completion(struct multipath *m)
1202{
1203 DEFINE_WAIT(wait);
1204
1205 while (1) {
1206 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1207
1208 if (!atomic_read(&m->pg_init_in_progress))
1209 break;
1210
1211 io_schedule();
1212 }
1213 finish_wait(&m->pg_init_wait, &wait);
1214}
1215
1216static void flush_multipath_work(struct multipath *m)
1217{
1218 if (m->hw_handler_name) {
1219 set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1220 smp_mb__after_atomic();
1221
1222 flush_workqueue(kmpath_handlerd);
1223 multipath_wait_for_pg_init_completion(m);
1224
1225 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1226 smp_mb__after_atomic();
1227 }
1228
1229 flush_workqueue(kmultipathd);
1230 flush_work(&m->trigger_event);
1231}
1232
1233static void multipath_dtr(struct dm_target *ti)
1234{
1235 struct multipath *m = ti->private;
1236
1237 flush_multipath_work(m);
1238 free_multipath(m);
1239}
1240
1241/*
1242 * Take a path out of use.
1243 */
1244static int fail_path(struct pgpath *pgpath)
1245{
1246 unsigned long flags;
1247 struct multipath *m = pgpath->pg->m;
1248
1249 spin_lock_irqsave(&m->lock, flags);
1250
1251 if (!pgpath->is_active)
1252 goto out;
1253
1254 DMWARN("Failing path %s.", pgpath->path.dev->name);
1255
1256 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1257 pgpath->is_active = false;
1258 pgpath->fail_count++;
1259
1260 atomic_dec(&m->nr_valid_paths);
1261
1262 if (pgpath == m->current_pgpath)
1263 m->current_pgpath = NULL;
1264
1265 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1266 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1267
1268 schedule_work(&m->trigger_event);
1269
1270out:
1271 spin_unlock_irqrestore(&m->lock, flags);
1272
1273 return 0;
1274}
1275
1276/*
1277 * Reinstate a previously-failed path
1278 */
1279static int reinstate_path(struct pgpath *pgpath)
1280{
1281 int r = 0, run_queue = 0;
1282 unsigned long flags;
1283 struct multipath *m = pgpath->pg->m;
1284 unsigned nr_valid_paths;
1285
1286 spin_lock_irqsave(&m->lock, flags);
1287
1288 if (pgpath->is_active)
1289 goto out;
1290
1291 DMWARN("Reinstating path %s.", pgpath->path.dev->name);
1292
1293 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1294 if (r)
1295 goto out;
1296
1297 pgpath->is_active = true;
1298
1299 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1300 if (nr_valid_paths == 1) {
1301 m->current_pgpath = NULL;
1302 run_queue = 1;
1303 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1304 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1305 atomic_inc(&m->pg_init_in_progress);
1306 }
1307
1308 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1309 pgpath->path.dev->name, nr_valid_paths);
1310
1311 schedule_work(&m->trigger_event);
1312
1313out:
1314 spin_unlock_irqrestore(&m->lock, flags);
1315 if (run_queue) {
1316 dm_table_run_md_queue_async(m->ti->table);
1317 process_queued_io_list(m);
1318 }
1319
1320 return r;
1321}
1322
1323/*
1324 * Fail or reinstate all paths that match the provided struct dm_dev.
1325 */
1326static int action_dev(struct multipath *m, struct dm_dev *dev,
1327 action_fn action)
1328{
1329 int r = -EINVAL;
1330 struct pgpath *pgpath;
1331 struct priority_group *pg;
1332
1333 list_for_each_entry(pg, &m->priority_groups, list) {
1334 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1335 if (pgpath->path.dev == dev)
1336 r = action(pgpath);
1337 }
1338 }
1339
1340 return r;
1341}
1342
1343/*
1344 * Temporarily try to avoid having to use the specified PG
1345 */
1346static void bypass_pg(struct multipath *m, struct priority_group *pg,
1347 bool bypassed)
1348{
1349 unsigned long flags;
1350
1351 spin_lock_irqsave(&m->lock, flags);
1352
1353 pg->bypassed = bypassed;
1354 m->current_pgpath = NULL;
1355 m->current_pg = NULL;
1356
1357 spin_unlock_irqrestore(&m->lock, flags);
1358
1359 schedule_work(&m->trigger_event);
1360}
1361
1362/*
1363 * Switch to using the specified PG from the next I/O that gets mapped
1364 */
1365static int switch_pg_num(struct multipath *m, const char *pgstr)
1366{
1367 struct priority_group *pg;
1368 unsigned pgnum;
1369 unsigned long flags;
1370 char dummy;
1371
1372 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1373 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1374 DMWARN("invalid PG number supplied to switch_pg_num");
1375 return -EINVAL;
1376 }
1377
1378 spin_lock_irqsave(&m->lock, flags);
1379 list_for_each_entry(pg, &m->priority_groups, list) {
1380 pg->bypassed = false;
1381 if (--pgnum)
1382 continue;
1383
1384 m->current_pgpath = NULL;
1385 m->current_pg = NULL;
1386 m->next_pg = pg;
1387 }
1388 spin_unlock_irqrestore(&m->lock, flags);
1389
1390 schedule_work(&m->trigger_event);
1391 return 0;
1392}
1393
1394/*
1395 * Set/clear bypassed status of a PG.
1396 * PGs are numbered upwards from 1 in the order they were declared.
1397 */
1398static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1399{
1400 struct priority_group *pg;
1401 unsigned pgnum;
1402 char dummy;
1403
1404 if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1405 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1406 DMWARN("invalid PG number supplied to bypass_pg");
1407 return -EINVAL;
1408 }
1409
1410 list_for_each_entry(pg, &m->priority_groups, list) {
1411 if (!--pgnum)
1412 break;
1413 }
1414
1415 bypass_pg(m, pg, bypassed);
1416 return 0;
1417}
1418
1419/*
1420 * Should we retry pg_init immediately?
1421 */
1422static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1423{
1424 unsigned long flags;
1425 bool limit_reached = false;
1426
1427 spin_lock_irqsave(&m->lock, flags);
1428
1429 if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1430 !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1431 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1432 else
1433 limit_reached = true;
1434
1435 spin_unlock_irqrestore(&m->lock, flags);
1436
1437 return limit_reached;
1438}
1439
1440static void pg_init_done(void *data, int errors)
1441{
1442 struct pgpath *pgpath = data;
1443 struct priority_group *pg = pgpath->pg;
1444 struct multipath *m = pg->m;
1445 unsigned long flags;
1446 bool delay_retry = false;
1447
1448 /* device or driver problems */
1449 switch (errors) {
1450 case SCSI_DH_OK:
1451 break;
1452 case SCSI_DH_NOSYS:
1453 if (!m->hw_handler_name) {
1454 errors = 0;
1455 break;
1456 }
1457 DMERR("Could not failover the device: Handler scsi_dh_%s "
1458 "Error %d.", m->hw_handler_name, errors);
1459 /*
1460 * Fail path for now, so we do not ping pong
1461 */
1462 fail_path(pgpath);
1463 break;
1464 case SCSI_DH_DEV_TEMP_BUSY:
1465 /*
1466 * Probably doing something like FW upgrade on the
1467 * controller so try the other pg.
1468 */
1469 bypass_pg(m, pg, true);
1470 break;
1471 case SCSI_DH_RETRY:
1472 /* Wait before retrying. */
1473 delay_retry = 1;
1474 /* fall through */
1475 case SCSI_DH_IMM_RETRY:
1476 case SCSI_DH_RES_TEMP_UNAVAIL:
1477 if (pg_init_limit_reached(m, pgpath))
1478 fail_path(pgpath);
1479 errors = 0;
1480 break;
1481 case SCSI_DH_DEV_OFFLINED:
1482 default:
1483 /*
1484 * We probably do not want to fail the path for a device
1485 * error, but this is what the old dm did. In future
1486 * patches we can do more advanced handling.
1487 */
1488 fail_path(pgpath);
1489 }
1490
1491 spin_lock_irqsave(&m->lock, flags);
1492 if (errors) {
1493 if (pgpath == m->current_pgpath) {
1494 DMERR("Could not failover device. Error %d.", errors);
1495 m->current_pgpath = NULL;
1496 m->current_pg = NULL;
1497 }
1498 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1499 pg->bypassed = false;
1500
1501 if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1502 /* Activations of other paths are still on going */
1503 goto out;
1504
1505 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1506 if (delay_retry)
1507 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1508 else
1509 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1510
1511 if (__pg_init_all_paths(m))
1512 goto out;
1513 }
1514 clear_bit(MPATHF_QUEUE_IO, &m->flags);
1515
1516 process_queued_io_list(m);
1517
1518 /*
1519 * Wake up any thread waiting to suspend.
1520 */
1521 wake_up(&m->pg_init_wait);
1522
1523out:
1524 spin_unlock_irqrestore(&m->lock, flags);
1525}
1526
1527static void activate_or_offline_path(struct pgpath *pgpath)
1528{
1529 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1530
1531 if (pgpath->is_active && !blk_queue_dying(q))
1532 scsi_dh_activate(q, pg_init_done, pgpath);
1533 else
1534 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1535}
1536
1537static void activate_path_work(struct work_struct *work)
1538{
1539 struct pgpath *pgpath =
1540 container_of(work, struct pgpath, activate_path.work);
1541
1542 activate_or_offline_path(pgpath);
1543}
1544
1545static int multipath_end_io(struct dm_target *ti, struct request *clone,
1546 blk_status_t error, union map_info *map_context)
1547{
1548 struct dm_mpath_io *mpio = get_mpio(map_context);
1549 struct pgpath *pgpath = mpio->pgpath;
1550 int r = DM_ENDIO_DONE;
1551
1552 /*
1553 * We don't queue any clone request inside the multipath target
1554 * during end I/O handling, since those clone requests don't have
1555 * bio clones. If we queue them inside the multipath target,
1556 * we need to make bio clones, that requires memory allocation.
1557 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1558 * don't have bio clones.)
1559 * Instead of queueing the clone request here, we queue the original
1560 * request into dm core, which will remake a clone request and
1561 * clone bios for it and resubmit it later.
1562 */
1563 if (error && blk_path_error(error)) {
1564 struct multipath *m = ti->private;
1565
1566 if (error == BLK_STS_RESOURCE)
1567 r = DM_ENDIO_DELAY_REQUEUE;
1568 else
1569 r = DM_ENDIO_REQUEUE;
1570
1571 if (pgpath)
1572 fail_path(pgpath);
1573
1574 if (atomic_read(&m->nr_valid_paths) == 0 &&
1575 !must_push_back_rq(m)) {
1576 if (error == BLK_STS_IOERR)
1577 dm_report_EIO(m);
1578 /* complete with the original error */
1579 r = DM_ENDIO_DONE;
1580 }
1581 }
1582
1583 if (pgpath) {
1584 struct path_selector *ps = &pgpath->pg->ps;
1585
1586 if (ps->type->end_io)
1587 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1588 }
1589
1590 return r;
1591}
1592
1593static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1594 blk_status_t *error)
1595{
1596 struct multipath *m = ti->private;
1597 struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1598 struct pgpath *pgpath = mpio->pgpath;
1599 unsigned long flags;
1600 int r = DM_ENDIO_DONE;
1601
1602 if (!*error || !blk_path_error(*error))
1603 goto done;
1604
1605 if (pgpath)
1606 fail_path(pgpath);
1607
1608 if (atomic_read(&m->nr_valid_paths) == 0 &&
1609 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1610 if (must_push_back_bio(m)) {
1611 r = DM_ENDIO_REQUEUE;
1612 } else {
1613 dm_report_EIO(m);
1614 *error = BLK_STS_IOERR;
1615 }
1616 goto done;
1617 }
1618
1619 spin_lock_irqsave(&m->lock, flags);
1620 bio_list_add(&m->queued_bios, clone);
1621 spin_unlock_irqrestore(&m->lock, flags);
1622 if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1623 queue_work(kmultipathd, &m->process_queued_bios);
1624
1625 r = DM_ENDIO_INCOMPLETE;
1626done:
1627 if (pgpath) {
1628 struct path_selector *ps = &pgpath->pg->ps;
1629
1630 if (ps->type->end_io)
1631 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1632 }
1633
1634 return r;
1635}
1636
1637/*
1638 * Suspend can't complete until all the I/O is processed so if
1639 * the last path fails we must error any remaining I/O.
1640 * Note that if the freeze_bdev fails while suspending, the
1641 * queue_if_no_path state is lost - userspace should reset it.
1642 */
1643static void multipath_presuspend(struct dm_target *ti)
1644{
1645 struct multipath *m = ti->private;
1646
1647 queue_if_no_path(m, false, true);
1648}
1649
1650static void multipath_postsuspend(struct dm_target *ti)
1651{
1652 struct multipath *m = ti->private;
1653
1654 mutex_lock(&m->work_mutex);
1655 flush_multipath_work(m);
1656 mutex_unlock(&m->work_mutex);
1657}
1658
1659/*
1660 * Restore the queue_if_no_path setting.
1661 */
1662static void multipath_resume(struct dm_target *ti)
1663{
1664 struct multipath *m = ti->private;
1665 unsigned long flags;
1666
1667 spin_lock_irqsave(&m->lock, flags);
1668 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
1669 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1670 spin_unlock_irqrestore(&m->lock, flags);
1671}
1672
1673/*
1674 * Info output has the following format:
1675 * num_multipath_feature_args [multipath_feature_args]*
1676 * num_handler_status_args [handler_status_args]*
1677 * num_groups init_group_number
1678 * [A|D|E num_ps_status_args [ps_status_args]*
1679 * num_paths num_selector_args
1680 * [path_dev A|F fail_count [selector_args]* ]+ ]+
1681 *
1682 * Table output has the following format (identical to the constructor string):
1683 * num_feature_args [features_args]*
1684 * num_handler_args hw_handler [hw_handler_args]*
1685 * num_groups init_group_number
1686 * [priority selector-name num_ps_args [ps_args]*
1687 * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1688 */
1689static void multipath_status(struct dm_target *ti, status_type_t type,
1690 unsigned status_flags, char *result, unsigned maxlen)
1691{
1692 int sz = 0;
1693 unsigned long flags;
1694 struct multipath *m = ti->private;
1695 struct priority_group *pg;
1696 struct pgpath *p;
1697 unsigned pg_num;
1698 char state;
1699
1700 spin_lock_irqsave(&m->lock, flags);
1701
1702 /* Features */
1703 if (type == STATUSTYPE_INFO)
1704 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1705 atomic_read(&m->pg_init_count));
1706 else {
1707 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1708 (m->pg_init_retries > 0) * 2 +
1709 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1710 test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1711 (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1712
1713 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1714 DMEMIT("queue_if_no_path ");
1715 if (m->pg_init_retries)
1716 DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1717 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1718 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1719 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1720 DMEMIT("retain_attached_hw_handler ");
1721 if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1722 switch(m->queue_mode) {
1723 case DM_TYPE_BIO_BASED:
1724 DMEMIT("queue_mode bio ");
1725 break;
1726 case DM_TYPE_MQ_REQUEST_BASED:
1727 DMEMIT("queue_mode mq ");
1728 break;
1729 default:
1730 WARN_ON_ONCE(true);
1731 break;
1732 }
1733 }
1734 }
1735
1736 if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1737 DMEMIT("0 ");
1738 else
1739 DMEMIT("1 %s ", m->hw_handler_name);
1740
1741 DMEMIT("%u ", m->nr_priority_groups);
1742
1743 if (m->next_pg)
1744 pg_num = m->next_pg->pg_num;
1745 else if (m->current_pg)
1746 pg_num = m->current_pg->pg_num;
1747 else
1748 pg_num = (m->nr_priority_groups ? 1 : 0);
1749
1750 DMEMIT("%u ", pg_num);
1751
1752 switch (type) {
1753 case STATUSTYPE_INFO:
1754 list_for_each_entry(pg, &m->priority_groups, list) {
1755 if (pg->bypassed)
1756 state = 'D'; /* Disabled */
1757 else if (pg == m->current_pg)
1758 state = 'A'; /* Currently Active */
1759 else
1760 state = 'E'; /* Enabled */
1761
1762 DMEMIT("%c ", state);
1763
1764 if (pg->ps.type->status)
1765 sz += pg->ps.type->status(&pg->ps, NULL, type,
1766 result + sz,
1767 maxlen - sz);
1768 else
1769 DMEMIT("0 ");
1770
1771 DMEMIT("%u %u ", pg->nr_pgpaths,
1772 pg->ps.type->info_args);
1773
1774 list_for_each_entry(p, &pg->pgpaths, list) {
1775 DMEMIT("%s %s %u ", p->path.dev->name,
1776 p->is_active ? "A" : "F",
1777 p->fail_count);
1778 if (pg->ps.type->status)
1779 sz += pg->ps.type->status(&pg->ps,
1780 &p->path, type, result + sz,
1781 maxlen - sz);
1782 }
1783 }
1784 break;
1785
1786 case STATUSTYPE_TABLE:
1787 list_for_each_entry(pg, &m->priority_groups, list) {
1788 DMEMIT("%s ", pg->ps.type->name);
1789
1790 if (pg->ps.type->status)
1791 sz += pg->ps.type->status(&pg->ps, NULL, type,
1792 result + sz,
1793 maxlen - sz);
1794 else
1795 DMEMIT("0 ");
1796
1797 DMEMIT("%u %u ", pg->nr_pgpaths,
1798 pg->ps.type->table_args);
1799
1800 list_for_each_entry(p, &pg->pgpaths, list) {
1801 DMEMIT("%s ", p->path.dev->name);
1802 if (pg->ps.type->status)
1803 sz += pg->ps.type->status(&pg->ps,
1804 &p->path, type, result + sz,
1805 maxlen - sz);
1806 }
1807 }
1808 break;
1809 }
1810
1811 spin_unlock_irqrestore(&m->lock, flags);
1812}
1813
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
			     char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

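/*
 * Pick the block device that ioctls should be forwarded to: the device
 * of the current path when it is usable, -ENOTCONN while pg_init is
 * outstanding or I/O is being queued waiting for a path, and -EIO when
 * no path is available and queue_if_no_path is disabled.  On -ENOTCONN
 * we also kick path selection and initialisation so that a later retry
 * can succeed.
 */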
static int multipath_prepare_ioctl(struct dm_target *ti,
				   struct block_device **bdev)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = READ_ONCE(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!READ_ONCE(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

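/*
 * Report every underlying path device to dm core via @fn; dm core uses
 * this, for example, when stacking queue limits across the table.
 */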
static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

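/* Ask the path's low-level driver whether its request queue is busy. */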
static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying
 * devices are busy (so even if we mapped the I/Os now, they would
 * just wait on the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise
 * dm core won't pass us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

	/* Guess which priority_group will be used at next mapping time */
	pg = READ_ONCE(m->current_pg);
	next_pg = READ_ONCE(m->next_pg);
	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at the next mapping
		 * time.  We don't call choose_pgpath() here to avoid
		 * triggering pg_init just because of a busy check.
		 * So we can't tell whether the underlying devices we would
		 * use at the next mapping time are busy or not.  Just try
		 * mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it.  So we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
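/*
 * Both the request-based hooks (clone_and_map_rq, release_clone_rq,
 * rq_end_io) and the bio-based hooks (map, end_io) are registered;
 * which set dm core actually calls depends on the queue_mode chosen
 * for the table (reported by the "queue_mode" feature in the status
 * output above).
 */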
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 13, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
		    DM_TARGET_PASSES_INTEGRITY,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

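/*
 * Module init: create the workqueues before registering the target and
 * unwind in reverse order on failure (the goto labels below mirror the
 * allocation order).
 */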
static int __init dm_multipath_init(void)
{
	int r;

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used for the device handlers so that the
	 * existing workqueue is not overloaded.  Overloading the old
	 * workqueue would also create a bottleneck in the storage hardware
	 * device activation path.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	return 0;

bad_register_target:
	destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	return r;
}

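/*
 * Module unload: destroying the workqueues drains any outstanding work
 * before the target is unregistered.
 */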
static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");