   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9
  10#include "dm-rq.h"
  11#include "dm-bio-record.h"
  12#include "dm-path-selector.h"
  13#include "dm-uevent.h"
  14
  15#include <linux/blkdev.h>
  16#include <linux/ctype.h>
  17#include <linux/init.h>
  18#include <linux/mempool.h>
  19#include <linux/module.h>
  20#include <linux/pagemap.h>
  21#include <linux/slab.h>
  22#include <linux/time.h>
  23#include <linux/timer.h>
  24#include <linux/workqueue.h>
  25#include <linux/delay.h>
  26#include <scsi/scsi_dh.h>
  27#include <linux/atomic.h>
  28#include <linux/blk-mq.h>
   29
  30#define DM_MSG_PREFIX "multipath"
  31#define DM_PG_INIT_DELAY_MSECS 2000
  32#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
  33#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
  34
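/*
 * Seconds to keep honoring queue_if_no_path once every path has failed,
 * after which the nopath_timer fires and fails any queued I/O; 0 disables
 * the timeout.  (It is assumed this value is settable from user space,
 * e.g. as a module parameter; that declaration is outside this excerpt.)
 */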
  35static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
  36
  37/* Path properties */
  38struct pgpath {
  39	struct list_head list;
  40
  41	struct priority_group *pg;	/* Owning PG */
  42	unsigned fail_count;		/* Cumulative failure count */
  43
  44	struct dm_path path;
  45	struct delayed_work activate_path;
  46
  47	bool is_active:1;		/* Path status */
  48};
  49
  50#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  51
  52/*
  53 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  54 * Each has a path selector which controls which path gets used.
  55 */
  56struct priority_group {
  57	struct list_head list;
  58
  59	struct multipath *m;		/* Owning multipath instance */
  60	struct path_selector ps;
  61
  62	unsigned pg_num;		/* Reference number */
  63	unsigned nr_pgpaths;		/* Number of paths in PG */
  64	struct list_head pgpaths;
  65
  66	bool bypassed:1;		/* Temporarily bypass this PG? */
  67};
  68
  69/* Multipath context */
  70struct multipath {
  71	unsigned long flags;		/* Multipath state flags */
  72
  73	spinlock_t lock;
  74	enum dm_queue_mode queue_mode;
  75
  76	struct pgpath *current_pgpath;
  77	struct priority_group *current_pg;
  78	struct priority_group *next_pg;	/* Switch to this PG if set */
  79
  80	atomic_t nr_valid_paths;	/* Total number of usable paths */
  81	unsigned nr_priority_groups;
  82	struct list_head priority_groups;
  83
  84	const char *hw_handler_name;
  85	char *hw_handler_params;
  86	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  87	unsigned pg_init_retries;	/* Number of times to retry pg_init */
  88	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  89	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
  90	atomic_t pg_init_count;		/* Number of times pg_init called */
  91
  92	struct mutex work_mutex;
  93	struct work_struct trigger_event;
  94	struct dm_target *ti;
  95
  96	struct work_struct process_queued_bios;
  97	struct bio_list queued_bios;
  98
  99	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
 100};
 101
 102/*
 103 * Context information attached to each io we process.
 104 */
 105struct dm_mpath_io {
 106	struct pgpath *pgpath;
 107	size_t nr_bytes;
 108	u64 start_time_ns;
 109};
 110
 111typedef int (*action_fn) (struct pgpath *pgpath);
 112
 113static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 114static void trigger_event(struct work_struct *work);
 115static void activate_or_offline_path(struct pgpath *pgpath);
 116static void activate_path_work(struct work_struct *work);
 117static void process_queued_bios(struct work_struct *work);
 118static void queue_if_no_path_timeout_work(struct timer_list *t);
 119
  120/*-----------------------------------------------
  121 * Multipath state flags.
  122 *-----------------------------------------------*/
 123
 124#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
 125#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
 126#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
 127#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
 128#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
 129#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
 130#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
 131
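/*
 * Double-checked flag test: read the flag locklessly first, and only
 * confirm it under m->lock when it appears set, so the common "flag
 * clear" case avoids taking the spinlock entirely.
 */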
 132static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
 133{
 134	bool r = test_bit(MPATHF_bit, &m->flags);
 135
 136	if (r) {
 137		unsigned long flags;
 
 138		spin_lock_irqsave(&m->lock, flags);
 139		r = test_bit(MPATHF_bit, &m->flags);
 140		spin_unlock_irqrestore(&m->lock, flags);
 141	}
 142
 143	return r;
 144}
 145
  146/*-----------------------------------------------
  147 * Allocation routines
  148 *-----------------------------------------------*/
 149
 150static struct pgpath *alloc_pgpath(void)
 151{
 152	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 153
 154	if (!pgpath)
 155		return NULL;
 156
 157	pgpath->is_active = true;
 158
 159	return pgpath;
 160}
 161
 162static void free_pgpath(struct pgpath *pgpath)
 163{
 164	kfree(pgpath);
 165}
 166
 167static struct priority_group *alloc_priority_group(void)
 168{
 169	struct priority_group *pg;
 170
 171	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 172
 173	if (pg)
 174		INIT_LIST_HEAD(&pg->pgpaths);
 175
 176	return pg;
 177}
 178
 179static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 180{
 181	struct pgpath *pgpath, *tmp;
 182
 183	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 184		list_del(&pgpath->list);
 185		dm_put_device(ti, pgpath->path.dev);
 186		free_pgpath(pgpath);
 187	}
 188}
 189
 190static void free_priority_group(struct priority_group *pg,
 191				struct dm_target *ti)
 192{
 193	struct path_selector *ps = &pg->ps;
 194
 195	if (ps->type) {
 196		ps->type->destroy(ps);
 197		dm_put_path_selector(ps->type);
 198	}
 199
 200	free_pgpaths(&pg->pgpaths, ti);
 201	kfree(pg);
 202}
 203
 204static struct multipath *alloc_multipath(struct dm_target *ti)
 205{
 206	struct multipath *m;
 207
 208	m = kzalloc(sizeof(*m), GFP_KERNEL);
 209	if (m) {
 210		INIT_LIST_HEAD(&m->priority_groups);
 211		spin_lock_init(&m->lock);
 212		atomic_set(&m->nr_valid_paths, 0);
 213		INIT_WORK(&m->trigger_event, trigger_event);
 214		mutex_init(&m->work_mutex);
 215
 216		m->queue_mode = DM_TYPE_NONE;
 217
 218		m->ti = ti;
 219		ti->private = m;
 220
 221		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
 222	}
 223
 224	return m;
 225}
 226
 227static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 228{
 229	if (m->queue_mode == DM_TYPE_NONE) {
 230		m->queue_mode = DM_TYPE_REQUEST_BASED;
 231	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 232		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 233		/*
 234		 * bio-based doesn't support any direct scsi_dh management;
 235		 * it just discovers if a scsi_dh is attached.
 236		 */
 237		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 238	}
 239
 240	dm_table_set_type(ti->table, m->queue_mode);
 241
 242	/*
 243	 * Init fields that are only used when a scsi_dh is attached
 244	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
 245	 */
 246	set_bit(MPATHF_QUEUE_IO, &m->flags);
 247	atomic_set(&m->pg_init_in_progress, 0);
 248	atomic_set(&m->pg_init_count, 0);
 249	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 250	init_waitqueue_head(&m->pg_init_wait);
 251
 252	return 0;
 253}
 254
 255static void free_multipath(struct multipath *m)
 256{
 257	struct priority_group *pg, *tmp;
 258
 259	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 260		list_del(&pg->list);
 261		free_priority_group(pg, m->ti);
 262	}
 263
 264	kfree(m->hw_handler_name);
 265	kfree(m->hw_handler_params);
 266	mutex_destroy(&m->work_mutex);
 267	kfree(m);
 268}
 269
 270static struct dm_mpath_io *get_mpio(union map_info *info)
 271{
 272	return info->ptr;
 273}
 274
 275static size_t multipath_per_bio_data_size(void)
 276{
 277	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
 278}
 279
 280static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
 281{
 282	return dm_per_bio_data(bio, multipath_per_bio_data_size());
 283}
 284
 285static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
 286{
 287	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
 288	void *bio_details = mpio + 1;
 289	return bio_details;
 290}
 291
 292static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
 293{
 294	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 295	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
 296
 297	mpio->nr_bytes = bio->bi_iter.bi_size;
 298	mpio->pgpath = NULL;
 299	mpio->start_time_ns = 0;
 300	*mpio_p = mpio;
 301
 302	dm_bio_record(bio_details, bio);
 303}
 304
  305/*-----------------------------------------------
  306 * Path selection
  307 *-----------------------------------------------*/
 308
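/*
 * Start hardware-handler initialisation (path activation) for every
 * active path in the current PG, honoring any configured pg_init retry
 * delay.  Returns the number of activations now in flight.  Caller must
 * hold m->lock.
 */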
 309static int __pg_init_all_paths(struct multipath *m)
 310{
 311	struct pgpath *pgpath;
 312	unsigned long pg_init_delay = 0;
 313
 314	lockdep_assert_held(&m->lock);
 315
 316	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 317		return 0;
 318
 319	atomic_inc(&m->pg_init_count);
 320	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 321
 322	/* Check here to reset pg_init_required */
 323	if (!m->current_pg)
 324		return 0;
 325
 326	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 327		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 328						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 329	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 330		/* Skip failed paths */
 331		if (!pgpath->is_active)
 332			continue;
 333		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 334				       pg_init_delay))
 335			atomic_inc(&m->pg_init_in_progress);
 336	}
 337	return atomic_read(&m->pg_init_in_progress);
 338}
 339
 340static int pg_init_all_paths(struct multipath *m)
 341{
 342	int ret;
 343	unsigned long flags;
 344
 345	spin_lock_irqsave(&m->lock, flags);
 346	ret = __pg_init_all_paths(m);
 347	spin_unlock_irqrestore(&m->lock, flags);
 348
 349	return ret;
 350}
 351
 352static void __switch_pg(struct multipath *m, struct priority_group *pg)
 353{
 354	lockdep_assert_held(&m->lock);
 355
 356	m->current_pg = pg;
 357
 358	/* Must we initialise the PG first, and queue I/O till it's ready? */
 359	if (m->hw_handler_name) {
 360		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 361		set_bit(MPATHF_QUEUE_IO, &m->flags);
 362	} else {
 363		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 364		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 365	}
 366
 367	atomic_set(&m->pg_init_count, 0);
 368}
 369
 370static struct pgpath *choose_path_in_pg(struct multipath *m,
 371					struct priority_group *pg,
 372					size_t nr_bytes)
 373{
 374	unsigned long flags;
 375	struct dm_path *path;
 376	struct pgpath *pgpath;
 377
 378	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 379	if (!path)
 380		return ERR_PTR(-ENXIO);
 381
 382	pgpath = path_to_pgpath(path);
 383
 384	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 385		/* Only update current_pgpath if pg changed */
 386		spin_lock_irqsave(&m->lock, flags);
 387		m->current_pgpath = pgpath;
 388		__switch_pg(m, pg);
 389		spin_unlock_irqrestore(&m->lock, flags);
 390	}
 391
 392	return pgpath;
 393}
 394
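/*
 * Select a path for the next nr_bytes of I/O: prefer an explicitly
 * requested next_pg, then stick with current_pg while it still has a
 * usable path, and finally walk all PGs (first skipping bypassed ones,
 * then retrying them with the pg_init delay set so a struggling
 * controller is not hammered).
 */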
 395static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 396{
 397	unsigned long flags;
 398	struct priority_group *pg;
 399	struct pgpath *pgpath;
 400	unsigned bypassed = 1;
 401
 402	if (!atomic_read(&m->nr_valid_paths)) {
 403		spin_lock_irqsave(&m->lock, flags);
 404		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 405		spin_unlock_irqrestore(&m->lock, flags);
 406		goto failed;
 407	}
 408
 409	/* Were we instructed to switch PG? */
 410	if (READ_ONCE(m->next_pg)) {
 411		spin_lock_irqsave(&m->lock, flags);
 412		pg = m->next_pg;
 413		if (!pg) {
 414			spin_unlock_irqrestore(&m->lock, flags);
 415			goto check_current_pg;
 416		}
 417		m->next_pg = NULL;
 418		spin_unlock_irqrestore(&m->lock, flags);
 419		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 420		if (!IS_ERR_OR_NULL(pgpath))
 421			return pgpath;
 422	}
 423
 424	/* Don't change PG until it has no remaining paths */
 425check_current_pg:
 426	pg = READ_ONCE(m->current_pg);
 427	if (pg) {
 428		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 429		if (!IS_ERR_OR_NULL(pgpath))
 430			return pgpath;
 431	}
 432
 433	/*
 434	 * Loop through priority groups until we find a valid path.
 435	 * First time we skip PGs marked 'bypassed'.
 436	 * Second time we only try the ones we skipped, but set
 437	 * pg_init_delay_retry so we do not hammer controllers.
 438	 */
 439	do {
 440		list_for_each_entry(pg, &m->priority_groups, list) {
 441			if (pg->bypassed == !!bypassed)
 442				continue;
 443			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 444			if (!IS_ERR_OR_NULL(pgpath)) {
 445				if (!bypassed) {
 446					spin_lock_irqsave(&m->lock, flags);
 447					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 448					spin_unlock_irqrestore(&m->lock, flags);
 449				}
 450				return pgpath;
 451			}
 452		}
 453	} while (bypassed--);
 454
 455failed:
 456	spin_lock_irqsave(&m->lock, flags);
 457	m->current_pgpath = NULL;
 458	m->current_pg = NULL;
 459	spin_unlock_irqrestore(&m->lock, flags);
 460
 461	return NULL;
 462}
 463
 464/*
 465 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 466 * report the function name and line number of the function from which
 467 * it has been invoked.
 468 */
 469#define dm_report_EIO(m)						\
 470do {									\
 471	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
 472		      dm_table_device_name((m)->ti->table),		\
 473		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
 474		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
 475		      dm_noflush_suspending((m)->ti));			\
 476} while (0)
 477
 478/*
 479 * Check whether bios must be queued in the device-mapper core rather
 480 * than here in the target.
 481 */
 482static bool __must_push_back(struct multipath *m)
 483{
 484	return dm_noflush_suspending(m->ti);
 485}
 486
 487static bool must_push_back_rq(struct multipath *m)
 488{
 489	unsigned long flags;
 490	bool ret;
 491
 492	spin_lock_irqsave(&m->lock, flags);
 493	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
 494	spin_unlock_irqrestore(&m->lock, flags);
 495
 496	return ret;
 497}
 498
 499/*
 500 * Map cloned requests (request-based multipath)
 501 */
 502static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 503				   union map_info *map_context,
 504				   struct request **__clone)
 505{
 506	struct multipath *m = ti->private;
 507	size_t nr_bytes = blk_rq_bytes(rq);
 508	struct pgpath *pgpath;
 509	struct block_device *bdev;
 510	struct dm_mpath_io *mpio = get_mpio(map_context);
 511	struct request_queue *q;
 512	struct request *clone;
 513
 514	/* Do we need to select a new pgpath? */
 515	pgpath = READ_ONCE(m->current_pgpath);
 516	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
 517		pgpath = choose_pgpath(m, nr_bytes);
 518
 519	if (!pgpath) {
 520		if (must_push_back_rq(m))
 521			return DM_MAPIO_DELAY_REQUEUE;
 522		dm_report_EIO(m);	/* Failed */
 523		return DM_MAPIO_KILL;
 524	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
 525		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
 526		pg_init_all_paths(m);
 527		return DM_MAPIO_DELAY_REQUEUE;
 528	}
 529
 530	mpio->pgpath = pgpath;
 531	mpio->nr_bytes = nr_bytes;
 532
 533	bdev = pgpath->path.dev->bdev;
 534	q = bdev_get_queue(bdev);
 535	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
 536			BLK_MQ_REQ_NOWAIT);
 537	if (IS_ERR(clone)) {
 538		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 539		if (blk_queue_dying(q)) {
 540			atomic_inc(&m->pg_init_in_progress);
 541			activate_or_offline_path(pgpath);
 542			return DM_MAPIO_DELAY_REQUEUE;
 543		}
 544
 545		/*
 546		 * blk-mq's SCHED_RESTART can cover this requeue, so we
 547		 * needn't deal with it by DELAY_REQUEUE. More importantly,
 548		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
 549		 * get the queue busy feedback (via BLK_STS_RESOURCE),
 550		 * otherwise I/O merging can suffer.
 551		 */
 552		return DM_MAPIO_REQUEUE;
 553	}
 554	clone->bio = clone->biotail = NULL;
 555	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 556	*__clone = clone;
 557
 558	if (pgpath->pg->ps.type->start_io)
 559		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 560					      &pgpath->path,
 561					      nr_bytes);
 562	return DM_MAPIO_REMAPPED;
 563}
 564
 565static void multipath_release_clone(struct request *clone,
 566				    union map_info *map_context)
 567{
 568	if (unlikely(map_context)) {
 569		/*
 570		 * non-NULL map_context means caller is still map
 571		 * method; must undo multipath_clone_and_map()
 572		 */
 573		struct dm_mpath_io *mpio = get_mpio(map_context);
 574		struct pgpath *pgpath = mpio->pgpath;
 575
 576		if (pgpath && pgpath->pg->ps.type->end_io)
 577			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
 578						    &pgpath->path,
 579						    mpio->nr_bytes,
 580						    clone->io_start_time_ns);
 581	}
 582
 583	blk_mq_free_request(clone);
 584}
 585
 586/*
 587 * Map cloned bios (bio-based multipath)
 588 */
 589
 590static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
 591{
 592	/* Queue for the daemon to resubmit */
 593	bio_list_add(&m->queued_bios, bio);
 594	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
 595		queue_work(kmultipathd, &m->process_queued_bios);
 596}
 597
 598static void multipath_queue_bio(struct multipath *m, struct bio *bio)
 599{
 600	unsigned long flags;
 601
 602	spin_lock_irqsave(&m->lock, flags);
 603	__multipath_queue_bio(m, bio);
 604	spin_unlock_irqrestore(&m->lock, flags);
 605}
 606
 607static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 608{
 609	struct pgpath *pgpath;
 610	unsigned long flags;
 611
 612	/* Do we need to select a new pgpath? */
 613	pgpath = READ_ONCE(m->current_pgpath);
 614	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
 615		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 616
 617	if (!pgpath) {
 618		spin_lock_irqsave(&m->lock, flags);
 619		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 620			__multipath_queue_bio(m, bio);
 621			pgpath = ERR_PTR(-EAGAIN);
 622		}
 623		spin_unlock_irqrestore(&m->lock, flags);
 624
 625	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
 626		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
 627		multipath_queue_bio(m, bio);
 628		pg_init_all_paths(m);
 629		return ERR_PTR(-EAGAIN);
 630	}
 631
 632	return pgpath;
 633}
 634
 635static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 636			       struct dm_mpath_io *mpio)
 637{
 638	struct pgpath *pgpath = __map_bio(m, bio);
 639
 640	if (IS_ERR(pgpath))
 641		return DM_MAPIO_SUBMITTED;
 642
 643	if (!pgpath) {
 644		if (__must_push_back(m))
 645			return DM_MAPIO_REQUEUE;
 646		dm_report_EIO(m);
 647		return DM_MAPIO_KILL;
 648	}
 649
 650	mpio->pgpath = pgpath;
 651
 652	if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
 653		mpio->start_time_ns = ktime_get_ns();
 654
 655	bio->bi_status = 0;
 656	bio_set_dev(bio, pgpath->path.dev->bdev);
 657	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 658
 659	if (pgpath->pg->ps.type->start_io)
 660		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 661					      &pgpath->path,
 662					      mpio->nr_bytes);
 663	return DM_MAPIO_REMAPPED;
 664}
 665
 666static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 667{
 668	struct multipath *m = ti->private;
 669	struct dm_mpath_io *mpio = NULL;
 670
 671	multipath_init_per_bio_data(bio, &mpio);
 672	return __multipath_map_bio(m, bio, mpio);
 673}
 674
 675static void process_queued_io_list(struct multipath *m)
 676{
 677	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
 678		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
 679	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 680		queue_work(kmultipathd, &m->process_queued_bios);
 681}
 682
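/*
 * Work item that re-maps and resubmits bios which were queued because no
 * path was usable or because pg_init had not completed yet.
 */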
 683static void process_queued_bios(struct work_struct *work)
 684{
 685	int r;
 686	unsigned long flags;
 687	struct bio *bio;
 688	struct bio_list bios;
 689	struct blk_plug plug;
 690	struct multipath *m =
 691		container_of(work, struct multipath, process_queued_bios);
 692
 693	bio_list_init(&bios);
 694
 695	spin_lock_irqsave(&m->lock, flags);
 696
 697	if (bio_list_empty(&m->queued_bios)) {
 698		spin_unlock_irqrestore(&m->lock, flags);
 699		return;
 700	}
 701
 702	bio_list_merge(&bios, &m->queued_bios);
 703	bio_list_init(&m->queued_bios);
 704
 705	spin_unlock_irqrestore(&m->lock, flags);
 706
 707	blk_start_plug(&plug);
 708	while ((bio = bio_list_pop(&bios))) {
 709		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 
 710		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
 711		r = __multipath_map_bio(m, bio, mpio);
 712		switch (r) {
 713		case DM_MAPIO_KILL:
 714			bio->bi_status = BLK_STS_IOERR;
 715			bio_endio(bio);
 716			break;
 717		case DM_MAPIO_REQUEUE:
 718			bio->bi_status = BLK_STS_DM_REQUEUE;
 719			bio_endio(bio);
 720			break;
 721		case DM_MAPIO_REMAPPED:
 722			submit_bio_noacct(bio);
 723			break;
 724		case DM_MAPIO_SUBMITTED:
 725			break;
 726		default:
 727			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
 728		}
 729	}
 730	blk_finish_plug(&plug);
 731}
 732
 733/*
 734 * If we run out of usable paths, should we queue I/O or error it?
 735 */
 736static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 737			    bool save_old_value, const char *caller)
 738{
 739	unsigned long flags;
 740	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
 741	const char *dm_dev_name = dm_table_device_name(m->ti->table);
 742
 743	DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
 744		dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
 745
 746	spin_lock_irqsave(&m->lock, flags);
 747
 748	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 749	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 750
 751	if (save_old_value) {
 752		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
 753			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
 754			      dm_dev_name);
 755		} else
 756			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
 757	} else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
 758		/* due to "fail_if_no_path" message, need to honor it. */
 759		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 760	}
 761	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
 762
 763	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
 764		dm_dev_name, __func__,
 765		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
 766		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
 767		dm_noflush_suspending(m->ti));
 768
 769	spin_unlock_irqrestore(&m->lock, flags);
 770
 771	if (!queue_if_no_path) {
 772		dm_table_run_md_queue_async(m->ti->table);
 773		process_queued_io_list(m);
 774	}
 775
 776	return 0;
 777}
 778
 779/*
 780 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 781 * process any queued I/O.
 782 */
 783static void queue_if_no_path_timeout_work(struct timer_list *t)
 784{
 785	struct multipath *m = from_timer(m, t, nopath_timer);
 786
 787	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
 788	       dm_table_device_name(m->ti->table));
 789	queue_if_no_path(m, false, false, __func__);
 790}
 791
 792/*
 793 * Enable the queue_if_no_path timeout if necessary.
 794 * Called with m->lock held.
 795 */
 796static void enable_nopath_timeout(struct multipath *m)
 797{
 798	unsigned long queue_if_no_path_timeout =
 799		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
 800
 801	lockdep_assert_held(&m->lock);
 802
 803	if (queue_if_no_path_timeout > 0 &&
 804	    atomic_read(&m->nr_valid_paths) == 0 &&
 805	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 806		mod_timer(&m->nopath_timer,
 807			  jiffies + queue_if_no_path_timeout);
 808	}
 809}
 810
 811static void disable_nopath_timeout(struct multipath *m)
 812{
 813	del_timer_sync(&m->nopath_timer);
 814}
 815
 816/*
 817 * An event is triggered whenever a path is taken out of use.
 818 * Includes path failure and PG bypass.
 819 */
 820static void trigger_event(struct work_struct *work)
 821{
 822	struct multipath *m =
 823		container_of(work, struct multipath, trigger_event);
 824
 825	dm_table_event(m->ti->table);
 826}
 827
  828/*-----------------------------------------------------------------
  829 * Constructor/argument parsing:
 830 * <#multipath feature args> [<arg>]*
 831 * <#hw_handler args> [hw_handler [<arg>]*]
 832 * <#priority groups>
 833 * <initial priority group>
 834 *     [<selector> <#selector args> [<arg>]*
 835 *      <#paths> <#per-path selector args>
 836 *         [<path> [<arg>]* ]+ ]+
 837 *---------------------------------------------------------------*/
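/*
 * Illustrative example only (device numbers, sizes and names are made up):
 * a request-based map with queue_if_no_path, no hardware handler and one
 * round-robin priority group holding two paths could be loaded with
 *
 *   dmsetup create mpatha --table \
 *     "0 2097152 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1 8:32 1"
 *
 * i.e. 1 feature arg, 0 hw_handler args, 1 PG, initial PG 1, selector
 * "round-robin" with 0 selector args, and 2 paths with 1 per-path arg each.
 */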
 
 838static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 839			       struct dm_target *ti)
 840{
 841	int r;
 842	struct path_selector_type *pst;
 843	unsigned ps_argc;
 844
 845	static const struct dm_arg _args[] = {
 846		{0, 1024, "invalid number of path selector args"},
 847	};
 848
 849	pst = dm_get_path_selector(dm_shift_arg(as));
 850	if (!pst) {
 851		ti->error = "unknown path selector type";
 852		return -EINVAL;
 853	}
 854
 855	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 856	if (r) {
 857		dm_put_path_selector(pst);
 858		return -EINVAL;
 859	}
 860
 861	r = pst->create(&pg->ps, ps_argc, as->argv);
 862	if (r) {
 863		dm_put_path_selector(pst);
 864		ti->error = "path selector constructor failed";
 865		return r;
 866	}
 867
 868	pg->ps.type = pst;
 869	dm_consume_args(as, ps_argc);
 870
 871	return 0;
 872}
 873
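/*
 * Attach the requested SCSI device handler to the path's request queue,
 * or keep whatever handler is already attached when
 * retain_attached_hw_handler is in effect, then apply any handler
 * parameters.
 */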
 874static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 875			 const char **attached_handler_name, char **error)
 876{
 877	struct request_queue *q = bdev_get_queue(bdev);
 878	int r;
 879
 880	if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
 881retain:
 882		if (*attached_handler_name) {
 883			/*
 884			 * Clear any hw_handler_params associated with a
 885			 * handler that isn't already attached.
 886			 */
 887			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
 888				kfree(m->hw_handler_params);
 889				m->hw_handler_params = NULL;
 890			}
 891
 892			/*
 893			 * Reset hw_handler_name to match the attached handler
 894			 *
 895			 * NB. This modifies the table line to show the actual
 896			 * handler instead of the original table passed in.
 897			 */
 898			kfree(m->hw_handler_name);
 899			m->hw_handler_name = *attached_handler_name;
 900			*attached_handler_name = NULL;
 901		}
 902	}
 903
 904	if (m->hw_handler_name) {
 905		r = scsi_dh_attach(q, m->hw_handler_name);
 906		if (r == -EBUSY) {
 907			DMINFO("retaining handler on device %pg", bdev);
 908			goto retain;
 909		}
 910		if (r < 0) {
 911			*error = "error attaching hardware handler";
 912			return r;
 913		}
 914
 915		if (m->hw_handler_params) {
 916			r = scsi_dh_set_params(q, m->hw_handler_params);
 917			if (r < 0) {
 918				*error = "unable to set hardware handler parameters";
 919				return r;
 920			}
 921		}
 922	}
 923
 924	return 0;
 925}
 926
 927static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 928				 struct dm_target *ti)
 929{
 930	int r;
 931	struct pgpath *p;
 932	struct multipath *m = ti->private;
 933	struct request_queue *q;
 934	const char *attached_handler_name = NULL;
 935
 936	/* we need at least a path arg */
 937	if (as->argc < 1) {
 938		ti->error = "no device given";
 939		return ERR_PTR(-EINVAL);
 940	}
 941
 942	p = alloc_pgpath();
 943	if (!p)
 944		return ERR_PTR(-ENOMEM);
 945
 946	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 947			  &p->path.dev);
 948	if (r) {
 949		ti->error = "error getting device";
 950		goto bad;
 951	}
 952
 953	q = bdev_get_queue(p->path.dev->bdev);
 954	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 955	if (attached_handler_name || m->hw_handler_name) {
 956		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 957		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
 958		kfree(attached_handler_name);
 959		if (r) {
 960			dm_put_device(ti, p->path.dev);
 961			goto bad;
 962		}
 963	}
 964
 965	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 966	if (r) {
 967		dm_put_device(ti, p->path.dev);
 968		goto bad;
 969	}
 970
 971	return p;
 972 bad:
 973	free_pgpath(p);
 974	return ERR_PTR(r);
 975}
 976
 977static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 978						   struct multipath *m)
 979{
 980	static const struct dm_arg _args[] = {
 981		{1, 1024, "invalid number of paths"},
 982		{0, 1024, "invalid number of selector args"}
 983	};
 984
 985	int r;
 986	unsigned i, nr_selector_args, nr_args;
 987	struct priority_group *pg;
 988	struct dm_target *ti = m->ti;
 989
 990	if (as->argc < 2) {
 991		as->argc = 0;
 992		ti->error = "not enough priority group arguments";
 993		return ERR_PTR(-EINVAL);
 994	}
 995
 996	pg = alloc_priority_group();
 997	if (!pg) {
 998		ti->error = "couldn't allocate priority group";
 999		return ERR_PTR(-ENOMEM);
1000	}
1001	pg->m = m;
1002
1003	r = parse_path_selector(as, pg, ti);
1004	if (r)
1005		goto bad;
1006
1007	/*
1008	 * read the paths
1009	 */
1010	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
1011	if (r)
1012		goto bad;
1013
1014	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
1015	if (r)
1016		goto bad;
1017
1018	nr_args = 1 + nr_selector_args;
1019	for (i = 0; i < pg->nr_pgpaths; i++) {
1020		struct pgpath *pgpath;
1021		struct dm_arg_set path_args;
1022
1023		if (as->argc < nr_args) {
1024			ti->error = "not enough path parameters";
1025			r = -EINVAL;
1026			goto bad;
1027		}
1028
1029		path_args.argc = nr_args;
1030		path_args.argv = as->argv;
1031
1032		pgpath = parse_path(&path_args, &pg->ps, ti);
1033		if (IS_ERR(pgpath)) {
1034			r = PTR_ERR(pgpath);
1035			goto bad;
1036		}
1037
1038		pgpath->pg = pg;
1039		list_add_tail(&pgpath->list, &pg->pgpaths);
1040		dm_consume_args(as, nr_args);
1041	}
1042
1043	return pg;
1044
1045 bad:
1046	free_priority_group(pg, ti);
1047	return ERR_PTR(r);
1048}
1049
1050static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
1051{
1052	unsigned hw_argc;
1053	int ret;
1054	struct dm_target *ti = m->ti;
1055
1056	static const struct dm_arg _args[] = {
1057		{0, 1024, "invalid number of hardware handler args"},
1058	};
1059
1060	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
1061		return -EINVAL;
1062
1063	if (!hw_argc)
1064		return 0;
1065
1066	if (m->queue_mode == DM_TYPE_BIO_BASED) {
1067		dm_consume_args(as, hw_argc);
1068		DMERR("bio-based multipath doesn't allow hardware handler args");
1069		return 0;
1070	}
1071
1072	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1073	if (!m->hw_handler_name)
1074		return -EINVAL;
1075
1076	if (hw_argc > 1) {
1077		char *p;
1078		int i, j, len = 4;
1079
1080		for (i = 0; i <= hw_argc - 2; i++)
1081			len += strlen(as->argv[i]) + 1;
1082		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1083		if (!p) {
1084			ti->error = "memory allocation failed";
1085			ret = -ENOMEM;
1086			goto fail;
1087		}
1088		j = sprintf(p, "%d", hw_argc - 1);
1089		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
1090			j = sprintf(p, "%s", as->argv[i]);
1091	}
1092	dm_consume_args(as, hw_argc - 1);
1093
1094	return 0;
1095fail:
1096	kfree(m->hw_handler_name);
1097	m->hw_handler_name = NULL;
1098	return ret;
1099}
1100
1101static int parse_features(struct dm_arg_set *as, struct multipath *m)
1102{
1103	int r;
1104	unsigned argc;
1105	struct dm_target *ti = m->ti;
1106	const char *arg_name;
1107
1108	static const struct dm_arg _args[] = {
1109		{0, 8, "invalid number of feature args"},
1110		{1, 50, "pg_init_retries must be between 1 and 50"},
1111		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1112	};
1113
1114	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1115	if (r)
1116		return -EINVAL;
1117
1118	if (!argc)
1119		return 0;
1120
1121	do {
1122		arg_name = dm_shift_arg(as);
1123		argc--;
1124
1125		if (!strcasecmp(arg_name, "queue_if_no_path")) {
1126			r = queue_if_no_path(m, true, false, __func__);
1127			continue;
1128		}
1129
1130		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1131			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1132			continue;
1133		}
1134
1135		if (!strcasecmp(arg_name, "pg_init_retries") &&
1136		    (argc >= 1)) {
1137			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1138			argc--;
1139			continue;
1140		}
1141
1142		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1143		    (argc >= 1)) {
1144			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1145			argc--;
1146			continue;
1147		}
1148
1149		if (!strcasecmp(arg_name, "queue_mode") &&
1150		    (argc >= 1)) {
1151			const char *queue_mode_name = dm_shift_arg(as);
1152
1153			if (!strcasecmp(queue_mode_name, "bio"))
1154				m->queue_mode = DM_TYPE_BIO_BASED;
1155			else if (!strcasecmp(queue_mode_name, "rq") ||
1156				 !strcasecmp(queue_mode_name, "mq"))
1157				m->queue_mode = DM_TYPE_REQUEST_BASED;
1158			else {
1159				ti->error = "Unknown 'queue_mode' requested";
1160				r = -EINVAL;
1161			}
1162			argc--;
1163			continue;
1164		}
1165
1166		ti->error = "Unrecognised multipath feature request";
1167		r = -EINVAL;
1168	} while (argc && !r);
1169
1170	return r;
1171}
1172
1173static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1174{
1175	/* target arguments */
1176	static const struct dm_arg _args[] = {
1177		{0, 1024, "invalid number of priority groups"},
1178		{0, 1024, "invalid initial priority group number"},
1179	};
1180
1181	int r;
1182	struct multipath *m;
1183	struct dm_arg_set as;
1184	unsigned pg_count = 0;
1185	unsigned next_pg_num;
1186	unsigned long flags;
1187
1188	as.argc = argc;
1189	as.argv = argv;
1190
1191	m = alloc_multipath(ti);
1192	if (!m) {
1193		ti->error = "can't allocate multipath";
1194		return -EINVAL;
1195	}
1196
1197	r = parse_features(&as, m);
1198	if (r)
1199		goto bad;
1200
1201	r = alloc_multipath_stage2(ti, m);
1202	if (r)
1203		goto bad;
1204
1205	r = parse_hw_handler(&as, m);
1206	if (r)
1207		goto bad;
1208
1209	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1210	if (r)
1211		goto bad;
1212
1213	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1214	if (r)
1215		goto bad;
1216
1217	if ((!m->nr_priority_groups && next_pg_num) ||
1218	    (m->nr_priority_groups && !next_pg_num)) {
1219		ti->error = "invalid initial priority group";
1220		r = -EINVAL;
1221		goto bad;
1222	}
1223
1224	/* parse the priority groups */
1225	while (as.argc) {
1226		struct priority_group *pg;
1227		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1228
1229		pg = parse_priority_group(&as, m);
1230		if (IS_ERR(pg)) {
1231			r = PTR_ERR(pg);
1232			goto bad;
1233		}
1234
1235		nr_valid_paths += pg->nr_pgpaths;
1236		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1237
1238		list_add_tail(&pg->list, &m->priority_groups);
1239		pg_count++;
1240		pg->pg_num = pg_count;
1241		if (!--next_pg_num)
1242			m->next_pg = pg;
1243	}
1244
1245	if (pg_count != m->nr_priority_groups) {
1246		ti->error = "priority group count mismatch";
1247		r = -EINVAL;
1248		goto bad;
1249	}
1250
1251	spin_lock_irqsave(&m->lock, flags);
1252	enable_nopath_timeout(m);
1253	spin_unlock_irqrestore(&m->lock, flags);
1254
1255	ti->num_flush_bios = 1;
1256	ti->num_discard_bios = 1;
1257	ti->num_write_zeroes_bios = 1;
1258	if (m->queue_mode == DM_TYPE_BIO_BASED)
1259		ti->per_io_data_size = multipath_per_bio_data_size();
1260	else
1261		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1262
1263	return 0;
1264
1265 bad:
1266	free_multipath(m);
1267	return r;
1268}
1269
1270static void multipath_wait_for_pg_init_completion(struct multipath *m)
1271{
1272	DEFINE_WAIT(wait);
1273
1274	while (1) {
1275		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1276
1277		if (!atomic_read(&m->pg_init_in_progress))
1278			break;
1279
1280		io_schedule();
1281	}
1282	finish_wait(&m->pg_init_wait, &wait);
1283}
1284
1285static void flush_multipath_work(struct multipath *m)
1286{
1287	if (m->hw_handler_name) {
1288		unsigned long flags;
1289
1290		if (!atomic_read(&m->pg_init_in_progress))
1291			goto skip;
1292
1293		spin_lock_irqsave(&m->lock, flags);
1294		if (atomic_read(&m->pg_init_in_progress) &&
1295		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
1296			spin_unlock_irqrestore(&m->lock, flags);
1297
1298			flush_workqueue(kmpath_handlerd);
1299			multipath_wait_for_pg_init_completion(m);
1300
1301			spin_lock_irqsave(&m->lock, flags);
1302			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1303		}
1304		spin_unlock_irqrestore(&m->lock, flags);
1305	}
1306skip:
1307	if (m->queue_mode == DM_TYPE_BIO_BASED)
1308		flush_work(&m->process_queued_bios);
1309	flush_work(&m->trigger_event);
1310}
1311
1312static void multipath_dtr(struct dm_target *ti)
1313{
1314	struct multipath *m = ti->private;
1315
1316	disable_nopath_timeout(m);
1317	flush_multipath_work(m);
1318	free_multipath(m);
1319}
1320
1321/*
1322 * Take a path out of use.
1323 */
1324static int fail_path(struct pgpath *pgpath)
1325{
1326	unsigned long flags;
1327	struct multipath *m = pgpath->pg->m;
1328
1329	spin_lock_irqsave(&m->lock, flags);
1330
1331	if (!pgpath->is_active)
1332		goto out;
1333
1334	DMWARN("%s: Failing path %s.",
1335	       dm_table_device_name(m->ti->table),
1336	       pgpath->path.dev->name);
1337
1338	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1339	pgpath->is_active = false;
1340	pgpath->fail_count++;
1341
1342	atomic_dec(&m->nr_valid_paths);
1343
1344	if (pgpath == m->current_pgpath)
1345		m->current_pgpath = NULL;
1346
1347	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1348		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1349
1350	schedule_work(&m->trigger_event);
1351
1352	enable_nopath_timeout(m);
1353
1354out:
1355	spin_unlock_irqrestore(&m->lock, flags);
1356
1357	return 0;
1358}
1359
1360/*
1361 * Reinstate a previously-failed path
1362 */
1363static int reinstate_path(struct pgpath *pgpath)
1364{
1365	int r = 0, run_queue = 0;
1366	unsigned long flags;
1367	struct multipath *m = pgpath->pg->m;
1368	unsigned nr_valid_paths;
1369
1370	spin_lock_irqsave(&m->lock, flags);
1371
1372	if (pgpath->is_active)
1373		goto out;
1374
1375	DMWARN("%s: Reinstating path %s.",
1376	       dm_table_device_name(m->ti->table),
1377	       pgpath->path.dev->name);
1378
1379	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1380	if (r)
1381		goto out;
1382
1383	pgpath->is_active = true;
1384
1385	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1386	if (nr_valid_paths == 1) {
1387		m->current_pgpath = NULL;
1388		run_queue = 1;
1389	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1390		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1391			atomic_inc(&m->pg_init_in_progress);
1392	}
1393
1394	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1395		       pgpath->path.dev->name, nr_valid_paths);
1396
1397	schedule_work(&m->trigger_event);
1398
1399out:
1400	spin_unlock_irqrestore(&m->lock, flags);
1401	if (run_queue) {
1402		dm_table_run_md_queue_async(m->ti->table);
1403		process_queued_io_list(m);
1404	}
1405
1406	if (pgpath->is_active)
1407		disable_nopath_timeout(m);
1408
1409	return r;
1410}
1411
1412/*
1413 * Fail or reinstate all paths that match the provided struct dm_dev.
1414 */
1415static int action_dev(struct multipath *m, struct dm_dev *dev,
1416		      action_fn action)
1417{
1418	int r = -EINVAL;
1419	struct pgpath *pgpath;
1420	struct priority_group *pg;
1421
1422	list_for_each_entry(pg, &m->priority_groups, list) {
1423		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1424			if (pgpath->path.dev == dev)
1425				r = action(pgpath);
1426		}
1427	}
1428
1429	return r;
1430}
1431
1432/*
1433 * Temporarily try to avoid having to use the specified PG
1434 */
1435static void bypass_pg(struct multipath *m, struct priority_group *pg,
1436		      bool bypassed)
1437{
1438	unsigned long flags;
1439
1440	spin_lock_irqsave(&m->lock, flags);
1441
1442	pg->bypassed = bypassed;
1443	m->current_pgpath = NULL;
1444	m->current_pg = NULL;
1445
1446	spin_unlock_irqrestore(&m->lock, flags);
1447
1448	schedule_work(&m->trigger_event);
1449}
1450
1451/*
1452 * Switch to using the specified PG from the next I/O that gets mapped
1453 */
1454static int switch_pg_num(struct multipath *m, const char *pgstr)
1455{
1456	struct priority_group *pg;
1457	unsigned pgnum;
1458	unsigned long flags;
1459	char dummy;
1460
1461	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1462	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1463		DMWARN("invalid PG number supplied to switch_pg_num");
1464		return -EINVAL;
1465	}
1466
1467	spin_lock_irqsave(&m->lock, flags);
1468	list_for_each_entry(pg, &m->priority_groups, list) {
1469		pg->bypassed = false;
1470		if (--pgnum)
1471			continue;
1472
1473		m->current_pgpath = NULL;
1474		m->current_pg = NULL;
1475		m->next_pg = pg;
1476	}
1477	spin_unlock_irqrestore(&m->lock, flags);
1478
1479	schedule_work(&m->trigger_event);
1480	return 0;
1481}
1482
1483/*
1484 * Set/clear bypassed status of a PG.
1485 * PGs are numbered upwards from 1 in the order they were declared.
1486 */
1487static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1488{
1489	struct priority_group *pg;
1490	unsigned pgnum;
1491	char dummy;
1492
1493	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1494	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1495		DMWARN("invalid PG number supplied to bypass_pg");
1496		return -EINVAL;
1497	}
1498
1499	list_for_each_entry(pg, &m->priority_groups, list) {
1500		if (!--pgnum)
1501			break;
1502	}
1503
1504	bypass_pg(m, pg, bypassed);
1505	return 0;
1506}
1507
1508/*
1509 * Should we retry pg_init immediately?
1510 */
1511static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1512{
1513	unsigned long flags;
1514	bool limit_reached = false;
1515
1516	spin_lock_irqsave(&m->lock, flags);
1517
1518	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1519	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1520		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1521	else
1522		limit_reached = true;
1523
1524	spin_unlock_irqrestore(&m->lock, flags);
1525
1526	return limit_reached;
1527}
1528
1529static void pg_init_done(void *data, int errors)
1530{
1531	struct pgpath *pgpath = data;
1532	struct priority_group *pg = pgpath->pg;
1533	struct multipath *m = pg->m;
1534	unsigned long flags;
1535	bool delay_retry = false;
1536
1537	/* device or driver problems */
1538	switch (errors) {
1539	case SCSI_DH_OK:
1540		break;
1541	case SCSI_DH_NOSYS:
1542		if (!m->hw_handler_name) {
1543			errors = 0;
1544			break;
1545		}
1546		DMERR("Could not failover the device: Handler scsi_dh_%s "
1547		      "Error %d.", m->hw_handler_name, errors);
1548		/*
1549		 * Fail path for now, so we do not ping pong
1550		 */
1551		fail_path(pgpath);
1552		break;
1553	case SCSI_DH_DEV_TEMP_BUSY:
1554		/*
1555		 * Probably doing something like FW upgrade on the
1556		 * controller so try the other pg.
1557		 */
1558		bypass_pg(m, pg, true);
1559		break;
1560	case SCSI_DH_RETRY:
1561		/* Wait before retrying. */
1562		delay_retry = true;
1563		fallthrough;
1564	case SCSI_DH_IMM_RETRY:
1565	case SCSI_DH_RES_TEMP_UNAVAIL:
1566		if (pg_init_limit_reached(m, pgpath))
1567			fail_path(pgpath);
1568		errors = 0;
1569		break;
1570	case SCSI_DH_DEV_OFFLINED:
1571	default:
1572		/*
1573		 * We probably do not want to fail the path for a device
1574		 * error, but this is what the old dm did. In future
1575		 * patches we can do more advanced handling.
1576		 */
1577		fail_path(pgpath);
1578	}
1579
1580	spin_lock_irqsave(&m->lock, flags);
1581	if (errors) {
1582		if (pgpath == m->current_pgpath) {
1583			DMERR("Could not failover device. Error %d.", errors);
1584			m->current_pgpath = NULL;
1585			m->current_pg = NULL;
1586		}
1587	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1588		pg->bypassed = false;
1589
1590	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1591		/* Activations of other paths are still on going */
1592		goto out;
1593
1594	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1595		if (delay_retry)
1596			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1597		else
1598			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1599
1600		if (__pg_init_all_paths(m))
1601			goto out;
1602	}
1603	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1604
1605	process_queued_io_list(m);
1606
1607	/*
1608	 * Wake up any thread waiting to suspend.
1609	 */
1610	wake_up(&m->pg_init_wait);
1611
1612out:
1613	spin_unlock_irqrestore(&m->lock, flags);
1614}
1615
1616static void activate_or_offline_path(struct pgpath *pgpath)
1617{
1618	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1619
1620	if (pgpath->is_active && !blk_queue_dying(q))
1621		scsi_dh_activate(q, pg_init_done, pgpath);
1622	else
1623		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1624}
1625
1626static void activate_path_work(struct work_struct *work)
1627{
1628	struct pgpath *pgpath =
1629		container_of(work, struct pgpath, activate_path.work);
1630
1631	activate_or_offline_path(pgpath);
1632}
1633
1634static int multipath_end_io(struct dm_target *ti, struct request *clone,
1635			    blk_status_t error, union map_info *map_context)
1636{
1637	struct dm_mpath_io *mpio = get_mpio(map_context);
1638	struct pgpath *pgpath = mpio->pgpath;
1639	int r = DM_ENDIO_DONE;
1640
1641	/*
1642	 * We don't queue any clone request inside the multipath target
1643	 * during end I/O handling, since those clone requests don't have
1644	 * bio clones.  If we queue them inside the multipath target,
1645	 * we need to make bio clones, that requires memory allocation.
1646	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1647	 *  don't have bio clones.)
1648	 * Instead of queueing the clone request here, we queue the original
1649	 * request into dm core, which will remake a clone request and
1650	 * clone bios for it and resubmit it later.
1651	 */
1652	if (error && blk_path_error(error)) {
1653		struct multipath *m = ti->private;
1654
1655		if (error == BLK_STS_RESOURCE)
1656			r = DM_ENDIO_DELAY_REQUEUE;
1657		else
1658			r = DM_ENDIO_REQUEUE;
1659
1660		if (pgpath)
1661			fail_path(pgpath);
1662
1663		if (!atomic_read(&m->nr_valid_paths) &&
1664		    !must_push_back_rq(m)) {
1665			if (error == BLK_STS_IOERR)
1666				dm_report_EIO(m);
1667			/* complete with the original error */
1668			r = DM_ENDIO_DONE;
1669		}
1670	}
1671
1672	if (pgpath) {
1673		struct path_selector *ps = &pgpath->pg->ps;
1674
1675		if (ps->type->end_io)
1676			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1677					 clone->io_start_time_ns);
1678	}
1679
1680	return r;
1681}
1682
1683static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1684				blk_status_t *error)
1685{
1686	struct multipath *m = ti->private;
1687	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1688	struct pgpath *pgpath = mpio->pgpath;
1689	unsigned long flags;
1690	int r = DM_ENDIO_DONE;
1691
1692	if (!*error || !blk_path_error(*error))
1693		goto done;
1694
1695	if (pgpath)
1696		fail_path(pgpath);
1697
1698	if (!atomic_read(&m->nr_valid_paths)) {
1699		spin_lock_irqsave(&m->lock, flags);
1700		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1701			if (__must_push_back(m)) {
1702				r = DM_ENDIO_REQUEUE;
1703			} else {
1704				dm_report_EIO(m);
1705				*error = BLK_STS_IOERR;
1706			}
1707			spin_unlock_irqrestore(&m->lock, flags);
1708			goto done;
1709		}
1710		spin_unlock_irqrestore(&m->lock, flags);
1711	}
1712
1713	multipath_queue_bio(m, clone);
1714	r = DM_ENDIO_INCOMPLETE;
1715done:
1716	if (pgpath) {
1717		struct path_selector *ps = &pgpath->pg->ps;
1718
1719		if (ps->type->end_io)
1720			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1721					 (mpio->start_time_ns ?:
1722					  dm_start_time_ns_from_clone(clone)));
1723	}
1724
1725	return r;
1726}
1727
1728/*
1729 * Suspend with flush can't complete until all the I/O is processed
1730 * so if the last path fails we must error any remaining I/O.
1731 * - Note that if the freeze_bdev fails while suspending, the
1732 *   queue_if_no_path state is lost - userspace should reset it.
1733 * Otherwise, during noflush suspend, queue_if_no_path will not change.
1734 */
1735static void multipath_presuspend(struct dm_target *ti)
1736{
1737	struct multipath *m = ti->private;
1738
1739	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
1740	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
1741		queue_if_no_path(m, false, true, __func__);
1742}
1743
1744static void multipath_postsuspend(struct dm_target *ti)
1745{
1746	struct multipath *m = ti->private;
1747
1748	mutex_lock(&m->work_mutex);
1749	flush_multipath_work(m);
1750	mutex_unlock(&m->work_mutex);
1751}
1752
1753/*
1754 * Restore the queue_if_no_path setting.
1755 */
1756static void multipath_resume(struct dm_target *ti)
1757{
1758	struct multipath *m = ti->private;
1759	unsigned long flags;
1760
1761	spin_lock_irqsave(&m->lock, flags);
1762	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
1763		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1764		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
1765	}
1766
1767	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
1768		dm_table_device_name(m->ti->table), __func__,
1769		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
1770		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1771
1772	spin_unlock_irqrestore(&m->lock, flags);
1773}
1774
1775/*
1776 * Info output has the following format:
1777 * num_multipath_feature_args [multipath_feature_args]*
1778 * num_handler_status_args [handler_status_args]*
1779 * num_groups init_group_number
1780 *            [A|D|E num_ps_status_args [ps_status_args]*
1781 *             num_paths num_selector_args
1782 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1783 *
1784 * Table output has the following format (identical to the constructor string):
1785 * num_feature_args [features_args]*
1786 * num_handler_args hw_handler [hw_handler_args]*
1787 * num_groups init_group_number
1788 *     [priority selector-name num_ps_args [ps_args]*
1789 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1790 */
1791static void multipath_status(struct dm_target *ti, status_type_t type,
1792			     unsigned status_flags, char *result, unsigned maxlen)
1793{
1794	int sz = 0, pg_counter, pgpath_counter;
1795	unsigned long flags;
1796	struct multipath *m = ti->private;
1797	struct priority_group *pg;
1798	struct pgpath *p;
1799	unsigned pg_num;
1800	char state;
1801
1802	spin_lock_irqsave(&m->lock, flags);
1803
1804	/* Features */
1805	if (type == STATUSTYPE_INFO)
1806		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1807		       atomic_read(&m->pg_init_count));
1808	else {
1809		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1810			      (m->pg_init_retries > 0) * 2 +
1811			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1812			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1813			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1814
1815		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1816			DMEMIT("queue_if_no_path ");
1817		if (m->pg_init_retries)
1818			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1819		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1820			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1821		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1822			DMEMIT("retain_attached_hw_handler ");
1823		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1824			switch(m->queue_mode) {
1825			case DM_TYPE_BIO_BASED:
1826				DMEMIT("queue_mode bio ");
1827				break;
1828			default:
1829				WARN_ON_ONCE(true);
1830				break;
1831			}
1832		}
1833	}
1834
1835	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1836		DMEMIT("0 ");
1837	else
1838		DMEMIT("1 %s ", m->hw_handler_name);
1839
1840	DMEMIT("%u ", m->nr_priority_groups);
1841
1842	if (m->next_pg)
1843		pg_num = m->next_pg->pg_num;
1844	else if (m->current_pg)
1845		pg_num = m->current_pg->pg_num;
1846	else
1847		pg_num = (m->nr_priority_groups ? 1 : 0);
1848
1849	DMEMIT("%u ", pg_num);
1850
1851	switch (type) {
1852	case STATUSTYPE_INFO:
1853		list_for_each_entry(pg, &m->priority_groups, list) {
1854			if (pg->bypassed)
1855				state = 'D';	/* Disabled */
1856			else if (pg == m->current_pg)
1857				state = 'A';	/* Currently Active */
1858			else
1859				state = 'E';	/* Enabled */
1860
1861			DMEMIT("%c ", state);
1862
1863			if (pg->ps.type->status)
1864				sz += pg->ps.type->status(&pg->ps, NULL, type,
1865							  result + sz,
1866							  maxlen - sz);
1867			else
1868				DMEMIT("0 ");
1869
1870			DMEMIT("%u %u ", pg->nr_pgpaths,
1871			       pg->ps.type->info_args);
1872
1873			list_for_each_entry(p, &pg->pgpaths, list) {
1874				DMEMIT("%s %s %u ", p->path.dev->name,
1875				       p->is_active ? "A" : "F",
1876				       p->fail_count);
1877				if (pg->ps.type->status)
1878					sz += pg->ps.type->status(&pg->ps,
1879					      &p->path, type, result + sz,
1880					      maxlen - sz);
1881			}
1882		}
1883		break;
1884
1885	case STATUSTYPE_TABLE:
1886		list_for_each_entry(pg, &m->priority_groups, list) {
1887			DMEMIT("%s ", pg->ps.type->name);
1888
1889			if (pg->ps.type->status)
1890				sz += pg->ps.type->status(&pg->ps, NULL, type,
1891							  result + sz,
1892							  maxlen - sz);
1893			else
1894				DMEMIT("0 ");
1895
1896			DMEMIT("%u %u ", pg->nr_pgpaths,
1897			       pg->ps.type->table_args);
1898
1899			list_for_each_entry(p, &pg->pgpaths, list) {
1900				DMEMIT("%s ", p->path.dev->name);
1901				if (pg->ps.type->status)
1902					sz += pg->ps.type->status(&pg->ps,
1903					      &p->path, type, result + sz,
1904					      maxlen - sz);
1905			}
1906		}
1907		break;
1908
1909	case STATUSTYPE_IMA:
 1910		sz = 0; /* reset the result pointer */
1911
1912		DMEMIT_TARGET_NAME_VERSION(ti->type);
1913		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);
1914
1915		pg_counter = 0;
1916		list_for_each_entry(pg, &m->priority_groups, list) {
1917			if (pg->bypassed)
1918				state = 'D';	/* Disabled */
1919			else if (pg == m->current_pg)
1920				state = 'A';	/* Currently Active */
1921			else
1922				state = 'E';	/* Enabled */
1923			DMEMIT(",pg_state_%d=%c", pg_counter, state);
1924			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
1925			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);
1926
1927			pgpath_counter = 0;
1928			list_for_each_entry(p, &pg->pgpaths, list) {
1929				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
1930				       pg_counter, pgpath_counter, p->path.dev->name,
1931				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
1932				       pg_counter, pgpath_counter, p->fail_count);
1933				if (pg->ps.type->status) {
1934					DMEMIT(",path_selector_status_%d_%d=",
1935					       pg_counter, pgpath_counter);
1936					sz += pg->ps.type->status(&pg->ps, &p->path,
1937								  type, result + sz,
1938								  maxlen - sz);
1939				}
1940				pgpath_counter++;
1941			}
1942			pg_counter++;
1943		}
1944		DMEMIT(";");
1945		break;
1946	}
1947
1948	spin_unlock_irqrestore(&m->lock, flags);
1949}
1950
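/*
 * Message interface, driven from user space through the DM message ioctl,
 * e.g. (illustrative, device name made up):
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 *   dmsetup message mpatha 0 fail_path /dev/sdc
 *
 * One-argument messages toggle queue_if_no_path/fail_if_no_path;
 * two-argument messages act on a priority group number or a path device.
 */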
1951static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
1952			     char *result, unsigned maxlen)
1953{
1954	int r = -EINVAL;
1955	struct dm_dev *dev;
1956	struct multipath *m = ti->private;
1957	action_fn action;
1958	unsigned long flags;
1959
1960	mutex_lock(&m->work_mutex);
1961
1962	if (dm_suspended(ti)) {
1963		r = -EBUSY;
1964		goto out;
1965	}
1966
1967	if (argc == 1) {
1968		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1969			r = queue_if_no_path(m, true, false, __func__);
1970			spin_lock_irqsave(&m->lock, flags);
1971			enable_nopath_timeout(m);
1972			spin_unlock_irqrestore(&m->lock, flags);
1973			goto out;
1974		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1975			r = queue_if_no_path(m, false, false, __func__);
1976			disable_nopath_timeout(m);
1977			goto out;
1978		}
1979	}
1980
1981	if (argc != 2) {
1982		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1983		goto out;
1984	}
1985
1986	if (!strcasecmp(argv[0], "disable_group")) {
1987		r = bypass_pg_num(m, argv[1], true);
1988		goto out;
1989	} else if (!strcasecmp(argv[0], "enable_group")) {
1990		r = bypass_pg_num(m, argv[1], false);
1991		goto out;
1992	} else if (!strcasecmp(argv[0], "switch_group")) {
1993		r = switch_pg_num(m, argv[1]);
1994		goto out;
1995	} else if (!strcasecmp(argv[0], "reinstate_path"))
1996		action = reinstate_path;
1997	else if (!strcasecmp(argv[0], "fail_path"))
1998		action = fail_path;
1999	else {
2000		DMWARN("Unrecognised multipath message received: %s", argv[0]);
2001		goto out;
2002	}
2003
2004	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
2005	if (r) {
2006		DMWARN("message: error getting device %s",
2007		       argv[1]);
2008		goto out;
2009	}
2010
2011	r = action_dev(m, dev, action);
2012
2013	dm_put_device(ti, dev);
2014
2015out:
2016	mutex_unlock(&m->work_mutex);
2017	return r;
2018}
2019
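/*
 * Select the block device that ioctls should be forwarded to.  Returns
 * -ENOTCONN (and re-kicks pg_init and queued I/O below) while path
 * initialization is still pending, or while queue_if_no_path is set and
 * no path is usable; -EIO when there is no path and we are not queueing;
 * and 1 when the device sizes differ, so the ioctl is not simply passed
 * through.
 */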
2020static int multipath_prepare_ioctl(struct dm_target *ti,
2021				   struct block_device **bdev)
2022{
2023	struct multipath *m = ti->private;
2024	struct pgpath *pgpath;
2025	unsigned long flags;
2026	int r;
2027
2028	pgpath = READ_ONCE(m->current_pgpath);
2029	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
2030		pgpath = choose_pgpath(m, 0);
2031
2032	if (pgpath) {
2033		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
2034			*bdev = pgpath->path.dev->bdev;
2035			r = 0;
2036		} else {
2037			/* pg_init has not started or completed */
2038			r = -ENOTCONN;
2039		}
2040	} else {
2041		/* No path is available */
2042		r = -EIO;
2043		spin_lock_irqsave(&m->lock, flags);
2044		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
2045			r = -ENOTCONN;
2046		spin_unlock_irqrestore(&m->lock, flags);
2047	}
2048
2049	if (r == -ENOTCONN) {
2050		if (!READ_ONCE(m->current_pg)) {
2051			/* Path status changed, redo selection */
2052			(void) choose_pgpath(m, 0);
2053		}
2054		spin_lock_irqsave(&m->lock, flags);
2055		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
2056			(void) __pg_init_all_paths(m);
2057		spin_unlock_irqrestore(&m->lock, flags);
2058		dm_table_run_md_queue_async(m->ti->table);
2059		process_queued_io_list(m);
2060	}
2061
2062	/*
2063	 * Only pass ioctls through if the device sizes match exactly.
2064	 */
2065	if (!r && ti->len != bdev_nr_sectors((*bdev)))
2066		return 1;
2067	return r;
2068}
2069
2070static int multipath_iterate_devices(struct dm_target *ti,
2071				     iterate_devices_callout_fn fn, void *data)
2072{
2073	struct multipath *m = ti->private;
2074	struct priority_group *pg;
2075	struct pgpath *p;
2076	int ret = 0;
2077
2078	list_for_each_entry(pg, &m->priority_groups, list) {
2079		list_for_each_entry(p, &pg->pgpaths, list) {
2080			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
2081			if (ret)
2082				goto out;
2083		}
2084	}
2085
2086out:
2087	return ret;
2088}
2089
2090static int pgpath_busy(struct pgpath *pgpath)
2091{
2092	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2093
2094	return blk_lld_busy(q);
2095}
2096
2097/*
2098 * We return "busy" only when we can map I/Os but the underlying devices
2099 * are busy (so even if we mapped the I/Os now, they would wait on the
2100 * underlying queue).
2101 * In other words, if we want to kill I/Os or queue them inside the
2102 * target because no map is available, we don't return "busy"; otherwise,
2103 * dm core won't hand us the I/Os and we can't do what we want.
2104 */
2105static int multipath_busy(struct dm_target *ti)
2106{
2107	bool busy = false, has_active = false;
2108	struct multipath *m = ti->private;
2109	struct priority_group *pg, *next_pg;
2110	struct pgpath *pgpath;
2111
2112	/* pg_init in progress */
2113	if (atomic_read(&m->pg_init_in_progress))
2114		return true;
2115
2116	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
2117	if (!atomic_read(&m->nr_valid_paths)) {
2118		unsigned long flags;
 
2119		spin_lock_irqsave(&m->lock, flags);
2120		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
2121			spin_unlock_irqrestore(&m->lock, flags);
2122			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2123		}
2124		spin_unlock_irqrestore(&m->lock, flags);
2125	}
2126
2127	/* Guess which priority_group will be used at next mapping time */
2128	pg = READ_ONCE(m->current_pg);
2129	next_pg = READ_ONCE(m->next_pg);
2130	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
2131		pg = next_pg;
2132
2133	if (!pg) {
2134		/*
2135		 * We don't know which pg will be used at the next mapping time.
2136		 * We don't call choose_pgpath() here to avoid triggering
2137		 * pg_init just by busy checking.
2138		 * So we don't know whether the underlying devices we will be
2139		 * using at the next mapping time are busy or not. Just try mapping.
2140		 */
2141		return busy;
2142	}
2143
2144	/*
2145	 * If there is at least one non-busy active path, the path selector
2146	 * will be able to select it, so we consider such a pg as not busy.
2147	 */
2148	busy = true;
2149	list_for_each_entry(pgpath, &pg->pgpaths, list) {
2150		if (pgpath->is_active) {
2151			has_active = true;
2152			if (!pgpath_busy(pgpath)) {
2153				busy = false;
2154				break;
2155			}
2156		}
2157	}
2158
2159	if (!has_active) {
2160		/*
2161		 * No active path in this pg, so this pg won't be used and
2162		 * the current_pg will be changed at the next mapping time.
2163		 * We need to try mapping to determine which pg that will be.
2164		 */
2165		busy = false;
2166	}
2167
2168	return busy;
2169}
2170
2171/*-----------------------------------------------------------------
2172 * Module setup
2173 *---------------------------------------------------------------*/
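
/*
 * Both the request-based hooks (clone_and_map_rq, release_clone_rq,
 * rq_end_io) and the bio-based hooks (map, end_io) are provided; dm core
 * uses one set or the other depending on the queue_mode the table was
 * loaded with.
 */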
 
2174static struct target_type multipath_target = {
2175	.name = "multipath",
2176	.version = {1, 14, 0},
2177	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2178		    DM_TARGET_PASSES_INTEGRITY,
2179	.module = THIS_MODULE,
2180	.ctr = multipath_ctr,
2181	.dtr = multipath_dtr,
2182	.clone_and_map_rq = multipath_clone_and_map,
2183	.release_clone_rq = multipath_release_clone,
2184	.rq_end_io = multipath_end_io,
2185	.map = multipath_map_bio,
2186	.end_io = multipath_end_io_bio,
2187	.presuspend = multipath_presuspend,
2188	.postsuspend = multipath_postsuspend,
2189	.resume = multipath_resume,
2190	.status = multipath_status,
2191	.message = multipath_message,
2192	.prepare_ioctl = multipath_prepare_ioctl,
2193	.iterate_devices = multipath_iterate_devices,
2194	.busy = multipath_busy,
2195};
2196
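/*
 * Create the workqueues before registering the target so that they already
 * exist by the time a table load can reach the constructor; the error path
 * below unwinds in reverse order.
 */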
2197static int __init dm_multipath_init(void)
2198{
2199	int r;
2200
2201	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2202	if (!kmultipathd) {
2203		DMERR("failed to create workqueue kmpathd");
2204		r = -ENOMEM;
2205		goto bad_alloc_kmultipathd;
2206	}
2207
2208	/*
2209	 * A separate workqueue is used to handle the device handlers,
2210	 * to avoid overloading the existing workqueue (kmultipathd).
2211	 * Overloading it would also create a bottleneck in the path of
2212	 * storage hardware device activation.
2213	 */
2214	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2215						  WQ_MEM_RECLAIM);
2216	if (!kmpath_handlerd) {
2217		DMERR("failed to create workqueue kmpath_handlerd");
2218		r = -ENOMEM;
2219		goto bad_alloc_kmpath_handlerd;
2220	}
2221
2222	r = dm_register_target(&multipath_target);
2223	if (r < 0) {
2224		DMERR("request-based register failed %d", r);
2225		r = -EINVAL;
2226		goto bad_register_target;
2227	}
2228
2229	return 0;
2230
2231bad_register_target:
2232	destroy_workqueue(kmpath_handlerd);
2233bad_alloc_kmpath_handlerd:
2234	destroy_workqueue(kmultipathd);
2235bad_alloc_kmultipathd:
2236	return r;
2237}
2238
2239static void __exit dm_multipath_exit(void)
2240{
2241	destroy_workqueue(kmpath_handlerd);
2242	destroy_workqueue(kmultipathd);
2243
2244	dm_unregister_target(&multipath_target);
2245}
2246
2247module_init(dm_multipath_init);
2248module_exit(dm_multipath_exit);
2249
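/*
 * Writable module parameter: a global default for how long a multipath
 * device may queue I/O with no usable paths before queue_if_no_path is
 * switched off and the queued I/O is failed (0 means never time out).
 */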
2250module_param_named(queue_if_no_path_timeout_secs,
2251		   queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
2252MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
2253
2254MODULE_DESCRIPTION(DM_NAME " multipath target");
2255MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2256MODULE_LICENSE("GPL");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2003 Sistina Software Limited.
   4 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include <linux/device-mapper.h>
  10
  11#include "dm-rq.h"
  12#include "dm-bio-record.h"
  13#include "dm-path-selector.h"
  14#include "dm-uevent.h"
  15
  16#include <linux/blkdev.h>
  17#include <linux/ctype.h>
  18#include <linux/init.h>
  19#include <linux/mempool.h>
  20#include <linux/module.h>
  21#include <linux/pagemap.h>
  22#include <linux/slab.h>
  23#include <linux/time.h>
  24#include <linux/timer.h>
  25#include <linux/workqueue.h>
  26#include <linux/delay.h>
  27#include <scsi/scsi_dh.h>
  28#include <linux/atomic.h>
  29#include <linux/blk-mq.h>
  30
  31static struct workqueue_struct *dm_mpath_wq;
  32
  33#define DM_MSG_PREFIX "multipath"
  34#define DM_PG_INIT_DELAY_MSECS 2000
  35#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
  36#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
  37
  38static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
  39
  40/* Path properties */
  41struct pgpath {
  42	struct list_head list;
  43
  44	struct priority_group *pg;	/* Owning PG */
  45	unsigned int fail_count;		/* Cumulative failure count */
  46
  47	struct dm_path path;
  48	struct delayed_work activate_path;
  49
  50	bool is_active:1;		/* Path status */
  51};
  52
  53#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  54
  55/*
  56 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  57 * Each has a path selector which controls which path gets used.
  58 */
  59struct priority_group {
  60	struct list_head list;
  61
  62	struct multipath *m;		/* Owning multipath instance */
  63	struct path_selector ps;
  64
  65	unsigned int pg_num;		/* Reference number */
  66	unsigned int nr_pgpaths;		/* Number of paths in PG */
  67	struct list_head pgpaths;
  68
  69	bool bypassed:1;		/* Temporarily bypass this PG? */
  70};
  71
  72/* Multipath context */
  73struct multipath {
  74	unsigned long flags;		/* Multipath state flags */
  75
  76	spinlock_t lock;
  77	enum dm_queue_mode queue_mode;
  78
  79	struct pgpath *current_pgpath;
  80	struct priority_group *current_pg;
  81	struct priority_group *next_pg;	/* Switch to this PG if set */
  82
  83	atomic_t nr_valid_paths;	/* Total number of usable paths */
  84	unsigned int nr_priority_groups;
  85	struct list_head priority_groups;
  86
  87	const char *hw_handler_name;
  88	char *hw_handler_params;
  89	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  90	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
  91	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  92	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
  93	atomic_t pg_init_count;		/* Number of times pg_init called */
  94
  95	struct mutex work_mutex;
  96	struct work_struct trigger_event;
  97	struct dm_target *ti;
  98
  99	struct work_struct process_queued_bios;
 100	struct bio_list queued_bios;
 101
 102	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
 103};
 104
 105/*
 106 * Context information attached to each io we process.
 107 */
 108struct dm_mpath_io {
 109	struct pgpath *pgpath;
 110	size_t nr_bytes;
 111	u64 start_time_ns;
 112};
 113
 114typedef int (*action_fn) (struct pgpath *pgpath);
 115
 116static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 117static void trigger_event(struct work_struct *work);
 118static void activate_or_offline_path(struct pgpath *pgpath);
 119static void activate_path_work(struct work_struct *work);
 120static void process_queued_bios(struct work_struct *work);
 121static void queue_if_no_path_timeout_work(struct timer_list *t);
 122
 123/*
 124 *-----------------------------------------------
 125 * Multipath state flags.
 126 *-----------------------------------------------
 127 */
 128#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
 129#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
 130#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
 131#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
 132#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
 133#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
 134#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
 135
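/*
 * Test a multipath flag locklessly first and, only if it appears set,
 * confirm it under m->lock so that a bit cleared by a racing writer is
 * not acted upon.
 */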
 136static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
 137{
 138	bool r = test_bit(MPATHF_bit, &m->flags);
 139
 140	if (r) {
 141		unsigned long flags;
 142
 143		spin_lock_irqsave(&m->lock, flags);
 144		r = test_bit(MPATHF_bit, &m->flags);
 145		spin_unlock_irqrestore(&m->lock, flags);
 146	}
 147
 148	return r;
 149}
 150
 151/*
 152 *-----------------------------------------------
 153 * Allocation routines
 154 *-----------------------------------------------
 155 */
 156static struct pgpath *alloc_pgpath(void)
 157{
 158	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 159
 160	if (!pgpath)
 161		return NULL;
 162
 163	pgpath->is_active = true;
 164
 165	return pgpath;
 166}
 167
 168static void free_pgpath(struct pgpath *pgpath)
 169{
 170	kfree(pgpath);
 171}
 172
 173static struct priority_group *alloc_priority_group(void)
 174{
 175	struct priority_group *pg;
 176
 177	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 178
 179	if (pg)
 180		INIT_LIST_HEAD(&pg->pgpaths);
 181
 182	return pg;
 183}
 184
 185static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 186{
 187	struct pgpath *pgpath, *tmp;
 188
 189	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 190		list_del(&pgpath->list);
 191		dm_put_device(ti, pgpath->path.dev);
 192		free_pgpath(pgpath);
 193	}
 194}
 195
 196static void free_priority_group(struct priority_group *pg,
 197				struct dm_target *ti)
 198{
 199	struct path_selector *ps = &pg->ps;
 200
 201	if (ps->type) {
 202		ps->type->destroy(ps);
 203		dm_put_path_selector(ps->type);
 204	}
 205
 206	free_pgpaths(&pg->pgpaths, ti);
 207	kfree(pg);
 208}
 209
 210static struct multipath *alloc_multipath(struct dm_target *ti)
 211{
 212	struct multipath *m;
 213
 214	m = kzalloc(sizeof(*m), GFP_KERNEL);
 215	if (m) {
 216		INIT_LIST_HEAD(&m->priority_groups);
 217		spin_lock_init(&m->lock);
 218		atomic_set(&m->nr_valid_paths, 0);
 219		INIT_WORK(&m->trigger_event, trigger_event);
 220		mutex_init(&m->work_mutex);
 221
 222		m->queue_mode = DM_TYPE_NONE;
 223
 224		m->ti = ti;
 225		ti->private = m;
 226
 227		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
 228	}
 229
 230	return m;
 231}
 232
 233static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 234{
 235	if (m->queue_mode == DM_TYPE_NONE) {
 236		m->queue_mode = DM_TYPE_REQUEST_BASED;
 237	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 238		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 239		/*
 240		 * bio-based doesn't support any direct scsi_dh management;
 241		 * it just discovers if a scsi_dh is attached.
 242		 */
 243		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 244	}
 245
 246	dm_table_set_type(ti->table, m->queue_mode);
 247
 248	/*
 249	 * Init fields that are only used when a scsi_dh is attached
 250	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
 251	 */
 252	set_bit(MPATHF_QUEUE_IO, &m->flags);
 253	atomic_set(&m->pg_init_in_progress, 0);
 254	atomic_set(&m->pg_init_count, 0);
 255	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 256	init_waitqueue_head(&m->pg_init_wait);
 257
 258	return 0;
 259}
 260
 261static void free_multipath(struct multipath *m)
 262{
 263	struct priority_group *pg, *tmp;
 264
 265	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 266		list_del(&pg->list);
 267		free_priority_group(pg, m->ti);
 268	}
 269
 270	kfree(m->hw_handler_name);
 271	kfree(m->hw_handler_params);
 272	mutex_destroy(&m->work_mutex);
 273	kfree(m);
 274}
 275
 276static struct dm_mpath_io *get_mpio(union map_info *info)
 277{
 278	return info->ptr;
 279}
 280
 281static size_t multipath_per_bio_data_size(void)
 282{
 283	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
 284}
 285
 286static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
 287{
 288	return dm_per_bio_data(bio, multipath_per_bio_data_size());
 289}
 290
 291static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
 292{
 293	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
 294	void *bio_details = mpio + 1;
 295	return bio_details;
 296}
 297
 298static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
 299{
 300	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 301	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
 302
 303	mpio->nr_bytes = bio->bi_iter.bi_size;
 304	mpio->pgpath = NULL;
 305	mpio->start_time_ns = 0;
 306	*mpio_p = mpio;
 307
 308	dm_bio_record(bio_details, bio);
 309}
 310
 311/*
 312 *-----------------------------------------------
 313 * Path selection
 314 *-----------------------------------------------
 315 */
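
/*
 * Start pg_init (hardware handler activation) for every active path in the
 * current PG, optionally after a delay when a delayed retry was requested.
 * Returns the number of activations now in flight, 0 if nothing was started.
 */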
 316static int __pg_init_all_paths(struct multipath *m)
 317{
 318	struct pgpath *pgpath;
 319	unsigned long pg_init_delay = 0;
 320
 321	lockdep_assert_held(&m->lock);
 322
 323	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 324		return 0;
 325
 326	atomic_inc(&m->pg_init_count);
 327	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 328
 329	/* Check here to reset pg_init_required */
 330	if (!m->current_pg)
 331		return 0;
 332
 333	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 334		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 335						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 336	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 337		/* Skip failed paths */
 338		if (!pgpath->is_active)
 339			continue;
 340		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 341				       pg_init_delay))
 342			atomic_inc(&m->pg_init_in_progress);
 343	}
 344	return atomic_read(&m->pg_init_in_progress);
 345}
 346
 347static int pg_init_all_paths(struct multipath *m)
 348{
 349	int ret;
 350	unsigned long flags;
 351
 352	spin_lock_irqsave(&m->lock, flags);
 353	ret = __pg_init_all_paths(m);
 354	spin_unlock_irqrestore(&m->lock, flags);
 355
 356	return ret;
 357}
 358
 359static void __switch_pg(struct multipath *m, struct priority_group *pg)
 360{
 361	lockdep_assert_held(&m->lock);
 362
 363	m->current_pg = pg;
 364
 365	/* Must we initialise the PG first, and queue I/O till it's ready? */
 366	if (m->hw_handler_name) {
 367		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 368		set_bit(MPATHF_QUEUE_IO, &m->flags);
 369	} else {
 370		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 371		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 372	}
 373
 374	atomic_set(&m->pg_init_count, 0);
 375}
 376
 377static struct pgpath *choose_path_in_pg(struct multipath *m,
 378					struct priority_group *pg,
 379					size_t nr_bytes)
 380{
 381	unsigned long flags;
 382	struct dm_path *path;
 383	struct pgpath *pgpath;
 384
 385	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 386	if (!path)
 387		return ERR_PTR(-ENXIO);
 388
 389	pgpath = path_to_pgpath(path);
 390
 391	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 392		/* Only update current_pgpath if pg changed */
 393		spin_lock_irqsave(&m->lock, flags);
 394		m->current_pgpath = pgpath;
 395		__switch_pg(m, pg);
 396		spin_unlock_irqrestore(&m->lock, flags);
 397	}
 398
 399	return pgpath;
 400}
 401
 402static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 403{
 404	unsigned long flags;
 405	struct priority_group *pg;
 406	struct pgpath *pgpath;
 407	unsigned int bypassed = 1;
 408
 409	if (!atomic_read(&m->nr_valid_paths)) {
 410		spin_lock_irqsave(&m->lock, flags);
 411		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 412		spin_unlock_irqrestore(&m->lock, flags);
 413		goto failed;
 414	}
 415
 416	/* Were we instructed to switch PG? */
 417	if (READ_ONCE(m->next_pg)) {
 418		spin_lock_irqsave(&m->lock, flags);
 419		pg = m->next_pg;
 420		if (!pg) {
 421			spin_unlock_irqrestore(&m->lock, flags);
 422			goto check_current_pg;
 423		}
 424		m->next_pg = NULL;
 425		spin_unlock_irqrestore(&m->lock, flags);
 426		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 427		if (!IS_ERR_OR_NULL(pgpath))
 428			return pgpath;
 429	}
 430
 431	/* Don't change PG until it has no remaining paths */
 432check_current_pg:
 433	pg = READ_ONCE(m->current_pg);
 434	if (pg) {
 435		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 436		if (!IS_ERR_OR_NULL(pgpath))
 437			return pgpath;
 438	}
 439
 440	/*
 441	 * Loop through priority groups until we find a valid path.
 442	 * First time we skip PGs marked 'bypassed'.
 443	 * Second time we only try the ones we skipped, but set
 444	 * pg_init_delay_retry so we do not hammer controllers.
 445	 */
 446	do {
 447		list_for_each_entry(pg, &m->priority_groups, list) {
 448			if (pg->bypassed == !!bypassed)
 449				continue;
 450			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 451			if (!IS_ERR_OR_NULL(pgpath)) {
 452				if (!bypassed) {
 453					spin_lock_irqsave(&m->lock, flags);
 454					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 455					spin_unlock_irqrestore(&m->lock, flags);
 456				}
 457				return pgpath;
 458			}
 459		}
 460	} while (bypassed--);
 461
 462failed:
 463	spin_lock_irqsave(&m->lock, flags);
 464	m->current_pgpath = NULL;
 465	m->current_pg = NULL;
 466	spin_unlock_irqrestore(&m->lock, flags);
 467
 468	return NULL;
 469}
 470
 471/*
 472 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 473 * report the function name and line number of the function from which
 474 * it has been invoked.
 475 */
 476#define dm_report_EIO(m)						\
 477	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
 478		      dm_table_device_name((m)->ti->table),		\
 479		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
 480		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
 481		      dm_noflush_suspending((m)->ti))
 
 482
 483/*
 484 * Check whether bios must be queued in the device-mapper core rather
 485 * than here in the target.
 486 */
 487static bool __must_push_back(struct multipath *m)
 488{
 489	return dm_noflush_suspending(m->ti);
 490}
 491
 492static bool must_push_back_rq(struct multipath *m)
 493{
 494	unsigned long flags;
 495	bool ret;
 496
 497	spin_lock_irqsave(&m->lock, flags);
 498	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
 499	spin_unlock_irqrestore(&m->lock, flags);
 500
 501	return ret;
 502}
 503
 504/*
 505 * Map cloned requests (request-based multipath)
 506 */
 507static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 508				   union map_info *map_context,
 509				   struct request **__clone)
 510{
 511	struct multipath *m = ti->private;
 512	size_t nr_bytes = blk_rq_bytes(rq);
 513	struct pgpath *pgpath;
 514	struct block_device *bdev;
 515	struct dm_mpath_io *mpio = get_mpio(map_context);
 516	struct request_queue *q;
 517	struct request *clone;
 518
 519	/* Do we need to select a new pgpath? */
 520	pgpath = READ_ONCE(m->current_pgpath);
 521	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
 522		pgpath = choose_pgpath(m, nr_bytes);
 523
 524	if (!pgpath) {
 525		if (must_push_back_rq(m))
 526			return DM_MAPIO_DELAY_REQUEUE;
 527		dm_report_EIO(m);	/* Failed */
 528		return DM_MAPIO_KILL;
 529	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
 530		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
 531		pg_init_all_paths(m);
 532		return DM_MAPIO_DELAY_REQUEUE;
 533	}
 534
 535	mpio->pgpath = pgpath;
 536	mpio->nr_bytes = nr_bytes;
 537
 538	bdev = pgpath->path.dev->bdev;
 539	q = bdev_get_queue(bdev);
 540	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
 541			BLK_MQ_REQ_NOWAIT);
 542	if (IS_ERR(clone)) {
 543		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 544		if (blk_queue_dying(q)) {
 545			atomic_inc(&m->pg_init_in_progress);
 546			activate_or_offline_path(pgpath);
 547			return DM_MAPIO_DELAY_REQUEUE;
 548		}
 549
 550		/*
 551		 * blk-mq's SCHED_RESTART can cover this requeue, so we
 552		 * needn't deal with it by DELAY_REQUEUE. More importantly,
 553		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
 554		 * get the queue busy feedback (via BLK_STS_RESOURCE),
 555		 * otherwise I/O merging can suffer.
 556		 */
 557		return DM_MAPIO_REQUEUE;
 558	}
 559	clone->bio = clone->biotail = NULL;
 560	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 561	*__clone = clone;
 562
 563	if (pgpath->pg->ps.type->start_io)
 564		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 565					      &pgpath->path,
 566					      nr_bytes);
 567	return DM_MAPIO_REMAPPED;
 568}
 569
 570static void multipath_release_clone(struct request *clone,
 571				    union map_info *map_context)
 572{
 573	if (unlikely(map_context)) {
 574		/*
 575		 * non-NULL map_context means caller is still map
 576		 * method; must undo multipath_clone_and_map()
 577		 */
 578		struct dm_mpath_io *mpio = get_mpio(map_context);
 579		struct pgpath *pgpath = mpio->pgpath;
 580
 581		if (pgpath && pgpath->pg->ps.type->end_io)
 582			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
 583						    &pgpath->path,
 584						    mpio->nr_bytes,
 585						    clone->io_start_time_ns);
 586	}
 587
 588	blk_mq_free_request(clone);
 589}
 590
 591/*
 592 * Map cloned bios (bio-based multipath)
 593 */
 594
 595static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
 596{
 597	/* Queue for the daemon to resubmit */
 598	bio_list_add(&m->queued_bios, bio);
 599	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
 600		queue_work(kmultipathd, &m->process_queued_bios);
 601}
 602
 603static void multipath_queue_bio(struct multipath *m, struct bio *bio)
 604{
 605	unsigned long flags;
 606
 607	spin_lock_irqsave(&m->lock, flags);
 608	__multipath_queue_bio(m, bio);
 609	spin_unlock_irqrestore(&m->lock, flags);
 610}
 611
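/*
 * Choose the path for a bio.  Returns the pgpath to use, NULL when there
 * is no usable path (the caller then fails or pushes back the bio), or
 * ERR_PTR(-EAGAIN) when the bio has been queued internally for later
 * resubmission.
 */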
 612static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 613{
 614	struct pgpath *pgpath;
 615	unsigned long flags;
 616
 617	/* Do we need to select a new pgpath? */
 618	pgpath = READ_ONCE(m->current_pgpath);
 619	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
 620		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 621
 622	if (!pgpath) {
 623		spin_lock_irqsave(&m->lock, flags);
 624		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 625			__multipath_queue_bio(m, bio);
 626			pgpath = ERR_PTR(-EAGAIN);
 627		}
 628		spin_unlock_irqrestore(&m->lock, flags);
 629
 630	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
 631		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
 632		multipath_queue_bio(m, bio);
 633		pg_init_all_paths(m);
 634		return ERR_PTR(-EAGAIN);
 635	}
 636
 637	return pgpath;
 638}
 639
 640static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 641			       struct dm_mpath_io *mpio)
 642{
 643	struct pgpath *pgpath = __map_bio(m, bio);
 644
 645	if (IS_ERR(pgpath))
 646		return DM_MAPIO_SUBMITTED;
 647
 648	if (!pgpath) {
 649		if (__must_push_back(m))
 650			return DM_MAPIO_REQUEUE;
 651		dm_report_EIO(m);
 652		return DM_MAPIO_KILL;
 653	}
 654
 655	mpio->pgpath = pgpath;
 656
 657	if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
 658		mpio->start_time_ns = ktime_get_ns();
 659
 660	bio->bi_status = 0;
 661	bio_set_dev(bio, pgpath->path.dev->bdev);
 662	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 663
 664	if (pgpath->pg->ps.type->start_io)
 665		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 666					      &pgpath->path,
 667					      mpio->nr_bytes);
 668	return DM_MAPIO_REMAPPED;
 669}
 670
 671static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 672{
 673	struct multipath *m = ti->private;
 674	struct dm_mpath_io *mpio = NULL;
 675
 676	multipath_init_per_bio_data(bio, &mpio);
 677	return __multipath_map_bio(m, bio, mpio);
 678}
 679
 680static void process_queued_io_list(struct multipath *m)
 681{
 682	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
 683		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
 684	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 685		queue_work(kmultipathd, &m->process_queued_bios);
 686}
 687
 688static void process_queued_bios(struct work_struct *work)
 689{
 690	int r;
 691	unsigned long flags;
 692	struct bio *bio;
 693	struct bio_list bios;
 694	struct blk_plug plug;
 695	struct multipath *m =
 696		container_of(work, struct multipath, process_queued_bios);
 697
 698	bio_list_init(&bios);
 699
 700	spin_lock_irqsave(&m->lock, flags);
 701
 702	if (bio_list_empty(&m->queued_bios)) {
 703		spin_unlock_irqrestore(&m->lock, flags);
 704		return;
 705	}
 706
 707	bio_list_merge(&bios, &m->queued_bios);
 708	bio_list_init(&m->queued_bios);
 709
 710	spin_unlock_irqrestore(&m->lock, flags);
 711
 712	blk_start_plug(&plug);
 713	while ((bio = bio_list_pop(&bios))) {
 714		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 715
 716		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
 717		r = __multipath_map_bio(m, bio, mpio);
 718		switch (r) {
 719		case DM_MAPIO_KILL:
 720			bio->bi_status = BLK_STS_IOERR;
 721			bio_endio(bio);
 722			break;
 723		case DM_MAPIO_REQUEUE:
 724			bio->bi_status = BLK_STS_DM_REQUEUE;
 725			bio_endio(bio);
 726			break;
 727		case DM_MAPIO_REMAPPED:
 728			submit_bio_noacct(bio);
 729			break;
 730		case DM_MAPIO_SUBMITTED:
 731			break;
 732		default:
 733			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
 734		}
 735	}
 736	blk_finish_plug(&plug);
 737}
 738
 739/*
 740 * If we run out of usable paths, should we queue I/O or error it?
 741 */
 742static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
 743			    bool save_old_value, const char *caller)
 744{
 745	unsigned long flags;
 746	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
 747	const char *dm_dev_name = dm_table_device_name(m->ti->table);
 748
 749	DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
 750		dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);
 751
 752	spin_lock_irqsave(&m->lock, flags);
 753
 754	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 755	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 756
 757	if (save_old_value) {
 758		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
 759			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
 760			      dm_dev_name);
 761		} else
 762			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
 763	} else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
 764		/* due to "fail_if_no_path" message, need to honor it. */
 765		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 766	}
 767	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);
 768
 769	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
 770		dm_dev_name, __func__,
 771		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
 772		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
 773		dm_noflush_suspending(m->ti));
 774
 775	spin_unlock_irqrestore(&m->lock, flags);
 776
 777	if (!f_queue_if_no_path) {
 778		dm_table_run_md_queue_async(m->ti->table);
 779		process_queued_io_list(m);
 780	}
 781
 782	return 0;
 783}
 784
 785/*
 786 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 787 * process any queued I/O.
 788 */
 789static void queue_if_no_path_timeout_work(struct timer_list *t)
 790{
 791	struct multipath *m = from_timer(m, t, nopath_timer);
 792
 793	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
 794	       dm_table_device_name(m->ti->table));
 795	queue_if_no_path(m, false, false, __func__);
 796}
 797
 798/*
 799 * Enable the queue_if_no_path timeout if necessary.
 800 * Called with m->lock held.
 801 */
 802static void enable_nopath_timeout(struct multipath *m)
 803{
 804	unsigned long queue_if_no_path_timeout =
 805		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
 806
 807	lockdep_assert_held(&m->lock);
 808
 809	if (queue_if_no_path_timeout > 0 &&
 810	    atomic_read(&m->nr_valid_paths) == 0 &&
 811	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 812		mod_timer(&m->nopath_timer,
 813			  jiffies + queue_if_no_path_timeout);
 814	}
 815}
 816
 817static void disable_nopath_timeout(struct multipath *m)
 818{
 819	del_timer_sync(&m->nopath_timer);
 820}
 821
 822/*
 823 * An event is triggered whenever a path is taken out of use.
 824 * Includes path failure and PG bypass.
 825 */
 826static void trigger_event(struct work_struct *work)
 827{
 828	struct multipath *m =
 829		container_of(work, struct multipath, trigger_event);
 830
 831	dm_table_event(m->ti->table);
 832}
 833
 834/*
 835 *---------------------------------------------------------------
 836 * Constructor/argument parsing:
 837 * <#multipath feature args> [<arg>]*
 838 * <#hw_handler args> [hw_handler [<arg>]*]
 839 * <#priority groups>
 840 * <initial priority group>
 841 *     [<selector> <#selector args> [<arg>]*
 842 *      <#paths> <#per-path selector args>
 843 *         [<path> [<arg>]* ]+ ]+
 844 *---------------------------------------------------------------
 845 */
 846static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 847			       struct dm_target *ti)
 848{
 849	int r;
 850	struct path_selector_type *pst;
 851	unsigned int ps_argc;
 852
 853	static const struct dm_arg _args[] = {
 854		{0, 1024, "invalid number of path selector args"},
 855	};
 856
 857	pst = dm_get_path_selector(dm_shift_arg(as));
 858	if (!pst) {
 859		ti->error = "unknown path selector type";
 860		return -EINVAL;
 861	}
 862
 863	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 864	if (r) {
 865		dm_put_path_selector(pst);
 866		return -EINVAL;
 867	}
 868
 869	r = pst->create(&pg->ps, ps_argc, as->argv);
 870	if (r) {
 871		dm_put_path_selector(pst);
 872		ti->error = "path selector constructor failed";
 873		return r;
 874	}
 875
 876	pg->ps.type = pst;
 877	dm_consume_args(as, ps_argc);
 878
 879	return 0;
 880}
 881
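/*
 * Attach (or retain) the SCSI device handler on a path's request queue and
 * apply any handler parameters.  With retain_attached_hw_handler a handler
 * that is already attached to the device takes precedence over the one
 * named in the table.
 */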
 882static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 883			 const char **attached_handler_name, char **error)
 884{
 885	struct request_queue *q = bdev_get_queue(bdev);
 886	int r;
 887
 888	if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
 889retain:
 890		if (*attached_handler_name) {
 891			/*
 892			 * Clear any hw_handler_params associated with a
 893			 * handler that isn't already attached.
 894			 */
 895			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
 896				kfree(m->hw_handler_params);
 897				m->hw_handler_params = NULL;
 898			}
 899
 900			/*
 901			 * Reset hw_handler_name to match the attached handler
 902			 *
 903			 * NB. This modifies the table line to show the actual
 904			 * handler instead of the original table passed in.
 905			 */
 906			kfree(m->hw_handler_name);
 907			m->hw_handler_name = *attached_handler_name;
 908			*attached_handler_name = NULL;
 909		}
 910	}
 911
 912	if (m->hw_handler_name) {
 913		r = scsi_dh_attach(q, m->hw_handler_name);
 914		if (r == -EBUSY) {
 915			DMINFO("retaining handler on device %pg", bdev);
 916			goto retain;
 917		}
 918		if (r < 0) {
 919			*error = "error attaching hardware handler";
 920			return r;
 921		}
 922
 923		if (m->hw_handler_params) {
 924			r = scsi_dh_set_params(q, m->hw_handler_params);
 925			if (r < 0) {
 926				*error = "unable to set hardware handler parameters";
 927				return r;
 928			}
 929		}
 930	}
 931
 932	return 0;
 933}
 934
 935static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 936				 struct dm_target *ti)
 937{
 938	int r;
 939	struct pgpath *p;
 940	struct multipath *m = ti->private;
 941	struct request_queue *q;
 942	const char *attached_handler_name = NULL;
 943
 944	/* we need at least a path arg */
 945	if (as->argc < 1) {
 946		ti->error = "no device given";
 947		return ERR_PTR(-EINVAL);
 948	}
 949
 950	p = alloc_pgpath();
 951	if (!p)
 952		return ERR_PTR(-ENOMEM);
 953
 954	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 955			  &p->path.dev);
 956	if (r) {
 957		ti->error = "error getting device";
 958		goto bad;
 959	}
 960
 961	q = bdev_get_queue(p->path.dev->bdev);
 962	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 963	if (attached_handler_name || m->hw_handler_name) {
 964		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 965		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
 966		kfree(attached_handler_name);
 967		if (r) {
 968			dm_put_device(ti, p->path.dev);
 969			goto bad;
 970		}
 971	}
 972
 973	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 974	if (r) {
 975		dm_put_device(ti, p->path.dev);
 976		goto bad;
 977	}
 978
 979	return p;
 980 bad:
 981	free_pgpath(p);
 982	return ERR_PTR(r);
 983}
 984
 985static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 986						   struct multipath *m)
 987{
 988	static const struct dm_arg _args[] = {
 989		{1, 1024, "invalid number of paths"},
 990		{0, 1024, "invalid number of selector args"}
 991	};
 992
 993	int r;
 994	unsigned int i, nr_selector_args, nr_args;
 995	struct priority_group *pg;
 996	struct dm_target *ti = m->ti;
 997
 998	if (as->argc < 2) {
 999		as->argc = 0;
1000		ti->error = "not enough priority group arguments";
1001		return ERR_PTR(-EINVAL);
1002	}
1003
1004	pg = alloc_priority_group();
1005	if (!pg) {
1006		ti->error = "couldn't allocate priority group";
1007		return ERR_PTR(-ENOMEM);
1008	}
1009	pg->m = m;
1010
1011	r = parse_path_selector(as, pg, ti);
1012	if (r)
1013		goto bad;
1014
1015	/*
1016	 * read the paths
1017	 */
1018	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
1019	if (r)
1020		goto bad;
1021
1022	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
1023	if (r)
1024		goto bad;
1025
1026	nr_args = 1 + nr_selector_args;
1027	for (i = 0; i < pg->nr_pgpaths; i++) {
1028		struct pgpath *pgpath;
1029		struct dm_arg_set path_args;
1030
1031		if (as->argc < nr_args) {
1032			ti->error = "not enough path parameters";
1033			r = -EINVAL;
1034			goto bad;
1035		}
1036
1037		path_args.argc = nr_args;
1038		path_args.argv = as->argv;
1039
1040		pgpath = parse_path(&path_args, &pg->ps, ti);
1041		if (IS_ERR(pgpath)) {
1042			r = PTR_ERR(pgpath);
1043			goto bad;
1044		}
1045
1046		pgpath->pg = pg;
1047		list_add_tail(&pgpath->list, &pg->pgpaths);
1048		dm_consume_args(as, nr_args);
1049	}
1050
1051	return pg;
1052
1053 bad:
1054	free_priority_group(pg, ti);
1055	return ERR_PTR(r);
1056}
1057
1058static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
1059{
1060	unsigned int hw_argc;
1061	int ret;
1062	struct dm_target *ti = m->ti;
1063
1064	static const struct dm_arg _args[] = {
1065		{0, 1024, "invalid number of hardware handler args"},
1066	};
1067
1068	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
1069		return -EINVAL;
1070
1071	if (!hw_argc)
1072		return 0;
1073
1074	if (m->queue_mode == DM_TYPE_BIO_BASED) {
1075		dm_consume_args(as, hw_argc);
1076		DMERR("bio-based multipath doesn't allow hardware handler args");
1077		return 0;
1078	}
1079
1080	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1081	if (!m->hw_handler_name)
1082		return -EINVAL;
1083
1084	if (hw_argc > 1) {
1085		char *p;
1086		int i, j, len = 4;
1087
1088		for (i = 0; i <= hw_argc - 2; i++)
1089			len += strlen(as->argv[i]) + 1;
1090		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1091		if (!p) {
1092			ti->error = "memory allocation failed";
1093			ret = -ENOMEM;
1094			goto fail;
1095		}
1096		j = sprintf(p, "%d", hw_argc - 1);
1097		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
1098			j = sprintf(p, "%s", as->argv[i]);
1099	}
1100	dm_consume_args(as, hw_argc - 1);
1101
1102	return 0;
1103fail:
1104	kfree(m->hw_handler_name);
1105	m->hw_handler_name = NULL;
1106	return ret;
1107}
1108
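/*
 * Parse the optional feature arguments:
 *   queue_if_no_path, retain_attached_hw_handler,
 *   pg_init_retries <n>, pg_init_delay_msecs <n>,
 *   queue_mode <bio|rq|mq>   ("rq" and "mq" both select request-based)
 */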
1109static int parse_features(struct dm_arg_set *as, struct multipath *m)
1110{
1111	int r;
1112	unsigned int argc;
1113	struct dm_target *ti = m->ti;
1114	const char *arg_name;
1115
1116	static const struct dm_arg _args[] = {
1117		{0, 8, "invalid number of feature args"},
1118		{1, 50, "pg_init_retries must be between 1 and 50"},
1119		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1120	};
1121
1122	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1123	if (r)
1124		return -EINVAL;
1125
1126	if (!argc)
1127		return 0;
1128
1129	do {
1130		arg_name = dm_shift_arg(as);
1131		argc--;
1132
1133		if (!strcasecmp(arg_name, "queue_if_no_path")) {
1134			r = queue_if_no_path(m, true, false, __func__);
1135			continue;
1136		}
1137
1138		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1139			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1140			continue;
1141		}
1142
1143		if (!strcasecmp(arg_name, "pg_init_retries") &&
1144		    (argc >= 1)) {
1145			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1146			argc--;
1147			continue;
1148		}
1149
1150		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1151		    (argc >= 1)) {
1152			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1153			argc--;
1154			continue;
1155		}
1156
1157		if (!strcasecmp(arg_name, "queue_mode") &&
1158		    (argc >= 1)) {
1159			const char *queue_mode_name = dm_shift_arg(as);
1160
1161			if (!strcasecmp(queue_mode_name, "bio"))
1162				m->queue_mode = DM_TYPE_BIO_BASED;
1163			else if (!strcasecmp(queue_mode_name, "rq") ||
1164				 !strcasecmp(queue_mode_name, "mq"))
1165				m->queue_mode = DM_TYPE_REQUEST_BASED;
1166			else {
1167				ti->error = "Unknown 'queue_mode' requested";
1168				r = -EINVAL;
1169			}
1170			argc--;
1171			continue;
1172		}
1173
1174		ti->error = "Unrecognised multipath feature request";
1175		r = -EINVAL;
1176	} while (argc && !r);
1177
1178	return r;
1179}
1180
1181static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1182{
1183	/* target arguments */
1184	static const struct dm_arg _args[] = {
1185		{0, 1024, "invalid number of priority groups"},
1186		{0, 1024, "invalid initial priority group number"},
1187	};
1188
1189	int r;
1190	struct multipath *m;
1191	struct dm_arg_set as;
1192	unsigned int pg_count = 0;
1193	unsigned int next_pg_num;
1194	unsigned long flags;
1195
1196	as.argc = argc;
1197	as.argv = argv;
1198
1199	m = alloc_multipath(ti);
1200	if (!m) {
1201		ti->error = "can't allocate multipath";
1202		return -EINVAL;
1203	}
1204
1205	r = parse_features(&as, m);
1206	if (r)
1207		goto bad;
1208
1209	r = alloc_multipath_stage2(ti, m);
1210	if (r)
1211		goto bad;
1212
1213	r = parse_hw_handler(&as, m);
1214	if (r)
1215		goto bad;
1216
1217	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1218	if (r)
1219		goto bad;
1220
1221	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1222	if (r)
1223		goto bad;
1224
1225	if ((!m->nr_priority_groups && next_pg_num) ||
1226	    (m->nr_priority_groups && !next_pg_num)) {
1227		ti->error = "invalid initial priority group";
1228		r = -EINVAL;
1229		goto bad;
1230	}
1231
1232	/* parse the priority groups */
1233	while (as.argc) {
1234		struct priority_group *pg;
1235		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
1236
1237		pg = parse_priority_group(&as, m);
1238		if (IS_ERR(pg)) {
1239			r = PTR_ERR(pg);
1240			goto bad;
1241		}
1242
1243		nr_valid_paths += pg->nr_pgpaths;
1244		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1245
1246		list_add_tail(&pg->list, &m->priority_groups);
1247		pg_count++;
1248		pg->pg_num = pg_count;
1249		if (!--next_pg_num)
1250			m->next_pg = pg;
1251	}
1252
1253	if (pg_count != m->nr_priority_groups) {
1254		ti->error = "priority group count mismatch";
1255		r = -EINVAL;
1256		goto bad;
1257	}
1258
1259	spin_lock_irqsave(&m->lock, flags);
1260	enable_nopath_timeout(m);
1261	spin_unlock_irqrestore(&m->lock, flags);
1262
1263	ti->num_flush_bios = 1;
1264	ti->num_discard_bios = 1;
1265	ti->num_write_zeroes_bios = 1;
1266	if (m->queue_mode == DM_TYPE_BIO_BASED)
1267		ti->per_io_data_size = multipath_per_bio_data_size();
1268	else
1269		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1270
1271	return 0;
1272
1273 bad:
1274	free_multipath(m);
1275	return r;
1276}
1277
1278static void multipath_wait_for_pg_init_completion(struct multipath *m)
1279{
1280	DEFINE_WAIT(wait);
1281
1282	while (1) {
1283		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1284
1285		if (!atomic_read(&m->pg_init_in_progress))
1286			break;
1287
1288		io_schedule();
1289	}
1290	finish_wait(&m->pg_init_wait, &wait);
1291}
1292
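/*
 * Quiesce asynchronous work: temporarily disable new pg_init, wait for
 * in-flight path activations to complete, then flush the queued-bio and
 * trigger_event work items.
 */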
1293static void flush_multipath_work(struct multipath *m)
1294{
1295	if (m->hw_handler_name) {
1296		unsigned long flags;
1297
1298		if (!atomic_read(&m->pg_init_in_progress))
1299			goto skip;
1300
1301		spin_lock_irqsave(&m->lock, flags);
1302		if (atomic_read(&m->pg_init_in_progress) &&
1303		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
1304			spin_unlock_irqrestore(&m->lock, flags);
1305
1306			flush_workqueue(kmpath_handlerd);
1307			multipath_wait_for_pg_init_completion(m);
1308
1309			spin_lock_irqsave(&m->lock, flags);
1310			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1311		}
1312		spin_unlock_irqrestore(&m->lock, flags);
1313	}
1314skip:
1315	if (m->queue_mode == DM_TYPE_BIO_BASED)
1316		flush_work(&m->process_queued_bios);
1317	flush_work(&m->trigger_event);
1318}
1319
1320static void multipath_dtr(struct dm_target *ti)
1321{
1322	struct multipath *m = ti->private;
1323
1324	disable_nopath_timeout(m);
1325	flush_multipath_work(m);
1326	free_multipath(m);
1327}
1328
1329/*
1330 * Take a path out of use.
1331 */
1332static int fail_path(struct pgpath *pgpath)
1333{
1334	unsigned long flags;
1335	struct multipath *m = pgpath->pg->m;
1336
1337	spin_lock_irqsave(&m->lock, flags);
1338
1339	if (!pgpath->is_active)
1340		goto out;
1341
1342	DMWARN("%s: Failing path %s.",
1343	       dm_table_device_name(m->ti->table),
1344	       pgpath->path.dev->name);
1345
1346	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1347	pgpath->is_active = false;
1348	pgpath->fail_count++;
1349
1350	atomic_dec(&m->nr_valid_paths);
1351
1352	if (pgpath == m->current_pgpath)
1353		m->current_pgpath = NULL;
1354
1355	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1356		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1357
1358	queue_work(dm_mpath_wq, &m->trigger_event);
1359
1360	enable_nopath_timeout(m);
1361
1362out:
1363	spin_unlock_irqrestore(&m->lock, flags);
1364
1365	return 0;
1366}
1367
1368/*
1369 * Reinstate a previously-failed path
1370 */
1371static int reinstate_path(struct pgpath *pgpath)
1372{
1373	int r = 0, run_queue = 0;
1374	unsigned long flags;
1375	struct multipath *m = pgpath->pg->m;
1376	unsigned int nr_valid_paths;
1377
1378	spin_lock_irqsave(&m->lock, flags);
1379
1380	if (pgpath->is_active)
1381		goto out;
1382
1383	DMWARN("%s: Reinstating path %s.",
1384	       dm_table_device_name(m->ti->table),
1385	       pgpath->path.dev->name);
1386
1387	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1388	if (r)
1389		goto out;
1390
1391	pgpath->is_active = true;
1392
1393	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1394	if (nr_valid_paths == 1) {
1395		m->current_pgpath = NULL;
1396		run_queue = 1;
1397	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1398		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1399			atomic_inc(&m->pg_init_in_progress);
1400	}
1401
1402	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1403		       pgpath->path.dev->name, nr_valid_paths);
1404
1405	schedule_work(&m->trigger_event);
1406
1407out:
1408	spin_unlock_irqrestore(&m->lock, flags);
1409	if (run_queue) {
1410		dm_table_run_md_queue_async(m->ti->table);
1411		process_queued_io_list(m);
1412	}
1413
1414	if (pgpath->is_active)
1415		disable_nopath_timeout(m);
1416
1417	return r;
1418}
1419
1420/*
1421 * Fail or reinstate all paths that match the provided struct dm_dev.
1422 */
1423static int action_dev(struct multipath *m, struct dm_dev *dev,
1424		      action_fn action)
1425{
1426	int r = -EINVAL;
1427	struct pgpath *pgpath;
1428	struct priority_group *pg;
1429
1430	list_for_each_entry(pg, &m->priority_groups, list) {
1431		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1432			if (pgpath->path.dev == dev)
1433				r = action(pgpath);
1434		}
1435	}
1436
1437	return r;
1438}
1439
1440/*
1441 * Temporarily try to avoid having to use the specified PG
1442 */
1443static void bypass_pg(struct multipath *m, struct priority_group *pg,
1444		      bool bypassed)
1445{
1446	unsigned long flags;
1447
1448	spin_lock_irqsave(&m->lock, flags);
1449
1450	pg->bypassed = bypassed;
1451	m->current_pgpath = NULL;
1452	m->current_pg = NULL;
1453
1454	spin_unlock_irqrestore(&m->lock, flags);
1455
1456	schedule_work(&m->trigger_event);
1457}
1458
1459/*
1460 * Switch to using the specified PG from the next I/O that gets mapped
1461 */
1462static int switch_pg_num(struct multipath *m, const char *pgstr)
1463{
1464	struct priority_group *pg;
1465	unsigned int pgnum;
1466	unsigned long flags;
1467	char dummy;
1468
1469	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1470	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1471		DMWARN("invalid PG number supplied to %s", __func__);
1472		return -EINVAL;
1473	}
1474
1475	spin_lock_irqsave(&m->lock, flags);
1476	list_for_each_entry(pg, &m->priority_groups, list) {
1477		pg->bypassed = false;
1478		if (--pgnum)
1479			continue;
1480
1481		m->current_pgpath = NULL;
1482		m->current_pg = NULL;
1483		m->next_pg = pg;
1484	}
1485	spin_unlock_irqrestore(&m->lock, flags);
1486
1487	schedule_work(&m->trigger_event);
1488	return 0;
1489}
1490
1491/*
1492 * Set/clear bypassed status of a PG.
1493 * PGs are numbered upwards from 1 in the order they were declared.
1494 */
1495static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1496{
1497	struct priority_group *pg;
1498	unsigned int pgnum;
1499	char dummy;
1500
1501	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1502	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1503		DMWARN("invalid PG number supplied to bypass_pg");
1504		return -EINVAL;
1505	}
1506
1507	list_for_each_entry(pg, &m->priority_groups, list) {
1508		if (!--pgnum)
1509			break;
1510	}
1511
1512	bypass_pg(m, pg, bypassed);
1513	return 0;
1514}
1515
1516/*
1517 * Should we retry pg_init immediately?
1518 */
1519static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1520{
1521	unsigned long flags;
1522	bool limit_reached = false;
1523
1524	spin_lock_irqsave(&m->lock, flags);
1525
1526	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1527	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1528		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1529	else
1530		limit_reached = true;
1531
1532	spin_unlock_irqrestore(&m->lock, flags);
1533
1534	return limit_reached;
1535}
1536
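/*
 * Completion callback for scsi_dh_activate().  Translate the scsi_dh result
 * into path/PG state changes, schedule a (possibly delayed) retry if needed,
 * and when the last in-flight activation completes release queued I/O and
 * wake up anyone waiting for pg_init to finish.
 */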
1537static void pg_init_done(void *data, int errors)
1538{
1539	struct pgpath *pgpath = data;
1540	struct priority_group *pg = pgpath->pg;
1541	struct multipath *m = pg->m;
1542	unsigned long flags;
1543	bool delay_retry = false;
1544
1545	/* device or driver problems */
1546	switch (errors) {
1547	case SCSI_DH_OK:
1548		break;
1549	case SCSI_DH_NOSYS:
1550		if (!m->hw_handler_name) {
1551			errors = 0;
1552			break;
1553		}
1554		DMERR("Could not failover the device: Handler scsi_dh_%s "
1555		      "Error %d.", m->hw_handler_name, errors);
1556		/*
1557		 * Fail path for now, so we do not ping pong
1558		 */
1559		fail_path(pgpath);
1560		break;
1561	case SCSI_DH_DEV_TEMP_BUSY:
1562		/*
1563		 * Probably doing something like FW upgrade on the
1564		 * controller so try the other pg.
1565		 */
1566		bypass_pg(m, pg, true);
1567		break;
1568	case SCSI_DH_RETRY:
1569		/* Wait before retrying. */
1570		delay_retry = true;
1571		fallthrough;
1572	case SCSI_DH_IMM_RETRY:
1573	case SCSI_DH_RES_TEMP_UNAVAIL:
1574		if (pg_init_limit_reached(m, pgpath))
1575			fail_path(pgpath);
1576		errors = 0;
1577		break;
1578	case SCSI_DH_DEV_OFFLINED:
1579	default:
1580		/*
1581		 * We probably do not want to fail the path for a device
1582		 * error, but this is what the old dm did. In future
1583		 * patches we can do more advanced handling.
1584		 */
1585		fail_path(pgpath);
1586	}
1587
1588	spin_lock_irqsave(&m->lock, flags);
1589	if (errors) {
1590		if (pgpath == m->current_pgpath) {
1591			DMERR("Could not failover device. Error %d.", errors);
1592			m->current_pgpath = NULL;
1593			m->current_pg = NULL;
1594		}
1595	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1596		pg->bypassed = false;
1597
1598	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1599		/* Activations of other paths are still ongoing */
1600		goto out;
1601
1602	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1603		if (delay_retry)
1604			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1605		else
1606			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1607
1608		if (__pg_init_all_paths(m))
1609			goto out;
1610	}
1611	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1612
1613	process_queued_io_list(m);
1614
1615	/*
1616	 * Wake up any thread waiting to suspend.
1617	 */
1618	wake_up(&m->pg_init_wait);
1619
1620out:
1621	spin_unlock_irqrestore(&m->lock, flags);
1622}
1623
1624static void activate_or_offline_path(struct pgpath *pgpath)
1625{
1626	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1627
1628	if (pgpath->is_active && !blk_queue_dying(q))
1629		scsi_dh_activate(q, pg_init_done, pgpath);
1630	else
1631		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1632}
1633
1634static void activate_path_work(struct work_struct *work)
1635{
1636	struct pgpath *pgpath =
1637		container_of(work, struct pgpath, activate_path.work);
1638
1639	activate_or_offline_path(pgpath);
1640}
1641
1642static int multipath_end_io(struct dm_target *ti, struct request *clone,
1643			    blk_status_t error, union map_info *map_context)
1644{
1645	struct dm_mpath_io *mpio = get_mpio(map_context);
1646	struct pgpath *pgpath = mpio->pgpath;
1647	int r = DM_ENDIO_DONE;
1648
1649	/*
1650	 * We don't queue any clone request inside the multipath target
1651	 * during end I/O handling, since those clone requests don't have
1652	 * bio clones.  If we queue them inside the multipath target,
1653	 * we would need to make bio clones, which requires memory allocation.
1654	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1655	 *  don't have bio clones.)
1656	 * Instead of queueing the clone request here, we queue the original
1657	 * request into dm core, which will remake a clone request and
1658	 * clone bios for it and resubmit it later.
1659	 */
1660	if (error && blk_path_error(error)) {
1661		struct multipath *m = ti->private;
1662
1663		if (error == BLK_STS_RESOURCE)
1664			r = DM_ENDIO_DELAY_REQUEUE;
1665		else
1666			r = DM_ENDIO_REQUEUE;
1667
1668		if (pgpath)
1669			fail_path(pgpath);
1670
1671		if (!atomic_read(&m->nr_valid_paths) &&
1672		    !must_push_back_rq(m)) {
1673			if (error == BLK_STS_IOERR)
1674				dm_report_EIO(m);
1675			/* complete with the original error */
1676			r = DM_ENDIO_DONE;
1677		}
1678	}
1679
1680	if (pgpath) {
1681		struct path_selector *ps = &pgpath->pg->ps;
1682
1683		if (ps->type->end_io)
1684			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1685					 clone->io_start_time_ns);
1686	}
1687
1688	return r;
1689}
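As a rough guide only (a summary of how the dm core is generally expected to act on these codes, not text taken from this file), the end_io return values used by this target can be read as follows:

/*
 * Return value summary for the end_io hooks in this target:
 *   DM_ENDIO_DONE          - dm core completes the original I/O with the
 *                            reported status
 *   DM_ENDIO_REQUEUE       - dm core requeues the original request
 *   DM_ENDIO_DELAY_REQUEUE - as above, but after a short delay
 *                            (used here for BLK_STS_RESOURCE)
 *   DM_ENDIO_INCOMPLETE    - the target keeps the I/O; the bio-based
 *                            variant below queues the bio itself
 */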
1690
1691static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1692				blk_status_t *error)
1693{
1694	struct multipath *m = ti->private;
1695	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1696	struct pgpath *pgpath = mpio->pgpath;
1697	unsigned long flags;
1698	int r = DM_ENDIO_DONE;
1699
1700	if (!*error || !blk_path_error(*error))
1701		goto done;
1702
1703	if (pgpath)
1704		fail_path(pgpath);
1705
1706	if (!atomic_read(&m->nr_valid_paths)) {
1707		spin_lock_irqsave(&m->lock, flags);
1708		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1709			if (__must_push_back(m)) {
1710				r = DM_ENDIO_REQUEUE;
1711			} else {
1712				dm_report_EIO(m);
1713				*error = BLK_STS_IOERR;
1714			}
1715			spin_unlock_irqrestore(&m->lock, flags);
1716			goto done;
1717		}
1718		spin_unlock_irqrestore(&m->lock, flags);
1719	}
1720
1721	multipath_queue_bio(m, clone);
1722	r = DM_ENDIO_INCOMPLETE;
1723done:
1724	if (pgpath) {
1725		struct path_selector *ps = &pgpath->pg->ps;
1726
1727		if (ps->type->end_io)
1728			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1729					 (mpio->start_time_ns ?:
1730					  dm_start_time_ns_from_clone(clone)));
1731	}
1732
1733	return r;
1734}
1735
1736/*
1737 * Suspend with flush can't complete until all the I/O is processed,
1738 * so if the last path fails we must error any remaining I/O.
1739 * - Note that if freeze_bdev() fails while suspending, the
1740 *   queue_if_no_path state is lost - userspace should reset it.
1741 * Otherwise, during noflush suspend, queue_if_no_path will not change.
1742 */
1743static void multipath_presuspend(struct dm_target *ti)
1744{
1745	struct multipath *m = ti->private;
1746
1747	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
1748	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
1749		queue_if_no_path(m, false, true, __func__);
1750}
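If the saved queue_if_no_path state is lost across a flush suspend, userspace can simply re-enable it through the message interface implemented further below; an illustrative invocation (the map name "mpatha" is a placeholder):

/*
 * Example (hypothetical map name):
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 */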
1751
1752static void multipath_postsuspend(struct dm_target *ti)
1753{
1754	struct multipath *m = ti->private;
1755
1756	mutex_lock(&m->work_mutex);
1757	flush_multipath_work(m);
1758	mutex_unlock(&m->work_mutex);
1759}
1760
1761/*
1762 * Restore the queue_if_no_path setting.
1763 */
1764static void multipath_resume(struct dm_target *ti)
1765{
1766	struct multipath *m = ti->private;
1767	unsigned long flags;
1768
1769	spin_lock_irqsave(&m->lock, flags);
1770	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
1771		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1772		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
1773	}
1774
1775	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
1776		dm_table_device_name(m->ti->table), __func__,
1777		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
1778		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1779
1780	spin_unlock_irqrestore(&m->lock, flags);
1781}
1782
1783/*
1784 * Info output has the following format:
1785 * num_multipath_feature_args [multipath_feature_args]*
1786 * num_handler_status_args [handler_status_args]*
1787 * num_groups init_group_number
1788 *            [A|D|E num_ps_status_args [ps_status_args]*
1789 *             num_paths num_selector_args
1790 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1791 *
1792 * Table output has the following format (identical to the constructor string):
1793 * num_feature_args [features_args]*
1794 * num_handler_args hw_handler [hw_handler_args]*
1795 * num_groups init_group_number
1796 *     [priority selector-name num_ps_args [ps_args]*
1797 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1798 */
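Purely as an illustration of the table format above, a hypothetical two-path, single-group map using the round-robin selector and no hardware handler might be rendered along these lines (all names and numbers are made up for the example):

/*
 * Hypothetical "dmsetup table" output:
 *
 *   0 2097152 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 *
 * i.e. 1 feature arg, 0 hw handler args, 1 group starting at group 1,
 * round-robin with 0 selector args, 2 paths with 1 selector arg each.
 */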
1799static void multipath_status(struct dm_target *ti, status_type_t type,
1800			     unsigned int status_flags, char *result, unsigned int maxlen)
1801{
1802	int sz = 0, pg_counter, pgpath_counter;
1803	unsigned long flags;
1804	struct multipath *m = ti->private;
1805	struct priority_group *pg;
1806	struct pgpath *p;
1807	unsigned int pg_num;
1808	char state;
1809
1810	spin_lock_irqsave(&m->lock, flags);
1811
1812	/* Features */
1813	if (type == STATUSTYPE_INFO)
1814		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1815		       atomic_read(&m->pg_init_count));
1816	else {
1817		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1818			      (m->pg_init_retries > 0) * 2 +
1819			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1820			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1821			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1822
1823		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1824			DMEMIT("queue_if_no_path ");
1825		if (m->pg_init_retries)
1826			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1827		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1828			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1829		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1830			DMEMIT("retain_attached_hw_handler ");
1831		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1832			switch (m->queue_mode) {
1833			case DM_TYPE_BIO_BASED:
1834				DMEMIT("queue_mode bio ");
1835				break;
1836			default:
1837				WARN_ON_ONCE(true);
1838				break;
1839			}
1840		}
1841	}
1842
1843	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1844		DMEMIT("0 ");
1845	else
1846		DMEMIT("1 %s ", m->hw_handler_name);
1847
1848	DMEMIT("%u ", m->nr_priority_groups);
1849
1850	if (m->next_pg)
1851		pg_num = m->next_pg->pg_num;
1852	else if (m->current_pg)
1853		pg_num = m->current_pg->pg_num;
1854	else
1855		pg_num = (m->nr_priority_groups ? 1 : 0);
1856
1857	DMEMIT("%u ", pg_num);
1858
1859	switch (type) {
1860	case STATUSTYPE_INFO:
1861		list_for_each_entry(pg, &m->priority_groups, list) {
1862			if (pg->bypassed)
1863				state = 'D';	/* Disabled */
1864			else if (pg == m->current_pg)
1865				state = 'A';	/* Currently Active */
1866			else
1867				state = 'E';	/* Enabled */
1868
1869			DMEMIT("%c ", state);
1870
1871			if (pg->ps.type->status)
1872				sz += pg->ps.type->status(&pg->ps, NULL, type,
1873							  result + sz,
1874							  maxlen - sz);
1875			else
1876				DMEMIT("0 ");
1877
1878			DMEMIT("%u %u ", pg->nr_pgpaths,
1879			       pg->ps.type->info_args);
1880
1881			list_for_each_entry(p, &pg->pgpaths, list) {
1882				DMEMIT("%s %s %u ", p->path.dev->name,
1883				       p->is_active ? "A" : "F",
1884				       p->fail_count);
1885				if (pg->ps.type->status)
1886					sz += pg->ps.type->status(&pg->ps,
1887					      &p->path, type, result + sz,
1888					      maxlen - sz);
1889			}
1890		}
1891		break;
1892
1893	case STATUSTYPE_TABLE:
1894		list_for_each_entry(pg, &m->priority_groups, list) {
1895			DMEMIT("%s ", pg->ps.type->name);
1896
1897			if (pg->ps.type->status)
1898				sz += pg->ps.type->status(&pg->ps, NULL, type,
1899							  result + sz,
1900							  maxlen - sz);
1901			else
1902				DMEMIT("0 ");
1903
1904			DMEMIT("%u %u ", pg->nr_pgpaths,
1905			       pg->ps.type->table_args);
1906
1907			list_for_each_entry(p, &pg->pgpaths, list) {
1908				DMEMIT("%s ", p->path.dev->name);
1909				if (pg->ps.type->status)
1910					sz += pg->ps.type->status(&pg->ps,
1911					      &p->path, type, result + sz,
1912					      maxlen - sz);
1913			}
1914		}
1915		break;
1916
1917	case STATUSTYPE_IMA:
1918		sz = 0; /* reset the result pointer */
1919
1920		DMEMIT_TARGET_NAME_VERSION(ti->type);
1921		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);
1922
1923		pg_counter = 0;
1924		list_for_each_entry(pg, &m->priority_groups, list) {
1925			if (pg->bypassed)
1926				state = 'D';	/* Disabled */
1927			else if (pg == m->current_pg)
1928				state = 'A';	/* Currently Active */
1929			else
1930				state = 'E';	/* Enabled */
1931			DMEMIT(",pg_state_%d=%c", pg_counter, state);
1932			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
1933			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);
1934
1935			pgpath_counter = 0;
1936			list_for_each_entry(p, &pg->pgpaths, list) {
1937				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
1938				       pg_counter, pgpath_counter, p->path.dev->name,
1939				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
1940				       pg_counter, pgpath_counter, p->fail_count);
1941				if (pg->ps.type->status) {
1942					DMEMIT(",path_selector_status_%d_%d=",
1943					       pg_counter, pgpath_counter);
1944					sz += pg->ps.type->status(&pg->ps, &p->path,
1945								  type, result + sz,
1946								  maxlen - sz);
1947				}
1948				pgpath_counter++;
1949			}
1950			pg_counter++;
1951		}
1952		DMEMIT(";");
1953		break;
1954	}
1955
1956	spin_unlock_irqrestore(&m->lock, flags);
1957}
1958
1959static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
1960			     char *result, unsigned int maxlen)
1961{
1962	int r = -EINVAL;
1963	struct dm_dev *dev;
1964	struct multipath *m = ti->private;
1965	action_fn action;
1966	unsigned long flags;
1967
1968	mutex_lock(&m->work_mutex);
1969
1970	if (dm_suspended(ti)) {
1971		r = -EBUSY;
1972		goto out;
1973	}
1974
1975	if (argc == 1) {
1976		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1977			r = queue_if_no_path(m, true, false, __func__);
1978			spin_lock_irqsave(&m->lock, flags);
1979			enable_nopath_timeout(m);
1980			spin_unlock_irqrestore(&m->lock, flags);
1981			goto out;
1982		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1983			r = queue_if_no_path(m, false, false, __func__);
1984			disable_nopath_timeout(m);
1985			goto out;
1986		}
1987	}
1988
1989	if (argc != 2) {
1990		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1991		goto out;
1992	}
1993
1994	if (!strcasecmp(argv[0], "disable_group")) {
1995		r = bypass_pg_num(m, argv[1], true);
1996		goto out;
1997	} else if (!strcasecmp(argv[0], "enable_group")) {
1998		r = bypass_pg_num(m, argv[1], false);
1999		goto out;
2000	} else if (!strcasecmp(argv[0], "switch_group")) {
2001		r = switch_pg_num(m, argv[1]);
2002		goto out;
2003	} else if (!strcasecmp(argv[0], "reinstate_path"))
2004		action = reinstate_path;
2005	else if (!strcasecmp(argv[0], "fail_path"))
2006		action = fail_path;
2007	else {
2008		DMWARN("Unrecognised multipath message received: %s", argv[0]);
2009		goto out;
2010	}
2011
2012	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
2013	if (r) {
2014		DMWARN("message: error getting device %s",
2015		       argv[1]);
2016		goto out;
2017	}
2018
2019	r = action_dev(m, dev, action);
2020
2021	dm_put_device(ti, dev);
2022
2023out:
2024	mutex_unlock(&m->work_mutex);
2025	return r;
2026}
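For reference, the keywords parsed above correspond to dmsetup messages such as the following (the map name "mpatha" and path "8:32" are placeholders):

/*
 * Illustrative message usage:
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 *   dmsetup message mpatha 0 fail_if_no_path
 *   dmsetup message mpatha 0 switch_group 2
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 reinstate_path 8:32
 */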
2027
2028static int multipath_prepare_ioctl(struct dm_target *ti,
2029				   struct block_device **bdev)
2030{
2031	struct multipath *m = ti->private;
2032	struct pgpath *pgpath;
2033	unsigned long flags;
2034	int r;
2035
2036	pgpath = READ_ONCE(m->current_pgpath);
2037	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
2038		pgpath = choose_pgpath(m, 0);
2039
2040	if (pgpath) {
2041		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
2042			*bdev = pgpath->path.dev->bdev;
2043			r = 0;
2044		} else {
2045			/* pg_init has not started or completed */
2046			r = -ENOTCONN;
2047		}
2048	} else {
2049		/* No path is available */
2050		r = -EIO;
2051		spin_lock_irqsave(&m->lock, flags);
2052		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
2053			r = -ENOTCONN;
2054		spin_unlock_irqrestore(&m->lock, flags);
2055	}
2056
2057	if (r == -ENOTCONN) {
2058		if (!READ_ONCE(m->current_pg)) {
2059			/* Path status changed, redo selection */
2060			(void) choose_pgpath(m, 0);
2061		}
2062		spin_lock_irqsave(&m->lock, flags);
2063		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
2064			(void) __pg_init_all_paths(m);
2065		spin_unlock_irqrestore(&m->lock, flags);
2066		dm_table_run_md_queue_async(m->ti->table);
2067		process_queued_io_list(m);
2068	}
2069
2070	/*
2071	 * Only pass ioctls through if the device sizes match exactly.
2072	 */
2073	if (!r && ti->len != bdev_nr_sectors((*bdev)))
2074		return 1;
2075	return r;
2076}
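For context (hedged, based on the dm core behaviour in comparable kernels rather than anything in this file), a positive return here does not fail the ioctl outright:

/*
 * Note: returning 1 (device sizes differ) is expected to make the dm core
 * treat the ioctl as being issued against a subset of the parent device
 * and require CAP_SYS_RAWIO before passing it through.
 */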
2077
2078static int multipath_iterate_devices(struct dm_target *ti,
2079				     iterate_devices_callout_fn fn, void *data)
2080{
2081	struct multipath *m = ti->private;
2082	struct priority_group *pg;
2083	struct pgpath *p;
2084	int ret = 0;
2085
2086	list_for_each_entry(pg, &m->priority_groups, list) {
2087		list_for_each_entry(p, &pg->pgpaths, list) {
2088			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
2089			if (ret)
2090				goto out;
2091		}
2092	}
2093
2094out:
2095	return ret;
2096}
2097
2098static int pgpath_busy(struct pgpath *pgpath)
2099{
2100	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2101
2102	return blk_lld_busy(q);
2103}
2104
2105/*
2106 * We return "busy" only when we can map I/Os but underlying devices
2107 * are busy (so even if we map I/Os now, the I/Os will wait on
2108 * the underlying queue).
2109 * In other words, if we want to kill I/Os or queue them inside us
2110 * due to map unavailability, we don't return "busy".  Otherwise,
2111 * dm core won't give us the I/Os and we can't do what we want.
2112 */
2113static int multipath_busy(struct dm_target *ti)
2114{
2115	bool busy = false, has_active = false;
2116	struct multipath *m = ti->private;
2117	struct priority_group *pg, *next_pg;
2118	struct pgpath *pgpath;
2119
2120	/* pg_init in progress */
2121	if (atomic_read(&m->pg_init_in_progress))
2122		return true;
2123
2124	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
2125	if (!atomic_read(&m->nr_valid_paths)) {
2126		unsigned long flags;
2127
2128		spin_lock_irqsave(&m->lock, flags);
2129		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
2130			spin_unlock_irqrestore(&m->lock, flags);
2131			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2132		}
2133		spin_unlock_irqrestore(&m->lock, flags);
2134	}
2135
2136	/* Guess which priority_group will be used at next mapping time */
2137	pg = READ_ONCE(m->current_pg);
2138	next_pg = READ_ONCE(m->next_pg);
2139	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
2140		pg = next_pg;
2141
2142	if (!pg) {
2143		/*
2144		 * We don't know which pg will be used at next mapping time.
2145		 * We don't call choose_pgpath() here to avoid triggering
2146		 * pg_init just by busy checking.
2147		 * So we don't know whether underlying devices we will be using
2148		 * at next mapping time are busy or not. Just try mapping.
2149		 */
2150		return busy;
2151	}
2152
2153	/*
2154	 * If there is at least one non-busy active path, the path selector
2155	 * will be able to select it. So we consider such a pg as not busy.
2156	 */
2157	busy = true;
2158	list_for_each_entry(pgpath, &pg->pgpaths, list) {
2159		if (pgpath->is_active) {
2160			has_active = true;
2161			if (!pgpath_busy(pgpath)) {
2162				busy = false;
2163				break;
2164			}
2165		}
2166	}
2167
2168	if (!has_active) {
2169		/*
2170		 * No active path in this pg, so this pg won't be used and
2171		 * the current_pg will be changed at next mapping time.
2172		 * We need to try mapping to determine it.
2173		 */
2174		busy = false;
2175	}
2176
2177	return busy;
2178}
2179
2180/*
2181 *---------------------------------------------------------------
2182 * Module setup
2183 *---------------------------------------------------------------
2184 */
2185static struct target_type multipath_target = {
2186	.name = "multipath",
2187	.version = {1, 14, 0},
2188	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2189		    DM_TARGET_PASSES_INTEGRITY,
2190	.module = THIS_MODULE,
2191	.ctr = multipath_ctr,
2192	.dtr = multipath_dtr,
2193	.clone_and_map_rq = multipath_clone_and_map,
2194	.release_clone_rq = multipath_release_clone,
2195	.rq_end_io = multipath_end_io,
2196	.map = multipath_map_bio,
2197	.end_io = multipath_end_io_bio,
2198	.presuspend = multipath_presuspend,
2199	.postsuspend = multipath_postsuspend,
2200	.resume = multipath_resume,
2201	.status = multipath_status,
2202	.message = multipath_message,
2203	.prepare_ioctl = multipath_prepare_ioctl,
2204	.iterate_devices = multipath_iterate_devices,
2205	.busy = multipath_busy,
2206};
2207
2208static int __init dm_multipath_init(void)
2209{
2210	int r = -ENOMEM;
2211
2212	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2213	if (!kmultipathd) {
2214		DMERR("failed to create workqueue kmpathd");
2215		goto bad_alloc_kmultipathd;
2216	}
2217
2218	/*
2219	 * A separate workqueue is used to handle the device handlers
2220	 * to avoid overloading the existing workqueue. Overloading the
2221	 * old workqueue would also create a bottleneck in the
2222	 * path of the storage hardware device activation.
2223	 */
2224	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2225						  WQ_MEM_RECLAIM);
2226	if (!kmpath_handlerd) {
2227		DMERR("failed to create workqueue kmpath_handlerd");
2228		goto bad_alloc_kmpath_handlerd;
2229	}
2230
2231	dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
2232	if (!dm_mpath_wq) {
2233		DMERR("failed to create workqueue dm_mpath_wq");
2234		goto bad_alloc_dm_mpath_wq;
2235	}
2236
2237	r = dm_register_target(&multipath_target);
2238	if (r < 0)
2239		goto bad_register_target;
2240
2241	return 0;
2242
2243bad_register_target:
2244	destroy_workqueue(dm_mpath_wq);
2245bad_alloc_dm_mpath_wq:
2246	destroy_workqueue(kmpath_handlerd);
2247bad_alloc_kmpath_handlerd:
2248	destroy_workqueue(kmultipathd);
2249bad_alloc_kmultipathd:
2250	return r;
2251}
2252
2253static void __exit dm_multipath_exit(void)
2254{
2255	destroy_workqueue(dm_mpath_wq);
2256	destroy_workqueue(kmpath_handlerd);
2257	destroy_workqueue(kmultipathd);
2258
2259	dm_unregister_target(&multipath_target);
2260}
2261
2262module_init(dm_multipath_init);
2263module_exit(dm_multipath_exit);
2264
2265module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
2266MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
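Because the parameter is created with mode 0644, it can be set at module load time or adjusted later through sysfs; for example (assuming the module loads as dm_multipath):

/*
 * Illustrative ways to set the timeout (value in seconds):
 *
 *   modprobe dm_multipath queue_if_no_path_timeout_secs=120
 *   echo 120 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 */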
2267
2268MODULE_DESCRIPTION(DM_NAME " multipath target");
2269MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
2270MODULE_LICENSE("GPL");