   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2003 Sistina Software Limited.
   4 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include <linux/device-mapper.h>
  10
  11#include "dm-rq.h"
  12#include "dm-bio-record.h"
  13#include "dm-path-selector.h"
  14#include "dm-uevent.h"
  15
  16#include <linux/blkdev.h>
  17#include <linux/ctype.h>
  18#include <linux/init.h>
  19#include <linux/mempool.h>
  20#include <linux/module.h>
  21#include <linux/pagemap.h>
  22#include <linux/slab.h>
  23#include <linux/time.h>
  24#include <linux/timer.h>
  25#include <linux/workqueue.h>
  26#include <linux/delay.h>
  27#include <scsi/scsi_dh.h>
  28#include <linux/atomic.h>
  29#include <linux/blk-mq.h>
  30
  31static struct workqueue_struct *dm_mpath_wq;
  32
  33#define DM_MSG_PREFIX "multipath"
  34#define DM_PG_INIT_DELAY_MSECS 2000
  35#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
  36#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
  37
  38static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
  39
  40/* Path properties */
  41struct pgpath {
  42	struct list_head list;
  43
  44	struct priority_group *pg;	/* Owning PG */
  45	unsigned int fail_count;		/* Cumulative failure count */
  46
  47	struct dm_path path;
  48	struct delayed_work activate_path;
  49
  50	bool is_active:1;		/* Path status */
  51};
  52
  53#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
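/*
 * Illustrative use (a sketch; the caller shown is hypothetical): path
 * selectors hand back a struct dm_path *, and container_of() recovers
 * the owning pgpath, as choose_path_in_pg() does below:
 *
 *	struct dm_path *path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 *	struct pgpath *pgpath = path_to_pgpath(path);
 */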
  54
  55/*
  56 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  57 * Each has a path selector which controls which path gets used.
  58 */
  59struct priority_group {
  60	struct list_head list;
  61
  62	struct multipath *m;		/* Owning multipath instance */
  63	struct path_selector ps;
  64
  65	unsigned int pg_num;		/* Reference number */
  66	unsigned int nr_pgpaths;		/* Number of paths in PG */
  67	struct list_head pgpaths;
  68
  69	bool bypassed:1;		/* Temporarily bypass this PG? */
  70};
  71
  72/* Multipath context */
  73struct multipath {
  74	unsigned long flags;		/* Multipath state flags */
  75
  76	spinlock_t lock;
  77	enum dm_queue_mode queue_mode;
  78
  79	struct pgpath *current_pgpath;
  80	struct priority_group *current_pg;
  81	struct priority_group *next_pg;	/* Switch to this PG if set */
  82
  83	atomic_t nr_valid_paths;	/* Total number of usable paths */
  84	unsigned int nr_priority_groups;
  85	struct list_head priority_groups;
  86
  87	const char *hw_handler_name;
  88	char *hw_handler_params;
  89	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  90	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
  91	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  92	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
  93	atomic_t pg_init_count;		/* Number of times pg_init called */
  94
  95	struct mutex work_mutex;
  96	struct work_struct trigger_event;
  97	struct dm_target *ti;
  98
  99	struct work_struct process_queued_bios;
 100	struct bio_list queued_bios;
 101
 102	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
 103};
 104
 105/*
 106 * Context information attached to each io we process.
 107 */
 108struct dm_mpath_io {
 109	struct pgpath *pgpath;
 110	size_t nr_bytes;
 111	u64 start_time_ns;
 112};
 113
 114typedef int (*action_fn) (struct pgpath *pgpath);
 115
 116static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 117static void trigger_event(struct work_struct *work);
 118static void activate_or_offline_path(struct pgpath *pgpath);
 119static void activate_path_work(struct work_struct *work);
 120static void process_queued_bios(struct work_struct *work);
 121static void queue_if_no_path_timeout_work(struct timer_list *t);
 122
 123/*
 124 *-----------------------------------------------
 125 * Multipath state flags.
 126 *-----------------------------------------------
 127 */
 128#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
 129#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
 130#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
 131#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
 132#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
 133#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
 134#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
 135
 136static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
 137{
 138	bool r = test_bit(MPATHF_bit, &m->flags);
 139
 140	if (r) {
 141		unsigned long flags;
 142
 143		spin_lock_irqsave(&m->lock, flags);
 144		r = test_bit(MPATHF_bit, &m->flags);
 145		spin_unlock_irqrestore(&m->lock, flags);
 146	}
 147
 148	return r;
 149}
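/*
 * Illustrative caller (a sketch mirroring the map paths below): the
 * unlocked test_bit() keeps m->lock off the common I/O fast path; only a
 * set bit is re-checked under the lock before acting on it:
 *
 *	if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
 *		return DM_MAPIO_DELAY_REQUEUE;	// pg_init still pending
 */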
 150
 151/*
 152 *-----------------------------------------------
 153 * Allocation routines
 154 *-----------------------------------------------
 155 */
 156static struct pgpath *alloc_pgpath(void)
 157{
 158	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 159
 160	if (!pgpath)
 161		return NULL;
 162
 163	pgpath->is_active = true;
 164
 165	return pgpath;
 166}
 167
 168static void free_pgpath(struct pgpath *pgpath)
 169{
 170	kfree(pgpath);
 171}
 172
 173static struct priority_group *alloc_priority_group(void)
 174{
 175	struct priority_group *pg;
 176
 177	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 178
 179	if (pg)
 180		INIT_LIST_HEAD(&pg->pgpaths);
 181
 182	return pg;
 183}
 184
 185static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 186{
 187	struct pgpath *pgpath, *tmp;
 188
 189	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 190		list_del(&pgpath->list);
 191		dm_put_device(ti, pgpath->path.dev);
 192		free_pgpath(pgpath);
 193	}
 194}
 195
 196static void free_priority_group(struct priority_group *pg,
 197				struct dm_target *ti)
 198{
 199	struct path_selector *ps = &pg->ps;
 200
 201	if (ps->type) {
 202		ps->type->destroy(ps);
 203		dm_put_path_selector(ps->type);
 204	}
 205
 206	free_pgpaths(&pg->pgpaths, ti);
 207	kfree(pg);
 208}
 209
 210static struct multipath *alloc_multipath(struct dm_target *ti)
 211{
 212	struct multipath *m;
 213
 214	m = kzalloc(sizeof(*m), GFP_KERNEL);
 215	if (m) {
 216		INIT_LIST_HEAD(&m->priority_groups);
 217		spin_lock_init(&m->lock);
 218		atomic_set(&m->nr_valid_paths, 0);
 219		INIT_WORK(&m->trigger_event, trigger_event);
 220		mutex_init(&m->work_mutex);
 221
 222		m->queue_mode = DM_TYPE_NONE;
 223
 224		m->ti = ti;
 225		ti->private = m;
 226
 227		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
 228	}
 229
 230	return m;
 231}
 232
 233static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 234{
 235	if (m->queue_mode == DM_TYPE_NONE) {
 236		m->queue_mode = DM_TYPE_REQUEST_BASED;
 237	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 238		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 239		/*
 240		 * bio-based doesn't support any direct scsi_dh management;
 241		 * it just discovers if a scsi_dh is attached.
 242		 */
 243		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 244	}
 245
 246	dm_table_set_type(ti->table, m->queue_mode);
 247
 248	/*
 249	 * Init fields that are only used when a scsi_dh is attached
 250	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
 251	 */
 252	set_bit(MPATHF_QUEUE_IO, &m->flags);
 253	atomic_set(&m->pg_init_in_progress, 0);
 254	atomic_set(&m->pg_init_count, 0);
 255	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 256	init_waitqueue_head(&m->pg_init_wait);
 257
 258	return 0;
 259}
 260
 261static void free_multipath(struct multipath *m)
 262{
 263	struct priority_group *pg, *tmp;
 264
 265	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 266		list_del(&pg->list);
 267		free_priority_group(pg, m->ti);
 268	}
 269
 270	kfree(m->hw_handler_name);
 271	kfree(m->hw_handler_params);
 272	mutex_destroy(&m->work_mutex);
 273	kfree(m);
 274}
 275
 276static struct dm_mpath_io *get_mpio(union map_info *info)
 277{
 278	return info->ptr;
 279}
 280
 281static size_t multipath_per_bio_data_size(void)
 282{
 283	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
 284}
 285
 286static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
 287{
 288	return dm_per_bio_data(bio, multipath_per_bio_data_size());
 289}
 290
 291static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
 292{
 293	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
 294	void *bio_details = mpio + 1;
 295	return bio_details;
 296}
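/*
 * Per-bio-data layout assumed by the helpers above (sized via
 * multipath_per_bio_data_size()):
 *
 *	[ struct dm_mpath_io ][ struct dm_bio_details ]
 *	^ mpio                ^ mpio + 1
 */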
 297
 298static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
 299{
 300	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 301	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
 302
 303	mpio->nr_bytes = bio->bi_iter.bi_size;
 304	mpio->pgpath = NULL;
 305	mpio->start_time_ns = 0;
 306	*mpio_p = mpio;
 307
 308	dm_bio_record(bio_details, bio);
 309}
 310
 311/*
 312 *-----------------------------------------------
 313 * Path selection
 314 *-----------------------------------------------
 315 */
 316static int __pg_init_all_paths(struct multipath *m)
 317{
 318	struct pgpath *pgpath;
 319	unsigned long pg_init_delay = 0;
 320
 321	lockdep_assert_held(&m->lock);
 322
 323	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 324		return 0;
 325
 326	atomic_inc(&m->pg_init_count);
 327	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 328
 329	/* Check here to reset pg_init_required */
 330	if (!m->current_pg)
 331		return 0;
 332
 333	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 334		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 335						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 336	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 337		/* Skip failed paths */
 338		if (!pgpath->is_active)
 339			continue;
 340		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 341				       pg_init_delay))
 342			atomic_inc(&m->pg_init_in_progress);
 343	}
 344	return atomic_read(&m->pg_init_in_progress);
 345}
 346
 347static int pg_init_all_paths(struct multipath *m)
 348{
 349	int ret;
 350	unsigned long flags;
 351
 352	spin_lock_irqsave(&m->lock, flags);
 353	ret = __pg_init_all_paths(m);
 354	spin_unlock_irqrestore(&m->lock, flags);
 355
 356	return ret;
 357}
 358
 359static void __switch_pg(struct multipath *m, struct priority_group *pg)
 360{
 361	lockdep_assert_held(&m->lock);
 362
 363	m->current_pg = pg;
 364
 365	/* Must we initialise the PG first, and queue I/O till it's ready? */
 366	if (m->hw_handler_name) {
 367		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 368		set_bit(MPATHF_QUEUE_IO, &m->flags);
 369	} else {
 370		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 371		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 372	}
 373
 374	atomic_set(&m->pg_init_count, 0);
 375}
 376
 377static struct pgpath *choose_path_in_pg(struct multipath *m,
 378					struct priority_group *pg,
 379					size_t nr_bytes)
 380{
 381	unsigned long flags;
 382	struct dm_path *path;
 383	struct pgpath *pgpath;
 384
 385	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 386	if (!path)
 387		return ERR_PTR(-ENXIO);
 388
 389	pgpath = path_to_pgpath(path);
 390
 391	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 392		/* Only update current_pgpath if pg changed */
 393		spin_lock_irqsave(&m->lock, flags);
 394		m->current_pgpath = pgpath;
 395		__switch_pg(m, pg);
 396		spin_unlock_irqrestore(&m->lock, flags);
 397	}
 398
 399	return pgpath;
 400}
 401
 402static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 403{
 404	unsigned long flags;
 405	struct priority_group *pg;
 406	struct pgpath *pgpath;
 407	unsigned int bypassed = 1;
 408
 409	if (!atomic_read(&m->nr_valid_paths)) {
 410		spin_lock_irqsave(&m->lock, flags);
 411		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 412		spin_unlock_irqrestore(&m->lock, flags);
 413		goto failed;
 414	}
 415
 416	/* Were we instructed to switch PG? */
 417	if (READ_ONCE(m->next_pg)) {
 418		spin_lock_irqsave(&m->lock, flags);
 419		pg = m->next_pg;
 420		if (!pg) {
 421			spin_unlock_irqrestore(&m->lock, flags);
 422			goto check_current_pg;
 423		}
 424		m->next_pg = NULL;
 425		spin_unlock_irqrestore(&m->lock, flags);
 426		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 427		if (!IS_ERR_OR_NULL(pgpath))
 428			return pgpath;
 429	}
 430
 431	/* Don't change PG until it has no remaining paths */
 432check_current_pg:
 433	pg = READ_ONCE(m->current_pg);
 434	if (pg) {
 435		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 436		if (!IS_ERR_OR_NULL(pgpath))
 437			return pgpath;
 438	}
 439
 440	/*
 441	 * Loop through priority groups until we find a valid path.
 442	 * First time we skip PGs marked 'bypassed'.
 443	 * Second time we only try the ones we skipped, but set
 444	 * pg_init_delay_retry so we do not hammer controllers.
 445	 */
 446	do {
 447		list_for_each_entry(pg, &m->priority_groups, list) {
 448			if (pg->bypassed == !!bypassed)
 449				continue;
 450			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 451			if (!IS_ERR_OR_NULL(pgpath)) {
 452				if (!bypassed) {
 453					spin_lock_irqsave(&m->lock, flags);
 454					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 455					spin_unlock_irqrestore(&m->lock, flags);
 456				}
 457				return pgpath;
 458			}
 459		}
 460	} while (bypassed--);
 461
 462failed:
 463	spin_lock_irqsave(&m->lock, flags);
 464	m->current_pgpath = NULL;
 465	m->current_pg = NULL;
 466	spin_unlock_irqrestore(&m->lock, flags);
 467
 468	return NULL;
 469}
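/*
 * Worked example (hypothetical topology): with PG1 bypassed and PG2
 * healthy, the first pass (bypassed == 1) skips PG1 and returns a path
 * from PG2.  If every PG is bypassed, the second pass (bypassed == 0)
 * retries only the bypassed PGs and sets MPATHF_PG_INIT_DELAY_RETRY so
 * their controllers are not hit with back-to-back pg_init requests.
 */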
 470
 471/*
 472 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 473 * report the function name and line number of the function from which
 474 * it has been invoked.
 475 */
 476#define dm_report_EIO(m)						\
 477	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
 478		      dm_table_device_name((m)->ti->table),		\
 479		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
 480		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
 481		      dm_noflush_suspending((m)->ti))
 482
 483/*
 484 * Check whether bios must be queued in the device-mapper core rather
 485 * than here in the target.
 486 */
 487static bool __must_push_back(struct multipath *m)
 488{
 489	return dm_noflush_suspending(m->ti);
 490}
 491
 492static bool must_push_back_rq(struct multipath *m)
 493{
 494	unsigned long flags;
 495	bool ret;
 496
 497	spin_lock_irqsave(&m->lock, flags);
 498	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
 499	spin_unlock_irqrestore(&m->lock, flags);
 500
 501	return ret;
 502}
 503
 504/*
 505 * Map cloned requests (request-based multipath)
 506 */
 507static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 508				   union map_info *map_context,
 509				   struct request **__clone)
 510{
 511	struct multipath *m = ti->private;
 512	size_t nr_bytes = blk_rq_bytes(rq);
 513	struct pgpath *pgpath;
 514	struct block_device *bdev;
 515	struct dm_mpath_io *mpio = get_mpio(map_context);
 516	struct request_queue *q;
 517	struct request *clone;
 518
 519	/* Do we need to select a new pgpath? */
 520	pgpath = READ_ONCE(m->current_pgpath);
 521	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
 522		pgpath = choose_pgpath(m, nr_bytes);
 523
 524	if (!pgpath) {
 525		if (must_push_back_rq(m))
 526			return DM_MAPIO_DELAY_REQUEUE;
 527		dm_report_EIO(m);	/* Failed */
 528		return DM_MAPIO_KILL;
 529	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
 530		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
 531		pg_init_all_paths(m);
 532		return DM_MAPIO_DELAY_REQUEUE;
 533	}
 534
 535	mpio->pgpath = pgpath;
 536	mpio->nr_bytes = nr_bytes;
 537
 538	bdev = pgpath->path.dev->bdev;
 539	q = bdev_get_queue(bdev);
 540	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
 541			BLK_MQ_REQ_NOWAIT);
 542	if (IS_ERR(clone)) {
 543		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 544		if (blk_queue_dying(q)) {
 545			atomic_inc(&m->pg_init_in_progress);
 546			activate_or_offline_path(pgpath);
 547			return DM_MAPIO_DELAY_REQUEUE;
 548		}
 549
 550		/*
 551		 * blk-mq's SCHED_RESTART can cover this requeue, so we
 552		 * needn't deal with it by DELAY_REQUEUE. More importantly,
 553		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
 554		 * get the queue busy feedback (via BLK_STS_RESOURCE),
 555		 * otherwise I/O merging can suffer.
 556		 */
 557		return DM_MAPIO_REQUEUE;
 558	}
 559	clone->bio = clone->biotail = NULL;
 560	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 561	*__clone = clone;
 562
 563	if (pgpath->pg->ps.type->start_io)
 564		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 565					      &pgpath->path,
 566					      nr_bytes);
 567	return DM_MAPIO_REMAPPED;
 568}
 569
 570static void multipath_release_clone(struct request *clone,
 571				    union map_info *map_context)
 572{
 573	if (unlikely(map_context)) {
 574		/*
 575		 * non-NULL map_context means caller is still map
 576		 * method; must undo multipath_clone_and_map()
 577		 */
 578		struct dm_mpath_io *mpio = get_mpio(map_context);
 579		struct pgpath *pgpath = mpio->pgpath;
 580
 581		if (pgpath && pgpath->pg->ps.type->end_io)
 582			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
 583						    &pgpath->path,
 584						    mpio->nr_bytes,
 585						    clone->io_start_time_ns);
 586	}
 587
 588	blk_mq_free_request(clone);
 589}
 590
 591/*
 592 * Map cloned bios (bio-based multipath)
 593 */
 594
 595static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
 596{
 597	/* Queue for the daemon to resubmit */
 598	bio_list_add(&m->queued_bios, bio);
 599	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
 600		queue_work(kmultipathd, &m->process_queued_bios);
 601}
 602
 603static void multipath_queue_bio(struct multipath *m, struct bio *bio)
 604{
 605	unsigned long flags;
 606
 607	spin_lock_irqsave(&m->lock, flags);
 608	__multipath_queue_bio(m, bio);
 609	spin_unlock_irqrestore(&m->lock, flags);
 610}
 611
 612static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 613{
 614	struct pgpath *pgpath;
 615	unsigned long flags;
 616
 617	/* Do we need to select a new pgpath? */
 618	pgpath = READ_ONCE(m->current_pgpath);
 619	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
 620		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 621
 622	if (!pgpath) {
 623		spin_lock_irqsave(&m->lock, flags);
 624		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 625			__multipath_queue_bio(m, bio);
 626			pgpath = ERR_PTR(-EAGAIN);
 627		}
 628		spin_unlock_irqrestore(&m->lock, flags);
 629
 630	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
 631		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
 632		multipath_queue_bio(m, bio);
 633		pg_init_all_paths(m);
 634		return ERR_PTR(-EAGAIN);
 635	}
 636
 637	return pgpath;
 638}
 639
 640static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 641			       struct dm_mpath_io *mpio)
 642{
 643	struct pgpath *pgpath = __map_bio(m, bio);
 644
 645	if (IS_ERR(pgpath))
 646		return DM_MAPIO_SUBMITTED;
 647
 648	if (!pgpath) {
 649		if (__must_push_back(m))
 650			return DM_MAPIO_REQUEUE;
 651		dm_report_EIO(m);
 652		return DM_MAPIO_KILL;
 653	}
 654
 655	mpio->pgpath = pgpath;
 656
 657	if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
 658		mpio->start_time_ns = ktime_get_ns();
 659
 660	bio->bi_status = 0;
 661	bio_set_dev(bio, pgpath->path.dev->bdev);
 662	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 663
 664	if (pgpath->pg->ps.type->start_io)
 665		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 666					      &pgpath->path,
 667					      mpio->nr_bytes);
 668	return DM_MAPIO_REMAPPED;
 669}
 670
 671static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 672{
 673	struct multipath *m = ti->private;
 674	struct dm_mpath_io *mpio = NULL;
 675
 676	multipath_init_per_bio_data(bio, &mpio);
 677	return __multipath_map_bio(m, bio, mpio);
 678}
 679
 680static void process_queued_io_list(struct multipath *m)
 681{
 682	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
 683		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
 684	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 685		queue_work(kmultipathd, &m->process_queued_bios);
 686}
 687
 688static void process_queued_bios(struct work_struct *work)
 689{
 690	int r;
 691	unsigned long flags;
 692	struct bio *bio;
 693	struct bio_list bios;
 694	struct blk_plug plug;
 695	struct multipath *m =
 696		container_of(work, struct multipath, process_queued_bios);
 697
 698	bio_list_init(&bios);
 699
 700	spin_lock_irqsave(&m->lock, flags);
 701
 702	if (bio_list_empty(&m->queued_bios)) {
 703		spin_unlock_irqrestore(&m->lock, flags);
 704		return;
 705	}
 706
 707	bio_list_merge(&bios, &m->queued_bios);
 708	bio_list_init(&m->queued_bios);
 709
 710	spin_unlock_irqrestore(&m->lock, flags);
 711
 712	blk_start_plug(&plug);
 713	while ((bio = bio_list_pop(&bios))) {
 714		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 715
 716		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
 717		r = __multipath_map_bio(m, bio, mpio);
 718		switch (r) {
 719		case DM_MAPIO_KILL:
 720			bio->bi_status = BLK_STS_IOERR;
 721			bio_endio(bio);
 722			break;
 723		case DM_MAPIO_REQUEUE:
 724			bio->bi_status = BLK_STS_DM_REQUEUE;
 725			bio_endio(bio);
 726			break;
 727		case DM_MAPIO_REMAPPED:
 728			submit_bio_noacct(bio);
 729			break;
 730		case DM_MAPIO_SUBMITTED:
 731			break;
 732		default:
 733			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
 734		}
 735	}
 736	blk_finish_plug(&plug);
 737}
 738
 739/*
 740 * If we run out of usable paths, should we queue I/O or error it?
 741 */
 742static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
 743			    bool save_old_value, const char *caller)
 744{
 745	unsigned long flags;
 746	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
 747	const char *dm_dev_name = dm_table_device_name(m->ti->table);
 748
 749	DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
 750		dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);
 751
 752	spin_lock_irqsave(&m->lock, flags);
 753
 754	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 755	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 756
 757	if (save_old_value) {
 758		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
 759			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
 760			      dm_dev_name);
 761		} else
 762			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
 763	} else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
 764		/* A "fail_if_no_path" message was received; honor it. */
 765		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 766	}
 767	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);
 768
 769	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
 770		dm_dev_name, __func__,
 771		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
 772		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
 773		dm_noflush_suspending(m->ti));
 774
 775	spin_unlock_irqrestore(&m->lock, flags);
 776
 777	if (!f_queue_if_no_path) {
 778		dm_table_run_md_queue_async(m->ti->table);
 779		process_queued_io_list(m);
 780	}
 781
 782	return 0;
 783}
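/*
 * Both states are reachable from userspace (a sketch; the device name
 * "mpath0" is hypothetical), parsed by multipath_message() below:
 *
 *	dmsetup message mpath0 0 queue_if_no_path
 *	dmsetup message mpath0 0 fail_if_no_path
 */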
 784
 785/*
 786 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 787 * process any queued I/O.
 788 */
 789static void queue_if_no_path_timeout_work(struct timer_list *t)
 790{
 791	struct multipath *m = from_timer(m, t, nopath_timer);
 792
 793	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
 794	       dm_table_device_name(m->ti->table));
 795	queue_if_no_path(m, false, false, __func__);
 796}
 797
 798/*
 799 * Enable the queue_if_no_path timeout if necessary.
 800 * Called with m->lock held.
 801 */
 802static void enable_nopath_timeout(struct multipath *m)
 803{
 804	unsigned long queue_if_no_path_timeout =
 805		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
 806
 807	lockdep_assert_held(&m->lock);
 808
 809	if (queue_if_no_path_timeout > 0 &&
 810	    atomic_read(&m->nr_valid_paths) == 0 &&
 811	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 812		mod_timer(&m->nopath_timer,
 813			  jiffies + queue_if_no_path_timeout);
 814	}
 815}
 816
 817static void disable_nopath_timeout(struct multipath *m)
 818{
 819	del_timer_sync(&m->nopath_timer);
 820}
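/*
 * The timeout comes from the queue_if_no_path_timeout_secs module
 * parameter.  Assuming it is exposed read-write under sysfs as usual, a
 * 60-second cap on no-path queueing could be set with:
 *
 *	echo 60 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 */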
 821
 822/*
 823 * An event is triggered whenever a path is taken out of use.
 824 * Includes path failure and PG bypass.
 825 */
 826static void trigger_event(struct work_struct *work)
 827{
 828	struct multipath *m =
 829		container_of(work, struct multipath, trigger_event);
 830
 831	dm_table_event(m->ti->table);
 832}
 833
 834/*
 835 *---------------------------------------------------------------
 836 * Constructor/argument parsing:
 837 * <#multipath feature args> [<arg>]*
 838 * <#hw_handler args> [hw_handler [<arg>]*]
 839 * <#priority groups>
 840 * <initial priority group>
 841 *     [<selector> <#selector args> [<arg>]*
 842 *      <#paths> <#per-path selector args>
 843 *         [<path> [<arg>]* ]+ ]+
 844 *---------------------------------------------------------------
 845 */
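/*
 * Illustrative constructor string matching the grammar above (device
 * numbers and repeat counts are hypothetical): no feature args, no
 * hardware handler, one priority group used from the start, round-robin
 * across two paths:
 *
 *	0 1024 multipath 0 0 1 1 round-robin 0 2 1 8:16 100 8:32 100
 */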
 846static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 847			       struct dm_target *ti)
 848{
 849	int r;
 850	struct path_selector_type *pst;
 851	unsigned int ps_argc;
 852
 853	static const struct dm_arg _args[] = {
 854		{0, 1024, "invalid number of path selector args"},
 855	};
 856
 857	pst = dm_get_path_selector(dm_shift_arg(as));
 858	if (!pst) {
 859		ti->error = "unknown path selector type";
 860		return -EINVAL;
 861	}
 862
 863	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 864	if (r) {
 865		dm_put_path_selector(pst);
 866		return -EINVAL;
 867	}
 868
 869	r = pst->create(&pg->ps, ps_argc, as->argv);
 870	if (r) {
 871		dm_put_path_selector(pst);
 872		ti->error = "path selector constructor failed";
 873		return r;
 874	}
 875
 876	pg->ps.type = pst;
 877	dm_consume_args(as, ps_argc);
 878
 879	return 0;
 880}
 881
 882static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 883			 const char **attached_handler_name, char **error)
 884{
 885	struct request_queue *q = bdev_get_queue(bdev);
 886	int r;
 887
 888	if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
 889retain:
 890		if (*attached_handler_name) {
 891			/*
 892			 * Clear any hw_handler_params associated with a
 893			 * handler that isn't already attached.
 894			 */
 895			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
 896				kfree(m->hw_handler_params);
 897				m->hw_handler_params = NULL;
 898			}
 899
 900			/*
 901			 * Reset hw_handler_name to match the attached handler
 902			 *
 903			 * NB. This modifies the table line to show the actual
 904			 * handler instead of the original table passed in.
 905			 */
 906			kfree(m->hw_handler_name);
 907			m->hw_handler_name = *attached_handler_name;
 908			*attached_handler_name = NULL;
 909		}
 910	}
 911
 912	if (m->hw_handler_name) {
 913		r = scsi_dh_attach(q, m->hw_handler_name);
 914		if (r == -EBUSY) {
 915			DMINFO("retaining handler on device %pg", bdev);
 916			goto retain;
 917		}
 918		if (r < 0) {
 919			*error = "error attaching hardware handler";
 920			return r;
 921		}
 922
 923		if (m->hw_handler_params) {
 924			r = scsi_dh_set_params(q, m->hw_handler_params);
 925			if (r < 0) {
 926				*error = "unable to set hardware handler parameters";
 927				return r;
 928			}
 929		}
 930	}
 931
 932	return 0;
 933}
 934
 935static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 936				 struct dm_target *ti)
 937{
 938	int r;
 939	struct pgpath *p;
 940	struct multipath *m = ti->private;
 941	struct request_queue *q;
 942	const char *attached_handler_name = NULL;
 943
 944	/* we need at least a path arg */
 945	if (as->argc < 1) {
 946		ti->error = "no device given";
 947		return ERR_PTR(-EINVAL);
 948	}
 949
 950	p = alloc_pgpath();
 951	if (!p)
 952		return ERR_PTR(-ENOMEM);
 953
 954	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 955			  &p->path.dev);
 956	if (r) {
 957		ti->error = "error getting device";
 958		goto bad;
 959	}
 960
 961	q = bdev_get_queue(p->path.dev->bdev);
 962	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 963	if (attached_handler_name || m->hw_handler_name) {
 964		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 965		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
 966		kfree(attached_handler_name);
 967		if (r) {
 968			dm_put_device(ti, p->path.dev);
 969			goto bad;
 970		}
 971	}
 972
 973	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 974	if (r) {
 975		dm_put_device(ti, p->path.dev);
 976		goto bad;
 977	}
 978
 979	return p;
 980 bad:
 981	free_pgpath(p);
 982	return ERR_PTR(r);
 983}
 984
 985static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 986						   struct multipath *m)
 987{
 988	static const struct dm_arg _args[] = {
 989		{1, 1024, "invalid number of paths"},
 990		{0, 1024, "invalid number of selector args"}
 991	};
 992
 993	int r;
 994	unsigned int i, nr_selector_args, nr_args;
 995	struct priority_group *pg;
 996	struct dm_target *ti = m->ti;
 997
 998	if (as->argc < 2) {
 999		as->argc = 0;
1000		ti->error = "not enough priority group arguments";
1001		return ERR_PTR(-EINVAL);
1002	}
1003
1004	pg = alloc_priority_group();
1005	if (!pg) {
1006		ti->error = "couldn't allocate priority group";
1007		return ERR_PTR(-ENOMEM);
1008	}
1009	pg->m = m;
1010
1011	r = parse_path_selector(as, pg, ti);
1012	if (r)
1013		goto bad;
1014
1015	/*
1016	 * read the paths
1017	 */
1018	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
1019	if (r)
1020		goto bad;
1021
1022	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
1023	if (r)
1024		goto bad;
1025
1026	nr_args = 1 + nr_selector_args;
1027	for (i = 0; i < pg->nr_pgpaths; i++) {
1028		struct pgpath *pgpath;
1029		struct dm_arg_set path_args;
1030
1031		if (as->argc < nr_args) {
1032			ti->error = "not enough path parameters";
1033			r = -EINVAL;
1034			goto bad;
1035		}
1036
1037		path_args.argc = nr_args;
1038		path_args.argv = as->argv;
1039
1040		pgpath = parse_path(&path_args, &pg->ps, ti);
1041		if (IS_ERR(pgpath)) {
1042			r = PTR_ERR(pgpath);
1043			goto bad;
1044		}
1045
1046		pgpath->pg = pg;
1047		list_add_tail(&pgpath->list, &pg->pgpaths);
1048		dm_consume_args(as, nr_args);
1049	}
1050
1051	return pg;
1052
1053 bad:
1054	free_priority_group(pg, ti);
1055	return ERR_PTR(r);
1056}
1057
1058static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
1059{
1060	unsigned int hw_argc;
1061	int ret;
1062	struct dm_target *ti = m->ti;
1063
1064	static const struct dm_arg _args[] = {
1065		{0, 1024, "invalid number of hardware handler args"},
1066	};
1067
1068	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
1069		return -EINVAL;
1070
1071	if (!hw_argc)
1072		return 0;
1073
1074	if (m->queue_mode == DM_TYPE_BIO_BASED) {
1075		dm_consume_args(as, hw_argc);
1076		DMERR("bio-based multipath doesn't allow hardware handler args");
1077		return 0;
1078	}
1079
1080	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1081	if (!m->hw_handler_name)
1082		return -EINVAL;
1083
1084	if (hw_argc > 1) {
1085		char *p;
1086		int i, j, len = 4;
1087
1088		for (i = 0; i <= hw_argc - 2; i++)
1089			len += strlen(as->argv[i]) + 1;
1090		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1091		if (!p) {
1092			ti->error = "memory allocation failed";
1093			ret = -ENOMEM;
1094			goto fail;
1095		}
1096		j = sprintf(p, "%d", hw_argc - 1);
1097		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
1098			j = sprintf(p, "%s", as->argv[i]);
1099	}
1100	dm_consume_args(as, hw_argc - 1);
1101
1102	return 0;
1103fail:
1104	kfree(m->hw_handler_name);
1105	m->hw_handler_name = NULL;
1106	return ret;
1107}
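/*
 * Resulting hw_handler_params layout (a sketch; the handler name and
 * args are hypothetical): for a table fragment "3 alua A B", hw_argc is
 * 3, hw_handler_name is "alua", and the buffer handed to
 * scsi_dh_set_params() holds the arg count and NUL-separated args:
 *
 *	"2\0A\0B\0"
 */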
1108
1109static int parse_features(struct dm_arg_set *as, struct multipath *m)
1110{
1111	int r;
1112	unsigned int argc;
1113	struct dm_target *ti = m->ti;
1114	const char *arg_name;
1115
1116	static const struct dm_arg _args[] = {
1117		{0, 8, "invalid number of feature args"},
1118		{1, 50, "pg_init_retries must be between 1 and 50"},
1119		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1120	};
1121
1122	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1123	if (r)
1124		return -EINVAL;
1125
1126	if (!argc)
1127		return 0;
1128
1129	do {
1130		arg_name = dm_shift_arg(as);
1131		argc--;
1132
1133		if (!strcasecmp(arg_name, "queue_if_no_path")) {
1134			r = queue_if_no_path(m, true, false, __func__);
1135			continue;
1136		}
1137
1138		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1139			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1140			continue;
1141		}
1142
1143		if (!strcasecmp(arg_name, "pg_init_retries") &&
1144		    (argc >= 1)) {
1145			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1146			argc--;
1147			continue;
1148		}
1149
1150		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1151		    (argc >= 1)) {
1152			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1153			argc--;
1154			continue;
1155		}
1156
1157		if (!strcasecmp(arg_name, "queue_mode") &&
1158		    (argc >= 1)) {
1159			const char *queue_mode_name = dm_shift_arg(as);
1160
1161			if (!strcasecmp(queue_mode_name, "bio"))
1162				m->queue_mode = DM_TYPE_BIO_BASED;
1163			else if (!strcasecmp(queue_mode_name, "rq") ||
1164				 !strcasecmp(queue_mode_name, "mq"))
1165				m->queue_mode = DM_TYPE_REQUEST_BASED;
1166			else {
1167				ti->error = "Unknown 'queue_mode' requested";
1168				r = -EINVAL;
1169			}
1170			argc--;
1171			continue;
1172		}
1173
1174		ti->error = "Unrecognised multipath feature request";
1175		r = -EINVAL;
1176	} while (argc && !r);
1177
1178	return r;
1179}
1180
1181static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1182{
1183	/* target arguments */
1184	static const struct dm_arg _args[] = {
1185		{0, 1024, "invalid number of priority groups"},
1186		{0, 1024, "invalid initial priority group number"},
1187	};
1188
1189	int r;
1190	struct multipath *m;
1191	struct dm_arg_set as;
1192	unsigned int pg_count = 0;
1193	unsigned int next_pg_num;
1194	unsigned long flags;
1195
1196	as.argc = argc;
1197	as.argv = argv;
1198
1199	m = alloc_multipath(ti);
1200	if (!m) {
1201		ti->error = "can't allocate multipath";
1202		return -EINVAL;
1203	}
1204
1205	r = parse_features(&as, m);
1206	if (r)
1207		goto bad;
1208
1209	r = alloc_multipath_stage2(ti, m);
1210	if (r)
1211		goto bad;
1212
1213	r = parse_hw_handler(&as, m);
1214	if (r)
1215		goto bad;
1216
1217	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1218	if (r)
1219		goto bad;
1220
1221	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1222	if (r)
1223		goto bad;
1224
1225	if ((!m->nr_priority_groups && next_pg_num) ||
1226	    (m->nr_priority_groups && !next_pg_num)) {
1227		ti->error = "invalid initial priority group";
1228		r = -EINVAL;
1229		goto bad;
1230	}
1231
1232	/* parse the priority groups */
1233	while (as.argc) {
1234		struct priority_group *pg;
1235		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
1236
1237		pg = parse_priority_group(&as, m);
1238		if (IS_ERR(pg)) {
1239			r = PTR_ERR(pg);
1240			goto bad;
1241		}
1242
1243		nr_valid_paths += pg->nr_pgpaths;
1244		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1245
1246		list_add_tail(&pg->list, &m->priority_groups);
1247		pg_count++;
1248		pg->pg_num = pg_count;
1249		if (!--next_pg_num)
1250			m->next_pg = pg;
1251	}
1252
1253	if (pg_count != m->nr_priority_groups) {
1254		ti->error = "priority group count mismatch";
1255		r = -EINVAL;
1256		goto bad;
1257	}
1258
1259	spin_lock_irqsave(&m->lock, flags);
1260	enable_nopath_timeout(m);
1261	spin_unlock_irqrestore(&m->lock, flags);
1262
1263	ti->num_flush_bios = 1;
1264	ti->num_discard_bios = 1;
1265	ti->num_write_zeroes_bios = 1;
1266	if (m->queue_mode == DM_TYPE_BIO_BASED)
1267		ti->per_io_data_size = multipath_per_bio_data_size();
1268	else
1269		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1270
1271	return 0;
1272
1273 bad:
1274	free_multipath(m);
1275	return r;
1276}
1277
1278static void multipath_wait_for_pg_init_completion(struct multipath *m)
1279{
1280	DEFINE_WAIT(wait);
1281
1282	while (1) {
1283		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1284
1285		if (!atomic_read(&m->pg_init_in_progress))
1286			break;
1287
1288		io_schedule();
1289	}
1290	finish_wait(&m->pg_init_wait, &wait);
1291}
1292
1293static void flush_multipath_work(struct multipath *m)
1294{
1295	if (m->hw_handler_name) {
1296		unsigned long flags;
1297
1298		if (!atomic_read(&m->pg_init_in_progress))
1299			goto skip;
1300
1301		spin_lock_irqsave(&m->lock, flags);
1302		if (atomic_read(&m->pg_init_in_progress) &&
1303		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
1304			spin_unlock_irqrestore(&m->lock, flags);
1305
1306			flush_workqueue(kmpath_handlerd);
1307			multipath_wait_for_pg_init_completion(m);
1308
1309			spin_lock_irqsave(&m->lock, flags);
1310			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1311		}
1312		spin_unlock_irqrestore(&m->lock, flags);
1313	}
1314skip:
1315	if (m->queue_mode == DM_TYPE_BIO_BASED)
1316		flush_work(&m->process_queued_bios);
1317	flush_work(&m->trigger_event);
1318}
1319
1320static void multipath_dtr(struct dm_target *ti)
1321{
1322	struct multipath *m = ti->private;
1323
1324	disable_nopath_timeout(m);
1325	flush_multipath_work(m);
1326	free_multipath(m);
1327}
1328
1329/*
1330 * Take a path out of use.
1331 */
1332static int fail_path(struct pgpath *pgpath)
1333{
1334	unsigned long flags;
1335	struct multipath *m = pgpath->pg->m;
1336
1337	spin_lock_irqsave(&m->lock, flags);
1338
1339	if (!pgpath->is_active)
1340		goto out;
1341
1342	DMWARN("%s: Failing path %s.",
1343	       dm_table_device_name(m->ti->table),
1344	       pgpath->path.dev->name);
1345
1346	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1347	pgpath->is_active = false;
1348	pgpath->fail_count++;
1349
1350	atomic_dec(&m->nr_valid_paths);
1351
1352	if (pgpath == m->current_pgpath)
1353		m->current_pgpath = NULL;
1354
1355	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1356		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1357
1358	queue_work(dm_mpath_wq, &m->trigger_event);
1359
1360	enable_nopath_timeout(m);
1361
1362out:
1363	spin_unlock_irqrestore(&m->lock, flags);
1364
1365	return 0;
1366}
1367
1368/*
1369 * Reinstate a previously-failed path
1370 */
1371static int reinstate_path(struct pgpath *pgpath)
1372{
1373	int r = 0, run_queue = 0;
1374	unsigned long flags;
1375	struct multipath *m = pgpath->pg->m;
1376	unsigned int nr_valid_paths;
1377
1378	spin_lock_irqsave(&m->lock, flags);
1379
1380	if (pgpath->is_active)
1381		goto out;
1382
1383	DMWARN("%s: Reinstating path %s.",
1384	       dm_table_device_name(m->ti->table),
1385	       pgpath->path.dev->name);
1386
1387	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1388	if (r)
1389		goto out;
1390
1391	pgpath->is_active = true;
1392
1393	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1394	if (nr_valid_paths == 1) {
1395		m->current_pgpath = NULL;
1396		run_queue = 1;
1397	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1398		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1399			atomic_inc(&m->pg_init_in_progress);
1400	}
1401
1402	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1403		       pgpath->path.dev->name, nr_valid_paths);
1404
1405	schedule_work(&m->trigger_event);
1406
1407out:
1408	spin_unlock_irqrestore(&m->lock, flags);
1409	if (run_queue) {
1410		dm_table_run_md_queue_async(m->ti->table);
1411		process_queued_io_list(m);
1412	}
1413
1414	if (pgpath->is_active)
1415		disable_nopath_timeout(m);
1416
1417	return r;
1418}
1419
1420/*
1421 * Fail or reinstate all paths that match the provided struct dm_dev.
1422 */
1423static int action_dev(struct multipath *m, struct dm_dev *dev,
1424		      action_fn action)
1425{
1426	int r = -EINVAL;
1427	struct pgpath *pgpath;
1428	struct priority_group *pg;
1429
1430	list_for_each_entry(pg, &m->priority_groups, list) {
1431		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1432			if (pgpath->path.dev == dev)
1433				r = action(pgpath);
1434		}
1435	}
1436
1437	return r;
1438}
1439
1440/*
1441 * Temporarily try to avoid having to use the specified PG
1442 */
1443static void bypass_pg(struct multipath *m, struct priority_group *pg,
1444		      bool bypassed)
1445{
1446	unsigned long flags;
1447
1448	spin_lock_irqsave(&m->lock, flags);
1449
1450	pg->bypassed = bypassed;
1451	m->current_pgpath = NULL;
1452	m->current_pg = NULL;
1453
1454	spin_unlock_irqrestore(&m->lock, flags);
1455
1456	schedule_work(&m->trigger_event);
1457}
1458
1459/*
1460 * Switch to using the specified PG from the next I/O that gets mapped
1461 */
1462static int switch_pg_num(struct multipath *m, const char *pgstr)
1463{
1464	struct priority_group *pg;
1465	unsigned int pgnum;
1466	unsigned long flags;
1467	char dummy;
1468
1469	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1470	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1471		DMWARN("invalid PG number supplied to %s", __func__);
1472		return -EINVAL;
1473	}
1474
1475	spin_lock_irqsave(&m->lock, flags);
1476	list_for_each_entry(pg, &m->priority_groups, list) {
1477		pg->bypassed = false;
1478		if (--pgnum)
1479			continue;
1480
1481		m->current_pgpath = NULL;
1482		m->current_pg = NULL;
1483		m->next_pg = pg;
1484	}
1485	spin_unlock_irqrestore(&m->lock, flags);
1486
1487	schedule_work(&m->trigger_event);
1488	return 0;
1489}
1490
1491/*
1492 * Set/clear bypassed status of a PG.
1493 * PGs are numbered upwards from 1 in the order they were declared.
1494 */
1495static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1496{
1497	struct priority_group *pg;
1498	unsigned int pgnum;
1499	char dummy;
1500
1501	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1502	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1503		DMWARN("invalid PG number supplied to bypass_pg");
1504		return -EINVAL;
1505	}
1506
1507	list_for_each_entry(pg, &m->priority_groups, list) {
1508		if (!--pgnum)
1509			break;
1510	}
1511
1512	bypass_pg(m, pg, bypassed);
1513	return 0;
1514}
1515
1516/*
1517 * Should we retry pg_init immediately?
1518 */
1519static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1520{
1521	unsigned long flags;
1522	bool limit_reached = false;
1523
1524	spin_lock_irqsave(&m->lock, flags);
1525
1526	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1527	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1528		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1529	else
1530		limit_reached = true;
1531
1532	spin_unlock_irqrestore(&m->lock, flags);
1533
1534	return limit_reached;
1535}
1536
1537static void pg_init_done(void *data, int errors)
1538{
1539	struct pgpath *pgpath = data;
1540	struct priority_group *pg = pgpath->pg;
1541	struct multipath *m = pg->m;
1542	unsigned long flags;
1543	bool delay_retry = false;
1544
1545	/* device or driver problems */
1546	switch (errors) {
1547	case SCSI_DH_OK:
1548		break;
1549	case SCSI_DH_NOSYS:
1550		if (!m->hw_handler_name) {
1551			errors = 0;
1552			break;
1553		}
1554		DMERR("Could not failover the device: Handler scsi_dh_%s "
1555		      "Error %d.", m->hw_handler_name, errors);
1556		/*
1557		 * Fail path for now, so we do not ping pong
1558		 */
1559		fail_path(pgpath);
1560		break;
1561	case SCSI_DH_DEV_TEMP_BUSY:
1562		/*
1563		 * Probably doing something like FW upgrade on the
1564		 * controller so try the other pg.
1565		 */
1566		bypass_pg(m, pg, true);
1567		break;
1568	case SCSI_DH_RETRY:
1569		/* Wait before retrying. */
1570		delay_retry = true;
1571		fallthrough;
1572	case SCSI_DH_IMM_RETRY:
1573	case SCSI_DH_RES_TEMP_UNAVAIL:
1574		if (pg_init_limit_reached(m, pgpath))
1575			fail_path(pgpath);
1576		errors = 0;
1577		break;
1578	case SCSI_DH_DEV_OFFLINED:
1579	default:
1580		/*
1581		 * We probably do not want to fail the path for a device
1582		 * error, but this is what the old dm did. In future
1583		 * patches we can do more advanced handling.
1584		 */
1585		fail_path(pgpath);
1586	}
1587
1588	spin_lock_irqsave(&m->lock, flags);
1589	if (errors) {
1590		if (pgpath == m->current_pgpath) {
1591			DMERR("Could not failover device. Error %d.", errors);
1592			m->current_pgpath = NULL;
1593			m->current_pg = NULL;
1594		}
1595	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1596		pg->bypassed = false;
1597
1598	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
 1599		/* Activations of other paths are still ongoing */
1600		goto out;
1601
1602	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1603		if (delay_retry)
1604			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1605		else
1606			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1607
1608		if (__pg_init_all_paths(m))
1609			goto out;
1610	}
1611	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1612
1613	process_queued_io_list(m);
1614
1615	/*
1616	 * Wake up any thread waiting to suspend.
1617	 */
1618	wake_up(&m->pg_init_wait);
1619
1620out:
1621	spin_unlock_irqrestore(&m->lock, flags);
1622}
1623
1624static void activate_or_offline_path(struct pgpath *pgpath)
1625{
1626	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1627
1628	if (pgpath->is_active && !blk_queue_dying(q))
1629		scsi_dh_activate(q, pg_init_done, pgpath);
1630	else
1631		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1632}
1633
1634static void activate_path_work(struct work_struct *work)
1635{
1636	struct pgpath *pgpath =
1637		container_of(work, struct pgpath, activate_path.work);
1638
1639	activate_or_offline_path(pgpath);
1640}
1641
1642static int multipath_end_io(struct dm_target *ti, struct request *clone,
1643			    blk_status_t error, union map_info *map_context)
1644{
1645	struct dm_mpath_io *mpio = get_mpio(map_context);
1646	struct pgpath *pgpath = mpio->pgpath;
1647	int r = DM_ENDIO_DONE;
1648
1649	/*
1650	 * We don't queue any clone request inside the multipath target
1651	 * during end I/O handling, since those clone requests don't have
1652	 * bio clones.  If we queue them inside the multipath target,
1653	 * we need to make bio clones, that requires memory allocation.
1654	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1655	 *  don't have bio clones.)
1656	 * Instead of queueing the clone request here, we queue the original
1657	 * request into dm core, which will remake a clone request and
1658	 * clone bios for it and resubmit it later.
1659	 */
1660	if (error && blk_path_error(error)) {
1661		struct multipath *m = ti->private;
1662
1663		if (error == BLK_STS_RESOURCE)
1664			r = DM_ENDIO_DELAY_REQUEUE;
1665		else
1666			r = DM_ENDIO_REQUEUE;
1667
1668		if (pgpath)
1669			fail_path(pgpath);
1670
1671		if (!atomic_read(&m->nr_valid_paths) &&
1672		    !must_push_back_rq(m)) {
1673			if (error == BLK_STS_IOERR)
1674				dm_report_EIO(m);
1675			/* complete with the original error */
1676			r = DM_ENDIO_DONE;
1677		}
1678	}
1679
1680	if (pgpath) {
1681		struct path_selector *ps = &pgpath->pg->ps;
1682
1683		if (ps->type->end_io)
1684			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1685					 clone->io_start_time_ns);
1686	}
1687
1688	return r;
1689}
1690
1691static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1692				blk_status_t *error)
1693{
1694	struct multipath *m = ti->private;
1695	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1696	struct pgpath *pgpath = mpio->pgpath;
1697	unsigned long flags;
1698	int r = DM_ENDIO_DONE;
1699
1700	if (!*error || !blk_path_error(*error))
1701		goto done;
1702
1703	if (pgpath)
1704		fail_path(pgpath);
1705
1706	if (!atomic_read(&m->nr_valid_paths)) {
1707		spin_lock_irqsave(&m->lock, flags);
1708		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1709			if (__must_push_back(m)) {
1710				r = DM_ENDIO_REQUEUE;
1711			} else {
1712				dm_report_EIO(m);
1713				*error = BLK_STS_IOERR;
1714			}
1715			spin_unlock_irqrestore(&m->lock, flags);
1716			goto done;
1717		}
1718		spin_unlock_irqrestore(&m->lock, flags);
1719	}
1720
1721	multipath_queue_bio(m, clone);
1722	r = DM_ENDIO_INCOMPLETE;
1723done:
1724	if (pgpath) {
1725		struct path_selector *ps = &pgpath->pg->ps;
1726
1727		if (ps->type->end_io)
1728			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1729					 (mpio->start_time_ns ?:
1730					  dm_start_time_ns_from_clone(clone)));
1731	}
1732
1733	return r;
1734}
1735
1736/*
1737 * Suspend with flush can't complete until all the I/O is processed
1738 * so if the last path fails we must error any remaining I/O.
1739 * - Note that if the freeze_bdev fails while suspending, the
1740 *   queue_if_no_path state is lost - userspace should reset it.
1741 * Otherwise, during noflush suspend, queue_if_no_path will not change.
1742 */
1743static void multipath_presuspend(struct dm_target *ti)
1744{
1745	struct multipath *m = ti->private;
1746
1747	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
1748	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
1749		queue_if_no_path(m, false, true, __func__);
1750}
1751
1752static void multipath_postsuspend(struct dm_target *ti)
1753{
1754	struct multipath *m = ti->private;
1755
1756	mutex_lock(&m->work_mutex);
1757	flush_multipath_work(m);
1758	mutex_unlock(&m->work_mutex);
1759}
1760
1761/*
1762 * Restore the queue_if_no_path setting.
1763 */
1764static void multipath_resume(struct dm_target *ti)
1765{
1766	struct multipath *m = ti->private;
1767	unsigned long flags;
1768
1769	spin_lock_irqsave(&m->lock, flags);
1770	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
1771		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1772		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
1773	}
1774
1775	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
1776		dm_table_device_name(m->ti->table), __func__,
1777		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
1778		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1779
1780	spin_unlock_irqrestore(&m->lock, flags);
1781}
1782
1783/*
1784 * Info output has the following format:
1785 * num_multipath_feature_args [multipath_feature_args]*
1786 * num_handler_status_args [handler_status_args]*
1787 * num_groups init_group_number
1788 *            [A|D|E num_ps_status_args [ps_status_args]*
1789 *             num_paths num_selector_args
1790 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1791 *
1792 * Table output has the following format (identical to the constructor string):
1793 * num_feature_args [features_args]*
1794 * num_handler_args hw_handler [hw_handler_args]*
1795 * num_groups init_group_number
1796 *     [priority selector-name num_ps_args [ps_args]*
1797 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1798 */
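/*
 * Illustrative STATUSTYPE_INFO line for a healthy two-path round-robin
 * map (values are hypothetical): features "2 0 0", no handler status,
 * one group, initial group 1, group active with both paths up:
 *
 *	2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 */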
1799static void multipath_status(struct dm_target *ti, status_type_t type,
1800			     unsigned int status_flags, char *result, unsigned int maxlen)
1801{
1802	int sz = 0, pg_counter, pgpath_counter;
1803	unsigned long flags;
1804	struct multipath *m = ti->private;
1805	struct priority_group *pg;
1806	struct pgpath *p;
1807	unsigned int pg_num;
1808	char state;
1809
1810	spin_lock_irqsave(&m->lock, flags);
1811
1812	/* Features */
1813	if (type == STATUSTYPE_INFO)
1814		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1815		       atomic_read(&m->pg_init_count));
1816	else {
1817		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1818			      (m->pg_init_retries > 0) * 2 +
1819			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1820			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1821			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1822
1823		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1824			DMEMIT("queue_if_no_path ");
1825		if (m->pg_init_retries)
1826			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1827		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1828			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1829		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1830			DMEMIT("retain_attached_hw_handler ");
1831		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1832			switch (m->queue_mode) {
1833			case DM_TYPE_BIO_BASED:
1834				DMEMIT("queue_mode bio ");
1835				break;
1836			default:
1837				WARN_ON_ONCE(true);
1838				break;
1839			}
1840		}
1841	}
1842
1843	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1844		DMEMIT("0 ");
1845	else
1846		DMEMIT("1 %s ", m->hw_handler_name);
1847
1848	DMEMIT("%u ", m->nr_priority_groups);
1849
1850	if (m->next_pg)
1851		pg_num = m->next_pg->pg_num;
1852	else if (m->current_pg)
1853		pg_num = m->current_pg->pg_num;
1854	else
1855		pg_num = (m->nr_priority_groups ? 1 : 0);
1856
1857	DMEMIT("%u ", pg_num);
1858
1859	switch (type) {
1860	case STATUSTYPE_INFO:
1861		list_for_each_entry(pg, &m->priority_groups, list) {
1862			if (pg->bypassed)
1863				state = 'D';	/* Disabled */
1864			else if (pg == m->current_pg)
1865				state = 'A';	/* Currently Active */
1866			else
1867				state = 'E';	/* Enabled */
1868
1869			DMEMIT("%c ", state);
1870
1871			if (pg->ps.type->status)
1872				sz += pg->ps.type->status(&pg->ps, NULL, type,
1873							  result + sz,
1874							  maxlen - sz);
1875			else
1876				DMEMIT("0 ");
1877
1878			DMEMIT("%u %u ", pg->nr_pgpaths,
1879			       pg->ps.type->info_args);
1880
1881			list_for_each_entry(p, &pg->pgpaths, list) {
1882				DMEMIT("%s %s %u ", p->path.dev->name,
1883				       p->is_active ? "A" : "F",
1884				       p->fail_count);
1885				if (pg->ps.type->status)
1886					sz += pg->ps.type->status(&pg->ps,
1887					      &p->path, type, result + sz,
1888					      maxlen - sz);
1889			}
1890		}
1891		break;
1892
1893	case STATUSTYPE_TABLE:
1894		list_for_each_entry(pg, &m->priority_groups, list) {
1895			DMEMIT("%s ", pg->ps.type->name);
1896
1897			if (pg->ps.type->status)
1898				sz += pg->ps.type->status(&pg->ps, NULL, type,
1899							  result + sz,
1900							  maxlen - sz);
1901			else
1902				DMEMIT("0 ");
1903
1904			DMEMIT("%u %u ", pg->nr_pgpaths,
1905			       pg->ps.type->table_args);
1906
1907			list_for_each_entry(p, &pg->pgpaths, list) {
1908				DMEMIT("%s ", p->path.dev->name);
1909				if (pg->ps.type->status)
1910					sz += pg->ps.type->status(&pg->ps,
1911					      &p->path, type, result + sz,
1912					      maxlen - sz);
1913			}
1914		}
1915		break;
1916
1917	case STATUSTYPE_IMA:
 1918		sz = 0; /* reset the result pointer */
1919
1920		DMEMIT_TARGET_NAME_VERSION(ti->type);
1921		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);
1922
1923		pg_counter = 0;
1924		list_for_each_entry(pg, &m->priority_groups, list) {
1925			if (pg->bypassed)
1926				state = 'D';	/* Disabled */
1927			else if (pg == m->current_pg)
1928				state = 'A';	/* Currently Active */
1929			else
1930				state = 'E';	/* Enabled */
1931			DMEMIT(",pg_state_%d=%c", pg_counter, state);
1932			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
1933			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);
1934
1935			pgpath_counter = 0;
1936			list_for_each_entry(p, &pg->pgpaths, list) {
1937				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
1938				       pg_counter, pgpath_counter, p->path.dev->name,
1939				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
1940				       pg_counter, pgpath_counter, p->fail_count);
1941				if (pg->ps.type->status) {
1942					DMEMIT(",path_selector_status_%d_%d=",
1943					       pg_counter, pgpath_counter);
1944					sz += pg->ps.type->status(&pg->ps, &p->path,
1945								  type, result + sz,
1946								  maxlen - sz);
1947				}
1948				pgpath_counter++;
1949			}
1950			pg_counter++;
1951		}
1952		DMEMIT(";");
1953		break;
1954	}
1955
1956	spin_unlock_irqrestore(&m->lock, flags);
1957}
1958
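/*
 * Handle messages sent through the device-mapper message interface
 * (e.g. "dmsetup message <dev> 0 fail_path <path-dev>").  One-argument
 * messages toggle queue_if_no_path; two-argument messages enable,
 * disable or switch a priority group, or fail/reinstate a path.
 */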
1959static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
1960			     char *result, unsigned int maxlen)
1961{
1962	int r = -EINVAL;
1963	struct dm_dev *dev;
1964	struct multipath *m = ti->private;
1965	action_fn action;
1966	unsigned long flags;
1967
1968	mutex_lock(&m->work_mutex);
1969
1970	if (dm_suspended(ti)) {
1971		r = -EBUSY;
1972		goto out;
1973	}
1974
1975	if (argc == 1) {
1976		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1977			r = queue_if_no_path(m, true, false, __func__);
1978			spin_lock_irqsave(&m->lock, flags);
1979			enable_nopath_timeout(m);
1980			spin_unlock_irqrestore(&m->lock, flags);
1981			goto out;
1982		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1983			r = queue_if_no_path(m, false, false, __func__);
1984			disable_nopath_timeout(m);
1985			goto out;
1986		}
1987	}
1988
1989	if (argc != 2) {
1990		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1991		goto out;
1992	}
1993
1994	if (!strcasecmp(argv[0], "disable_group")) {
1995		r = bypass_pg_num(m, argv[1], true);
1996		goto out;
1997	} else if (!strcasecmp(argv[0], "enable_group")) {
1998		r = bypass_pg_num(m, argv[1], false);
1999		goto out;
2000	} else if (!strcasecmp(argv[0], "switch_group")) {
2001		r = switch_pg_num(m, argv[1]);
2002		goto out;
2003	} else if (!strcasecmp(argv[0], "reinstate_path"))
2004		action = reinstate_path;
2005	else if (!strcasecmp(argv[0], "fail_path"))
2006		action = fail_path;
2007	else {
2008		DMWARN("Unrecognised multipath message received: %s", argv[0]);
2009		goto out;
2010	}
2011
2012	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
2013	if (r) {
2014		DMWARN("message: error getting device %s",
2015		       argv[1]);
2016		goto out;
2017	}
2018
2019	r = action_dev(m, dev, action);
2020
2021	dm_put_device(ti, dev);
2022
2023out:
2024	mutex_unlock(&m->work_mutex);
2025	return r;
2026}
2027
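/*
 * Select a live path and report its underlying block device so that
 * dm core can pass ioctls through to it.  Returns -ENOTCONN while
 * pg_init is still outstanding and -EIO when no usable path exists.
 */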
2028static int multipath_prepare_ioctl(struct dm_target *ti,
2029				   struct block_device **bdev)
2030{
2031	struct multipath *m = ti->private;
2032	struct pgpath *pgpath;
2033	unsigned long flags;
2034	int r;
2035
2036	pgpath = READ_ONCE(m->current_pgpath);
2037	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
2038		pgpath = choose_pgpath(m, 0);
2039
2040	if (pgpath) {
2041		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
2042			*bdev = pgpath->path.dev->bdev;
2043			r = 0;
2044		} else {
2045			/* pg_init has not started or completed */
2046			r = -ENOTCONN;
2047		}
2048	} else {
2049		/* No path is available */
2050		r = -EIO;
2051		spin_lock_irqsave(&m->lock, flags);
2052		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
2053			r = -ENOTCONN;
2054		spin_unlock_irqrestore(&m->lock, flags);
2055	}
2056
2057	if (r == -ENOTCONN) {
2058		if (!READ_ONCE(m->current_pg)) {
2059			/* Path status changed, redo selection */
2060			(void) choose_pgpath(m, 0);
2061		}
2062		spin_lock_irqsave(&m->lock, flags);
2063		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
2064			(void) __pg_init_all_paths(m);
2065		spin_unlock_irqrestore(&m->lock, flags);
2066		dm_table_run_md_queue_async(m->ti->table);
2067		process_queued_io_list(m);
2068	}
2069
2070	/*
2071	 * Only pass ioctls through if the device sizes match exactly.
2072	 */
2073	if (!r && ti->len != bdev_nr_sectors((*bdev)))
2074		return 1;
2075	return r;
2076}
2077
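/* Call @fn on the underlying device of every path in every priority group. */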
2078static int multipath_iterate_devices(struct dm_target *ti,
2079				     iterate_devices_callout_fn fn, void *data)
2080{
2081	struct multipath *m = ti->private;
2082	struct priority_group *pg;
2083	struct pgpath *p;
2084	int ret = 0;
2085
2086	list_for_each_entry(pg, &m->priority_groups, list) {
2087		list_for_each_entry(p, &pg->pgpaths, list) {
2088			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
2089			if (ret)
2090				goto out;
2091		}
2092	}
2093
2094out:
2095	return ret;
2096}
2097
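/* Ask the block layer whether the low-level device backing @pgpath is busy. */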
2098static int pgpath_busy(struct pgpath *pgpath)
2099{
2100	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2101
2102	return blk_lld_busy(q);
2103}
2104
2105/*
2106 * We return "busy" only when we can map I/Os but underlying devices
2107 * are busy (so even if we map I/Os now, the I/Os will wait on
2108 * the underlying queue).
2109 * In other words, if we want to kill I/Os or queue them inside us
2110 * due to map unavailability, we don't return "busy".  Otherwise,
2111 * dm core won't give us the I/Os and we can't do what we want.
2112 */
2113static int multipath_busy(struct dm_target *ti)
2114{
2115	bool busy = false, has_active = false;
2116	struct multipath *m = ti->private;
2117	struct priority_group *pg, *next_pg;
2118	struct pgpath *pgpath;
2119
2120	/* pg_init in progress */
2121	if (atomic_read(&m->pg_init_in_progress))
2122		return true;
2123
2124	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
2125	if (!atomic_read(&m->nr_valid_paths)) {
2126		unsigned long flags;
2127
2128		spin_lock_irqsave(&m->lock, flags);
2129		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
2130			spin_unlock_irqrestore(&m->lock, flags);
2131			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2132		}
2133		spin_unlock_irqrestore(&m->lock, flags);
2134	}
2135
2136	/* Guess which priority_group will be used at next mapping time */
2137	pg = READ_ONCE(m->current_pg);
2138	next_pg = READ_ONCE(m->next_pg);
2139	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
2140		pg = next_pg;
2141
2142	if (!pg) {
2143		/*
2144		 * We don't know which pg will be used at next mapping time.
2145		 * We don't call choose_pgpath() here to avoid triggering
2146		 * pg_init just by busy checking.
2147		 * So we don't know whether underlying devices we will be using
2148		 * at next mapping time are busy or not. Just try mapping.
2149		 */
2150		return busy;
2151	}
2152
2153	/*
2154	 * If there is at least one non-busy active path, the path selector
2155	 * will be able to select it. So we consider such a pg as not busy.
2156	 */
2157	busy = true;
2158	list_for_each_entry(pgpath, &pg->pgpaths, list) {
2159		if (pgpath->is_active) {
2160			has_active = true;
2161			if (!pgpath_busy(pgpath)) {
2162				busy = false;
2163				break;
2164			}
2165		}
2166	}
2167
2168	if (!has_active) {
2169		/*
2170		 * No active path in this pg, so this pg won't be used and
2171		 * the current_pg will be changed at next mapping time.
2172		 * We need to try mapping to determine it.
2173		 */
2174		busy = false;
2175	}
2176
2177	return busy;
2178}
2179
2180/*
2181 *---------------------------------------------------------------
2182 * Module setup
2183 *---------------------------------------------------------------
2184 */
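/*
 * Both request-based (clone_and_map_rq/rq_end_io) and bio-based
 * (map/end_io) hooks are wired up; which set is used depends on the
 * queue_mode selected when the table is loaded.
 */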
2185static struct target_type multipath_target = {
2186	.name = "multipath",
2187	.version = {1, 14, 0},
2188	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2189		    DM_TARGET_PASSES_INTEGRITY,
2190	.module = THIS_MODULE,
2191	.ctr = multipath_ctr,
2192	.dtr = multipath_dtr,
2193	.clone_and_map_rq = multipath_clone_and_map,
2194	.release_clone_rq = multipath_release_clone,
2195	.rq_end_io = multipath_end_io,
2196	.map = multipath_map_bio,
2197	.end_io = multipath_end_io_bio,
2198	.presuspend = multipath_presuspend,
2199	.postsuspend = multipath_postsuspend,
2200	.resume = multipath_resume,
2201	.status = multipath_status,
2202	.message = multipath_message,
2203	.prepare_ioctl = multipath_prepare_ioctl,
2204	.iterate_devices = multipath_iterate_devices,
2205	.busy = multipath_busy,
2206};
2207
2208static int __init dm_multipath_init(void)
2209{
2210	int r = -ENOMEM;
2211
2212	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2213	if (!kmultipathd) {
2214		DMERR("failed to create workqueue kmpathd");
2215		goto bad_alloc_kmultipathd;
2216	}
2217
2218	/*
2219	 * A separate workqueue is used to handle the device handlers
2220	 * to avoid overloading the existing workqueue, which would
2221	 * also create a bottleneck in the storage hardware device
2222	 * activation path.
2223	 */
2224	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2225						  WQ_MEM_RECLAIM);
2226	if (!kmpath_handlerd) {
2227		DMERR("failed to create workqueue kmpath_handlerd");
2228		goto bad_alloc_kmpath_handlerd;
2229	}
2230
2231	dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
2232	if (!dm_mpath_wq) {
2233		DMERR("failed to create workqueue dm_mpath_wq");
2234		goto bad_alloc_dm_mpath_wq;
2235	}
2236
2237	r = dm_register_target(&multipath_target);
2238	if (r < 0)
2239		goto bad_register_target;
2240
2241	return 0;
2242
2243bad_register_target:
2244	destroy_workqueue(dm_mpath_wq);
2245bad_alloc_dm_mpath_wq:
2246	destroy_workqueue(kmpath_handlerd);
2247bad_alloc_kmpath_handlerd:
2248	destroy_workqueue(kmultipathd);
2249bad_alloc_kmultipathd:
2250	return r;
2251}
2252
2253static void __exit dm_multipath_exit(void)
2254{
2255	destroy_workqueue(dm_mpath_wq);
2256	destroy_workqueue(kmpath_handlerd);
2257	destroy_workqueue(kmultipathd);
2258
2259	dm_unregister_target(&multipath_target);
2260}
2261
2262module_init(dm_multipath_init);
2263module_exit(dm_multipath_exit);
2264
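/* A timeout of 0 (the default) leaves the no-path queueing timer disabled. */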
2265module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
2266MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
2267
2268MODULE_DESCRIPTION(DM_NAME " multipath target");
2269MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2270MODULE_LICENSE("GPL");
v5.4
 
   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9
  10#include "dm-rq.h"
  11#include "dm-bio-record.h"
  12#include "dm-path-selector.h"
  13#include "dm-uevent.h"
  14
  15#include <linux/blkdev.h>
  16#include <linux/ctype.h>
  17#include <linux/init.h>
  18#include <linux/mempool.h>
  19#include <linux/module.h>
  20#include <linux/pagemap.h>
  21#include <linux/slab.h>
  22#include <linux/time.h>
 
  23#include <linux/workqueue.h>
  24#include <linux/delay.h>
  25#include <scsi/scsi_dh.h>
  26#include <linux/atomic.h>
  27#include <linux/blk-mq.h>
  28
 
 
  29#define DM_MSG_PREFIX "multipath"
  30#define DM_PG_INIT_DELAY_MSECS 2000
  31#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
 
 
 
  32
  33/* Path properties */
  34struct pgpath {
  35	struct list_head list;
  36
  37	struct priority_group *pg;	/* Owning PG */
  38	unsigned fail_count;		/* Cumulative failure count */
  39
  40	struct dm_path path;
  41	struct delayed_work activate_path;
  42
  43	bool is_active:1;		/* Path status */
  44};
  45
  46#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  47
  48/*
  49 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  50 * Each has a path selector which controls which path gets used.
  51 */
  52struct priority_group {
  53	struct list_head list;
  54
  55	struct multipath *m;		/* Owning multipath instance */
  56	struct path_selector ps;
  57
  58	unsigned pg_num;		/* Reference number */
  59	unsigned nr_pgpaths;		/* Number of paths in PG */
  60	struct list_head pgpaths;
  61
  62	bool bypassed:1;		/* Temporarily bypass this PG? */
  63};
  64
  65/* Multipath context */
  66struct multipath {
  67	unsigned long flags;		/* Multipath state flags */
  68
  69	spinlock_t lock;
  70	enum dm_queue_mode queue_mode;
  71
  72	struct pgpath *current_pgpath;
  73	struct priority_group *current_pg;
  74	struct priority_group *next_pg;	/* Switch to this PG if set */
  75
  76	atomic_t nr_valid_paths;	/* Total number of usable paths */
  77	unsigned nr_priority_groups;
  78	struct list_head priority_groups;
  79
  80	const char *hw_handler_name;
  81	char *hw_handler_params;
  82	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  83	unsigned pg_init_retries;	/* Number of times to retry pg_init */
  84	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  85	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
  86	atomic_t pg_init_count;		/* Number of times pg_init called */
  87
  88	struct mutex work_mutex;
  89	struct work_struct trigger_event;
  90	struct dm_target *ti;
  91
  92	struct work_struct process_queued_bios;
  93	struct bio_list queued_bios;
 
 
  94};
  95
  96/*
  97 * Context information attached to each io we process.
  98 */
  99struct dm_mpath_io {
 100	struct pgpath *pgpath;
 101	size_t nr_bytes;
 
 102};
 103
 104typedef int (*action_fn) (struct pgpath *pgpath);
 105
 106static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 107static void trigger_event(struct work_struct *work);
 108static void activate_or_offline_path(struct pgpath *pgpath);
 109static void activate_path_work(struct work_struct *work);
 110static void process_queued_bios(struct work_struct *work);
 
 111
 112/*-----------------------------------------------
 
 113 * Multipath state flags.
 114 *-----------------------------------------------*/
 115
 116#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
 117#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
 118#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
 119#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
 120#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
 121#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
 122#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
 123
 124/*-----------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 125 * Allocation routines
 126 *-----------------------------------------------*/
 127
 128static struct pgpath *alloc_pgpath(void)
 129{
 130	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 131
 132	if (!pgpath)
 133		return NULL;
 134
 135	pgpath->is_active = true;
 136
 137	return pgpath;
 138}
 139
 140static void free_pgpath(struct pgpath *pgpath)
 141{
 142	kfree(pgpath);
 143}
 144
 145static struct priority_group *alloc_priority_group(void)
 146{
 147	struct priority_group *pg;
 148
 149	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 150
 151	if (pg)
 152		INIT_LIST_HEAD(&pg->pgpaths);
 153
 154	return pg;
 155}
 156
 157static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 158{
 159	struct pgpath *pgpath, *tmp;
 160
 161	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 162		list_del(&pgpath->list);
 163		dm_put_device(ti, pgpath->path.dev);
 164		free_pgpath(pgpath);
 165	}
 166}
 167
 168static void free_priority_group(struct priority_group *pg,
 169				struct dm_target *ti)
 170{
 171	struct path_selector *ps = &pg->ps;
 172
 173	if (ps->type) {
 174		ps->type->destroy(ps);
 175		dm_put_path_selector(ps->type);
 176	}
 177
 178	free_pgpaths(&pg->pgpaths, ti);
 179	kfree(pg);
 180}
 181
 182static struct multipath *alloc_multipath(struct dm_target *ti)
 183{
 184	struct multipath *m;
 185
 186	m = kzalloc(sizeof(*m), GFP_KERNEL);
 187	if (m) {
 188		INIT_LIST_HEAD(&m->priority_groups);
 189		spin_lock_init(&m->lock);
 190		atomic_set(&m->nr_valid_paths, 0);
 191		INIT_WORK(&m->trigger_event, trigger_event);
 192		mutex_init(&m->work_mutex);
 193
 194		m->queue_mode = DM_TYPE_NONE;
 195
 196		m->ti = ti;
 197		ti->private = m;
 
 
 198	}
 199
 200	return m;
 201}
 202
 203static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 204{
 205	if (m->queue_mode == DM_TYPE_NONE) {
 206		m->queue_mode = DM_TYPE_REQUEST_BASED;
 207	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 208		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 209		/*
 210		 * bio-based doesn't support any direct scsi_dh management;
 211		 * it just discovers if a scsi_dh is attached.
 212		 */
 213		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 214	}
 215
 216	dm_table_set_type(ti->table, m->queue_mode);
 217
 218	/*
 219	 * Init fields that are only used when a scsi_dh is attached
 220	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
 221	 */
 222	set_bit(MPATHF_QUEUE_IO, &m->flags);
 223	atomic_set(&m->pg_init_in_progress, 0);
 224	atomic_set(&m->pg_init_count, 0);
 225	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 226	init_waitqueue_head(&m->pg_init_wait);
 227
 228	return 0;
 229}
 230
 231static void free_multipath(struct multipath *m)
 232{
 233	struct priority_group *pg, *tmp;
 234
 235	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 236		list_del(&pg->list);
 237		free_priority_group(pg, m->ti);
 238	}
 239
 240	kfree(m->hw_handler_name);
 241	kfree(m->hw_handler_params);
 242	mutex_destroy(&m->work_mutex);
 243	kfree(m);
 244}
 245
 246static struct dm_mpath_io *get_mpio(union map_info *info)
 247{
 248	return info->ptr;
 249}
 250
 251static size_t multipath_per_bio_data_size(void)
 252{
 253	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
 254}
 255
 256static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
 257{
 258	return dm_per_bio_data(bio, multipath_per_bio_data_size());
 259}
 260
 261static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
 262{
 263	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
 264	void *bio_details = mpio + 1;
 265	return bio_details;
 266}
 267
 268static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
 269{
 270	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 271	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
 272
 273	mpio->nr_bytes = bio->bi_iter.bi_size;
 274	mpio->pgpath = NULL;
 
 275	*mpio_p = mpio;
 276
 277	dm_bio_record(bio_details, bio);
 278}
 279
 280/*-----------------------------------------------
 
 281 * Path selection
 282 *-----------------------------------------------*/
 283
 284static int __pg_init_all_paths(struct multipath *m)
 285{
 286	struct pgpath *pgpath;
 287	unsigned long pg_init_delay = 0;
 288
 289	lockdep_assert_held(&m->lock);
 290
 291	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 292		return 0;
 293
 294	atomic_inc(&m->pg_init_count);
 295	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 296
 297	/* Check here to reset pg_init_required */
 298	if (!m->current_pg)
 299		return 0;
 300
 301	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 302		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 303						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 304	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 305		/* Skip failed paths */
 306		if (!pgpath->is_active)
 307			continue;
 308		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 309				       pg_init_delay))
 310			atomic_inc(&m->pg_init_in_progress);
 311	}
 312	return atomic_read(&m->pg_init_in_progress);
 313}
 314
 315static int pg_init_all_paths(struct multipath *m)
 316{
 317	int ret;
 318	unsigned long flags;
 319
 320	spin_lock_irqsave(&m->lock, flags);
 321	ret = __pg_init_all_paths(m);
 322	spin_unlock_irqrestore(&m->lock, flags);
 323
 324	return ret;
 325}
 326
 327static void __switch_pg(struct multipath *m, struct priority_group *pg)
 328{
 
 
 329	m->current_pg = pg;
 330
 331	/* Must we initialise the PG first, and queue I/O till it's ready? */
 332	if (m->hw_handler_name) {
 333		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 334		set_bit(MPATHF_QUEUE_IO, &m->flags);
 335	} else {
 336		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 337		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 338	}
 339
 340	atomic_set(&m->pg_init_count, 0);
 341}
 342
 343static struct pgpath *choose_path_in_pg(struct multipath *m,
 344					struct priority_group *pg,
 345					size_t nr_bytes)
 346{
 347	unsigned long flags;
 348	struct dm_path *path;
 349	struct pgpath *pgpath;
 350
 351	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 352	if (!path)
 353		return ERR_PTR(-ENXIO);
 354
 355	pgpath = path_to_pgpath(path);
 356
 357	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 358		/* Only update current_pgpath if pg changed */
 359		spin_lock_irqsave(&m->lock, flags);
 360		m->current_pgpath = pgpath;
 361		__switch_pg(m, pg);
 362		spin_unlock_irqrestore(&m->lock, flags);
 363	}
 364
 365	return pgpath;
 366}
 367
 368static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 369{
 370	unsigned long flags;
 371	struct priority_group *pg;
 372	struct pgpath *pgpath;
 373	unsigned bypassed = 1;
 374
 375	if (!atomic_read(&m->nr_valid_paths)) {
 
 376		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 
 377		goto failed;
 378	}
 379
 380	/* Were we instructed to switch PG? */
 381	if (READ_ONCE(m->next_pg)) {
 382		spin_lock_irqsave(&m->lock, flags);
 383		pg = m->next_pg;
 384		if (!pg) {
 385			spin_unlock_irqrestore(&m->lock, flags);
 386			goto check_current_pg;
 387		}
 388		m->next_pg = NULL;
 389		spin_unlock_irqrestore(&m->lock, flags);
 390		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 391		if (!IS_ERR_OR_NULL(pgpath))
 392			return pgpath;
 393	}
 394
 395	/* Don't change PG until it has no remaining paths */
 396check_current_pg:
 397	pg = READ_ONCE(m->current_pg);
 398	if (pg) {
 399		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 400		if (!IS_ERR_OR_NULL(pgpath))
 401			return pgpath;
 402	}
 403
 404	/*
 405	 * Loop through priority groups until we find a valid path.
 406	 * First time we skip PGs marked 'bypassed'.
 407	 * Second time we only try the ones we skipped, but set
 408	 * pg_init_delay_retry so we do not hammer controllers.
 409	 */
 410	do {
 411		list_for_each_entry(pg, &m->priority_groups, list) {
 412			if (pg->bypassed == !!bypassed)
 413				continue;
 414			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 415			if (!IS_ERR_OR_NULL(pgpath)) {
 416				if (!bypassed)
 
 417					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 
 
 418				return pgpath;
 419			}
 420		}
 421	} while (bypassed--);
 422
 423failed:
 424	spin_lock_irqsave(&m->lock, flags);
 425	m->current_pgpath = NULL;
 426	m->current_pg = NULL;
 427	spin_unlock_irqrestore(&m->lock, flags);
 428
 429	return NULL;
 430}
 431
 432/*
 433 * dm_report_EIO() is a macro instead of a function to make pr_debug()
 434 * report the function name and line number of the function from which
 435 * it has been invoked.
 436 */
 437#define dm_report_EIO(m)						\
 438do {									\
 439	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
 440									\
 441	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
 442		 dm_device_name(md),					\
 443		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
 444		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
 445		 dm_noflush_suspending((m)->ti));			\
 446} while (0)
 447
 448/*
 449 * Check whether bios must be queued in the device-mapper core rather
 450 * than here in the target.
 451 *
 452 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
 453 * the same value then we are not between multipath_presuspend()
 454 * and multipath_resume() calls and we have no need to check
 455 * for the DMF_NOFLUSH_SUSPENDING flag.
 456 */
 457static bool __must_push_back(struct multipath *m, unsigned long flags)
 458{
 459	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
 460		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
 461		dm_noflush_suspending(m->ti));
 462}
 463
 464/*
 465 * Following functions use READ_ONCE to get atomic access to
 466 * all m->flags to avoid taking spinlock
 467 */
 468static bool must_push_back_rq(struct multipath *m)
 469{
 470	unsigned long flags = READ_ONCE(m->flags);
 471	return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
 472}
 
 
 
 473
 474static bool must_push_back_bio(struct multipath *m)
 475{
 476	unsigned long flags = READ_ONCE(m->flags);
 477	return __must_push_back(m, flags);
 478}
 479
 480/*
 481 * Map cloned requests (request-based multipath)
 482 */
 483static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 484				   union map_info *map_context,
 485				   struct request **__clone)
 486{
 487	struct multipath *m = ti->private;
 488	size_t nr_bytes = blk_rq_bytes(rq);
 489	struct pgpath *pgpath;
 490	struct block_device *bdev;
 491	struct dm_mpath_io *mpio = get_mpio(map_context);
 492	struct request_queue *q;
 493	struct request *clone;
 494
 495	/* Do we need to select a new pgpath? */
 496	pgpath = READ_ONCE(m->current_pgpath);
 497	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 498		pgpath = choose_pgpath(m, nr_bytes);
 499
 500	if (!pgpath) {
 501		if (must_push_back_rq(m))
 502			return DM_MAPIO_DELAY_REQUEUE;
 503		dm_report_EIO(m);	/* Failed */
 504		return DM_MAPIO_KILL;
 505	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 506		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 507		pg_init_all_paths(m);
 508		return DM_MAPIO_DELAY_REQUEUE;
 509	}
 510
 511	mpio->pgpath = pgpath;
 512	mpio->nr_bytes = nr_bytes;
 513
 514	bdev = pgpath->path.dev->bdev;
 515	q = bdev_get_queue(bdev);
 516	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
 517			BLK_MQ_REQ_NOWAIT);
 518	if (IS_ERR(clone)) {
 519		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 520		if (blk_queue_dying(q)) {
 521			atomic_inc(&m->pg_init_in_progress);
 522			activate_or_offline_path(pgpath);
 523			return DM_MAPIO_DELAY_REQUEUE;
 524		}
 525
 526		/*
 527		 * blk-mq's SCHED_RESTART can cover this requeue, so we
 528		 * needn't deal with it by DELAY_REQUEUE. More importantly,
 529		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
 530		 * get the queue busy feedback (via BLK_STS_RESOURCE),
 531		 * otherwise I/O merging can suffer.
 532		 */
 533		return DM_MAPIO_REQUEUE;
 534	}
 535	clone->bio = clone->biotail = NULL;
 536	clone->rq_disk = bdev->bd_disk;
 537	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 538	*__clone = clone;
 539
 540	if (pgpath->pg->ps.type->start_io)
 541		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 542					      &pgpath->path,
 543					      nr_bytes);
 544	return DM_MAPIO_REMAPPED;
 545}
 546
 547static void multipath_release_clone(struct request *clone,
 548				    union map_info *map_context)
 549{
 550	if (unlikely(map_context)) {
 551		/*
 552		 * non-NULL map_context means caller is still map
 553		 * method; must undo multipath_clone_and_map()
 554		 */
 555		struct dm_mpath_io *mpio = get_mpio(map_context);
 556		struct pgpath *pgpath = mpio->pgpath;
 557
 558		if (pgpath && pgpath->pg->ps.type->end_io)
 559			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
 560						    &pgpath->path,
 561						    mpio->nr_bytes);
 
 562	}
 563
 564	blk_put_request(clone);
 565}
 566
 567/*
 568 * Map cloned bios (bio-based multipath)
 569 */
 570
 571static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 
 
 
 
 
 
 
 
 572{
 573	struct pgpath *pgpath;
 574	unsigned long flags;
 575	bool queue_io;
 576
 577	/* Do we need to select a new pgpath? */
 578	pgpath = READ_ONCE(m->current_pgpath);
 579	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 580	if (!pgpath || !queue_io)
 581		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 582
 583	if ((pgpath && queue_io) ||
 584	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 585		/* Queue for the daemon to resubmit */
 586		spin_lock_irqsave(&m->lock, flags);
 587		bio_list_add(&m->queued_bios, bio);
 588		spin_unlock_irqrestore(&m->lock, flags);
 589
 590		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
 591		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 592			pg_init_all_paths(m);
 593		else if (!queue_io)
 594			queue_work(kmultipathd, &m->process_queued_bios);
 595
 596		return ERR_PTR(-EAGAIN);
 597	}
 598
 599	return pgpath;
 600}
 601
 602static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 603{
 604	struct pgpath *pgpath;
 605	unsigned long flags;
 606
 607	/* Do we need to select a new pgpath? */
 608	/*
 609	 * FIXME: currently only switching path if no path (due to failure, etc)
 610	 * - which negates the point of using a path selector
 611	 */
 612	pgpath = READ_ONCE(m->current_pgpath);
 613	if (!pgpath)
 614		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 615
 616	if (!pgpath) {
 
 617		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 618			/* Queue for the daemon to resubmit */
 619			spin_lock_irqsave(&m->lock, flags);
 620			bio_list_add(&m->queued_bios, bio);
 621			spin_unlock_irqrestore(&m->lock, flags);
 622			queue_work(kmultipathd, &m->process_queued_bios);
 623
 624			return ERR_PTR(-EAGAIN);
 625		}
 626		return NULL;
 
 
 627	}
 628
 629	return pgpath;
 630}
 631
 632static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 633			       struct dm_mpath_io *mpio)
 634{
 635	struct pgpath *pgpath;
 636
 637	if (!m->hw_handler_name)
 638		pgpath = __map_bio_fast(m, bio);
 639	else
 640		pgpath = __map_bio(m, bio);
 641
 642	if (IS_ERR(pgpath))
 643		return DM_MAPIO_SUBMITTED;
 644
 645	if (!pgpath) {
 646		if (must_push_back_bio(m))
 647			return DM_MAPIO_REQUEUE;
 648		dm_report_EIO(m);
 649		return DM_MAPIO_KILL;
 650	}
 651
 652	mpio->pgpath = pgpath;
 653
 
 
 
 654	bio->bi_status = 0;
 655	bio_set_dev(bio, pgpath->path.dev->bdev);
 656	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 657
 658	if (pgpath->pg->ps.type->start_io)
 659		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 660					      &pgpath->path,
 661					      mpio->nr_bytes);
 662	return DM_MAPIO_REMAPPED;
 663}
 664
 665static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 666{
 667	struct multipath *m = ti->private;
 668	struct dm_mpath_io *mpio = NULL;
 669
 670	multipath_init_per_bio_data(bio, &mpio);
 671	return __multipath_map_bio(m, bio, mpio);
 672}
 673
 674static void process_queued_io_list(struct multipath *m)
 675{
 676	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
 677		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
 678	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 679		queue_work(kmultipathd, &m->process_queued_bios);
 680}
 681
 682static void process_queued_bios(struct work_struct *work)
 683{
 684	int r;
 685	unsigned long flags;
 686	struct bio *bio;
 687	struct bio_list bios;
 688	struct blk_plug plug;
 689	struct multipath *m =
 690		container_of(work, struct multipath, process_queued_bios);
 691
 692	bio_list_init(&bios);
 693
 694	spin_lock_irqsave(&m->lock, flags);
 695
 696	if (bio_list_empty(&m->queued_bios)) {
 697		spin_unlock_irqrestore(&m->lock, flags);
 698		return;
 699	}
 700
 701	bio_list_merge(&bios, &m->queued_bios);
 702	bio_list_init(&m->queued_bios);
 703
 704	spin_unlock_irqrestore(&m->lock, flags);
 705
 706	blk_start_plug(&plug);
 707	while ((bio = bio_list_pop(&bios))) {
 708		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 
 709		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
 710		r = __multipath_map_bio(m, bio, mpio);
 711		switch (r) {
 712		case DM_MAPIO_KILL:
 713			bio->bi_status = BLK_STS_IOERR;
 714			bio_endio(bio);
 715			break;
 716		case DM_MAPIO_REQUEUE:
 717			bio->bi_status = BLK_STS_DM_REQUEUE;
 718			bio_endio(bio);
 719			break;
 720		case DM_MAPIO_REMAPPED:
 721			generic_make_request(bio);
 722			break;
 723		case DM_MAPIO_SUBMITTED:
 724			break;
 725		default:
 726			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
 727		}
 728	}
 729	blk_finish_plug(&plug);
 730}
 731
 732/*
 733 * If we run out of usable paths, should we queue I/O or error it?
 734 */
 735static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 736			    bool save_old_value)
 737{
 738	unsigned long flags;
 
 
 
 
 
 739
 740	spin_lock_irqsave(&m->lock, flags);
 741	assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
 742		   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
 743		   (!save_old_value && queue_if_no_path));
 744	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 745	spin_unlock_irqrestore(&m->lock, flags);
 746
 747	if (!queue_if_no_path) {
 748		dm_table_run_md_queue_async(m->ti->table);
 749		process_queued_io_list(m);
 750	}
 751
 752	return 0;
 753}
 754
 755/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 756 * An event is triggered whenever a path is taken out of use.
 757 * Includes path failure and PG bypass.
 758 */
 759static void trigger_event(struct work_struct *work)
 760{
 761	struct multipath *m =
 762		container_of(work, struct multipath, trigger_event);
 763
 764	dm_table_event(m->ti->table);
 765}
 766
 767/*-----------------------------------------------------------------
 
 768 * Constructor/argument parsing:
 769 * <#multipath feature args> [<arg>]*
 770 * <#hw_handler args> [hw_handler [<arg>]*]
 771 * <#priority groups>
 772 * <initial priority group>
 773 *     [<selector> <#selector args> [<arg>]*
 774 *      <#paths> <#per-path selector args>
 775 *         [<path> [<arg>]* ]+ ]+
 776 *---------------------------------------------------------------*/
 
 777static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 778			       struct dm_target *ti)
 779{
 780	int r;
 781	struct path_selector_type *pst;
 782	unsigned ps_argc;
 783
 784	static const struct dm_arg _args[] = {
 785		{0, 1024, "invalid number of path selector args"},
 786	};
 787
 788	pst = dm_get_path_selector(dm_shift_arg(as));
 789	if (!pst) {
 790		ti->error = "unknown path selector type";
 791		return -EINVAL;
 792	}
 793
 794	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 795	if (r) {
 796		dm_put_path_selector(pst);
 797		return -EINVAL;
 798	}
 799
 800	r = pst->create(&pg->ps, ps_argc, as->argv);
 801	if (r) {
 802		dm_put_path_selector(pst);
 803		ti->error = "path selector constructor failed";
 804		return r;
 805	}
 806
 807	pg->ps.type = pst;
 808	dm_consume_args(as, ps_argc);
 809
 810	return 0;
 811}
 812
 813static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 814			 const char **attached_handler_name, char **error)
 815{
 816	struct request_queue *q = bdev_get_queue(bdev);
 817	int r;
 818
 819	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 820retain:
 821		if (*attached_handler_name) {
 822			/*
 823			 * Clear any hw_handler_params associated with a
 824			 * handler that isn't already attached.
 825			 */
 826			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
 827				kfree(m->hw_handler_params);
 828				m->hw_handler_params = NULL;
 829			}
 830
 831			/*
 832			 * Reset hw_handler_name to match the attached handler
 833			 *
 834			 * NB. This modifies the table line to show the actual
 835			 * handler instead of the original table passed in.
 836			 */
 837			kfree(m->hw_handler_name);
 838			m->hw_handler_name = *attached_handler_name;
 839			*attached_handler_name = NULL;
 840		}
 841	}
 842
 843	if (m->hw_handler_name) {
 844		r = scsi_dh_attach(q, m->hw_handler_name);
 845		if (r == -EBUSY) {
 846			char b[BDEVNAME_SIZE];
 847
 848			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
 849			       bdevname(bdev, b));
 850			goto retain;
 851		}
 852		if (r < 0) {
 853			*error = "error attaching hardware handler";
 854			return r;
 855		}
 856
 857		if (m->hw_handler_params) {
 858			r = scsi_dh_set_params(q, m->hw_handler_params);
 859			if (r < 0) {
 860				*error = "unable to set hardware handler parameters";
 861				return r;
 862			}
 863		}
 864	}
 865
 866	return 0;
 867}
 868
 869static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 870				 struct dm_target *ti)
 871{
 872	int r;
 873	struct pgpath *p;
 874	struct multipath *m = ti->private;
 875	struct request_queue *q;
 876	const char *attached_handler_name = NULL;
 877
 878	/* we need at least a path arg */
 879	if (as->argc < 1) {
 880		ti->error = "no device given";
 881		return ERR_PTR(-EINVAL);
 882	}
 883
 884	p = alloc_pgpath();
 885	if (!p)
 886		return ERR_PTR(-ENOMEM);
 887
 888	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 889			  &p->path.dev);
 890	if (r) {
 891		ti->error = "error getting device";
 892		goto bad;
 893	}
 894
 895	q = bdev_get_queue(p->path.dev->bdev);
 896	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 897	if (attached_handler_name || m->hw_handler_name) {
 898		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 899		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
 900		kfree(attached_handler_name);
 901		if (r) {
 902			dm_put_device(ti, p->path.dev);
 903			goto bad;
 904		}
 905	}
 906
 907	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 908	if (r) {
 909		dm_put_device(ti, p->path.dev);
 910		goto bad;
 911	}
 912
 913	return p;
 914 bad:
 915	free_pgpath(p);
 916	return ERR_PTR(r);
 917}
 918
 919static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 920						   struct multipath *m)
 921{
 922	static const struct dm_arg _args[] = {
 923		{1, 1024, "invalid number of paths"},
 924		{0, 1024, "invalid number of selector args"}
 925	};
 926
 927	int r;
 928	unsigned i, nr_selector_args, nr_args;
 929	struct priority_group *pg;
 930	struct dm_target *ti = m->ti;
 931
 932	if (as->argc < 2) {
 933		as->argc = 0;
 934		ti->error = "not enough priority group arguments";
 935		return ERR_PTR(-EINVAL);
 936	}
 937
 938	pg = alloc_priority_group();
 939	if (!pg) {
 940		ti->error = "couldn't allocate priority group";
 941		return ERR_PTR(-ENOMEM);
 942	}
 943	pg->m = m;
 944
 945	r = parse_path_selector(as, pg, ti);
 946	if (r)
 947		goto bad;
 948
 949	/*
 950	 * read the paths
 951	 */
 952	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 953	if (r)
 954		goto bad;
 955
 956	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 957	if (r)
 958		goto bad;
 959
 960	nr_args = 1 + nr_selector_args;
 961	for (i = 0; i < pg->nr_pgpaths; i++) {
 962		struct pgpath *pgpath;
 963		struct dm_arg_set path_args;
 964
 965		if (as->argc < nr_args) {
 966			ti->error = "not enough path parameters";
 967			r = -EINVAL;
 968			goto bad;
 969		}
 970
 971		path_args.argc = nr_args;
 972		path_args.argv = as->argv;
 973
 974		pgpath = parse_path(&path_args, &pg->ps, ti);
 975		if (IS_ERR(pgpath)) {
 976			r = PTR_ERR(pgpath);
 977			goto bad;
 978		}
 979
 980		pgpath->pg = pg;
 981		list_add_tail(&pgpath->list, &pg->pgpaths);
 982		dm_consume_args(as, nr_args);
 983	}
 984
 985	return pg;
 986
 987 bad:
 988	free_priority_group(pg, ti);
 989	return ERR_PTR(r);
 990}
 991
 992static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 993{
 994	unsigned hw_argc;
 995	int ret;
 996	struct dm_target *ti = m->ti;
 997
 998	static const struct dm_arg _args[] = {
 999		{0, 1024, "invalid number of hardware handler args"},
1000	};
1001
1002	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
1003		return -EINVAL;
1004
1005	if (!hw_argc)
1006		return 0;
1007
1008	if (m->queue_mode == DM_TYPE_BIO_BASED) {
1009		dm_consume_args(as, hw_argc);
1010		DMERR("bio-based multipath doesn't allow hardware handler args");
1011		return 0;
1012	}
1013
1014	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1015	if (!m->hw_handler_name)
1016		return -EINVAL;
1017
1018	if (hw_argc > 1) {
1019		char *p;
1020		int i, j, len = 4;
1021
1022		for (i = 0; i <= hw_argc - 2; i++)
1023			len += strlen(as->argv[i]) + 1;
1024		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1025		if (!p) {
1026			ti->error = "memory allocation failed";
1027			ret = -ENOMEM;
1028			goto fail;
1029		}
1030		j = sprintf(p, "%d", hw_argc - 1);
1031		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
1032			j = sprintf(p, "%s", as->argv[i]);
1033	}
1034	dm_consume_args(as, hw_argc - 1);
1035
1036	return 0;
1037fail:
1038	kfree(m->hw_handler_name);
1039	m->hw_handler_name = NULL;
1040	return ret;
1041}
1042
1043static int parse_features(struct dm_arg_set *as, struct multipath *m)
1044{
1045	int r;
1046	unsigned argc;
1047	struct dm_target *ti = m->ti;
1048	const char *arg_name;
1049
1050	static const struct dm_arg _args[] = {
1051		{0, 8, "invalid number of feature args"},
1052		{1, 50, "pg_init_retries must be between 1 and 50"},
1053		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1054	};
1055
1056	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1057	if (r)
1058		return -EINVAL;
1059
1060	if (!argc)
1061		return 0;
1062
1063	do {
1064		arg_name = dm_shift_arg(as);
1065		argc--;
1066
1067		if (!strcasecmp(arg_name, "queue_if_no_path")) {
1068			r = queue_if_no_path(m, true, false);
1069			continue;
1070		}
1071
1072		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1073			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1074			continue;
1075		}
1076
1077		if (!strcasecmp(arg_name, "pg_init_retries") &&
1078		    (argc >= 1)) {
1079			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1080			argc--;
1081			continue;
1082		}
1083
1084		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1085		    (argc >= 1)) {
1086			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1087			argc--;
1088			continue;
1089		}
1090
1091		if (!strcasecmp(arg_name, "queue_mode") &&
1092		    (argc >= 1)) {
1093			const char *queue_mode_name = dm_shift_arg(as);
1094
1095			if (!strcasecmp(queue_mode_name, "bio"))
1096				m->queue_mode = DM_TYPE_BIO_BASED;
1097			else if (!strcasecmp(queue_mode_name, "rq") ||
1098				 !strcasecmp(queue_mode_name, "mq"))
1099				m->queue_mode = DM_TYPE_REQUEST_BASED;
1100			else {
1101				ti->error = "Unknown 'queue_mode' requested";
1102				r = -EINVAL;
1103			}
1104			argc--;
1105			continue;
1106		}
1107
1108		ti->error = "Unrecognised multipath feature request";
1109		r = -EINVAL;
1110	} while (argc && !r);
1111
1112	return r;
1113}
1114
1115static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1116{
1117	/* target arguments */
1118	static const struct dm_arg _args[] = {
1119		{0, 1024, "invalid number of priority groups"},
1120		{0, 1024, "invalid initial priority group number"},
1121	};
1122
1123	int r;
1124	struct multipath *m;
1125	struct dm_arg_set as;
1126	unsigned pg_count = 0;
1127	unsigned next_pg_num;
 
1128
1129	as.argc = argc;
1130	as.argv = argv;
1131
1132	m = alloc_multipath(ti);
1133	if (!m) {
1134		ti->error = "can't allocate multipath";
1135		return -EINVAL;
1136	}
1137
1138	r = parse_features(&as, m);
1139	if (r)
1140		goto bad;
1141
1142	r = alloc_multipath_stage2(ti, m);
1143	if (r)
1144		goto bad;
1145
1146	r = parse_hw_handler(&as, m);
1147	if (r)
1148		goto bad;
1149
1150	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1151	if (r)
1152		goto bad;
1153
1154	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1155	if (r)
1156		goto bad;
1157
1158	if ((!m->nr_priority_groups && next_pg_num) ||
1159	    (m->nr_priority_groups && !next_pg_num)) {
1160		ti->error = "invalid initial priority group";
1161		r = -EINVAL;
1162		goto bad;
1163	}
1164
1165	/* parse the priority groups */
1166	while (as.argc) {
1167		struct priority_group *pg;
1168		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1169
1170		pg = parse_priority_group(&as, m);
1171		if (IS_ERR(pg)) {
1172			r = PTR_ERR(pg);
1173			goto bad;
1174		}
1175
1176		nr_valid_paths += pg->nr_pgpaths;
1177		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1178
1179		list_add_tail(&pg->list, &m->priority_groups);
1180		pg_count++;
1181		pg->pg_num = pg_count;
1182		if (!--next_pg_num)
1183			m->next_pg = pg;
1184	}
1185
1186	if (pg_count != m->nr_priority_groups) {
1187		ti->error = "priority group count mismatch";
1188		r = -EINVAL;
1189		goto bad;
1190	}
1191
 
 
 
 
1192	ti->num_flush_bios = 1;
1193	ti->num_discard_bios = 1;
1194	ti->num_write_same_bios = 1;
1195	ti->num_write_zeroes_bios = 1;
1196	if (m->queue_mode == DM_TYPE_BIO_BASED)
1197		ti->per_io_data_size = multipath_per_bio_data_size();
1198	else
1199		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1200
1201	return 0;
1202
1203 bad:
1204	free_multipath(m);
1205	return r;
1206}
1207
1208static void multipath_wait_for_pg_init_completion(struct multipath *m)
1209{
1210	DEFINE_WAIT(wait);
1211
1212	while (1) {
1213		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1214
1215		if (!atomic_read(&m->pg_init_in_progress))
1216			break;
1217
1218		io_schedule();
1219	}
1220	finish_wait(&m->pg_init_wait, &wait);
1221}
1222
1223static void flush_multipath_work(struct multipath *m)
1224{
1225	if (m->hw_handler_name) {
1226		set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1227		smp_mb__after_atomic();
 
 
 
 
 
 
 
1228
1229		if (atomic_read(&m->pg_init_in_progress))
1230			flush_workqueue(kmpath_handlerd);
1231		multipath_wait_for_pg_init_completion(m);
1232
1233		clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1234		smp_mb__after_atomic();
 
 
1235	}
1236
1237	if (m->queue_mode == DM_TYPE_BIO_BASED)
1238		flush_work(&m->process_queued_bios);
1239	flush_work(&m->trigger_event);
1240}
1241
1242static void multipath_dtr(struct dm_target *ti)
1243{
1244	struct multipath *m = ti->private;
1245
 
1246	flush_multipath_work(m);
1247	free_multipath(m);
1248}
1249
1250/*
1251 * Take a path out of use.
1252 */
1253static int fail_path(struct pgpath *pgpath)
1254{
1255	unsigned long flags;
1256	struct multipath *m = pgpath->pg->m;
1257
1258	spin_lock_irqsave(&m->lock, flags);
1259
1260	if (!pgpath->is_active)
1261		goto out;
1262
1263	DMWARN("Failing path %s.", pgpath->path.dev->name);
 
 
1264
1265	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1266	pgpath->is_active = false;
1267	pgpath->fail_count++;
1268
1269	atomic_dec(&m->nr_valid_paths);
1270
1271	if (pgpath == m->current_pgpath)
1272		m->current_pgpath = NULL;
1273
1274	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1275		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1276
1277	schedule_work(&m->trigger_event);
 
 
1278
1279out:
1280	spin_unlock_irqrestore(&m->lock, flags);
1281
1282	return 0;
1283}
1284
1285/*
1286 * Reinstate a previously-failed path
1287 */
1288static int reinstate_path(struct pgpath *pgpath)
1289{
1290	int r = 0, run_queue = 0;
1291	unsigned long flags;
1292	struct multipath *m = pgpath->pg->m;
1293	unsigned nr_valid_paths;
1294
1295	spin_lock_irqsave(&m->lock, flags);
1296
1297	if (pgpath->is_active)
1298		goto out;
1299
1300	DMWARN("Reinstating path %s.", pgpath->path.dev->name);
 
 
1301
1302	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1303	if (r)
1304		goto out;
1305
1306	pgpath->is_active = true;
1307
1308	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1309	if (nr_valid_paths == 1) {
1310		m->current_pgpath = NULL;
1311		run_queue = 1;
1312	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1313		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1314			atomic_inc(&m->pg_init_in_progress);
1315	}
1316
1317	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1318		       pgpath->path.dev->name, nr_valid_paths);
1319
1320	schedule_work(&m->trigger_event);
1321
1322out:
1323	spin_unlock_irqrestore(&m->lock, flags);
1324	if (run_queue) {
1325		dm_table_run_md_queue_async(m->ti->table);
1326		process_queued_io_list(m);
1327	}
1328
 
 
 
1329	return r;
1330}
1331
1332/*
1333 * Fail or reinstate all paths that match the provided struct dm_dev.
1334 */
1335static int action_dev(struct multipath *m, struct dm_dev *dev,
1336		      action_fn action)
1337{
1338	int r = -EINVAL;
1339	struct pgpath *pgpath;
1340	struct priority_group *pg;
1341
1342	list_for_each_entry(pg, &m->priority_groups, list) {
1343		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1344			if (pgpath->path.dev == dev)
1345				r = action(pgpath);
1346		}
1347	}
1348
1349	return r;
1350}
1351
1352/*
1353 * Temporarily try to avoid having to use the specified PG
1354 */
1355static void bypass_pg(struct multipath *m, struct priority_group *pg,
1356		      bool bypassed)
1357{
1358	unsigned long flags;
1359
1360	spin_lock_irqsave(&m->lock, flags);
1361
1362	pg->bypassed = bypassed;
1363	m->current_pgpath = NULL;
1364	m->current_pg = NULL;
1365
1366	spin_unlock_irqrestore(&m->lock, flags);
1367
1368	schedule_work(&m->trigger_event);
1369}
1370
1371/*
1372 * Switch to using the specified PG from the next I/O that gets mapped
1373 */
1374static int switch_pg_num(struct multipath *m, const char *pgstr)
1375{
1376	struct priority_group *pg;
1377	unsigned pgnum;
1378	unsigned long flags;
1379	char dummy;
1380
1381	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1382	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1383		DMWARN("invalid PG number supplied to switch_pg_num");
1384		return -EINVAL;
1385	}
1386
1387	spin_lock_irqsave(&m->lock, flags);
1388	list_for_each_entry(pg, &m->priority_groups, list) {
1389		pg->bypassed = false;
1390		if (--pgnum)
1391			continue;
1392
1393		m->current_pgpath = NULL;
1394		m->current_pg = NULL;
1395		m->next_pg = pg;
1396	}
1397	spin_unlock_irqrestore(&m->lock, flags);
1398
1399	schedule_work(&m->trigger_event);
1400	return 0;
1401}
1402
1403/*
1404 * Set/clear bypassed status of a PG.
1405 * PGs are numbered upwards from 1 in the order they were declared.
1406 */
1407static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1408{
1409	struct priority_group *pg;
1410	unsigned pgnum;
1411	char dummy;
1412
1413	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1414	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1415		DMWARN("invalid PG number supplied to bypass_pg");
1416		return -EINVAL;
1417	}
1418
1419	list_for_each_entry(pg, &m->priority_groups, list) {
1420		if (!--pgnum)
1421			break;
1422	}
1423
1424	bypass_pg(m, pg, bypassed);
1425	return 0;
1426}
1427
1428/*
1429 * Should we retry pg_init immediately?
1430 */
1431static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1432{
1433	unsigned long flags;
1434	bool limit_reached = false;
1435
1436	spin_lock_irqsave(&m->lock, flags);
1437
1438	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1439	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1440		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1441	else
1442		limit_reached = true;
1443
1444	spin_unlock_irqrestore(&m->lock, flags);
1445
1446	return limit_reached;
1447}
1448
1449static void pg_init_done(void *data, int errors)
1450{
1451	struct pgpath *pgpath = data;
1452	struct priority_group *pg = pgpath->pg;
1453	struct multipath *m = pg->m;
1454	unsigned long flags;
1455	bool delay_retry = false;
1456
1457	/* device or driver problems */
1458	switch (errors) {
1459	case SCSI_DH_OK:
1460		break;
1461	case SCSI_DH_NOSYS:
1462		if (!m->hw_handler_name) {
1463			errors = 0;
1464			break;
1465		}
1466		DMERR("Could not failover the device: Handler scsi_dh_%s "
1467		      "Error %d.", m->hw_handler_name, errors);
1468		/*
1469		 * Fail path for now, so we do not ping pong
1470		 */
1471		fail_path(pgpath);
1472		break;
1473	case SCSI_DH_DEV_TEMP_BUSY:
1474		/*
1475		 * Probably doing something like FW upgrade on the
1476		 * controller so try the other pg.
1477		 */
1478		bypass_pg(m, pg, true);
1479		break;
1480	case SCSI_DH_RETRY:
1481		/* Wait before retrying. */
1482		delay_retry = 1;
1483		/* fall through */
1484	case SCSI_DH_IMM_RETRY:
1485	case SCSI_DH_RES_TEMP_UNAVAIL:
1486		if (pg_init_limit_reached(m, pgpath))
1487			fail_path(pgpath);
1488		errors = 0;
1489		break;
1490	case SCSI_DH_DEV_OFFLINED:
1491	default:
1492		/*
1493		 * We probably do not want to fail the path for a device
1494		 * error, but this is what the old dm did. In future
1495		 * patches we can do more advanced handling.
1496		 */
1497		fail_path(pgpath);
1498	}
1499
1500	spin_lock_irqsave(&m->lock, flags);
1501	if (errors) {
1502		if (pgpath == m->current_pgpath) {
1503			DMERR("Could not failover device. Error %d.", errors);
1504			m->current_pgpath = NULL;
1505			m->current_pg = NULL;
1506		}
1507	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1508		pg->bypassed = false;
1509
1510	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1511		/* Activations of other paths are still on going */
1512		goto out;
1513
1514	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1515		if (delay_retry)
1516			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1517		else
1518			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1519
1520		if (__pg_init_all_paths(m))
1521			goto out;
1522	}
1523	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1524
1525	process_queued_io_list(m);
1526
1527	/*
1528	 * Wake up any thread waiting to suspend.
1529	 */
1530	wake_up(&m->pg_init_wait);
1531
1532out:
1533	spin_unlock_irqrestore(&m->lock, flags);
1534}
1535
1536static void activate_or_offline_path(struct pgpath *pgpath)
1537{
1538	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1539
1540	if (pgpath->is_active && !blk_queue_dying(q))
1541		scsi_dh_activate(q, pg_init_done, pgpath);
1542	else
1543		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1544}
1545
1546static void activate_path_work(struct work_struct *work)
1547{
1548	struct pgpath *pgpath =
1549		container_of(work, struct pgpath, activate_path.work);
1550
1551	activate_or_offline_path(pgpath);
1552}
1553
1554static int multipath_end_io(struct dm_target *ti, struct request *clone,
1555			    blk_status_t error, union map_info *map_context)
1556{
1557	struct dm_mpath_io *mpio = get_mpio(map_context);
1558	struct pgpath *pgpath = mpio->pgpath;
1559	int r = DM_ENDIO_DONE;
1560
1561	/*
1562	 * We don't queue any clone request inside the multipath target
1563	 * during end I/O handling, since those clone requests don't have
1564	 * bio clones.  If we queue them inside the multipath target,
1565	 * we need to make bio clones, that requires memory allocation.
1566	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1567	 *  don't have bio clones.)
1568	 * Instead of queueing the clone request here, we queue the original
1569	 * request into dm core, which will remake a clone request and
1570	 * clone bios for it and resubmit it later.
1571	 */
1572	if (error && blk_path_error(error)) {
1573		struct multipath *m = ti->private;
1574
1575		if (error == BLK_STS_RESOURCE)
1576			r = DM_ENDIO_DELAY_REQUEUE;
1577		else
1578			r = DM_ENDIO_REQUEUE;
1579
1580		if (pgpath)
1581			fail_path(pgpath);
1582
1583		if (atomic_read(&m->nr_valid_paths) == 0 &&
1584		    !must_push_back_rq(m)) {
1585			if (error == BLK_STS_IOERR)
1586				dm_report_EIO(m);
1587			/* complete with the original error */
1588			r = DM_ENDIO_DONE;
1589		}
1590	}
1591
1592	if (pgpath) {
1593		struct path_selector *ps = &pgpath->pg->ps;
1594
1595		if (ps->type->end_io)
1596			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 
1597	}
1598
1599	return r;
1600}
1601
1602static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1603				blk_status_t *error)
1604{
1605	struct multipath *m = ti->private;
1606	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1607	struct pgpath *pgpath = mpio->pgpath;
1608	unsigned long flags;
1609	int r = DM_ENDIO_DONE;
1610
1611	if (!*error || !blk_path_error(*error))
1612		goto done;
1613
1614	if (pgpath)
1615		fail_path(pgpath);
1616
1617	if (atomic_read(&m->nr_valid_paths) == 0 &&
1618	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1619		if (must_push_back_bio(m)) {
1620			r = DM_ENDIO_REQUEUE;
1621		} else {
1622			dm_report_EIO(m);
1623			*error = BLK_STS_IOERR;
 
 
 
 
1624		}
1625		goto done;
1626	}
1627
1628	spin_lock_irqsave(&m->lock, flags);
1629	bio_list_add(&m->queued_bios, clone);
1630	spin_unlock_irqrestore(&m->lock, flags);
1631	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1632		queue_work(kmultipathd, &m->process_queued_bios);
1633
1634	r = DM_ENDIO_INCOMPLETE;
1635done:
1636	if (pgpath) {
1637		struct path_selector *ps = &pgpath->pg->ps;
1638
1639		if (ps->type->end_io)
1640			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 
 
1641	}
1642
1643	return r;
1644}
1645
1646/*
1647 * Suspend can't complete until all the I/O is processed so if
1648 * the last path fails we must error any remaining I/O.
1649 * Note that if the freeze_bdev fails while suspending, the
1650 * queue_if_no_path state is lost - userspace should reset it.
 
1651 */
1652static void multipath_presuspend(struct dm_target *ti)
1653{
1654	struct multipath *m = ti->private;
1655
1656	queue_if_no_path(m, false, true);
 
 
1657}
1658
1659static void multipath_postsuspend(struct dm_target *ti)
1660{
1661	struct multipath *m = ti->private;
1662
1663	mutex_lock(&m->work_mutex);
1664	flush_multipath_work(m);
1665	mutex_unlock(&m->work_mutex);
1666}
1667
1668/*
1669 * Restore the queue_if_no_path setting.
1670 */
1671static void multipath_resume(struct dm_target *ti)
1672{
1673	struct multipath *m = ti->private;
1674	unsigned long flags;
1675
1676	spin_lock_irqsave(&m->lock, flags);
1677	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
1678		   test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1679	spin_unlock_irqrestore(&m->lock, flags);
1680}
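/*
 * Sketch of the intended suspend/resume flow, using the flags above:
 * queue_if_no_path(m, false, true) in presuspend saves the current
 * setting in MPATHF_SAVED_QUEUE_IF_NO_PATH before clearing it, and
 * multipath_resume() copies the saved bit back, so a plain
 * "dmsetup suspend" / "dmsetup resume" cycle preserves the feature.
 */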
1681
1682/*
1683 * Info output has the following format:
1684 * num_multipath_feature_args [multipath_feature_args]*
1685 * num_handler_status_args [handler_status_args]*
1686 * num_groups init_group_number
1687 *            [A|D|E num_ps_status_args [ps_status_args]*
1688 *             num_paths num_selector_args
1689 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1690 *
1691 * Table output has the following format (identical to the constructor string):
1692 * num_feature_args [features_args]*
1693 * num_handler_args hw_handler [hw_handler_args]*
1694 * num_groups init_group_number
1695 *     [priority selector-name num_ps_args [ps_args]*
1696 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1697 */
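/*
 * A sketch with assumed values: one priority group of two paths using
 * the round-robin selector (table_args = 1, info_args = 0) might yield:
 *
 *   STATUSTYPE_INFO:  2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *   STATUSTYPE_TABLE: 0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * Info: 2 feature args (queue_io flag, pg_init count), no handler
 * status, 1 group with group 1 next, group state 'A', then per path
 * "dev A|F fail_count".  Table: no features, no hw handler, and 1000
 * is the round-robin repeat_count selector argument.
 */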
1698static void multipath_status(struct dm_target *ti, status_type_t type,
1699			     unsigned status_flags, char *result, unsigned maxlen)
1700{
1701	int sz = 0;
1702	unsigned long flags;
1703	struct multipath *m = ti->private;
1704	struct priority_group *pg;
1705	struct pgpath *p;
1706	unsigned pg_num;
1707	char state;
1708
1709	spin_lock_irqsave(&m->lock, flags);
1710
1711	/* Features */
1712	if (type == STATUSTYPE_INFO)
1713		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1714		       atomic_read(&m->pg_init_count));
1715	else {
1716		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1717			      (m->pg_init_retries > 0) * 2 +
1718			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1719			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1720			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1721
1722		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1723			DMEMIT("queue_if_no_path ");
1724		if (m->pg_init_retries)
1725			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1726		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1727			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1728		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1729			DMEMIT("retain_attached_hw_handler ");
1730		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1731			switch (m->queue_mode) {
1732			case DM_TYPE_BIO_BASED:
1733				DMEMIT("queue_mode bio ");
1734				break;
1735			default:
1736				WARN_ON_ONCE(true);
1737				break;
1738			}
1739		}
1740	}
1741
1742	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1743		DMEMIT("0 ");
1744	else
1745		DMEMIT("1 %s ", m->hw_handler_name);
1746
1747	DMEMIT("%u ", m->nr_priority_groups);
1748
1749	if (m->next_pg)
1750		pg_num = m->next_pg->pg_num;
1751	else if (m->current_pg)
1752		pg_num = m->current_pg->pg_num;
1753	else
1754		pg_num = (m->nr_priority_groups ? 1 : 0);
1755
1756	DMEMIT("%u ", pg_num);
1757
1758	switch (type) {
1759	case STATUSTYPE_INFO:
1760		list_for_each_entry(pg, &m->priority_groups, list) {
1761			if (pg->bypassed)
1762				state = 'D';	/* Disabled */
1763			else if (pg == m->current_pg)
1764				state = 'A';	/* Currently Active */
1765			else
1766				state = 'E';	/* Enabled */
1767
1768			DMEMIT("%c ", state);
1769
1770			if (pg->ps.type->status)
1771				sz += pg->ps.type->status(&pg->ps, NULL, type,
1772							  result + sz,
1773							  maxlen - sz);
1774			else
1775				DMEMIT("0 ");
1776
1777			DMEMIT("%u %u ", pg->nr_pgpaths,
1778			       pg->ps.type->info_args);
1779
1780			list_for_each_entry(p, &pg->pgpaths, list) {
1781				DMEMIT("%s %s %u ", p->path.dev->name,
1782				       p->is_active ? "A" : "F",
1783				       p->fail_count);
1784				if (pg->ps.type->status)
1785					sz += pg->ps.type->status(&pg->ps,
1786					      &p->path, type, result + sz,
1787					      maxlen - sz);
1788			}
1789		}
1790		break;
1791
1792	case STATUSTYPE_TABLE:
1793		list_for_each_entry(pg, &m->priority_groups, list) {
1794			DMEMIT("%s ", pg->ps.type->name);
1795
1796			if (pg->ps.type->status)
1797				sz += pg->ps.type->status(&pg->ps, NULL, type,
1798							  result + sz,
1799							  maxlen - sz);
1800			else
1801				DMEMIT("0 ");
1802
1803			DMEMIT("%u %u ", pg->nr_pgpaths,
1804			       pg->ps.type->table_args);
1805
1806			list_for_each_entry(p, &pg->pgpaths, list) {
1807				DMEMIT("%s ", p->path.dev->name);
1808				if (pg->ps.type->status)
1809					sz += pg->ps.type->status(&pg->ps,
1810					      &p->path, type, result + sz,
1811					      maxlen - sz);
1812			}
1813		}
1814		break;
1815	}
1816
1817	spin_unlock_irqrestore(&m->lock, flags);
1818}
1819
1820static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
1821			     char *result, unsigned maxlen)
1822{
1823	int r = -EINVAL;
1824	struct dm_dev *dev;
1825	struct multipath *m = ti->private;
1826	action_fn action;
1827
1828	mutex_lock(&m->work_mutex);
1829
1830	if (dm_suspended(ti)) {
1831		r = -EBUSY;
1832		goto out;
1833	}
1834
1835	if (argc == 1) {
1836		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1837			r = queue_if_no_path(m, true, false);
1838			goto out;
1839		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1840			r = queue_if_no_path(m, false, false);
1841			goto out;
1842		}
1843	}
1844
1845	if (argc != 2) {
1846		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1847		goto out;
1848	}
1849
1850	if (!strcasecmp(argv[0], "disable_group")) {
1851		r = bypass_pg_num(m, argv[1], true);
1852		goto out;
1853	} else if (!strcasecmp(argv[0], "enable_group")) {
1854		r = bypass_pg_num(m, argv[1], false);
1855		goto out;
1856	} else if (!strcasecmp(argv[0], "switch_group")) {
1857		r = switch_pg_num(m, argv[1]);
1858		goto out;
1859	} else if (!strcasecmp(argv[0], "reinstate_path"))
1860		action = reinstate_path;
1861	else if (!strcasecmp(argv[0], "fail_path"))
1862		action = fail_path;
1863	else {
1864		DMWARN("Unrecognised multipath message received: %s", argv[0]);
1865		goto out;
1866	}
1867
1868	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1869	if (r) {
1870		DMWARN("message: error getting device %s",
1871		       argv[1]);
1872		goto out;
1873	}
1874
1875	r = action_dev(m, dev, action);
1876
1877	dm_put_device(ti, dev);
1878
1879out:
1880	mutex_unlock(&m->work_mutex);
1881	return r;
1882}
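/*
 * Illustrative invocations (map name and path assumed) of the messages
 * parsed above, sent via the dmsetup message interface:
 *
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 reinstate_path 8:32
 *   dmsetup message mpatha 0 switch_group 2
 *   dmsetup message mpatha 0 fail_if_no_path
 */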
1883
1884static int multipath_prepare_ioctl(struct dm_target *ti,
1885				   struct block_device **bdev)
1886{
1887	struct multipath *m = ti->private;
1888	struct pgpath *current_pgpath;
1889	int r;
1890
1891	current_pgpath = READ_ONCE(m->current_pgpath);
1892	if (!current_pgpath)
1893		current_pgpath = choose_pgpath(m, 0);
1894
1895	if (current_pgpath) {
1896		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
1897			*bdev = current_pgpath->path.dev->bdev;
1898			r = 0;
1899		} else {
1900			/* pg_init has not started or completed */
1901			r = -ENOTCONN;
1902		}
1903	} else {
1904		/* No path is available */
1905		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1906			r = -ENOTCONN;
1907		else
1908			r = -EIO;
1909	}
1910
1911	if (r == -ENOTCONN) {
1912		if (!READ_ONCE(m->current_pg)) {
1913			/* Path status changed, redo selection */
1914			(void) choose_pgpath(m, 0);
1915		}
1916		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1917			pg_init_all_paths(m);
1918		dm_table_run_md_queue_async(m->ti->table);
1919		process_queued_io_list(m);
1920	}
1921
1922	/*
1923	 * Only pass ioctls through if the device sizes match exactly.
1924	 */
1925	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1926		return 1;
1927	return r;
1928}
1929
1930static int multipath_iterate_devices(struct dm_target *ti,
1931				     iterate_devices_callout_fn fn, void *data)
1932{
1933	struct multipath *m = ti->private;
1934	struct priority_group *pg;
1935	struct pgpath *p;
1936	int ret = 0;
1937
1938	list_for_each_entry(pg, &m->priority_groups, list) {
1939		list_for_each_entry(p, &pg->pgpaths, list) {
1940			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1941			if (ret)
1942				goto out;
1943		}
1944	}
1945
1946out:
1947	return ret;
1948}
1949
1950static int pgpath_busy(struct pgpath *pgpath)
1951{
1952	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1953
1954	return blk_lld_busy(q);
1955}
1956
1957/*
1958 * We return "busy" only when we can map I/Os but the underlying devices
1959 * are busy (so even if we map I/Os now, the I/Os will wait on
1960 * the underlying queue).
1961 * In other words, if we want to kill I/Os or queue them inside us
1962 * due to map unavailability, we don't return "busy".  Otherwise,
1963 * dm core won't give us the I/Os and we can't do what we want.
1964 */
1965static int multipath_busy(struct dm_target *ti)
1966{
1967	bool busy = false, has_active = false;
1968	struct multipath *m = ti->private;
1969	struct priority_group *pg, *next_pg;
1970	struct pgpath *pgpath;
1971
1972	/* pg_init in progress */
1973	if (atomic_read(&m->pg_init_in_progress))
1974		return true;
1975
1976	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
1977	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1978		return (m->queue_mode != DM_TYPE_REQUEST_BASED);
1979
1980	/* Guess which priority_group will be used at next mapping time */
1981	pg = READ_ONCE(m->current_pg);
1982	next_pg = READ_ONCE(m->next_pg);
1983	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
1984		pg = next_pg;
1985
1986	if (!pg) {
1987		/*
1988		 * We don't know which pg will be used at next mapping time.
1989		 * We don't call choose_pgpath() here to avoid triggering
1990		 * pg_init just by busy checking.
1991		 * So we don't know whether the underlying devices we will be
1992		 * using at next mapping time are busy or not. Just try mapping.
1993		 */
1994		return busy;
1995	}
1996
1997	/*
1998	 * If at least one active path is not busy, the path selector
1999	 * will be able to select it, so we consider such a pg as not busy.
2000	 */
2001	busy = true;
2002	list_for_each_entry(pgpath, &pg->pgpaths, list) {
2003		if (pgpath->is_active) {
2004			has_active = true;
2005			if (!pgpath_busy(pgpath)) {
2006				busy = false;
2007				break;
2008			}
2009		}
2010	}
2011
2012	if (!has_active) {
2013		/*
2014		 * No active path in this pg, so this pg won't be used and
2015		 * the current_pg will be changed at next mapping time.
2016		 * We need to try mapping to determine it.
2017		 */
2018		busy = false;
2019	}
2020
2021	return busy;
2022}
2023
2024/*-----------------------------------------------------------------
2025 * Module setup
2026 *---------------------------------------------------------------*/
2027static struct target_type multipath_target = {
2028	.name = "multipath",
2029	.version = {1, 13, 0},
2030	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2031		    DM_TARGET_PASSES_INTEGRITY,
2032	.module = THIS_MODULE,
2033	.ctr = multipath_ctr,
2034	.dtr = multipath_dtr,
2035	.clone_and_map_rq = multipath_clone_and_map,
2036	.release_clone_rq = multipath_release_clone,
2037	.rq_end_io = multipath_end_io,
2038	.map = multipath_map_bio,
2039	.end_io = multipath_end_io_bio,
2040	.presuspend = multipath_presuspend,
2041	.postsuspend = multipath_postsuspend,
2042	.resume = multipath_resume,
2043	.status = multipath_status,
2044	.message = multipath_message,
2045	.prepare_ioctl = multipath_prepare_ioctl,
2046	.iterate_devices = multipath_iterate_devices,
2047	.busy = multipath_busy,
2048};
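/*
 * A minimal sketch (devices and sizes assumed) of building a multipath
 * map from userspace with the constructor string documented above
 * multipath_status():
 *
 *   echo "0 $(blockdev --getsz /dev/sda) multipath 0 0 1 1 \
 *         round-robin 0 2 1 /dev/sda 1000 /dev/sdb 1000" \
 *     | dmsetup create mpatha
 */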
2049
2050static int __init dm_multipath_init(void)
2051{
2052	int r;
2053
2054	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2055	if (!kmultipathd) {
2056		DMERR("failed to create workqueue kmpathd");
2057		r = -ENOMEM;
2058		goto bad_alloc_kmultipathd;
2059	}
2060
2061	/*
2062	 * A separate workqueue is used to handle the device handlers
2063	 * to avoid overloading the existing workqueue. Overloading the
2064	 * old workqueue would also create a bottleneck in the
2065	 * path of the storage hardware device activation.
2066	 */
2067	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2068						  WQ_MEM_RECLAIM);
2069	if (!kmpath_handlerd) {
2070		DMERR("failed to create workqueue kmpath_handlerd");
2071		r = -ENOMEM;
2072		goto bad_alloc_kmpath_handlerd;
2073	}
2074
2075	r = dm_register_target(&multipath_target);
2076	if (r < 0) {
2077		DMERR("request-based register failed %d", r);
2078		r = -EINVAL;
2079		goto bad_register_target;
2080	}
2081
2082	return 0;
2083
2084bad_register_target:
2085	destroy_workqueue(kmpath_handlerd);
2086bad_alloc_kmpath_handlerd:
2087	destroy_workqueue(kmultipathd);
2088bad_alloc_kmultipathd:
2089	return r;
2090}
2091
2092static void __exit dm_multipath_exit(void)
2093{
2094	destroy_workqueue(kmpath_handlerd);
2095	destroy_workqueue(kmultipathd);
2096
2097	dm_unregister_target(&multipath_target);
2098}
2099
2100module_init(dm_multipath_init);
2101module_exit(dm_multipath_exit);
2102
2103MODULE_DESCRIPTION(DM_NAME " multipath target");
2104MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2105MODULE_LICENSE("GPL");