// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

static struct workqueue_struct *dm_mpath_wq;

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0

static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned int fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned int pg_num;		/* Reference number */
	unsigned int nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned int nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;

	struct timer_list nopath_timer;	/* Timeout for queue_if_no_path */
};

/*
 * Context information attached to each io we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
	u64 start_time_ns;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);

/*
 *-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------
 */
#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */

static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
{
	bool r = test_bit(MPATHF_bit, &m->flags);

	if (r) {
		unsigned long flags;

		spin_lock_irqsave(&m->lock, flags);
		r = test_bit(MPATHF_bit, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return r;
}
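
/*
 * Note on the helper above: it is a lockless fast path with a locked
 * re-check.  The unlocked test_bit() may race with a concurrent clear,
 * so a "true" result is confirmed under m->lock before callers act on
 * it, while a "false" result is accepted as-is, keeping the common
 * case cheap.
 */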

/*
 *-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------
 */
static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;

		timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
	}

	return m;
}

static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		m->queue_mode = DM_TYPE_REQUEST_BASED;
	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	/*
	 * Init fields that are only used when a scsi_dh is attached
	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
	 */
	set_bit(MPATHF_QUEUE_IO, &m->flags);
	atomic_set(&m->pg_init_in_progress, 0);
	atomic_set(&m->pg_init_count, 0);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	init_waitqueue_head(&m->pg_init_wait);

	return 0;
}
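
/*
 * Queue-mode recap: if the table line didn't request a mode, the target
 * defaults to request-based (request clones dispatched via blk-mq);
 * "bio" selects bio-based operation, which has no direct scsi_dh
 * control and instead resubmits queued bios from the
 * process_queued_bios work item initialised above.
 */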

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;
	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	mpio->start_time_ns = 0;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}
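
/*
 * Per-bio-data layout in bio-based mode: dm core reserves
 * ti->per_io_data_size bytes alongside each bio, which this target
 * uses as
 *
 *	[ struct dm_mpath_io | struct dm_bio_details ]
 *
 * The dm_bio_details copy recorded here lets process_queued_bios()
 * restore a failed bio to its original state before remapping it to
 * another path.
 */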

/*
 *-----------------------------------------------
 * Path selection
 *-----------------------------------------------
 */
static int __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	lockdep_assert_held(&m->lock);

	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		return 0;

	atomic_inc(&m->pg_init_count);
	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);

	/* Check here to reset pg_init_required */
	if (!m->current_pg)
		return 0;

	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			atomic_inc(&m->pg_init_in_progress);
	}
	return atomic_read(&m->pg_init_in_progress);
}

static int pg_init_all_paths(struct multipath *m)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	ret = __pg_init_all_paths(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}
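
/*
 * pg_init flow: each active path in the current PG has its
 * activate_path delayed work queued on kmpath_handlerd (optionally
 * delayed by pg_init_delay_msecs when a retry was requested).  Every
 * successfully queued work item bumps pg_init_in_progress;
 * pg_init_done() drops the count again and wakes pg_init_wait once the
 * last activation completes.
 */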

static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
	lockdep_assert_held(&m->lock);

	m->current_pg = pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		set_bit(MPATHF_QUEUE_IO, &m->flags);
	} else {
		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
	}

	atomic_set(&m->pg_init_count, 0);
}

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	unsigned long flags;
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);

	pgpath = path_to_pgpath(path);

	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
		/* Only update current_pgpath if pg changed */
		spin_lock_irqsave(&m->lock, flags);
		m->current_pgpath = pgpath;
		__switch_pg(m, pg);
		spin_unlock_irqrestore(&m->lock, flags);
	}

	return pgpath;
}

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	unsigned long flags;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned int bypassed = 1;

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		clear_bit(MPATHF_QUEUE_IO, &m->flags);
		spin_unlock_irqrestore(&m->lock, flags);
		goto failed;
	}

	/* Were we instructed to switch PG? */
	if (READ_ONCE(m->next_pg)) {
		spin_lock_irqsave(&m->lock, flags);
		pg = m->next_pg;
		if (!pg) {
			spin_unlock_irqrestore(&m->lock, flags);
			goto check_current_pg;
		}
		m->next_pg = NULL;
		spin_unlock_irqrestore(&m->lock, flags);
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* Don't change PG until it has no remaining paths */
check_current_pg:
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);
			if (!IS_ERR_OR_NULL(pgpath)) {
				if (!bypassed) {
					spin_lock_irqsave(&m->lock, flags);
					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
					spin_unlock_irqrestore(&m->lock, flags);
				}
				return pgpath;
			}
		}
	} while (bypassed--);

failed:
	spin_lock_irqsave(&m->lock, flags);
	m->current_pgpath = NULL;
	m->current_pg = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	return NULL;
}
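
/*
 * Worked example of the two-pass loop above: with bypassed = 1 the
 * first pass skips PGs whose pg->bypassed is true (pg->bypassed ==
 * !!bypassed continues); if nothing usable is found, bypassed-- makes
 * the second pass visit only the previously skipped PGs and, on
 * success, sets MPATHF_PG_INIT_DELAY_RETRY so repeated activations do
 * not hammer the controller.
 */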

/*
 * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
 * report the function name and line number of the function from which
 * it has been invoked.
 */
#define dm_report_EIO(m)						\
	DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
		      dm_table_device_name((m)->ti->table),		\
		      test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
		      test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
		      dm_noflush_suspending((m)->ti))

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 */
static bool __must_push_back(struct multipath *m)
{
	return dm_noflush_suspending(m->ti);
}

static bool must_push_back_rq(struct multipath *m)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&m->lock, flags);
	ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m));
	spin_unlock_irqrestore(&m->lock, flags);

	return ret;
}

/*
 * Map cloned requests (request-based multipath)
 */
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **__clone)
{
	struct multipath *m = ti->private;
	size_t nr_bytes = blk_rq_bytes(rq);
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct request_queue *q;
	struct request *clone;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath) {
		if (must_push_back_rq(m))
			return DM_MAPIO_DELAY_REQUEUE;
		dm_report_EIO(m);	/* Failed */
		return DM_MAPIO_KILL;
	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		pg_init_all_paths(m);
		return DM_MAPIO_DELAY_REQUEUE;
	}

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	bdev = pgpath->path.dev->bdev;
	q = bdev_get_queue(bdev);
	clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
			BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(clone)) {
		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
		if (blk_queue_dying(q)) {
			atomic_inc(&m->pg_init_in_progress);
			activate_or_offline_path(pgpath);
			return DM_MAPIO_DELAY_REQUEUE;
		}

		/*
		 * blk-mq's SCHED_RESTART can cover this requeue, so we
		 * needn't deal with it by DELAY_REQUEUE. More importantly,
		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
		 * get the queue busy feedback (via BLK_STS_RESOURCE),
		 * otherwise I/O merging can suffer.
		 */
		return DM_MAPIO_REQUEUE;
	}
	clone->bio = clone->biotail = NULL;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	*__clone = clone;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);
	return DM_MAPIO_REMAPPED;
}
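
/*
 * Return-code summary for the map path above, as handled by dm-rq:
 * DM_MAPIO_REMAPPED dispatches the clone to the chosen path,
 * DM_MAPIO_DELAY_REQUEUE requeues the original request after a delay,
 * DM_MAPIO_REQUEUE feeds BLK_STS_RESOURCE back to blk-mq immediately,
 * and DM_MAPIO_KILL fails the request with an error.
 */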

static void multipath_release_clone(struct request *clone,
				    union map_info *map_context)
{
	if (unlikely(map_context)) {
		/*
		 * non-NULL map_context means caller is still map
		 * method; must undo multipath_clone_and_map()
		 */
		struct dm_mpath_io *mpio = get_mpio(map_context);
		struct pgpath *pgpath = mpio->pgpath;

		if (pgpath && pgpath->pg->ps.type->end_io)
			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
						    &pgpath->path,
						    mpio->nr_bytes,
						    clone->io_start_time_ns);
	}

	blk_mq_free_request(clone);
}

/*
 * Map cloned bios (bio-based multipath)
 */

static void __multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	/* Queue for the daemon to resubmit */
	bio_list_add(&m->queued_bios, bio);
	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void multipath_queue_bio(struct multipath *m, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	__multipath_queue_bio(m, bio);
	spin_unlock_irqrestore(&m->lock, flags);
}

static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
	struct pgpath *pgpath;
	unsigned long flags;

	/* Do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

	if (!pgpath) {
		spin_lock_irqsave(&m->lock, flags);
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			__multipath_queue_bio(m, bio);
			pgpath = ERR_PTR(-EAGAIN);
		}
		spin_unlock_irqrestore(&m->lock, flags);

	} else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) ||
		   mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) {
		multipath_queue_bio(m, bio);
		pg_init_all_paths(m);
		return ERR_PTR(-EAGAIN);
	}

	return pgpath;
}

static int __multipath_map_bio(struct multipath *m, struct bio *bio,
			       struct dm_mpath_io *mpio)
{
	struct pgpath *pgpath = __map_bio(m, bio);

	if (IS_ERR(pgpath))
		return DM_MAPIO_SUBMITTED;

	if (!pgpath) {
		if (__must_push_back(m))
			return DM_MAPIO_REQUEUE;
		dm_report_EIO(m);
		return DM_MAPIO_KILL;
	}

	mpio->pgpath = pgpath;

	if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
		mpio->start_time_ns = ktime_get_ns();

	bio->bi_status = 0;
	bio_set_dev(bio, pgpath->path.dev->bdev);
	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      mpio->nr_bytes);
	return DM_MAPIO_REMAPPED;
}

static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = NULL;

	multipath_init_per_bio_data(bio, &mpio);
	return __multipath_map_bio(m, bio, mpio);
}

static void process_queued_io_list(struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_REQUEST_BASED)
		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
	else if (m->queue_mode == DM_TYPE_BIO_BASED)
		queue_work(kmultipathd, &m->process_queued_bios);
}

static void process_queued_bios(struct work_struct *work)
{
	int r;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	struct multipath *m =
		container_of(work, struct multipath, process_queued_bios);

	bio_list_init(&bios);

	spin_lock_irqsave(&m->lock, flags);

	if (bio_list_empty(&m->queued_bios)) {
		spin_unlock_irqrestore(&m->lock, flags);
		return;
	}

	bio_list_merge(&bios, &m->queued_bios);
	bio_list_init(&m->queued_bios);

	spin_unlock_irqrestore(&m->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);

		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
		r = __multipath_map_bio(m, bio, mpio);
		switch (r) {
		case DM_MAPIO_KILL:
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		case DM_MAPIO_REQUEUE:
			bio->bi_status = BLK_STS_DM_REQUEUE;
			bio_endio(bio);
			break;
		case DM_MAPIO_REMAPPED:
			submit_bio_noacct(bio);
			break;
		case DM_MAPIO_SUBMITTED:
			break;
		default:
			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
		}
	}
	blk_finish_plug(&plug);
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
			    bool save_old_value, const char *caller)
{
	unsigned long flags;
	bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
	const char *dm_dev_name = dm_table_device_name(m->ti->table);

	DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
		dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);

	spin_lock_irqsave(&m->lock, flags);

	queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);

	if (save_old_value) {
		if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
			DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
			      dm_dev_name);
		} else
			assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
	} else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
		/* due to "fail_if_no_path" message, need to honor it. */
		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
	}
	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);

	DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
		dm_dev_name, __func__,
		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
		dm_noflush_suspending(m->ti));

	spin_unlock_irqrestore(&m->lock, flags);

	if (!f_queue_if_no_path) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	return 0;
}
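
/*
 * Runtime toggle via the message interface (illustrative invocations):
 *
 *	dmsetup message <mpath-dev> 0 queue_if_no_path
 *	dmsetup message <mpath-dev> 0 fail_if_no_path
 *
 * map to queue_if_no_path(m, true/false, false, ...) respectively; see
 * multipath_message() below.
 */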

/*
 * If the queue_if_no_path timeout fires, turn off queue_if_no_path and
 * process any queued I/O.
 */
static void queue_if_no_path_timeout_work(struct timer_list *t)
{
	struct multipath *m = from_timer(m, t, nopath_timer);

	DMWARN("queue_if_no_path timeout on %s, failing queued IO",
	       dm_table_device_name(m->ti->table));
	queue_if_no_path(m, false, false, __func__);
}

/*
 * Enable the queue_if_no_path timeout if necessary.
 * Called with m->lock held.
 */
static void enable_nopath_timeout(struct multipath *m)
{
	unsigned long queue_if_no_path_timeout =
		READ_ONCE(queue_if_no_path_timeout_secs) * HZ;

	lockdep_assert_held(&m->lock);

	if (queue_if_no_path_timeout > 0 &&
	    atomic_read(&m->nr_valid_paths) == 0 &&
	    test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
		mod_timer(&m->nopath_timer,
			  jiffies + queue_if_no_path_timeout);
	}
}

static void disable_nopath_timeout(struct multipath *m)
{
	del_timer_sync(&m->nopath_timer);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*
 *---------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------
 */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned int ps_argc;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
			 const char **attached_handler_name, char **error)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int r;

	if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
retain:
		if (*attached_handler_name) {
			/*
			 * Clear any hw_handler_params associated with a
			 * handler that isn't already attached.
			 */
			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
				kfree(m->hw_handler_params);
				m->hw_handler_params = NULL;
			}

			/*
			 * Reset hw_handler_name to match the attached handler
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = *attached_handler_name;
			*attached_handler_name = NULL;
		}
	}

	if (m->hw_handler_name) {
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			DMINFO("retaining handler on device %pg", bdev);
			goto retain;
		}
		if (r < 0) {
			*error = "error attaching hardware handler";
			return r;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				*error = "unable to set hardware handler parameters";
				return r;
			}
		}
	}

	return 0;
}
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q;
	const char *attached_handler_name = NULL;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	q = bdev_get_queue(p->path.dev->bdev);
	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
	if (attached_handler_name || m->hw_handler_name) {
		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
		kfree(attached_handler_name);
		if (r) {
			dm_put_device(ti, p->path.dev);
			goto bad;
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;
 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static const struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned int i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned int hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (m->queue_mode == DM_TYPE_BIO_BASED) {
		dm_consume_args(as, hw_argc);
		DMERR("bio-based multipath doesn't allow hardware handler args");
		return 0;
	}

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)
		return -EINVAL;

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}
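
/*
 * Worked example of the hw_handler_params buffer built above: for the
 * (illustrative) table fragment "3 alua arg1 arg2", hw_argc is 3,
 * hw_handler_name becomes "alua", and the params buffer is laid out as
 * consecutive NUL-terminated strings:
 *
 *	"2\0arg1\0arg2\0"
 *
 * i.e. the argument count first, then each argument; this is the
 * buffer that setup_scsi_dh() later hands to scsi_dh_set_params().
 */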

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned int argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 8, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false, __func__);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "queue_mode") &&
		    (argc >= 1)) {
			const char *queue_mode_name = dm_shift_arg(as);

			if (!strcasecmp(queue_mode_name, "bio"))
				m->queue_mode = DM_TYPE_BIO_BASED;
			else if (!strcasecmp(queue_mode_name, "rq") ||
				 !strcasecmp(queue_mode_name, "mq"))
				m->queue_mode = DM_TYPE_REQUEST_BASED;
			else {
				ti->error = "Unknown 'queue_mode' requested";
				r = -EINVAL;
			}
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* target arguments */
	static const struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned int pg_count = 0;
	unsigned int next_pg_num;
	unsigned long flags;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = alloc_multipath_stage2(ti, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;
		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		nr_valid_paths += pg->nr_pgpaths;
		atomic_set(&m->nr_valid_paths, nr_valid_paths);

		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	spin_lock_irqsave(&m->lock, flags);
	enable_nopath_timeout(m);
	spin_unlock_irqrestore(&m->lock, flags);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		ti->per_io_data_size = multipath_per_bio_data_size();
	else
		ti->per_io_data_size = sizeof(struct dm_mpath_io);

	return 0;

 bad:
	free_multipath(m);
	return r;
}
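
/*
 * Illustrative dmsetup table line matching the grammar documented
 * above (device numbers and sizes are made up):
 *
 *	0 409600 multipath 1 queue_if_no_path 0 2 1 \
 *		round-robin 0 2 1 8:16 1 8:32 1 \
 *		round-robin 0 1 1 8:48 1
 *
 * i.e. one feature arg, no hardware handler, two PGs with PG 1
 * initially selected; each path carries one per-path selector arg
 * (here, a round-robin repeat count of 1).
 */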

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&m->pg_init_in_progress))
			break;

		io_schedule();
	}
	finish_wait(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	if (m->hw_handler_name) {
		unsigned long flags;

		if (!atomic_read(&m->pg_init_in_progress))
			goto skip;

		spin_lock_irqsave(&m->lock, flags);
		if (atomic_read(&m->pg_init_in_progress) &&
		    !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
			spin_unlock_irqrestore(&m->lock, flags);

			flush_workqueue(kmpath_handlerd);
			multipath_wait_for_pg_init_completion(m);

			spin_lock_irqsave(&m->lock, flags);
			clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}
skip:
	if (m->queue_mode == DM_TYPE_BIO_BASED)
		flush_work(&m->process_queued_bios);
	flush_work(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	disable_nopath_timeout(m);
	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("%s: Failing path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));

	queue_work(dm_mpath_wq, &m->trigger_event);

	enable_nopath_timeout(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0, run_queue = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;
	unsigned int nr_valid_paths;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	DMWARN("%s: Reinstating path %s.",
	       dm_table_device_name(m->ti->table),
	       pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;
		run_queue = 1;
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (run_queue) {
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	if (pgpath->is_active)
		disable_nopath_timeout(m);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      bool bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned int pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to %s", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = false;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
	struct priority_group *pg;
	unsigned int pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	bool limit_reached = false;

	spin_lock_irqsave(&m->lock, flags);

	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
	else
		limit_reached = true;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	bool delay_retry = false;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, true);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = true;
		fallthrough;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	case SCSI_DH_DEV_OFFLINED:
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
		pg->bypassed = false;

	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
		/* Activations of other paths are still ongoing */
		goto out;

	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
		if (delay_retry)
			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
		else
			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);

		if (__pg_init_all_paths(m))
			goto out;
	}
	clear_bit(MPATHF_QUEUE_IO, &m->flags);

	process_queued_io_list(m);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    blk_status_t error, union map_info *map_context)
{
	struct dm_mpath_io *mpio = get_mpio(map_context);
	struct pgpath *pgpath = mpio->pgpath;
	int r = DM_ENDIO_DONE;

	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	if (error && blk_path_error(error)) {
		struct multipath *m = ti->private;

		if (error == BLK_STS_RESOURCE)
			r = DM_ENDIO_DELAY_REQUEUE;
		else
			r = DM_ENDIO_REQUEUE;

		if (pgpath)
			fail_path(pgpath);

		if (!atomic_read(&m->nr_valid_paths) &&
		    !must_push_back_rq(m)) {
			if (error == BLK_STS_IOERR)
				dm_report_EIO(m);
			/* complete with the original error */
			r = DM_ENDIO_DONE;
		}
	}

	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 clone->io_start_time_ns);
	}

	return r;
}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
				blk_status_t *error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath = mpio->pgpath;
	unsigned long flags;
	int r = DM_ENDIO_DONE;

	if (!*error || !blk_path_error(*error))
		goto done;

	if (pgpath)
		fail_path(pgpath);

	if (!atomic_read(&m->nr_valid_paths)) {
		spin_lock_irqsave(&m->lock, flags);
		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
			if (__must_push_back(m)) {
				r = DM_ENDIO_REQUEUE;
			} else {
				dm_report_EIO(m);
				*error = BLK_STS_IOERR;
			}
			spin_unlock_irqrestore(&m->lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&m->lock, flags);
	}

	multipath_queue_bio(m, clone);
	r = DM_ENDIO_INCOMPLETE;
done:
	if (pgpath) {
		struct path_selector *ps = &pgpath->pg->ps;

		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
					 (mpio->start_time_ns ?:
					  dm_start_time_ns_from_clone(clone)));
	}

	return r;
}
1735
1736/*
1737 * Suspend with flush can't complete until all the I/O is processed
1738 * so if the last path fails we must error any remaining I/O.
1739 * - Note that if the freeze_bdev fails while suspending, the
1740 *   queue_if_no_path state is lost - userspace should reset it.
1741 * Otherwise, during noflush suspend, queue_if_no_path will not change.
1742 */
1743static void multipath_presuspend(struct dm_target *ti)
1744{
1745	struct multipath *m = ti->private;
1746
1747	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
1748	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
1749		queue_if_no_path(m, false, true, __func__);
1750}
1751
1752static void multipath_postsuspend(struct dm_target *ti)
1753{
1754	struct multipath *m = ti->private;
1755
1756	mutex_lock(&m->work_mutex);
1757	flush_multipath_work(m);
1758	mutex_unlock(&m->work_mutex);
1759}
1760
1761/*
1762 * Restore the queue_if_no_path setting.
1763 */
1764static void multipath_resume(struct dm_target *ti)
1765{
1766	struct multipath *m = ti->private;
1767	unsigned long flags;
1768
1769	spin_lock_irqsave(&m->lock, flags);
1770	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
1771		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
1772		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
1773	}
1774
1775	DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
1776		dm_table_device_name(m->ti->table), __func__,
1777		test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
1778		test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1779
1780	spin_unlock_irqrestore(&m->lock, flags);
1781}
1782
1783/*
1784 * Info output has the following format:
1785 * num_multipath_feature_args [multipath_feature_args]*
1786 * num_handler_status_args [handler_status_args]*
1787 * num_groups init_group_number
1788 *            [A|D|E num_ps_status_args [ps_status_args]*
1789 *             num_paths num_selector_args
1790 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1791 *
1792 * Table output has the following format (identical to the constructor string):
1793 * num_feature_args [features_args]*
1794 * num_handler_args hw_handler [hw_handler_args]*
1795 * num_groups init_group_number
1796 *     [priority selector-name num_ps_args [ps_args]*
1797 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1798 */
1799static void multipath_status(struct dm_target *ti, status_type_t type,
1800			     unsigned int status_flags, char *result, unsigned int maxlen)
1801{
1802	int sz = 0, pg_counter, pgpath_counter;
1803	unsigned long flags;
1804	struct multipath *m = ti->private;
1805	struct priority_group *pg;
1806	struct pgpath *p;
1807	unsigned int pg_num;
1808	char state;
1809
1810	spin_lock_irqsave(&m->lock, flags);
1811
1812	/* Features */
1813	if (type == STATUSTYPE_INFO)
1814		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1815		       atomic_read(&m->pg_init_count));
1816	else {
1817		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1818			      (m->pg_init_retries > 0) * 2 +
1819			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1820			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1821			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1822
1823		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1824			DMEMIT("queue_if_no_path ");
1825		if (m->pg_init_retries)
1826			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1827		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1828			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1829		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1830			DMEMIT("retain_attached_hw_handler ");
1831		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1832			switch (m->queue_mode) {
1833			case DM_TYPE_BIO_BASED:
1834				DMEMIT("queue_mode bio ");
1835				break;
1836			default:
1837				WARN_ON_ONCE(true);
1838				break;
1839			}
1840		}
1841	}
1842
1843	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1844		DMEMIT("0 ");
1845	else
1846		DMEMIT("1 %s ", m->hw_handler_name);
1847
1848	DMEMIT("%u ", m->nr_priority_groups);
1849
1850	if (m->next_pg)
1851		pg_num = m->next_pg->pg_num;
1852	else if (m->current_pg)
1853		pg_num = m->current_pg->pg_num;
1854	else
1855		pg_num = (m->nr_priority_groups ? 1 : 0);
1856
1857	DMEMIT("%u ", pg_num);
1858
1859	switch (type) {
1860	case STATUSTYPE_INFO:
1861		list_for_each_entry(pg, &m->priority_groups, list) {
1862			if (pg->bypassed)
1863				state = 'D';	/* Disabled */
1864			else if (pg == m->current_pg)
1865				state = 'A';	/* Currently Active */
1866			else
1867				state = 'E';	/* Enabled */
1868
1869			DMEMIT("%c ", state);
1870
1871			if (pg->ps.type->status)
1872				sz += pg->ps.type->status(&pg->ps, NULL, type,
1873							  result + sz,
1874							  maxlen - sz);
1875			else
1876				DMEMIT("0 ");
1877
1878			DMEMIT("%u %u ", pg->nr_pgpaths,
1879			       pg->ps.type->info_args);
1880
1881			list_for_each_entry(p, &pg->pgpaths, list) {
1882				DMEMIT("%s %s %u ", p->path.dev->name,
1883				       p->is_active ? "A" : "F",
1884				       p->fail_count);
1885				if (pg->ps.type->status)
1886					sz += pg->ps.type->status(&pg->ps,
1887					      &p->path, type, result + sz,
1888					      maxlen - sz);
1889			}
1890		}
1891		break;
1892
1893	case STATUSTYPE_TABLE:
1894		list_for_each_entry(pg, &m->priority_groups, list) {
1895			DMEMIT("%s ", pg->ps.type->name);
1896
1897			if (pg->ps.type->status)
1898				sz += pg->ps.type->status(&pg->ps, NULL, type,
1899							  result + sz,
1900							  maxlen - sz);
1901			else
1902				DMEMIT("0 ");
1903
1904			DMEMIT("%u %u ", pg->nr_pgpaths,
1905			       pg->ps.type->table_args);
1906
1907			list_for_each_entry(p, &pg->pgpaths, list) {
1908				DMEMIT("%s ", p->path.dev->name);
1909				if (pg->ps.type->status)
1910					sz += pg->ps.type->status(&pg->ps,
1911					      &p->path, type, result + sz,
1912					      maxlen - sz);
1913			}
1914		}
1915		break;
1916
1917	case STATUSTYPE_IMA:
1918		sz = 0; /*reset the result pointer*/
1919
1920		DMEMIT_TARGET_NAME_VERSION(ti->type);
1921		DMEMIT(",nr_priority_groups=%u", m->nr_priority_groups);
1922
1923		pg_counter = 0;
1924		list_for_each_entry(pg, &m->priority_groups, list) {
1925			if (pg->bypassed)
1926				state = 'D';	/* Disabled */
1927			else if (pg == m->current_pg)
1928				state = 'A';	/* Currently Active */
1929			else
1930				state = 'E';	/* Enabled */
1931			DMEMIT(",pg_state_%d=%c", pg_counter, state);
1932			DMEMIT(",nr_pgpaths_%d=%u", pg_counter, pg->nr_pgpaths);
1933			DMEMIT(",path_selector_name_%d=%s", pg_counter, pg->ps.type->name);
1934
1935			pgpath_counter = 0;
1936			list_for_each_entry(p, &pg->pgpaths, list) {
1937				DMEMIT(",path_name_%d_%d=%s,is_active_%d_%d=%c,fail_count_%d_%d=%u",
1938				       pg_counter, pgpath_counter, p->path.dev->name,
1939				       pg_counter, pgpath_counter, p->is_active ? 'A' : 'F',
1940				       pg_counter, pgpath_counter, p->fail_count);
1941				if (pg->ps.type->status) {
1942					DMEMIT(",path_selector_status_%d_%d=",
1943					       pg_counter, pgpath_counter);
1944					sz += pg->ps.type->status(&pg->ps, &p->path,
1945								  type, result + sz,
1946								  maxlen - sz);
1947				}
1948				pgpath_counter++;
1949			}
1950			pg_counter++;
1951		}
1952		DMEMIT(";");
1953		break;
1954	}
1955
1956	spin_unlock_irqrestore(&m->lock, flags);
1957}
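/*
 * Editorial example (not part of the kernel source; values hypothetical):
 * for a map with one priority group of two round-robin paths and no
 * hardware handler, the INFO and TABLE output built above might render as:
 *
 *   dmsetup status mpatha -> 0 409600 multipath 2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *   dmsetup table mpatha  -> 0 409600 multipath 0 0 1 1 round-robin 0 2 1 8:16 1 8:32 1
 *
 * INFO: two feature args (queue_io flag, pg_init_count), "0" handler args,
 * one group, initial group 1, group state 'A', then per path
 * "<dev> A|F <fail_count>".  TABLE mirrors the constructor string.
 */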
1958
1959static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
1960			     char *result, unsigned int maxlen)
1961{
1962	int r = -EINVAL;
1963	struct dm_dev *dev;
1964	struct multipath *m = ti->private;
1965	action_fn action;
1966	unsigned long flags;
1967
1968	mutex_lock(&m->work_mutex);
1969
1970	if (dm_suspended(ti)) {
1971		r = -EBUSY;
1972		goto out;
1973	}
1974
1975	if (argc == 1) {
1976		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1977			r = queue_if_no_path(m, true, false, __func__);
1978			spin_lock_irqsave(&m->lock, flags);
1979			enable_nopath_timeout(m);
1980			spin_unlock_irqrestore(&m->lock, flags);
1981			goto out;
1982		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1983			r = queue_if_no_path(m, false, false, __func__);
1984			disable_nopath_timeout(m);
1985			goto out;
1986		}
1987	}
1988
1989	if (argc != 2) {
1990		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1991		goto out;
1992	}
1993
1994	if (!strcasecmp(argv[0], "disable_group")) {
1995		r = bypass_pg_num(m, argv[1], true);
1996		goto out;
1997	} else if (!strcasecmp(argv[0], "enable_group")) {
1998		r = bypass_pg_num(m, argv[1], false);
1999		goto out;
2000	} else if (!strcasecmp(argv[0], "switch_group")) {
2001		r = switch_pg_num(m, argv[1]);
2002		goto out;
2003	} else if (!strcasecmp(argv[0], "reinstate_path"))
2004		action = reinstate_path;
2005	else if (!strcasecmp(argv[0], "fail_path"))
2006		action = fail_path;
2007	else {
2008		DMWARN("Unrecognised multipath message received: %s", argv[0]);
2009		goto out;
2010	}
2011
2012	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
2013	if (r) {
2014		DMWARN("message: error getting device %s",
2015		       argv[1]);
2016		goto out;
2017	}
2018
2019	r = action_dev(m, dev, action);
2020
2021	dm_put_device(ti, dev);
2022
2023out:
2024	mutex_unlock(&m->work_mutex);
2025	return r;
2026}
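/*
 * Editorial example (not part of the kernel source; device names are
 * hypothetical): these messages are normally issued from userspace with
 * "dmsetup message <map> 0 <msg> [arg]", e.g.:
 *
 *   dmsetup message mpatha 0 fail_path /dev/sdb
 *   dmsetup message mpatha 0 reinstate_path /dev/sdb
 *   dmsetup message mpatha 0 disable_group 2
 *   dmsetup message mpatha 0 queue_if_no_path
 */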
2027
2028static int multipath_prepare_ioctl(struct dm_target *ti,
2029				   struct block_device **bdev)
2030{
2031	struct multipath *m = ti->private;
2032	struct pgpath *pgpath;
2033	unsigned long flags;
2034	int r;
2035
2036	pgpath = READ_ONCE(m->current_pgpath);
2037	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
2038		pgpath = choose_pgpath(m, 0);
2039
2040	if (pgpath) {
2041		if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) {
2042			*bdev = pgpath->path.dev->bdev;
2043			r = 0;
2044		} else {
2045			/* pg_init has not started or completed */
2046			r = -ENOTCONN;
2047		}
2048	} else {
2049		/* No path is available */
2050		r = -EIO;
2051		spin_lock_irqsave(&m->lock, flags);
2052		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
2053			r = -ENOTCONN;
2054		spin_unlock_irqrestore(&m->lock, flags);
2055	}
2056
2057	if (r == -ENOTCONN) {
2058		if (!READ_ONCE(m->current_pg)) {
2059			/* Path status changed, redo selection */
2060			(void) choose_pgpath(m, 0);
2061		}
2062		spin_lock_irqsave(&m->lock, flags);
2063		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
2064			(void) __pg_init_all_paths(m);
2065		spin_unlock_irqrestore(&m->lock, flags);
2066		dm_table_run_md_queue_async(m->ti->table);
2067		process_queued_io_list(m);
2068	}
2069
2070	/*
2071	 * Only pass ioctls through if the device sizes match exactly.
2072	 */
2073	if (!r && ti->len != bdev_nr_sectors((*bdev)))
2074		return 1;
2075	return r;
2076}
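/*
 * Editorial note (hedged; based on the r > 0 handling in dm core's
 * dm_blk_ioctl()): returning 1 above signals that this target does not
 * span the whole path device, in which case dm core only forwards the
 * ioctl (e.g. SG_IO issued against the multipath device) to the chosen
 * path's bdev if the caller has CAP_SYS_RAWIO.
 */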
2077
2078static int multipath_iterate_devices(struct dm_target *ti,
2079				     iterate_devices_callout_fn fn, void *data)
2080{
2081	struct multipath *m = ti->private;
2082	struct priority_group *pg;
2083	struct pgpath *p;
2084	int ret = 0;
2085
2086	list_for_each_entry(pg, &m->priority_groups, list) {
2087		list_for_each_entry(p, &pg->pgpaths, list) {
2088			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
2089			if (ret)
2090				goto out;
2091		}
2092	}
2093
2094out:
2095	return ret;
2096}
2097
2098static int pgpath_busy(struct pgpath *pgpath)
2099{
2100	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2101
2102	return blk_lld_busy(q);
2103}
2104
2105/*
2106 * We return "busy" only when we can map I/Os but the underlying devices
2107 * are busy (so even if we map I/Os now, the I/Os will wait on
2108 * the underlying queue).
2109 * In other words, if we want to kill I/Os or queue them inside us
2110 * due to map unavailability, we don't return "busy".  Otherwise,
2111 * dm core won't give us the I/Os and we can't do what we want.
2112 */
2113static int multipath_busy(struct dm_target *ti)
2114{
2115	bool busy = false, has_active = false;
2116	struct multipath *m = ti->private;
2117	struct priority_group *pg, *next_pg;
2118	struct pgpath *pgpath;
2119
2120	/* pg_init in progress */
2121	if (atomic_read(&m->pg_init_in_progress))
2122		return true;
2123
2124	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
2125	if (!atomic_read(&m->nr_valid_paths)) {
2126		unsigned long flags;
2127
2128		spin_lock_irqsave(&m->lock, flags);
2129		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
2130			spin_unlock_irqrestore(&m->lock, flags);
2131			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
2132		}
2133		spin_unlock_irqrestore(&m->lock, flags);
2134	}
2135
2136	/* Guess which priority_group will be used at next mapping time */
2137	pg = READ_ONCE(m->current_pg);
2138	next_pg = READ_ONCE(m->next_pg);
2139	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
2140		pg = next_pg;
2141
2142	if (!pg) {
2143		/*
2144		 * We don't know which pg will be used at next mapping time.
2145		 * We don't call choose_pgpath() here to avoid triggering
2146		 * pg_init just by busy checking.
2147		 * So we don't know whether the underlying devices we will be
2148		 * using at next mapping time are busy or not. Just try mapping.
2149		 */
2150		return busy;
2151	}
2152
2153	/*
2154	 * If there is at least one non-busy active path, the path selector
2155	 * will be able to select it. So we consider such a pg as not busy.
2156	 */
2157	busy = true;
2158	list_for_each_entry(pgpath, &pg->pgpaths, list) {
2159		if (pgpath->is_active) {
2160			has_active = true;
2161			if (!pgpath_busy(pgpath)) {
2162				busy = false;
2163				break;
2164			}
2165		}
2166	}
2167
2168	if (!has_active) {
2169		/*
2170		 * No active path in this pg, so this pg won't be used and
2171		 * the current_pg will be changed at next mapping time.
2172		 * Try mapping so that the new current_pg can be determined.
2173		 */
2174		busy = false;
2175	}
2176
2177	return busy;
2178}
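/*
 * Editorial summary (not part of the kernel source): the net effect is
 * "busy" while pg_init runs; "busy" for a pathless bio-based map that is
 * queueing I/O; otherwise "busy" only when the pg expected at the next
 * mapping has at least one active path and all of them report
 * blk_lld_busy().
 */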
2179
2180/*
2181 *---------------------------------------------------------------
2182 * Module setup
2183 *---------------------------------------------------------------
2184 */
2185static struct target_type multipath_target = {
2186	.name = "multipath",
2187	.version = {1, 14, 0},
2188	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2189		    DM_TARGET_PASSES_INTEGRITY,
2190	.module = THIS_MODULE,
2191	.ctr = multipath_ctr,
2192	.dtr = multipath_dtr,
2193	.clone_and_map_rq = multipath_clone_and_map,
2194	.release_clone_rq = multipath_release_clone,
2195	.rq_end_io = multipath_end_io,
2196	.map = multipath_map_bio,
2197	.end_io = multipath_end_io_bio,
2198	.presuspend = multipath_presuspend,
2199	.postsuspend = multipath_postsuspend,
2200	.resume = multipath_resume,
2201	.status = multipath_status,
2202	.message = multipath_message,
2203	.prepare_ioctl = multipath_prepare_ioctl,
2204	.iterate_devices = multipath_iterate_devices,
2205	.busy = multipath_busy,
2206};
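/*
 * Editorial example (not part of the kernel source; values hypothetical):
 * a minimal table line accepted by multipath_ctr() -- no feature args, no
 * hardware handler, one priority group, round-robin over two paths with a
 * per-path repeat_count of 1:
 *
 *   echo "0 409600 multipath 0 0 1 1 round-robin 0 2 1 /dev/sdb 1 /dev/sdc 1" | \
 *       dmsetup create mpatha
 */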
2207
2208static int __init dm_multipath_init(void)
2209{
2210	int r = -ENOMEM;
2211
2212	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2213	if (!kmultipathd) {
2214		DMERR("failed to create workqueue kmpathd");
2215		goto bad_alloc_kmultipathd;
2216	}
2217
2218	/*
2219	 * A separate workqueue is used to handle the device handlers
2220	 * to avoid overloading the existing workqueue, which would
2221	 * otherwise create a bottleneck in the storage hardware
2222	 * device activation path.
2223	 */
2224	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2225						  WQ_MEM_RECLAIM);
2226	if (!kmpath_handlerd) {
2227		DMERR("failed to create workqueue kmpath_handlerd");
2228		goto bad_alloc_kmpath_handlerd;
2229	}
2230
2231	dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
2232	if (!dm_mpath_wq) {
2233		DMERR("failed to create workqueue dm_mpath_wq");
2234		goto bad_alloc_dm_mpath_wq;
2235	}
2236
2237	r = dm_register_target(&multipath_target);
2238	if (r < 0)
2239		goto bad_register_target;
2240
2241	return 0;
2242
2243bad_register_target:
2244	destroy_workqueue(dm_mpath_wq);
2245bad_alloc_dm_mpath_wq:
2246	destroy_workqueue(kmpath_handlerd);
2247bad_alloc_kmpath_handlerd:
2248	destroy_workqueue(kmultipathd);
2249bad_alloc_kmultipathd:
2250	return r;
2251}
2252
2253static void __exit dm_multipath_exit(void)
2254{
2255	destroy_workqueue(dm_mpath_wq);
2256	destroy_workqueue(kmpath_handlerd);
2257	destroy_workqueue(kmultipathd);
2258
2259	dm_unregister_target(&multipath_target);
2260}
2261
2262module_init(dm_multipath_init);
2263module_exit(dm_multipath_exit);
2264
2265module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
2266MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
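/*
 * Editorial example (hypothetical values): the timeout is a regular module
 * parameter (mode 0644), so it can be set at load time or at runtime:
 *
 *   modprobe dm-multipath queue_if_no_path_timeout_secs=120
 *   echo 120 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 */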
2267
2268MODULE_DESCRIPTION(DM_NAME " multipath target");
2269MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2270MODULE_LICENSE("GPL");
v4.10.11
 
   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9
  10#include "dm-rq.h"
  11#include "dm-bio-record.h"
  12#include "dm-path-selector.h"
  13#include "dm-uevent.h"
  14
  15#include <linux/blkdev.h>
  16#include <linux/ctype.h>
  17#include <linux/init.h>
  18#include <linux/mempool.h>
  19#include <linux/module.h>
  20#include <linux/pagemap.h>
  21#include <linux/slab.h>
  22#include <linux/time.h>
 
  23#include <linux/workqueue.h>
  24#include <linux/delay.h>
  25#include <scsi/scsi_dh.h>
  26#include <linux/atomic.h>
  27#include <linux/blk-mq.h>
  28
 
 
  29#define DM_MSG_PREFIX "multipath"
  30#define DM_PG_INIT_DELAY_MSECS 2000
  31#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
 
 
 
  32
  33/* Path properties */
  34struct pgpath {
  35	struct list_head list;
  36
  37	struct priority_group *pg;	/* Owning PG */
  38	unsigned fail_count;		/* Cumulative failure count */
  39
  40	struct dm_path path;
  41	struct delayed_work activate_path;
  42
  43	bool is_active:1;		/* Path status */
  44};
  45
  46#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  47
  48/*
  49 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  50 * Each has a path selector which controls which path gets used.
  51 */
  52struct priority_group {
  53	struct list_head list;
  54
  55	struct multipath *m;		/* Owning multipath instance */
  56	struct path_selector ps;
  57
  58	unsigned pg_num;		/* Reference number */
  59	unsigned nr_pgpaths;		/* Number of paths in PG */
  60	struct list_head pgpaths;
  61
  62	bool bypassed:1;		/* Temporarily bypass this PG? */
  63};
  64
  65/* Multipath context */
  66struct multipath {
  67	struct list_head list;
  68	struct dm_target *ti;
  69
  70	const char *hw_handler_name;
  71	char *hw_handler_params;
  72
  73	spinlock_t lock;
  74
  75	unsigned nr_priority_groups;
  76	struct list_head priority_groups;
  77
  78	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  79
  80	struct pgpath *current_pgpath;
  81	struct priority_group *current_pg;
  82	struct priority_group *next_pg;	/* Switch to this PG if set */
  83
  84	unsigned long flags;		/* Multipath state flags */
 
 
  85
  86	unsigned pg_init_retries;	/* Number of times to retry pg_init */
  87	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  88
  89	atomic_t nr_valid_paths;	/* Total number of usable paths */
 
  90	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
  91	atomic_t pg_init_count;		/* Number of times pg_init called */
  92
  93	unsigned queue_mode;
  94
  95	/*
  96	 * We must use a mempool of dm_mpath_io structs so that we
  97	 * can resubmit bios on error.
  98	 */
  99	mempool_t *mpio_pool;
 100
 101	struct mutex work_mutex;
 102	struct work_struct trigger_event;
 
 103
 104	struct work_struct process_queued_bios;
 105	struct bio_list queued_bios;
 
 
 106};
 107
 108/*
 109 * Context information attached to each io we process.
 110 */
 111struct dm_mpath_io {
 112	struct pgpath *pgpath;
 113	size_t nr_bytes;
 
 114};
 115
 116typedef int (*action_fn) (struct pgpath *pgpath);
 117
 118static struct kmem_cache *_mpio_cache;
 119
 120static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 121static void trigger_event(struct work_struct *work);
 122static void activate_path(struct work_struct *work);
 
 123static void process_queued_bios(struct work_struct *work);
 
 124
 125/*-----------------------------------------------
 
 126 * Multipath state flags.
 127 *-----------------------------------------------*/
 128
 129#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
 130#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
 131#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
 132#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
 133#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
 134#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
 135#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
 136
 137/*-----------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 138 * Allocation routines
 139 *-----------------------------------------------*/
 140
 141static struct pgpath *alloc_pgpath(void)
 142{
 143	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 144
 145	if (pgpath) {
 146		pgpath->is_active = true;
 147		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
 148	}
 149
 150	return pgpath;
 151}
 152
 153static void free_pgpath(struct pgpath *pgpath)
 154{
 155	kfree(pgpath);
 156}
 157
 158static struct priority_group *alloc_priority_group(void)
 159{
 160	struct priority_group *pg;
 161
 162	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 163
 164	if (pg)
 165		INIT_LIST_HEAD(&pg->pgpaths);
 166
 167	return pg;
 168}
 169
 170static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 171{
 172	struct pgpath *pgpath, *tmp;
 173
 174	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 175		list_del(&pgpath->list);
 176		dm_put_device(ti, pgpath->path.dev);
 177		free_pgpath(pgpath);
 178	}
 179}
 180
 181static void free_priority_group(struct priority_group *pg,
 182				struct dm_target *ti)
 183{
 184	struct path_selector *ps = &pg->ps;
 185
 186	if (ps->type) {
 187		ps->type->destroy(ps);
 188		dm_put_path_selector(ps->type);
 189	}
 190
 191	free_pgpaths(&pg->pgpaths, ti);
 192	kfree(pg);
 193}
 194
 195static struct multipath *alloc_multipath(struct dm_target *ti)
 196{
 197	struct multipath *m;
 198
 199	m = kzalloc(sizeof(*m), GFP_KERNEL);
 200	if (m) {
 201		INIT_LIST_HEAD(&m->priority_groups);
 202		spin_lock_init(&m->lock);
 203		set_bit(MPATHF_QUEUE_IO, &m->flags);
 204		atomic_set(&m->nr_valid_paths, 0);
 205		atomic_set(&m->pg_init_in_progress, 0);
 206		atomic_set(&m->pg_init_count, 0);
 207		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 208		INIT_WORK(&m->trigger_event, trigger_event);
 209		init_waitqueue_head(&m->pg_init_wait);
 210		mutex_init(&m->work_mutex);
 211
 212		m->mpio_pool = NULL;
 213		m->queue_mode = DM_TYPE_NONE;
 214
 215		m->ti = ti;
 216		ti->private = m;
 
 
 217	}
 218
 219	return m;
 220}
 221
 222static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 223{
 224	if (m->queue_mode == DM_TYPE_NONE) {
 225		/*
 226		 * Default to request-based.
 227		 */
 228		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
 229			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
 230		else
 231			m->queue_mode = DM_TYPE_REQUEST_BASED;
 232	}
 233
 234	if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
 235		unsigned min_ios = dm_get_reserved_rq_based_ios();
 236
 237		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
 238		if (!m->mpio_pool)
 239			return -ENOMEM;
 240	}
 241	else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 242		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 243		/*
 244		 * bio-based doesn't support any direct scsi_dh management;
 245		 * it just discovers if a scsi_dh is attached.
 246		 */
 247		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 248	}
 249
 250	dm_table_set_type(ti->table, m->queue_mode);
 251
 
 
 
 
 
 
 
 
 
 
 252	return 0;
 253}
 254
 255static void free_multipath(struct multipath *m)
 256{
 257	struct priority_group *pg, *tmp;
 258
 259	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 260		list_del(&pg->list);
 261		free_priority_group(pg, m->ti);
 262	}
 263
 264	kfree(m->hw_handler_name);
 265	kfree(m->hw_handler_params);
 266	mempool_destroy(m->mpio_pool);
 267	kfree(m);
 268}
 269
 270static struct dm_mpath_io *get_mpio(union map_info *info)
 271{
 272	return info->ptr;
 273}
 274
 275static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
 276{
 277	struct dm_mpath_io *mpio;
 278
 279	if (!m->mpio_pool) {
 280		/* Use blk-mq pdu memory requested via per_io_data_size */
 281		mpio = get_mpio(info);
 282		memset(mpio, 0, sizeof(*mpio));
 283		return mpio;
 284	}
 285
 286	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
 287	if (!mpio)
 288		return NULL;
 289
 290	memset(mpio, 0, sizeof(*mpio));
 291	info->ptr = mpio;
 292
 293	return mpio;
 294}
 295
 296static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
 297{
 298	/* Only needed for non blk-mq (.request_fn) multipath */
 299	if (m->mpio_pool) {
 300		struct dm_mpath_io *mpio = info->ptr;
 301
 302		info->ptr = NULL;
 303		mempool_free(mpio, m->mpio_pool);
 304	}
 305}
 306
 307static size_t multipath_per_bio_data_size(void)
 308{
 309	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
 310}
 311
 312static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
 313{
 314	return dm_per_bio_data(bio, multipath_per_bio_data_size());
 315}
 316
 317static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
 318{
 319	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
 320	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 321	void *bio_details = mpio + 1;
 322
 323	return bio_details;
 324}
 325
 326static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
 327					struct dm_bio_details **bio_details_p)
 328{
 329	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 330	struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
 
 
 
 
 
 331
 332	memset(mpio, 0, sizeof(*mpio));
 333	memset(bio_details, 0, sizeof(*bio_details));
 334	dm_bio_record(bio_details, bio);
 335
 336	if (mpio_p)
 337		*mpio_p = mpio;
 338	if (bio_details_p)
 339		*bio_details_p = bio_details;
 340}
 341
 342/*-----------------------------------------------
 
 343 * Path selection
 344 *-----------------------------------------------*/
 345
 346static int __pg_init_all_paths(struct multipath *m)
 347{
 348	struct pgpath *pgpath;
 349	unsigned long pg_init_delay = 0;
 350
 
 
 351	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 352		return 0;
 353
 354	atomic_inc(&m->pg_init_count);
 355	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 356
 357	/* Check here to reset pg_init_required */
 358	if (!m->current_pg)
 359		return 0;
 360
 361	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 362		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 363						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 364	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 365		/* Skip failed paths */
 366		if (!pgpath->is_active)
 367			continue;
 368		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 369				       pg_init_delay))
 370			atomic_inc(&m->pg_init_in_progress);
 371	}
 372	return atomic_read(&m->pg_init_in_progress);
 373}
 374
 375static void pg_init_all_paths(struct multipath *m)
 376{
 
 377	unsigned long flags;
 378
 379	spin_lock_irqsave(&m->lock, flags);
 380	__pg_init_all_paths(m);
 381	spin_unlock_irqrestore(&m->lock, flags);
 
 
 382}
 383
 384static void __switch_pg(struct multipath *m, struct priority_group *pg)
 385{
 
 
 386	m->current_pg = pg;
 387
 388	/* Must we initialise the PG first, and queue I/O till it's ready? */
 389	if (m->hw_handler_name) {
 390		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 391		set_bit(MPATHF_QUEUE_IO, &m->flags);
 392	} else {
 393		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 394		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 395	}
 396
 397	atomic_set(&m->pg_init_count, 0);
 398}
 399
 400static struct pgpath *choose_path_in_pg(struct multipath *m,
 401					struct priority_group *pg,
 402					size_t nr_bytes)
 403{
 404	unsigned long flags;
 405	struct dm_path *path;
 406	struct pgpath *pgpath;
 407
 408	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 409	if (!path)
 410		return ERR_PTR(-ENXIO);
 411
 412	pgpath = path_to_pgpath(path);
 413
 414	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
 415		/* Only update current_pgpath if pg changed */
 416		spin_lock_irqsave(&m->lock, flags);
 417		m->current_pgpath = pgpath;
 418		__switch_pg(m, pg);
 419		spin_unlock_irqrestore(&m->lock, flags);
 420	}
 421
 422	return pgpath;
 423}
 424
 425static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 426{
 427	unsigned long flags;
 428	struct priority_group *pg;
 429	struct pgpath *pgpath;
 430	unsigned bypassed = 1;
 431
 432	if (!atomic_read(&m->nr_valid_paths)) {
 
 433		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 
 434		goto failed;
 435	}
 436
 437	/* Were we instructed to switch PG? */
 438	if (lockless_dereference(m->next_pg)) {
 439		spin_lock_irqsave(&m->lock, flags);
 440		pg = m->next_pg;
 441		if (!pg) {
 442			spin_unlock_irqrestore(&m->lock, flags);
 443			goto check_current_pg;
 444		}
 445		m->next_pg = NULL;
 446		spin_unlock_irqrestore(&m->lock, flags);
 447		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 448		if (!IS_ERR_OR_NULL(pgpath))
 449			return pgpath;
 450	}
 451
 452	/* Don't change PG until it has no remaining paths */
 453check_current_pg:
 454	pg = lockless_dereference(m->current_pg);
 455	if (pg) {
 456		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 457		if (!IS_ERR_OR_NULL(pgpath))
 458			return pgpath;
 459	}
 460
 461	/*
 462	 * Loop through priority groups until we find a valid path.
 463	 * First time we skip PGs marked 'bypassed'.
 464	 * Second time we only try the ones we skipped, but set
 465	 * pg_init_delay_retry so we do not hammer controllers.
 466	 */
 467	do {
 468		list_for_each_entry(pg, &m->priority_groups, list) {
 469			if (pg->bypassed == !!bypassed)
 470				continue;
 471			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 472			if (!IS_ERR_OR_NULL(pgpath)) {
 473				if (!bypassed)
 
 474					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 
 
 475				return pgpath;
 476			}
 477		}
 478	} while (bypassed--);
 479
 480failed:
 481	spin_lock_irqsave(&m->lock, flags);
 482	m->current_pgpath = NULL;
 483	m->current_pg = NULL;
 484	spin_unlock_irqrestore(&m->lock, flags);
 485
 486	return NULL;
 487}
 488
 489/*
 
 
 
 
 
 
 
 
 
 
 
 
 490 * Check whether bios must be queued in the device-mapper core rather
 491 * than here in the target.
 492 *
 493 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 494 * same value then we are not between multipath_presuspend()
 495 * and multipath_resume() calls and we have no need to check
 496 * for the DMF_NOFLUSH_SUSPENDING flag.
 497 */
 498static bool __must_push_back(struct multipath *m)
 499{
 500	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
 501		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
 502		dm_noflush_suspending(m->ti));
 503}
 504
 505static bool must_push_back_rq(struct multipath *m)
 506{
 507	bool r;
 508	unsigned long flags;
 
 509
 510	spin_lock_irqsave(&m->lock, flags);
 511	r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
 512	     __must_push_back(m));
 513	spin_unlock_irqrestore(&m->lock, flags);
 514
 515	return r;
 516}
 517
 518static bool must_push_back_bio(struct multipath *m)
 519{
 520	bool r;
 521	unsigned long flags;
 522
 523	spin_lock_irqsave(&m->lock, flags);
 524	r = __must_push_back(m);
 525	spin_unlock_irqrestore(&m->lock, flags);
 526
 527	return r;
 528}
 529
 530/*
 531 * Map cloned requests (request-based multipath)
 532 */
 533static int __multipath_map(struct dm_target *ti, struct request *clone,
 534			   union map_info *map_context,
 535			   struct request *rq, struct request **__clone)
 536{
 537	struct multipath *m = ti->private;
 538	int r = DM_MAPIO_REQUEUE;
 539	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
 540	struct pgpath *pgpath;
 541	struct block_device *bdev;
 542	struct dm_mpath_io *mpio;
 
 
 543
 544	/* Do we need to select a new pgpath? */
 545	pgpath = lockless_dereference(m->current_pgpath);
 546	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 547		pgpath = choose_pgpath(m, nr_bytes);
 548
 549	if (!pgpath) {
 550		if (must_push_back_rq(m))
 551			return DM_MAPIO_DELAY_REQUEUE;
 552		return -EIO;	/* Failed */
 553	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 554		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 
 555		pg_init_all_paths(m);
 556		return r;
 557	}
 558
 559	mpio = set_mpio(m, map_context);
 560	if (!mpio)
 561		/* ENOMEM, requeue */
 562		return r;
 563
 564	mpio->pgpath = pgpath;
 565	mpio->nr_bytes = nr_bytes;
 566
 567	bdev = pgpath->path.dev->bdev;
 
 
 
 
 
 
 
 
 
 
 568
 569	if (clone) {
 570		/*
 571		 * Old request-based interface: allocated clone is passed in.
 572		 * Used by: .request_fn stacked on .request_fn path(s).
 
 
 
 573		 */
 574		clone->q = bdev_get_queue(bdev);
 575		clone->rq_disk = bdev->bd_disk;
 576		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 577	} else {
 578		/*
 579		 * blk-mq request-based interface; used by both:
 580		 * .request_fn stacked on blk-mq path(s) and
 581		 * blk-mq stacked on blk-mq path(s).
 582		 */
 583		clone = blk_mq_alloc_request(bdev_get_queue(bdev),
 584					     rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
 585		if (IS_ERR(clone)) {
 586			/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 587			clear_request_fn_mpio(m, map_context);
 588			return r;
 589		}
 590		clone->bio = clone->biotail = NULL;
 591		clone->rq_disk = bdev->bd_disk;
 592		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 593		*__clone = clone;
 594	}
 
 
 
 595
 596	if (pgpath->pg->ps.type->start_io)
 597		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 598					      &pgpath->path,
 599					      nr_bytes);
 600	return DM_MAPIO_REMAPPED;
 601}
 602
 603static int multipath_map(struct dm_target *ti, struct request *clone,
 604			 union map_info *map_context)
 605{
 606	return __multipath_map(ti, clone, map_context, NULL, NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 607}
 608
 609static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 610				   union map_info *map_context,
 611				   struct request **clone)
 
 
 612{
 613	return __multipath_map(ti, NULL, map_context, rq, clone);
 
 
 
 614}
 615
 616static void multipath_release_clone(struct request *clone)
 617{
 618	blk_mq_free_request(clone);
 
 
 
 
 619}
 620
 621/*
 622 * Map cloned bios (bio-based multipath)
 623 */
 624static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
 625{
 626	size_t nr_bytes = bio->bi_iter.bi_size;
 627	struct pgpath *pgpath;
 628	unsigned long flags;
 629	bool queue_io;
 630
 631	/* Do we need to select a new pgpath? */
 632	pgpath = lockless_dereference(m->current_pgpath);
 633	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 634	if (!pgpath || !queue_io)
 635		pgpath = choose_pgpath(m, nr_bytes);
 636
 637	if ((pgpath && queue_io) ||
 638	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 639		/* Queue for the daemon to resubmit */
 640		spin_lock_irqsave(&m->lock, flags);
 641		bio_list_add(&m->queued_bios, bio);
 
 
 
 642		spin_unlock_irqrestore(&m->lock, flags);
 643		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
 644		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 645			pg_init_all_paths(m);
 646		else if (!queue_io)
 647			queue_work(kmultipathd, &m->process_queued_bios);
 
 
 
 
 
 
 
 
 
 
 
 
 648		return DM_MAPIO_SUBMITTED;
 649	}
 650
 651	if (!pgpath) {
 652		if (!must_push_back_bio(m))
 653			return -EIO;
 654		return DM_MAPIO_REQUEUE;
 
 655	}
 656
 657	mpio->pgpath = pgpath;
 658	mpio->nr_bytes = nr_bytes;
 659
 660	bio->bi_error = 0;
 661	bio->bi_bdev = pgpath->path.dev->bdev;
 
 
 
 662	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 663
 664	if (pgpath->pg->ps.type->start_io)
 665		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 666					      &pgpath->path,
 667					      nr_bytes);
 668	return DM_MAPIO_REMAPPED;
 669}
 670
 671static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 672{
 673	struct multipath *m = ti->private;
 674	struct dm_mpath_io *mpio = NULL;
 675
 676	multipath_init_per_bio_data(bio, &mpio, NULL);
 677
 678	return __multipath_map_bio(m, bio, mpio);
 679}
 680
 681static void process_queued_io_list(struct multipath *m)
 682{
 683	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 684		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
 685	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 686		queue_work(kmultipathd, &m->process_queued_bios);
 687}
 688
 689static void process_queued_bios(struct work_struct *work)
 690{
 691	int r;
 692	unsigned long flags;
 693	struct bio *bio;
 694	struct bio_list bios;
 695	struct blk_plug plug;
 696	struct multipath *m =
 697		container_of(work, struct multipath, process_queued_bios);
 698
 699	bio_list_init(&bios);
 700
 701	spin_lock_irqsave(&m->lock, flags);
 702
 703	if (bio_list_empty(&m->queued_bios)) {
 704		spin_unlock_irqrestore(&m->lock, flags);
 705		return;
 706	}
 707
 708	bio_list_merge(&bios, &m->queued_bios);
 709	bio_list_init(&m->queued_bios);
 710
 711	spin_unlock_irqrestore(&m->lock, flags);
 712
 713	blk_start_plug(&plug);
 714	while ((bio = bio_list_pop(&bios))) {
 715		r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
 716		if (r < 0 || r == DM_MAPIO_REQUEUE) {
 717			bio->bi_error = r;
 
 
 
 
 718			bio_endio(bio);
 719		} else if (r == DM_MAPIO_REMAPPED)
 720			generic_make_request(bio);
 
 
 
 
 
 
 
 
 
 
 
 721	}
 722	blk_finish_plug(&plug);
 723}
 724
 725/*
 726 * If we run out of usable paths, should we queue I/O or error it?
 727 */
 728static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 729			    bool save_old_value)
 730{
 731	unsigned long flags;
 
 
 
 
 
 732
 733	spin_lock_irqsave(&m->lock, flags);
 734
 
 
 
 735	if (save_old_value) {
 736		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 737			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 738		else
 739			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 740	} else {
 741		if (queue_if_no_path)
 742			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 743		else
 744			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
 745	}
 746	if (queue_if_no_path)
 747		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 748	else
 749		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 
 
 750
 751	spin_unlock_irqrestore(&m->lock, flags);
 752
 753	if (!queue_if_no_path) {
 754		dm_table_run_md_queue_async(m->ti->table);
 755		process_queued_io_list(m);
 756	}
 757
 758	return 0;
 759}
 760
 761/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 762 * An event is triggered whenever a path is taken out of use.
 763 * Includes path failure and PG bypass.
 764 */
 765static void trigger_event(struct work_struct *work)
 766{
 767	struct multipath *m =
 768		container_of(work, struct multipath, trigger_event);
 769
 770	dm_table_event(m->ti->table);
 771}
 772
 773/*-----------------------------------------------------------------
 
 774 * Constructor/argument parsing:
 775 * <#multipath feature args> [<arg>]*
 776 * <#hw_handler args> [hw_handler [<arg>]*]
 777 * <#priority groups>
 778 * <initial priority group>
 779 *     [<selector> <#selector args> [<arg>]*
 780 *      <#paths> <#per-path selector args>
 781 *         [<path> [<arg>]* ]+ ]+
 782 *---------------------------------------------------------------*/
 
 783static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 784			       struct dm_target *ti)
 785{
 786	int r;
 787	struct path_selector_type *pst;
 788	unsigned ps_argc;
 789
 790	static struct dm_arg _args[] = {
 791		{0, 1024, "invalid number of path selector args"},
 792	};
 793
 794	pst = dm_get_path_selector(dm_shift_arg(as));
 795	if (!pst) {
 796		ti->error = "unknown path selector type";
 797		return -EINVAL;
 798	}
 799
 800	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 801	if (r) {
 802		dm_put_path_selector(pst);
 803		return -EINVAL;
 804	}
 805
 806	r = pst->create(&pg->ps, ps_argc, as->argv);
 807	if (r) {
 808		dm_put_path_selector(pst);
 809		ti->error = "path selector constructor failed";
 810		return r;
 811	}
 812
 813	pg->ps.type = pst;
 814	dm_consume_args(as, ps_argc);
 815
 816	return 0;
 817}
 818
 819static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 820			       struct dm_target *ti)
 821{
 
 822	int r;
 823	struct pgpath *p;
 824	struct multipath *m = ti->private;
 825	struct request_queue *q = NULL;
 826	const char *attached_handler_name;
 827
 828	/* we need at least a path arg */
 829	if (as->argc < 1) {
 830		ti->error = "no device given";
 831		return ERR_PTR(-EINVAL);
 832	}
 833
 834	p = alloc_pgpath();
 835	if (!p)
 836		return ERR_PTR(-ENOMEM);
 837
 838	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 839			  &p->path.dev);
 840	if (r) {
 841		ti->error = "error getting device";
 842		goto bad;
 843	}
 844
 845	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
 846		q = bdev_get_queue(p->path.dev->bdev);
 847
 848	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 849retain:
 850		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 851		if (attached_handler_name) {
 852			/*
 853			 * Clear any hw_handler_params associated with a
 854			 * handler that isn't already attached.
 855			 */
 856			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
 857				kfree(m->hw_handler_params);
 858				m->hw_handler_params = NULL;
 859			}
 860
 861			/*
 862			 * Reset hw_handler_name to match the attached handler
 863			 *
 864			 * NB. This modifies the table line to show the actual
 865			 * handler instead of the original table passed in.
 866			 */
 867			kfree(m->hw_handler_name);
 868			m->hw_handler_name = attached_handler_name;
 
 869		}
 870	}
 871
 872	if (m->hw_handler_name) {
 873		r = scsi_dh_attach(q, m->hw_handler_name);
 874		if (r == -EBUSY) {
 875			char b[BDEVNAME_SIZE];
 876
 877			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
 878				bdevname(p->path.dev->bdev, b));
 879			goto retain;
 880		}
 881		if (r < 0) {
 882			ti->error = "error attaching hardware handler";
 883			dm_put_device(ti, p->path.dev);
 884			goto bad;
 885		}
 886
 887		if (m->hw_handler_params) {
 888			r = scsi_dh_set_params(q, m->hw_handler_params);
 889			if (r < 0) {
 890				ti->error = "unable to set hardware "
 891							"handler parameters";
 892				dm_put_device(ti, p->path.dev);
 893				goto bad;
 894			}
 895		}
 896	}
 897
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 898	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 899	if (r) {
 900		dm_put_device(ti, p->path.dev);
 901		goto bad;
 902	}
 903
 904	return p;
 905
 906 bad:
 907	free_pgpath(p);
 908	return ERR_PTR(r);
 909}
 910
 911static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 912						   struct multipath *m)
 913{
 914	static struct dm_arg _args[] = {
 915		{1, 1024, "invalid number of paths"},
 916		{0, 1024, "invalid number of selector args"}
 917	};
 918
 919	int r;
 920	unsigned i, nr_selector_args, nr_args;
 921	struct priority_group *pg;
 922	struct dm_target *ti = m->ti;
 923
 924	if (as->argc < 2) {
 925		as->argc = 0;
 926		ti->error = "not enough priority group arguments";
 927		return ERR_PTR(-EINVAL);
 928	}
 929
 930	pg = alloc_priority_group();
 931	if (!pg) {
 932		ti->error = "couldn't allocate priority group";
 933		return ERR_PTR(-ENOMEM);
 934	}
 935	pg->m = m;
 936
 937	r = parse_path_selector(as, pg, ti);
 938	if (r)
 939		goto bad;
 940
 941	/*
 942	 * read the paths
 943	 */
 944	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 945	if (r)
 946		goto bad;
 947
 948	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 949	if (r)
 950		goto bad;
 951
 952	nr_args = 1 + nr_selector_args;
 953	for (i = 0; i < pg->nr_pgpaths; i++) {
 954		struct pgpath *pgpath;
 955		struct dm_arg_set path_args;
 956
 957		if (as->argc < nr_args) {
 958			ti->error = "not enough path parameters";
 959			r = -EINVAL;
 960			goto bad;
 961		}
 962
 963		path_args.argc = nr_args;
 964		path_args.argv = as->argv;
 965
 966		pgpath = parse_path(&path_args, &pg->ps, ti);
 967		if (IS_ERR(pgpath)) {
 968			r = PTR_ERR(pgpath);
 969			goto bad;
 970		}
 971
 972		pgpath->pg = pg;
 973		list_add_tail(&pgpath->list, &pg->pgpaths);
 974		dm_consume_args(as, nr_args);
 975	}
 976
 977	return pg;
 978
 979 bad:
 980	free_priority_group(pg, ti);
 981	return ERR_PTR(r);
 982}
 983
 984static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 985{
 986	unsigned hw_argc;
 987	int ret;
 988	struct dm_target *ti = m->ti;
 989
 990	static struct dm_arg _args[] = {
 991		{0, 1024, "invalid number of hardware handler args"},
 992	};
 993
 994	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
 995		return -EINVAL;
 996
 997	if (!hw_argc)
 998		return 0;
 999
1000	if (m->queue_mode == DM_TYPE_BIO_BASED) {
1001		dm_consume_args(as, hw_argc);
1002		DMERR("bio-based multipath doesn't allow hardware handler args");
1003		return 0;
1004	}
1005
1006	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1007	if (!m->hw_handler_name)
1008		return -EINVAL;
1009
1010	if (hw_argc > 1) {
1011		char *p;
1012		int i, j, len = 4;
1013
1014		for (i = 0; i <= hw_argc - 2; i++)
1015			len += strlen(as->argv[i]) + 1;
1016		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1017		if (!p) {
1018			ti->error = "memory allocation failed";
1019			ret = -ENOMEM;
1020			goto fail;
1021		}
1022		j = sprintf(p, "%d", hw_argc - 1);
1023		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
1024			j = sprintf(p, "%s", as->argv[i]);
1025	}
1026	dm_consume_args(as, hw_argc - 1);
1027
1028	return 0;
1029fail:
1030	kfree(m->hw_handler_name);
1031	m->hw_handler_name = NULL;
1032	return ret;
1033}
1034
1035static int parse_features(struct dm_arg_set *as, struct multipath *m)
1036{
1037	int r;
1038	unsigned argc;
1039	struct dm_target *ti = m->ti;
1040	const char *arg_name;
1041
1042	static struct dm_arg _args[] = {
1043		{0, 8, "invalid number of feature args"},
1044		{1, 50, "pg_init_retries must be between 1 and 50"},
1045		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1046	};
1047
1048	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1049	if (r)
1050		return -EINVAL;
1051
1052	if (!argc)
1053		return 0;
1054
1055	do {
1056		arg_name = dm_shift_arg(as);
1057		argc--;
1058
1059		if (!strcasecmp(arg_name, "queue_if_no_path")) {
1060			r = queue_if_no_path(m, true, false);
1061			continue;
1062		}
1063
1064		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1065			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1066			continue;
1067		}
1068
1069		if (!strcasecmp(arg_name, "pg_init_retries") &&
1070		    (argc >= 1)) {
1071			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1072			argc--;
1073			continue;
1074		}
1075
1076		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1077		    (argc >= 1)) {
1078			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1079			argc--;
1080			continue;
1081		}
1082
1083		if (!strcasecmp(arg_name, "queue_mode") &&
1084		    (argc >= 1)) {
1085			const char *queue_mode_name = dm_shift_arg(as);
1086
1087			if (!strcasecmp(queue_mode_name, "bio"))
1088				m->queue_mode = DM_TYPE_BIO_BASED;
1089			else if (!strcasecmp(queue_mode_name, "rq"))
 
1090				m->queue_mode = DM_TYPE_REQUEST_BASED;
1091			else if (!strcasecmp(queue_mode_name, "mq"))
1092				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
1093			else {
1094				ti->error = "Unknown 'queue_mode' requested";
1095				r = -EINVAL;
1096			}
1097			argc--;
1098			continue;
1099		}
1100
1101		ti->error = "Unrecognised multipath feature request";
1102		r = -EINVAL;
1103	} while (argc && !r);
1104
1105	return r;
1106}
1107
1108static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1109{
1110	/* target arguments */
1111	static struct dm_arg _args[] = {
1112		{0, 1024, "invalid number of priority groups"},
1113		{0, 1024, "invalid initial priority group number"},
1114	};
1115
1116	int r;
1117	struct multipath *m;
1118	struct dm_arg_set as;
1119	unsigned pg_count = 0;
1120	unsigned next_pg_num;
 
1121
1122	as.argc = argc;
1123	as.argv = argv;
1124
1125	m = alloc_multipath(ti);
1126	if (!m) {
1127		ti->error = "can't allocate multipath";
1128		return -EINVAL;
1129	}
1130
1131	r = parse_features(&as, m);
1132	if (r)
1133		goto bad;
1134
1135	r = alloc_multipath_stage2(ti, m);
1136	if (r)
1137		goto bad;
1138
1139	r = parse_hw_handler(&as, m);
1140	if (r)
1141		goto bad;
1142
1143	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1144	if (r)
1145		goto bad;
1146
1147	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1148	if (r)
1149		goto bad;
1150
1151	if ((!m->nr_priority_groups && next_pg_num) ||
1152	    (m->nr_priority_groups && !next_pg_num)) {
1153		ti->error = "invalid initial priority group";
1154		r = -EINVAL;
1155		goto bad;
1156	}
1157
1158	/* parse the priority groups */
1159	while (as.argc) {
1160		struct priority_group *pg;
1161		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1162
1163		pg = parse_priority_group(&as, m);
1164		if (IS_ERR(pg)) {
1165			r = PTR_ERR(pg);
1166			goto bad;
1167		}
1168
1169		nr_valid_paths += pg->nr_pgpaths;
1170		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1171
1172		list_add_tail(&pg->list, &m->priority_groups);
1173		pg_count++;
1174		pg->pg_num = pg_count;
1175		if (!--next_pg_num)
1176			m->next_pg = pg;
1177	}
1178
1179	if (pg_count != m->nr_priority_groups) {
1180		ti->error = "priority group count mismatch";
1181		r = -EINVAL;
1182		goto bad;
1183	}
1184
 
 
 
 
1185	ti->num_flush_bios = 1;
1186	ti->num_discard_bios = 1;
1187	ti->num_write_same_bios = 1;
1188	if (m->queue_mode == DM_TYPE_BIO_BASED)
1189		ti->per_io_data_size = multipath_per_bio_data_size();
1190	else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
1191		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1192
1193	return 0;
1194
1195 bad:
1196	free_multipath(m);
1197	return r;
1198}
1199
1200static void multipath_wait_for_pg_init_completion(struct multipath *m)
1201{
1202	DEFINE_WAIT(wait);
1203
1204	while (1) {
1205		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1206
1207		if (!atomic_read(&m->pg_init_in_progress))
1208			break;
1209
1210		io_schedule();
1211	}
1212	finish_wait(&m->pg_init_wait, &wait);
1213}
1214
1215static void flush_multipath_work(struct multipath *m)
1216{
1217	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1218	smp_mb__after_atomic();
 
 
 
1219
1220	flush_workqueue(kmpath_handlerd);
1221	multipath_wait_for_pg_init_completion(m);
1222	flush_workqueue(kmultipathd);
 
 
 
 
 
 
 
 
 
 
 
 
 
1223	flush_work(&m->trigger_event);
1224
1225	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1226	smp_mb__after_atomic();
1227}
1228
1229static void multipath_dtr(struct dm_target *ti)
1230{
1231	struct multipath *m = ti->private;
1232
 
1233	flush_multipath_work(m);
1234	free_multipath(m);
1235}
1236
1237/*
1238 * Take a path out of use.
1239 */
1240static int fail_path(struct pgpath *pgpath)
1241{
1242	unsigned long flags;
1243	struct multipath *m = pgpath->pg->m;
1244
1245	spin_lock_irqsave(&m->lock, flags);
1246
1247	if (!pgpath->is_active)
1248		goto out;
1249
1250	DMWARN("Failing path %s.", pgpath->path.dev->name);
 
 
1251
1252	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1253	pgpath->is_active = false;
1254	pgpath->fail_count++;
1255
1256	atomic_dec(&m->nr_valid_paths);
1257
1258	if (pgpath == m->current_pgpath)
1259		m->current_pgpath = NULL;
1260
1261	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1262		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1263
1264	schedule_work(&m->trigger_event);
 
 
1265
1266out:
1267	spin_unlock_irqrestore(&m->lock, flags);
1268
1269	return 0;
1270}
1271
1272/*
1273 * Reinstate a previously-failed path
1274 */
1275static int reinstate_path(struct pgpath *pgpath)
1276{
1277	int r = 0, run_queue = 0;
1278	unsigned long flags;
1279	struct multipath *m = pgpath->pg->m;
1280	unsigned nr_valid_paths;
1281
1282	spin_lock_irqsave(&m->lock, flags);
1283
1284	if (pgpath->is_active)
1285		goto out;
1286
1287	DMWARN("Reinstating path %s.", pgpath->path.dev->name);
 
 
1288
1289	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1290	if (r)
1291		goto out;
1292
1293	pgpath->is_active = true;
1294
1295	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1296	if (nr_valid_paths == 1) {
1297		m->current_pgpath = NULL;
1298		run_queue = 1;
1299	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1300		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1301			atomic_inc(&m->pg_init_in_progress);
1302	}
1303
1304	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1305		       pgpath->path.dev->name, nr_valid_paths);
1306
1307	schedule_work(&m->trigger_event);
1308
1309out:
1310	spin_unlock_irqrestore(&m->lock, flags);
1311	if (run_queue) {
1312		dm_table_run_md_queue_async(m->ti->table);
1313		process_queued_io_list(m);
1314	}
1315
 
 
 
1316	return r;
1317}
1318
1319/*
1320 * Fail or reinstate all paths that match the provided struct dm_dev.
1321 */
1322static int action_dev(struct multipath *m, struct dm_dev *dev,
1323		      action_fn action)
1324{
1325	int r = -EINVAL;
1326	struct pgpath *pgpath;
1327	struct priority_group *pg;
1328
1329	list_for_each_entry(pg, &m->priority_groups, list) {
1330		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1331			if (pgpath->path.dev == dev)
1332				r = action(pgpath);
1333		}
1334	}
1335
1336	return r;
1337}
1338
1339/*
1340 * Temporarily try to avoid having to use the specified PG
1341 */
1342static void bypass_pg(struct multipath *m, struct priority_group *pg,
1343		      bool bypassed)
1344{
1345	unsigned long flags;
1346
1347	spin_lock_irqsave(&m->lock, flags);
1348
1349	pg->bypassed = bypassed;
1350	m->current_pgpath = NULL;
1351	m->current_pg = NULL;
1352
1353	spin_unlock_irqrestore(&m->lock, flags);
1354
1355	schedule_work(&m->trigger_event);
1356}
1357
1358/*
1359 * Switch to using the specified PG from the next I/O that gets mapped
1360 */
1361static int switch_pg_num(struct multipath *m, const char *pgstr)
1362{
1363	struct priority_group *pg;
1364	unsigned pgnum;
1365	unsigned long flags;
1366	char dummy;
1367
1368	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1369	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1370		DMWARN("invalid PG number supplied to switch_pg_num");
1371		return -EINVAL;
1372	}
1373
1374	spin_lock_irqsave(&m->lock, flags);
1375	list_for_each_entry(pg, &m->priority_groups, list) {
1376		pg->bypassed = false;
1377		if (--pgnum)
1378			continue;
1379
1380		m->current_pgpath = NULL;
1381		m->current_pg = NULL;
1382		m->next_pg = pg;
1383	}
1384	spin_unlock_irqrestore(&m->lock, flags);
1385
1386	schedule_work(&m->trigger_event);
1387	return 0;
1388}
1389
1390/*
1391 * Set/clear bypassed status of a PG.
1392 * PGs are numbered upwards from 1 in the order they were declared.
1393 */
1394static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1395{
1396	struct priority_group *pg;
1397	unsigned pgnum;
1398	char dummy;
1399
1400	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1401	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1402		DMWARN("invalid PG number supplied to bypass_pg");
1403		return -EINVAL;
1404	}
1405
1406	list_for_each_entry(pg, &m->priority_groups, list) {
1407		if (!--pgnum)
1408			break;
1409	}
1410
1411	bypass_pg(m, pg, bypassed);
1412	return 0;
1413}
1414
1415/*
1416 * Should we retry pg_init immediately?
1417 */
1418static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1419{
1420	unsigned long flags;
1421	bool limit_reached = false;
1422
1423	spin_lock_irqsave(&m->lock, flags);
1424
1425	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1426	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1427		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1428	else
1429		limit_reached = true;
1430
1431	spin_unlock_irqrestore(&m->lock, flags);
1432
1433	return limit_reached;
1434}
1435
1436static void pg_init_done(void *data, int errors)
1437{
1438	struct pgpath *pgpath = data;
1439	struct priority_group *pg = pgpath->pg;
1440	struct multipath *m = pg->m;
1441	unsigned long flags;
1442	bool delay_retry = false;
1443
1444	/* device or driver problems */
1445	switch (errors) {
1446	case SCSI_DH_OK:
1447		break;
1448	case SCSI_DH_NOSYS:
1449		if (!m->hw_handler_name) {
1450			errors = 0;
1451			break;
1452		}
1453		DMERR("Could not failover the device: Handler scsi_dh_%s "
1454		      "Error %d.", m->hw_handler_name, errors);
1455		/*
1456		 * Fail path for now, so we do not ping pong
1457		 */
1458		fail_path(pgpath);
1459		break;
1460	case SCSI_DH_DEV_TEMP_BUSY:
1461		/*
1462		 * Probably doing something like FW upgrade on the
1463		 * controller so try the other pg.
1464		 */
1465		bypass_pg(m, pg, true);
1466		break;
1467	case SCSI_DH_RETRY:
1468		/* Wait before retrying. */
1469		delay_retry = 1;
 
1470	case SCSI_DH_IMM_RETRY:
1471	case SCSI_DH_RES_TEMP_UNAVAIL:
1472		if (pg_init_limit_reached(m, pgpath))
1473			fail_path(pgpath);
1474		errors = 0;
1475		break;
1476	case SCSI_DH_DEV_OFFLINED:
1477	default:
1478		/*
1479		 * We probably do not want to fail the path for a device
1480		 * error, but this is what the old dm did. In future
1481		 * patches we can do more advanced handling.
1482		 */
1483		fail_path(pgpath);
1484	}
1485
1486	spin_lock_irqsave(&m->lock, flags);
1487	if (errors) {
1488		if (pgpath == m->current_pgpath) {
1489			DMERR("Could not failover device. Error %d.", errors);
1490			m->current_pgpath = NULL;
1491			m->current_pg = NULL;
1492		}
1493	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1494		pg->bypassed = false;
1495
1496	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1497		/* Activations of other paths are still on going */
1498		goto out;
1499
1500	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1501		if (delay_retry)
1502			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1503		else
1504			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1505
1506		if (__pg_init_all_paths(m))
1507			goto out;
1508	}
1509	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1510
1511	process_queued_io_list(m);
1512
1513	/*
1514	 * Wake up any thread waiting to suspend.
1515	 */
1516	wake_up(&m->pg_init_wait);
1517
1518out:
1519	spin_unlock_irqrestore(&m->lock, flags);
1520}
1521
1522static void activate_path(struct work_struct *work)
1523{
1524	struct pgpath *pgpath =
1525		container_of(work, struct pgpath, activate_path.work);
1526	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1527
1528	if (pgpath->is_active && !blk_queue_dying(q))
1529		scsi_dh_activate(q, pg_init_done, pgpath);
1530	else
1531		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1532}
1533
1534static int noretry_error(int error)
1535{
1536	switch (error) {
1537	case -EBADE:
1538		/*
1539		 * EBADE signals an reservation conflict.
1540		 * We shouldn't fail the path here as we can communicate with
1541		 * the target.  We should failover to the next path, but in
1542		 * doing so we might be causing a ping-pong between paths.
1543		 * So just return the reservation conflict error.
1544		 */
1545	case -EOPNOTSUPP:
1546	case -EREMOTEIO:
1547	case -EILSEQ:
1548	case -ENODATA:
1549	case -ENOSPC:
1550		return 1;
1551	}
1552
1553	/* Anything else could be a path failure, so should be retried */
1554	return 0;
1555}
1556
1557/*
1558 * end_io handling
1559 */
1560static int do_end_io(struct multipath *m, struct request *clone,
1561		     int error, struct dm_mpath_io *mpio)
1562{
 
 
 
 
1563	/*
1564	 * We don't queue any clone request inside the multipath target
1565	 * during end I/O handling, since those clone requests don't have
1566	 * bio clones.  If we queue them inside the multipath target,
1567	 * we need to make bio clones, that requires memory allocation.
1568	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1569	 *  don't have bio clones.)
1570	 * Instead of queueing the clone request here, we queue the original
1571	 * request into dm core, which will remake a clone request and
1572	 * clone bios for it and resubmit it later.
1573	 */
1574	int r = DM_ENDIO_REQUEUE;
 
1575
1576	if (!error && !clone->errors)
1577		return 0;	/* I/O complete */
 
 
1578
1579	if (noretry_error(error))
1580		return error;
1581
1582	if (mpio->pgpath)
1583		fail_path(mpio->pgpath);
1584
1585	if (!atomic_read(&m->nr_valid_paths)) {
1586		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1587			if (!must_push_back_rq(m))
1588				r = -EIO;
 
 
1589		}
1590	}
1591
1592	return r;
1593}
1594
1595static int multipath_end_io(struct dm_target *ti, struct request *clone,
1596			    int error, union map_info *map_context)
1597{
1598	struct multipath *m = ti->private;
1599	struct dm_mpath_io *mpio = get_mpio(map_context);
1600	struct pgpath *pgpath;
1601	struct path_selector *ps;
1602	int r;
1603
1604	BUG_ON(!mpio);
1605
1606	r = do_end_io(m, clone, error, mpio);
1607	pgpath = mpio->pgpath;
1608	if (pgpath) {
1609		ps = &pgpath->pg->ps;
1610		if (ps->type->end_io)
1611			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 
1612	}
1613	clear_request_fn_mpio(m, map_context);
1614
1615	return r;
1616}
1617
1618static int do_end_io_bio(struct multipath *m, struct bio *clone,
1619			 int error, struct dm_mpath_io *mpio)
1620{
 
 
 
1621	unsigned long flags;
 
1622
1623	if (!error)
1624		return 0;	/* I/O complete */
1625
1626	if (noretry_error(error))
1627		return error;
1628
1629	if (mpio->pgpath)
1630		fail_path(mpio->pgpath);
1631
1632	if (!atomic_read(&m->nr_valid_paths)) {
 
1633		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1634			if (!must_push_back_bio(m))
1635				return -EIO;
1636			return DM_ENDIO_REQUEUE;
 
 
 
 
 
1637		}
 
1638	}
1639
1640	/* Queue for the daemon to resubmit */
1641	dm_bio_restore(get_bio_details_from_bio(clone), clone);
 
 
 
1642
1643	spin_lock_irqsave(&m->lock, flags);
1644	bio_list_add(&m->queued_bios, clone);
1645	spin_unlock_irqrestore(&m->lock, flags);
1646	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1647		queue_work(kmultipathd, &m->process_queued_bios);
1648
1649	return DM_ENDIO_INCOMPLETE;
1650}

static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);

	r = do_end_io_bio(m, clone, error, mpio);
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed, so if
 * the last path fails we must error any remaining I/O.
 * Note that if freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	queue_if_no_path(m, false, true);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	else
		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
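/*
 * As a concrete illustration (the device numbers are hypothetical), a
 * table line for a single group of two round-robin paths might look like:
 *
 *   1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. one feature arg, no hardware handler, one group (which is also
 * the initial group), a round-robin selector with no selector args, and
 * two paths carrying one selector arg (the repeat count) each.
 */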
static void multipath_status(struct dm_target *ti, status_type_t type,
			     unsigned status_flags, char *result, unsigned maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
		       atomic_read(&m->pg_init_count));
	else {
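		/*
		 * Feature-argument count: each boolean feature contributes
		 * one argument, and each feature that takes a value
		 * contributes two (keyword plus value).
		 */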
		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);

		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
			DMEMIT("retain_attached_hw_handler ");
		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
			switch (m->queue_mode) {
			case DM_TYPE_BIO_BASED:
				DMEMIT("queue_mode bio ");
				break;
			case DM_TYPE_MQ_REQUEST_BASED:
				DMEMIT("queue_mode mq ");
				break;
			}
		}
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

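	/*
	 * Report the group that will be used next: an explicit next_pg
	 * request wins, then the currently active group, else default to
	 * the first group (if any groups exist at all).
	 */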
	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);
}

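/*
 * Userspace drives the transitions below through the device-mapper
 * message interface, e.g. (the device name "mpatha" and the path device
 * "8:32" are purely illustrative):
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 switch_group 2
 */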
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, true, false);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, false, false);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], true);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], false);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received: %s", argv[0]);
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

static int multipath_prepare_ioctl(struct dm_target *ti,
		struct block_device **bdev, fmode_t *mode)
{
	struct multipath *m = ti->private;
	struct pgpath *current_pgpath;
	int r;

	current_pgpath = lockless_dereference(m->current_pgpath);
	if (!current_pgpath)
		current_pgpath = choose_pgpath(m, 0);

	if (current_pgpath) {
		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
			*bdev = current_pgpath->path.dev->bdev;
			*mode = current_pgpath->path.dev->mode;
			r = 0;
		} else {
			/* pg_init has not started or completed */
			r = -ENOTCONN;
		}
	} else {
		/* No path is available */
		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
			r = -ENOTCONN;
		else
			r = -EIO;
	}

	if (r == -ENOTCONN) {
		if (!lockless_dereference(m->current_pg)) {
			/* Path status changed, redo selection */
			(void) choose_pgpath(m, 0);
		}
		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
			pg_init_all_paths(m);
		dm_table_run_md_queue_async(m->ti->table);
		process_queued_io_list(m);
	}

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return r;
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying
 * devices are busy (so even if we mapped I/Os now, they would wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	bool busy = false, has_active = false;
	struct multipath *m = ti->private;
	struct priority_group *pg, *next_pg;
	struct pgpath *pgpath;

	/* pg_init in progress */
	if (atomic_read(&m->pg_init_in_progress))
		return true;

	/* No paths available; for blk-mq, rely on the IO mapping to delay the requeue */
	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

	/* Guess which priority_group will be used at the next mapping time */
	pg = lockless_dereference(m->current_pg);
	next_pg = lockless_dereference(m->next_pg);
	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
		pg = next_pg;

	if (!pg) {
		/*
		 * We don't know which pg will be used at the next mapping time.
		 * We don't call choose_pgpath() here to avoid triggering
		 * pg_init just because of a busy check.
		 * So we don't know whether the underlying devices we would be
		 * using at the next mapping time are busy or not. Just try mapping.
		 */
		return busy;
	}

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it, so we consider such a pg as not busy.
	 */
	busy = true;
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}

	if (!has_active) {
		/*
		 * No active path in this pg, so this pg won't be used and
		 * current_pg will be changed at the next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = false;
	}

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 12, 0},
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.clone_and_map_rq = multipath_clone_and_map,
	.release_clone_rq = multipath_release_clone,
	.rq_end_io = multipath_end_io,
	.map = multipath_map_bio,
	.end_io = multipath_end_io_bio,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.prepare_ioctl = multipath_prepare_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_mpath_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("request-based register failed %d", r);
		r = -EINVAL;
		goto bad_register_target;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		r = -ENOMEM;
		goto bad_alloc_kmultipathd;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading the existing workqueue, which would
	 * also create a bottleneck in the path of storage hardware
	 * device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		r = -ENOMEM;
		goto bad_alloc_kmpath_handlerd;
	}

	return 0;

bad_alloc_kmpath_handlerd:
	destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
	dm_unregister_target(&multipath_target);
bad_register_target:
	kmem_cache_destroy(_mpio_cache);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");