Linux v3.1: drivers/md/dm-mpath.c
   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9
  10#include "dm-path-selector.h"
  11#include "dm-uevent.h"
  12
  13#include <linux/ctype.h>
  14#include <linux/init.h>
  15#include <linux/mempool.h>
  16#include <linux/module.h>
  17#include <linux/pagemap.h>
  18#include <linux/slab.h>
  19#include <linux/time.h>
  20#include <linux/workqueue.h>
  21#include <scsi/scsi_dh.h>
  22#include <linux/atomic.h>
  23
  24#define DM_MSG_PREFIX "multipath"
  25#define DM_PG_INIT_DELAY_MSECS 2000
  26#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
  27
  28/* Path properties */
  29struct pgpath {
  30	struct list_head list;
  31
  32	struct priority_group *pg;	/* Owning PG */
  33	unsigned is_active;		/* Path status */
  34	unsigned fail_count;		/* Cumulative failure count */
  35
  36	struct dm_path path;
  37	struct delayed_work activate_path;
  38};
  39
  40#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  41
  42/*
  43 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  44 * Each has a path selector which controls which path gets used.
  45 */
  46struct priority_group {
  47	struct list_head list;
  48
  49	struct multipath *m;		/* Owning multipath instance */
  50	struct path_selector ps;
  51
  52	unsigned pg_num;		/* Reference number */
  53	unsigned bypassed;		/* Temporarily bypass this PG? */
  54
  55	unsigned nr_pgpaths;		/* Number of paths in PG */
  56	struct list_head pgpaths;
  57};
  58
  59/* Multipath context */
  60struct multipath {
  61	struct list_head list;
  62	struct dm_target *ti;
  63
  64	spinlock_t lock;
  65
  66	const char *hw_handler_name;
  67	char *hw_handler_params;
  68
  69	unsigned nr_priority_groups;
  70	struct list_head priority_groups;
  71
  72	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  73
  74	unsigned pg_init_required;	/* pg_init needs calling? */
  75	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
  76	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */
  77
  78	unsigned nr_valid_paths;	/* Total number of usable paths */
  79	struct pgpath *current_pgpath;
  80	struct priority_group *current_pg;
  81	struct priority_group *next_pg;	/* Switch to this PG if set */
  82	unsigned repeat_count;		/* I/Os left before calling PS again */
  83
  84	unsigned queue_io;		/* Must we queue all I/O? */
  85	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
  86	unsigned saved_queue_if_no_path;/* Saved state during suspension */
  87	unsigned pg_init_retries;	/* Number of times to retry pg_init */
  88	unsigned pg_init_count;		/* Number of times pg_init called */
  89	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  90
  91	struct work_struct process_queued_ios;
  92	struct list_head queued_ios;
  93	unsigned queue_size;
  94
  95	struct work_struct trigger_event;
  96
  97	/*
  98	 * We must use a mempool of dm_mpath_io structs so that we
  99	 * can resubmit bios on error.
 100	 */
 101	mempool_t *mpio_pool;
 102
 103	struct mutex work_mutex;
 104};
 105
 106/*
 107 * Context information attached to each bio we process.
 108 */
 109struct dm_mpath_io {
 110	struct pgpath *pgpath;
 111	size_t nr_bytes;
 112};
 113
 114typedef int (*action_fn) (struct pgpath *pgpath);
 115
 116#define MIN_IOS 256	/* Mempool size */
 117
 118static struct kmem_cache *_mpio_cache;
 119
 120static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 121static void process_queued_ios(struct work_struct *work);
 122static void trigger_event(struct work_struct *work);
 123static void activate_path(struct work_struct *work);
 124
 125
 126/*-----------------------------------------------
 127 * Allocation routines
 128 *-----------------------------------------------*/
 129
 130static struct pgpath *alloc_pgpath(void)
 131{
 132	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 133
 134	if (pgpath) {
 135		pgpath->is_active = 1;
 136		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
 137	}
 138
 139	return pgpath;
 140}
 141
 142static void free_pgpath(struct pgpath *pgpath)
 143{
 144	kfree(pgpath);
 145}
 146
 147static struct priority_group *alloc_priority_group(void)
 148{
 149	struct priority_group *pg;
 150
 151	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 152
 153	if (pg)
 154		INIT_LIST_HEAD(&pg->pgpaths);
 155
 156	return pg;
 157}
 158
 159static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 160{
 161	struct pgpath *pgpath, *tmp;
 162	struct multipath *m = ti->private;
 163
 164	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 165		list_del(&pgpath->list);
 166		if (m->hw_handler_name)
 167			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 168		dm_put_device(ti, pgpath->path.dev);
 169		free_pgpath(pgpath);
 170	}
 171}
 172
 173static void free_priority_group(struct priority_group *pg,
 174				struct dm_target *ti)
 175{
 176	struct path_selector *ps = &pg->ps;
 177
 178	if (ps->type) {
 179		ps->type->destroy(ps);
 180		dm_put_path_selector(ps->type);
 181	}
 182
 183	free_pgpaths(&pg->pgpaths, ti);
 184	kfree(pg);
 185}
 186
 187static struct multipath *alloc_multipath(struct dm_target *ti)
 188{
 189	struct multipath *m;
 190
 191	m = kzalloc(sizeof(*m), GFP_KERNEL);
 192	if (m) {
 193		INIT_LIST_HEAD(&m->priority_groups);
 194		INIT_LIST_HEAD(&m->queued_ios);
 195		spin_lock_init(&m->lock);
 196		m->queue_io = 1;
 197		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 198		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 199		INIT_WORK(&m->trigger_event, trigger_event);
 200		init_waitqueue_head(&m->pg_init_wait);
 201		mutex_init(&m->work_mutex);
 202		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 203		if (!m->mpio_pool) {
 204			kfree(m);
 205			return NULL;
 206		}
 207		m->ti = ti;
 208		ti->private = m;
 209	}
 210
 211	return m;
 212}
 213
 214static void free_multipath(struct multipath *m)
 215{
 216	struct priority_group *pg, *tmp;
 217
 218	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 219		list_del(&pg->list);
 220		free_priority_group(pg, m->ti);
 221	}
 222
 223	kfree(m->hw_handler_name);
 224	kfree(m->hw_handler_params);
 225	mempool_destroy(m->mpio_pool);
 226	kfree(m);
 227}
 228
 229
 230/*-----------------------------------------------
 231 * Path selection
 232 *-----------------------------------------------*/
 233
 234static void __pg_init_all_paths(struct multipath *m)
 235{
 236	struct pgpath *pgpath;
 237	unsigned long pg_init_delay = 0;
 238
 239	m->pg_init_count++;
 240	m->pg_init_required = 0;
 241	if (m->pg_init_delay_retry)
 242		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 243						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 244	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 245		/* Skip failed paths */
 246		if (!pgpath->is_active)
 247			continue;
 248		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 249				       pg_init_delay))
 250			m->pg_init_in_progress++;
 251	}
 252}
 253
 254static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 255{
 256	m->current_pg = pgpath->pg;
 257
 258	/* Must we initialise the PG first, and queue I/O till it's ready? */
 259	if (m->hw_handler_name) {
 260		m->pg_init_required = 1;
 261		m->queue_io = 1;
 262	} else {
 263		m->pg_init_required = 0;
 264		m->queue_io = 0;
 265	}
 266
 267	m->pg_init_count = 0;
 268}
 269
 270static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
 271			       size_t nr_bytes)
 272{
 273	struct dm_path *path;
 274
 275	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
 276	if (!path)
 277		return -ENXIO;
 278
 279	m->current_pgpath = path_to_pgpath(path);
 280
 281	if (m->current_pg != pg)
 282		__switch_pg(m, m->current_pgpath);
 283
 284	return 0;
 285}
 286
 287static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 288{
 289	struct priority_group *pg;
 290	unsigned bypassed = 1;
 291
 292	if (!m->nr_valid_paths)
 293		goto failed;
 294
 295	/* Were we instructed to switch PG? */
 296	if (m->next_pg) {
 297		pg = m->next_pg;
 298		m->next_pg = NULL;
 299		if (!__choose_path_in_pg(m, pg, nr_bytes))
 300			return;
 301	}
 302
 303	/* Don't change PG until it has no remaining paths */
 304	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
 305		return;
 306
 307	/*
 308	 * Loop through priority groups until we find a valid path.
 309	 * First time we skip PGs marked 'bypassed'.
 310	 * Second time we only try the ones we skipped.
 311	 */
 312	do {
 313		list_for_each_entry(pg, &m->priority_groups, list) {
 314			if (pg->bypassed == bypassed)
 315				continue;
 316			if (!__choose_path_in_pg(m, pg, nr_bytes))
 317				return;
 318		}
 319	} while (bypassed--);
 320
 321failed:
 322	m->current_pgpath = NULL;
 323	m->current_pg = NULL;
 324}
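/*
 * Note on the loop above: do { ... } while (bypassed--) runs the PG scan
 * twice. On the first pass (bypassed == 1) groups flagged 'bypassed' are
 * skipped; on the second pass (bypassed == 0) only those previously
 * skipped groups are tried, as a last resort.
 */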
 325
 326/*
 327 * Check whether bios must be queued in the device-mapper core rather
 328 * than here in the target.
 329 *
 330 * m->lock must be held on entry.
 331 *
 332 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 333 * same value then we are not between multipath_presuspend()
 334 * and multipath_resume() calls and we have no need to check
 335 * for the DMF_NOFLUSH_SUSPENDING flag.
 336 */
 337static int __must_push_back(struct multipath *m)
 338{
 339	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
 340		dm_noflush_suspending(m->ti));
 341}
 342
 343static int map_io(struct multipath *m, struct request *clone,
 344		  struct dm_mpath_io *mpio, unsigned was_queued)
 345{
 346	int r = DM_MAPIO_REMAPPED;
 347	size_t nr_bytes = blk_rq_bytes(clone);
 348	unsigned long flags;
 349	struct pgpath *pgpath;
 350	struct block_device *bdev;
 351
 352	spin_lock_irqsave(&m->lock, flags);
 353
 354	/* Do we need to select a new pgpath? */
 355	if (!m->current_pgpath ||
 356	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
 357		__choose_pgpath(m, nr_bytes);
 358
 359	pgpath = m->current_pgpath;
 360
 361	if (was_queued)
 362		m->queue_size--;
 363
 364	if ((pgpath && m->queue_io) ||
 365	    (!pgpath && m->queue_if_no_path)) {
 366		/* Queue for the daemon to resubmit */
 367		list_add_tail(&clone->queuelist, &m->queued_ios);
 368		m->queue_size++;
 369		if ((m->pg_init_required && !m->pg_init_in_progress) ||
 370		    !m->queue_io)
 371			queue_work(kmultipathd, &m->process_queued_ios);
 372		pgpath = NULL;
 373		r = DM_MAPIO_SUBMITTED;
 374	} else if (pgpath) {
 375		bdev = pgpath->path.dev->bdev;
 376		clone->q = bdev_get_queue(bdev);
 377		clone->rq_disk = bdev->bd_disk;
 378	} else if (__must_push_back(m))
 379		r = DM_MAPIO_REQUEUE;
 380	else
 381		r = -EIO;	/* Failed */
 382
 383	mpio->pgpath = pgpath;
 384	mpio->nr_bytes = nr_bytes;
 385
 386	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
 387		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
 388					      nr_bytes);
 389
 390	spin_unlock_irqrestore(&m->lock, flags);
 391
 392	return r;
 393}
 394
 395/*
 396 * If we run out of usable paths, should we queue I/O or error it?
 397 */
 398static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 399			    unsigned save_old_value)
 400{
 401	unsigned long flags;
 402
 403	spin_lock_irqsave(&m->lock, flags);
 404
 405	if (save_old_value)
 406		m->saved_queue_if_no_path = m->queue_if_no_path;
 407	else
 408		m->saved_queue_if_no_path = queue_if_no_path;
 409	m->queue_if_no_path = queue_if_no_path;
 410	if (!m->queue_if_no_path && m->queue_size)
 411		queue_work(kmultipathd, &m->process_queued_ios);
 412
 413	spin_unlock_irqrestore(&m->lock, flags);
 414
 415	return 0;
 416}
 417
 418/*-----------------------------------------------------------------
 419 * The multipath daemon is responsible for resubmitting queued ios.
 420 *---------------------------------------------------------------*/
 421
 422static void dispatch_queued_ios(struct multipath *m)
 423{
 424	int r;
 425	unsigned long flags;
 426	struct dm_mpath_io *mpio;
 427	union map_info *info;
 428	struct request *clone, *n;
 429	LIST_HEAD(cl);
 430
 431	spin_lock_irqsave(&m->lock, flags);
 432	list_splice_init(&m->queued_ios, &cl);
 433	spin_unlock_irqrestore(&m->lock, flags);
 434
 435	list_for_each_entry_safe(clone, n, &cl, queuelist) {
 436		list_del_init(&clone->queuelist);
 437
 438		info = dm_get_rq_mapinfo(clone);
 439		mpio = info->ptr;
 440
 441		r = map_io(m, clone, mpio, 1);
 442		if (r < 0) {
 443			mempool_free(mpio, m->mpio_pool);
 444			dm_kill_unmapped_request(clone, r);
 445		} else if (r == DM_MAPIO_REMAPPED)
 446			dm_dispatch_request(clone);
 447		else if (r == DM_MAPIO_REQUEUE) {
 448			mempool_free(mpio, m->mpio_pool);
 449			dm_requeue_unmapped_request(clone);
 450		}
 451	}
 452}
 453
 454static void process_queued_ios(struct work_struct *work)
 455{
 456	struct multipath *m =
 457		container_of(work, struct multipath, process_queued_ios);
 458	struct pgpath *pgpath = NULL;
 459	unsigned must_queue = 1;
 460	unsigned long flags;
 461
 462	spin_lock_irqsave(&m->lock, flags);
 463
 464	if (!m->queue_size)
 465		goto out;
 466
 467	if (!m->current_pgpath)
 468		__choose_pgpath(m, 0);
 469
 470	pgpath = m->current_pgpath;
 471
 472	if ((pgpath && !m->queue_io) ||
 473	    (!pgpath && !m->queue_if_no_path))
 474		must_queue = 0;
 475
 476	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
 477		__pg_init_all_paths(m);
 478
 479out:
 480	spin_unlock_irqrestore(&m->lock, flags);
 481	if (!must_queue)
 482		dispatch_queued_ios(m);
 483}
 484
 485/*
 486 * An event is triggered whenever a path is taken out of use.
 487 * Includes path failure and PG bypass.
 488 */
 489static void trigger_event(struct work_struct *work)
 490{
 491	struct multipath *m =
 492		container_of(work, struct multipath, trigger_event);
 493
 494	dm_table_event(m->ti->table);
 495}
 496
 497/*-----------------------------------------------------------------
 498 * Constructor/argument parsing:
 499 * <#multipath feature args> [<arg>]*
 500 * <#hw_handler args> [hw_handler [<arg>]*]
 501 * <#priority groups>
 502 * <initial priority group>
 503 *     [<selector> <#selector args> [<arg>]*
 504 *      <#paths> <#per-path selector args>
 505 *         [<path> [<arg>]* ]+ ]+
 506 *---------------------------------------------------------------*/
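/*
 * Illustrative constructor string matching the grammar above (the
 * 1024-sector length and devices 8:16/8:32 are hypothetical):
 *
 *   0 1024 multipath 1 queue_if_no_path 0 1 1 \
 *       round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. one feature arg (queue_if_no_path), no hardware handler, one
 * priority group which is also the initial group, the round-robin
 * selector with no selector args, and two paths taking one per-path
 * selector arg each (a repeat count of 1000).
 */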
 507static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 508			       struct dm_target *ti)
 509{
 510	int r;
 511	struct path_selector_type *pst;
 512	unsigned ps_argc;
 513
 514	static struct dm_arg _args[] = {
 515		{0, 1024, "invalid number of path selector args"},
 516	};
 517
 518	pst = dm_get_path_selector(dm_shift_arg(as));
 519	if (!pst) {
 520		ti->error = "unknown path selector type";
 521		return -EINVAL;
 522	}
 523
 524	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 525	if (r) {
 526		dm_put_path_selector(pst);
 527		return -EINVAL;
 528	}
 529
 530	r = pst->create(&pg->ps, ps_argc, as->argv);
 531	if (r) {
 532		dm_put_path_selector(pst);
 533		ti->error = "path selector constructor failed";
 534		return r;
 535	}
 536
 537	pg->ps.type = pst;
 538	dm_consume_args(as, ps_argc);
 539
 540	return 0;
 541}
 542
 543static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 544			       struct dm_target *ti)
 545{
 546	int r;
 547	struct pgpath *p;
 548	struct multipath *m = ti->private;
 549
 550	/* we need at least a path arg */
 551	if (as->argc < 1) {
 552		ti->error = "no device given";
 553		return ERR_PTR(-EINVAL);
 554	}
 555
 556	p = alloc_pgpath();
 557	if (!p)
 558		return ERR_PTR(-ENOMEM);
 559
 560	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 561			  &p->path.dev);
 562	if (r) {
 563		ti->error = "error getting device";
 564		goto bad;
 565	}
 566
 567	if (m->hw_handler_name) {
 568		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
 569
 570		r = scsi_dh_attach(q, m->hw_handler_name);
 571		if (r == -EBUSY) {
 572			/*
 573			 * Already attached to different hw_handler,
 574			 * try to reattach with correct one.
 575			 */
 576			scsi_dh_detach(q);
 577			r = scsi_dh_attach(q, m->hw_handler_name);
 578		}
 579
 580		if (r < 0) {
 581			ti->error = "error attaching hardware handler";
 582			dm_put_device(ti, p->path.dev);
 583			goto bad;
 584		}
 585
 586		if (m->hw_handler_params) {
 587			r = scsi_dh_set_params(q, m->hw_handler_params);
 588			if (r < 0) {
 589				ti->error = "unable to set hardware "
 590							"handler parameters";
 591				scsi_dh_detach(q);
 592				dm_put_device(ti, p->path.dev);
 593				goto bad;
 594			}
 595		}
 596	}
 597
 598	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 599	if (r) {
 600		dm_put_device(ti, p->path.dev);
 601		goto bad;
 602	}
 603
 604	return p;
 605
 606 bad:
 607	free_pgpath(p);
 608	return ERR_PTR(r);
 609}
 610
 611static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 612						   struct multipath *m)
 613{
 614	static struct dm_arg _args[] = {
 615		{1, 1024, "invalid number of paths"},
 616		{0, 1024, "invalid number of selector args"}
 617	};
 618
 619	int r;
 620	unsigned i, nr_selector_args, nr_args;
 621	struct priority_group *pg;
 622	struct dm_target *ti = m->ti;
 623
 624	if (as->argc < 2) {
 625		as->argc = 0;
 626		ti->error = "not enough priority group arguments";
 627		return ERR_PTR(-EINVAL);
 628	}
 629
 630	pg = alloc_priority_group();
 631	if (!pg) {
 632		ti->error = "couldn't allocate priority group";
 633		return ERR_PTR(-ENOMEM);
 634	}
 635	pg->m = m;
 636
 637	r = parse_path_selector(as, pg, ti);
 638	if (r)
 639		goto bad;
 640
 641	/*
 642	 * read the paths
 643	 */
 644	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 645	if (r)
 646		goto bad;
 647
 648	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 649	if (r)
 650		goto bad;
 651
 652	nr_args = 1 + nr_selector_args;
 653	for (i = 0; i < pg->nr_pgpaths; i++) {
 654		struct pgpath *pgpath;
 655		struct dm_arg_set path_args;
 656
 657		if (as->argc < nr_args) {
 658			ti->error = "not enough path parameters";
 659			r = -EINVAL;
 660			goto bad;
 661		}
 662
 663		path_args.argc = nr_args;
 664		path_args.argv = as->argv;
 665
 666		pgpath = parse_path(&path_args, &pg->ps, ti);
 667		if (IS_ERR(pgpath)) {
 668			r = PTR_ERR(pgpath);
 669			goto bad;
 670		}
 671
 672		pgpath->pg = pg;
 673		list_add_tail(&pgpath->list, &pg->pgpaths);
 674		dm_consume_args(as, nr_args);
 675	}
 676
 677	return pg;
 678
 679 bad:
 680	free_priority_group(pg, ti);
 681	return ERR_PTR(r);
 682}
 683
 684static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 685{
 686	unsigned hw_argc;
 687	int ret;
 688	struct dm_target *ti = m->ti;
 689
 690	static struct dm_arg _args[] = {
 691		{0, 1024, "invalid number of hardware handler args"},
 692	};
 693
 694	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
 695		return -EINVAL;
 696
 697	if (!hw_argc)
 698		return 0;
 699
 700	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
 701	request_module("scsi_dh_%s", m->hw_handler_name);
 702	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
 703		ti->error = "unknown hardware handler type";
 704		ret = -EINVAL;
 705		goto fail;
 706	}
 707
 708	if (hw_argc > 1) {
 709		char *p;
 710		int i, j, len = 4;
 711
 712		for (i = 0; i <= hw_argc - 2; i++)
 713			len += strlen(as->argv[i]) + 1;
 714		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
 715		if (!p) {
 716			ti->error = "memory allocation failed";
 717			ret = -ENOMEM;
 718			goto fail;
 719		}
 720		j = sprintf(p, "%d", hw_argc - 1);
 721		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
 722			j = sprintf(p, "%s", as->argv[i]);
 723	}
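	/*
	 * The block above leaves hw_handler_params laid out as
	 * NUL-separated strings - "<argc>\0<arg1>\0<arg2>..." - because
	 * each sprintf() NUL-terminates its output and p is then
	 * advanced past that terminator (p += j + 1).
	 */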
 724	dm_consume_args(as, hw_argc - 1);
 725
 726	return 0;
 727fail:
 728	kfree(m->hw_handler_name);
 729	m->hw_handler_name = NULL;
 730	return ret;
 731}
 732
 733static int parse_features(struct dm_arg_set *as, struct multipath *m)
 734{
 735	int r;
 736	unsigned argc;
 737	struct dm_target *ti = m->ti;
 738	const char *arg_name;
 739
 740	static struct dm_arg _args[] = {
 741		{0, 5, "invalid number of feature args"},
 742		{1, 50, "pg_init_retries must be between 1 and 50"},
 743		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
 744	};
 745
 746	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 747	if (r)
 748		return -EINVAL;
 749
 750	if (!argc)
 751		return 0;
 752
 753	do {
 754		arg_name = dm_shift_arg(as);
 755		argc--;
 756
 757		if (!strcasecmp(arg_name, "queue_if_no_path")) {
 758			r = queue_if_no_path(m, 1, 0);
 759			continue;
 760		}
 761
 762		if (!strcasecmp(arg_name, "pg_init_retries") &&
 763		    (argc >= 1)) {
 764			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
 765			argc--;
 766			continue;
 767		}
 768
 769		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
 770		    (argc >= 1)) {
 771			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
 772			argc--;
 773			continue;
 774		}
 775
 776		ti->error = "Unrecognised multipath feature request";
 777		r = -EINVAL;
 778	} while (argc && !r);
 779
 780	return r;
 781}
 782
 783static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 784			 char **argv)
 785{
 786	/* target arguments */
 787	static struct dm_arg _args[] = {
 788		{0, 1024, "invalid number of priority groups"},
 789		{0, 1024, "invalid initial priority group number"},
 790	};
 791
 792	int r;
 793	struct multipath *m;
 794	struct dm_arg_set as;
 795	unsigned pg_count = 0;
 796	unsigned next_pg_num;
 797
 798	as.argc = argc;
 799	as.argv = argv;
 800
 801	m = alloc_multipath(ti);
 802	if (!m) {
 803		ti->error = "can't allocate multipath";
 804		return -EINVAL;
 805	}
 806
 807	r = parse_features(&as, m);
 808	if (r)
 809		goto bad;
 810
 811	r = parse_hw_handler(&as, m);
 812	if (r)
 813		goto bad;
 814
 815	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
 816	if (r)
 817		goto bad;
 818
 819	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
 820	if (r)
 821		goto bad;
 822
 823	if ((!m->nr_priority_groups && next_pg_num) ||
 824	    (m->nr_priority_groups && !next_pg_num)) {
 825		ti->error = "invalid initial priority group";
 826		r = -EINVAL;
 827		goto bad;
 828	}
 829
 830	/* parse the priority groups */
 831	while (as.argc) {
 832		struct priority_group *pg;
 833
 834		pg = parse_priority_group(&as, m);
 835		if (IS_ERR(pg)) {
 836			r = PTR_ERR(pg);
 837			goto bad;
 838		}
 839
 840		m->nr_valid_paths += pg->nr_pgpaths;
 841		list_add_tail(&pg->list, &m->priority_groups);
 842		pg_count++;
 843		pg->pg_num = pg_count;
 844		if (!--next_pg_num)
 845			m->next_pg = pg;
 846	}
 847
 848	if (pg_count != m->nr_priority_groups) {
 849		ti->error = "priority group count mismatch";
 850		r = -EINVAL;
 851		goto bad;
 852	}
 853
 854	ti->num_flush_requests = 1;
 855	ti->num_discard_requests = 1;
 856
 857	return 0;
 858
 859 bad:
 860	free_multipath(m);
 861	return r;
 862}
 863
 864static void multipath_wait_for_pg_init_completion(struct multipath *m)
 865{
 866	DECLARE_WAITQUEUE(wait, current);
 867	unsigned long flags;
 868
 869	add_wait_queue(&m->pg_init_wait, &wait);
 870
 871	while (1) {
 872		set_current_state(TASK_UNINTERRUPTIBLE);
 873
 874		spin_lock_irqsave(&m->lock, flags);
 875		if (!m->pg_init_in_progress) {
 876			spin_unlock_irqrestore(&m->lock, flags);
 877			break;
 878		}
 879		spin_unlock_irqrestore(&m->lock, flags);
 880
 881		io_schedule();
 882	}
 883	set_current_state(TASK_RUNNING);
 884
 885	remove_wait_queue(&m->pg_init_wait, &wait);
 886}
 887
 888static void flush_multipath_work(struct multipath *m)
 889{
 890	flush_workqueue(kmpath_handlerd);
 891	multipath_wait_for_pg_init_completion(m);
 892	flush_workqueue(kmultipathd);
 893	flush_work_sync(&m->trigger_event);
 894}
 895
 896static void multipath_dtr(struct dm_target *ti)
 897{
 898	struct multipath *m = ti->private;
 899
 900	flush_multipath_work(m);
 901	free_multipath(m);
 902}
 903
 904/*
 905 * Map cloned requests
 906 */
 907static int multipath_map(struct dm_target *ti, struct request *clone,
 908			 union map_info *map_context)
 909{
 910	int r;
 911	struct dm_mpath_io *mpio;
 912	struct multipath *m = (struct multipath *) ti->private;
 913
 914	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
 915	if (!mpio)
 916		/* ENOMEM, requeue */
 917		return DM_MAPIO_REQUEUE;
 918	memset(mpio, 0, sizeof(*mpio));
 919
 920	map_context->ptr = mpio;
 921	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 922	r = map_io(m, clone, mpio, 0);
 923	if (r < 0 || r == DM_MAPIO_REQUEUE)
 924		mempool_free(mpio, m->mpio_pool);
 925
 926	return r;
 927}
 928
 929/*
 930 * Take a path out of use.
 931 */
 932static int fail_path(struct pgpath *pgpath)
 933{
 934	unsigned long flags;
 935	struct multipath *m = pgpath->pg->m;
 936
 937	spin_lock_irqsave(&m->lock, flags);
 938
 939	if (!pgpath->is_active)
 940		goto out;
 941
 942	DMWARN("Failing path %s.", pgpath->path.dev->name);
 943
 944	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
 945	pgpath->is_active = 0;
 946	pgpath->fail_count++;
 947
 948	m->nr_valid_paths--;
 949
 950	if (pgpath == m->current_pgpath)
 951		m->current_pgpath = NULL;
 952
 953	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 954		      pgpath->path.dev->name, m->nr_valid_paths);
 955
 956	schedule_work(&m->trigger_event);
 957
 958out:
 959	spin_unlock_irqrestore(&m->lock, flags);
 960
 961	return 0;
 962}
 963
 964/*
 965 * Reinstate a previously-failed path
 966 */
 967static int reinstate_path(struct pgpath *pgpath)
 968{
 969	int r = 0;
 970	unsigned long flags;
 971	struct multipath *m = pgpath->pg->m;
 972
 973	spin_lock_irqsave(&m->lock, flags);
 974
 975	if (pgpath->is_active)
 976		goto out;
 977
 978	if (!pgpath->pg->ps.type->reinstate_path) {
 979		DMWARN("Reinstate path not supported by path selector %s",
 980		       pgpath->pg->ps.type->name);
 981		r = -EINVAL;
 982		goto out;
 983	}
 984
 985	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
 986	if (r)
 987		goto out;
 988
 989	pgpath->is_active = 1;
 990
 991	if (!m->nr_valid_paths++ && m->queue_size) {
 992		m->current_pgpath = NULL;
 993		queue_work(kmultipathd, &m->process_queued_ios);
 994	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 995		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 996			m->pg_init_in_progress++;
 997	}
 998
 999	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1000		      pgpath->path.dev->name, m->nr_valid_paths);
1001
1002	schedule_work(&m->trigger_event);
1003
1004out:
1005	spin_unlock_irqrestore(&m->lock, flags);
1006
1007	return r;
1008}
1009
1010/*
1011 * Fail or reinstate all paths that match the provided struct dm_dev.
1012 */
1013static int action_dev(struct multipath *m, struct dm_dev *dev,
1014		      action_fn action)
1015{
1016	int r = -EINVAL;
1017	struct pgpath *pgpath;
1018	struct priority_group *pg;
1019
1020	list_for_each_entry(pg, &m->priority_groups, list) {
1021		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1022			if (pgpath->path.dev == dev)
1023				r = action(pgpath);
1024		}
1025	}
1026
1027	return r;
1028}
1029
1030/*
1031 * Temporarily try to avoid having to use the specified PG
1032 */
1033static void bypass_pg(struct multipath *m, struct priority_group *pg,
1034		      int bypassed)
1035{
1036	unsigned long flags;
1037
1038	spin_lock_irqsave(&m->lock, flags);
1039
1040	pg->bypassed = bypassed;
1041	m->current_pgpath = NULL;
1042	m->current_pg = NULL;
1043
1044	spin_unlock_irqrestore(&m->lock, flags);
1045
1046	schedule_work(&m->trigger_event);
1047}
1048
1049/*
1050 * Switch to using the specified PG from the next I/O that gets mapped
1051 */
1052static int switch_pg_num(struct multipath *m, const char *pgstr)
1053{
1054	struct priority_group *pg;
1055	unsigned pgnum;
1056	unsigned long flags;
1057
1058	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
1059	    (pgnum > m->nr_priority_groups)) {
1060		DMWARN("invalid PG number supplied to switch_pg_num");
1061		return -EINVAL;
1062	}
1063
1064	spin_lock_irqsave(&m->lock, flags);
1065	list_for_each_entry(pg, &m->priority_groups, list) {
1066		pg->bypassed = 0;
1067		if (--pgnum)
1068			continue;
1069
1070		m->current_pgpath = NULL;
1071		m->current_pg = NULL;
1072		m->next_pg = pg;
1073	}
1074	spin_unlock_irqrestore(&m->lock, flags);
1075
1076	schedule_work(&m->trigger_event);
1077	return 0;
1078}
1079
1080/*
1081 * Set/clear bypassed status of a PG.
1082 * PGs are numbered upwards from 1 in the order they were declared.
1083 */
1084static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
1085{
1086	struct priority_group *pg;
1087	unsigned pgnum;
1088
1089	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
1090	    (pgnum > m->nr_priority_groups)) {
1091		DMWARN("invalid PG number supplied to bypass_pg");
1092		return -EINVAL;
1093	}
1094
1095	list_for_each_entry(pg, &m->priority_groups, list) {
1096		if (!--pgnum)
1097			break;
1098	}
1099
1100	bypass_pg(m, pg, bypassed);
1101	return 0;
1102}
1103
1104/*
1105 * Should we retry pg_init immediately?
1106 */
1107static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1108{
1109	unsigned long flags;
1110	int limit_reached = 0;
1111
1112	spin_lock_irqsave(&m->lock, flags);
1113
1114	if (m->pg_init_count <= m->pg_init_retries)
1115		m->pg_init_required = 1;
1116	else
1117		limit_reached = 1;
1118
1119	spin_unlock_irqrestore(&m->lock, flags);
1120
1121	return limit_reached;
1122}
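/*
 * pg_init_count is bumped once per __pg_init_all_paths() round, so e.g.
 * a (hypothetical) table with feature args "2 pg_init_retries 5" lets
 * retryable pg_init errors re-drive initialisation until the count
 * exceeds 5; after that the failing path is taken out of use.
 */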
1123
1124static void pg_init_done(void *data, int errors)
1125{
1126	struct pgpath *pgpath = data;
1127	struct priority_group *pg = pgpath->pg;
1128	struct multipath *m = pg->m;
1129	unsigned long flags;
1130	unsigned delay_retry = 0;
1131
1132	/* device or driver problems */
1133	switch (errors) {
1134	case SCSI_DH_OK:
1135		break;
1136	case SCSI_DH_NOSYS:
1137		if (!m->hw_handler_name) {
1138			errors = 0;
1139			break;
1140		}
1141		DMERR("Could not failover the device: Handler scsi_dh_%s "
1142		      "Error %d.", m->hw_handler_name, errors);
1143		/*
1144		 * Fail path for now, so we do not ping pong
1145		 */
1146		fail_path(pgpath);
1147		break;
1148	case SCSI_DH_DEV_TEMP_BUSY:
1149		/*
1150		 * Probably doing something like FW upgrade on the
 1151		 * controller, so try the other pg.
1152		 */
1153		bypass_pg(m, pg, 1);
1154		break;
1155	case SCSI_DH_RETRY:
1156		/* Wait before retrying. */
1157		delay_retry = 1;
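		/* fall through */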
1158	case SCSI_DH_IMM_RETRY:
1159	case SCSI_DH_RES_TEMP_UNAVAIL:
1160		if (pg_init_limit_reached(m, pgpath))
1161			fail_path(pgpath);
1162		errors = 0;
1163		break;
1164	default:
1165		/*
1166		 * We probably do not want to fail the path for a device
1167		 * error, but this is what the old dm did. In future
1168		 * patches we can do more advanced handling.
1169		 */
1170		fail_path(pgpath);
1171	}
1172
1173	spin_lock_irqsave(&m->lock, flags);
1174	if (errors) {
1175		if (pgpath == m->current_pgpath) {
1176			DMERR("Could not failover device. Error %d.", errors);
1177			m->current_pgpath = NULL;
1178			m->current_pg = NULL;
1179		}
1180	} else if (!m->pg_init_required)
1181		pg->bypassed = 0;
1182
1183	if (--m->pg_init_in_progress)
 1184		/* Activations of other paths are still ongoing */
1185		goto out;
1186
1187	if (!m->pg_init_required)
1188		m->queue_io = 0;
1189
1190	m->pg_init_delay_retry = delay_retry;
1191	queue_work(kmultipathd, &m->process_queued_ios);
1192
1193	/*
1194	 * Wake up any thread waiting to suspend.
1195	 */
1196	wake_up(&m->pg_init_wait);
1197
1198out:
1199	spin_unlock_irqrestore(&m->lock, flags);
1200}
1201
1202static void activate_path(struct work_struct *work)
1203{
1204	struct pgpath *pgpath =
1205		container_of(work, struct pgpath, activate_path.work);
1206
1207	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1208				pg_init_done, pgpath);
1209}
1210
1211/*
1212 * end_io handling
1213 */
1214static int do_end_io(struct multipath *m, struct request *clone,
1215		     int error, struct dm_mpath_io *mpio)
1216{
1217	/*
1218	 * We don't queue any clone request inside the multipath target
1219	 * during end I/O handling, since those clone requests don't have
1220	 * bio clones.  If we queue them inside the multipath target,
1221	 * we need to make bio clones, that requires memory allocation.
1222	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1223	 *  don't have bio clones.)
1224	 * Instead of queueing the clone request here, we queue the original
1225	 * request into dm core, which will remake a clone request and
1226	 * clone bios for it and resubmit it later.
1227	 */
1228	int r = DM_ENDIO_REQUEUE;
1229	unsigned long flags;
1230
1231	if (!error && !clone->errors)
1232		return 0;	/* I/O complete */
1233
1234	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
1235		return error;
1236
1237	if (mpio->pgpath)
1238		fail_path(mpio->pgpath);
1239
1240	spin_lock_irqsave(&m->lock, flags);
1241	if (!m->nr_valid_paths) {
1242		if (!m->queue_if_no_path) {
1243			if (!__must_push_back(m))
1244				r = -EIO;
1245		} else {
1246			if (error == -EBADE)
1247				r = error;
1248		}
1249	}
1250	spin_unlock_irqrestore(&m->lock, flags);
1251
1252	return r;
1253}
1254
1255static int multipath_end_io(struct dm_target *ti, struct request *clone,
1256			    int error, union map_info *map_context)
1257{
1258	struct multipath *m = ti->private;
1259	struct dm_mpath_io *mpio = map_context->ptr;
1260	struct pgpath *pgpath = mpio->pgpath;
1261	struct path_selector *ps;
1262	int r;
1263
1264	r  = do_end_io(m, clone, error, mpio);
1265	if (pgpath) {
1266		ps = &pgpath->pg->ps;
1267		if (ps->type->end_io)
1268			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1269	}
1270	mempool_free(mpio, m->mpio_pool);
1271
1272	return r;
1273}
1274
1275/*
 1276 * Suspend can't complete until all the I/O is processed, so if
1277 * the last path fails we must error any remaining I/O.
1278 * Note that if the freeze_bdev fails while suspending, the
1279 * queue_if_no_path state is lost - userspace should reset it.
1280 */
1281static void multipath_presuspend(struct dm_target *ti)
1282{
1283	struct multipath *m = (struct multipath *) ti->private;
1284
1285	queue_if_no_path(m, 0, 1);
1286}
1287
1288static void multipath_postsuspend(struct dm_target *ti)
1289{
1290	struct multipath *m = ti->private;
1291
1292	mutex_lock(&m->work_mutex);
1293	flush_multipath_work(m);
1294	mutex_unlock(&m->work_mutex);
1295}
1296
1297/*
1298 * Restore the queue_if_no_path setting.
1299 */
1300static void multipath_resume(struct dm_target *ti)
1301{
1302	struct multipath *m = (struct multipath *) ti->private;
1303	unsigned long flags;
1304
1305	spin_lock_irqsave(&m->lock, flags);
1306	m->queue_if_no_path = m->saved_queue_if_no_path;
1307	spin_unlock_irqrestore(&m->lock, flags);
1308}
1309
1310/*
1311 * Info output has the following format:
1312 * num_multipath_feature_args [multipath_feature_args]*
1313 * num_handler_status_args [handler_status_args]*
1314 * num_groups init_group_number
1315 *            [A|D|E num_ps_status_args [ps_status_args]*
1316 *             num_paths num_selector_args
1317 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1318 *
1319 * Table output has the following format (identical to the constructor string):
1320 * num_feature_args [features_args]*
1321 * num_handler_args hw_handler [hw_handler_args]*
1322 * num_groups init_group_number
1323 *     [priority selector-name num_ps_args [ps_args]*
1324 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1325 */
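/*
 * Purely illustrative STATUSTYPE_INFO output for a hypothetical two-path
 * round-robin map with no hardware handler:
 *
 *   2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * i.e. queue_size 0 and pg_init_count 0, no handler status args, one
 * group (initial and active, state 'A') holding two active paths, each
 * with fail_count 0.
 */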
1326static int multipath_status(struct dm_target *ti, status_type_t type,
1327			    char *result, unsigned int maxlen)
1328{
1329	int sz = 0;
1330	unsigned long flags;
1331	struct multipath *m = (struct multipath *) ti->private;
1332	struct priority_group *pg;
1333	struct pgpath *p;
1334	unsigned pg_num;
1335	char state;
1336
1337	spin_lock_irqsave(&m->lock, flags);
1338
1339	/* Features */
1340	if (type == STATUSTYPE_INFO)
1341		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
1342	else {
1343		DMEMIT("%u ", m->queue_if_no_path +
1344			      (m->pg_init_retries > 0) * 2 +
1345			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
1346		if (m->queue_if_no_path)
1347			DMEMIT("queue_if_no_path ");
1348		if (m->pg_init_retries)
1349			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1350		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1351			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1352	}
1353
1354	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1355		DMEMIT("0 ");
1356	else
1357		DMEMIT("1 %s ", m->hw_handler_name);
1358
1359	DMEMIT("%u ", m->nr_priority_groups);
1360
1361	if (m->next_pg)
1362		pg_num = m->next_pg->pg_num;
1363	else if (m->current_pg)
1364		pg_num = m->current_pg->pg_num;
1365	else
1366		pg_num = (m->nr_priority_groups ? 1 : 0);
1367
1368	DMEMIT("%u ", pg_num);
1369
1370	switch (type) {
1371	case STATUSTYPE_INFO:
1372		list_for_each_entry(pg, &m->priority_groups, list) {
1373			if (pg->bypassed)
1374				state = 'D';	/* Disabled */
1375			else if (pg == m->current_pg)
1376				state = 'A';	/* Currently Active */
1377			else
1378				state = 'E';	/* Enabled */
1379
1380			DMEMIT("%c ", state);
1381
1382			if (pg->ps.type->status)
1383				sz += pg->ps.type->status(&pg->ps, NULL, type,
1384							  result + sz,
1385							  maxlen - sz);
1386			else
1387				DMEMIT("0 ");
1388
1389			DMEMIT("%u %u ", pg->nr_pgpaths,
1390			       pg->ps.type->info_args);
1391
1392			list_for_each_entry(p, &pg->pgpaths, list) {
1393				DMEMIT("%s %s %u ", p->path.dev->name,
1394				       p->is_active ? "A" : "F",
1395				       p->fail_count);
1396				if (pg->ps.type->status)
1397					sz += pg->ps.type->status(&pg->ps,
1398					      &p->path, type, result + sz,
1399					      maxlen - sz);
1400			}
1401		}
1402		break;
1403
1404	case STATUSTYPE_TABLE:
1405		list_for_each_entry(pg, &m->priority_groups, list) {
1406			DMEMIT("%s ", pg->ps.type->name);
1407
1408			if (pg->ps.type->status)
1409				sz += pg->ps.type->status(&pg->ps, NULL, type,
1410							  result + sz,
1411							  maxlen - sz);
1412			else
1413				DMEMIT("0 ");
1414
1415			DMEMIT("%u %u ", pg->nr_pgpaths,
1416			       pg->ps.type->table_args);
1417
1418			list_for_each_entry(p, &pg->pgpaths, list) {
1419				DMEMIT("%s ", p->path.dev->name);
1420				if (pg->ps.type->status)
1421					sz += pg->ps.type->status(&pg->ps,
1422					      &p->path, type, result + sz,
1423					      maxlen - sz);
1424			}
1425		}
1426		break;
1427	}
1428
1429	spin_unlock_irqrestore(&m->lock, flags);
1430
1431	return 0;
1432}
1433
1434static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1435{
1436	int r = -EINVAL;
1437	struct dm_dev *dev;
1438	struct multipath *m = (struct multipath *) ti->private;
1439	action_fn action;
1440
1441	mutex_lock(&m->work_mutex);
1442
1443	if (dm_suspended(ti)) {
1444		r = -EBUSY;
1445		goto out;
1446	}
1447
1448	if (argc == 1) {
1449		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1450			r = queue_if_no_path(m, 1, 0);
1451			goto out;
1452		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1453			r = queue_if_no_path(m, 0, 0);
1454			goto out;
1455		}
1456	}
1457
1458	if (argc != 2) {
1459		DMWARN("Unrecognised multipath message received.");
1460		goto out;
1461	}
1462
1463	if (!strcasecmp(argv[0], "disable_group")) {
1464		r = bypass_pg_num(m, argv[1], 1);
1465		goto out;
1466	} else if (!strcasecmp(argv[0], "enable_group")) {
1467		r = bypass_pg_num(m, argv[1], 0);
1468		goto out;
1469	} else if (!strcasecmp(argv[0], "switch_group")) {
1470		r = switch_pg_num(m, argv[1]);
1471		goto out;
1472	} else if (!strcasecmp(argv[0], "reinstate_path"))
1473		action = reinstate_path;
1474	else if (!strcasecmp(argv[0], "fail_path"))
1475		action = fail_path;
1476	else {
1477		DMWARN("Unrecognised multipath message received.");
1478		goto out;
1479	}
1480
1481	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1482	if (r) {
1483		DMWARN("message: error getting device %s",
1484		       argv[1]);
1485		goto out;
1486	}
1487
1488	r = action_dev(m, dev, action);
1489
1490	dm_put_device(ti, dev);
1491
1492out:
1493	mutex_unlock(&m->work_mutex);
1494	return r;
1495}
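/*
 * These messages are normally sent with dmsetup, e.g. (hypothetical map
 * name and device):
 *
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 queue_if_no_path
 */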
1496
1497static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1498			   unsigned long arg)
1499{
1500	struct multipath *m = (struct multipath *) ti->private;
1501	struct block_device *bdev = NULL;
1502	fmode_t mode = 0;
1503	unsigned long flags;
1504	int r = 0;
1505
1506	spin_lock_irqsave(&m->lock, flags);
1507
1508	if (!m->current_pgpath)
1509		__choose_pgpath(m, 0);
1510
1511	if (m->current_pgpath) {
1512		bdev = m->current_pgpath->path.dev->bdev;
1513		mode = m->current_pgpath->path.dev->mode;
1514	}
1515
1516	if (m->queue_io)
1517		r = -EAGAIN;
1518	else if (!bdev)
1519		r = -EIO;
1520
1521	spin_unlock_irqrestore(&m->lock, flags);
1522
1523	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1524}
1525
1526static int multipath_iterate_devices(struct dm_target *ti,
1527				     iterate_devices_callout_fn fn, void *data)
1528{
1529	struct multipath *m = ti->private;
1530	struct priority_group *pg;
1531	struct pgpath *p;
1532	int ret = 0;
1533
1534	list_for_each_entry(pg, &m->priority_groups, list) {
1535		list_for_each_entry(p, &pg->pgpaths, list) {
1536			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1537			if (ret)
1538				goto out;
1539		}
1540	}
1541
1542out:
1543	return ret;
1544}
1545
1546static int __pgpath_busy(struct pgpath *pgpath)
1547{
1548	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1549
1550	return dm_underlying_device_busy(q);
1551}
1552
1553/*
 1554 * We return "busy" only when we can map I/Os but underlying devices
1555 * are busy (so even if we map I/Os now, the I/Os will wait on
1556 * the underlying queue).
1557 * In other words, if we want to kill I/Os or queue them inside us
1558 * due to map unavailability, we don't return "busy".  Otherwise,
1559 * dm core won't give us the I/Os and we can't do what we want.
1560 */
1561static int multipath_busy(struct dm_target *ti)
1562{
1563	int busy = 0, has_active = 0;
1564	struct multipath *m = ti->private;
1565	struct priority_group *pg;
1566	struct pgpath *pgpath;
1567	unsigned long flags;
1568
1569	spin_lock_irqsave(&m->lock, flags);
1570
1571	/* Guess which priority_group will be used at next mapping time */
1572	if (unlikely(!m->current_pgpath && m->next_pg))
1573		pg = m->next_pg;
1574	else if (likely(m->current_pg))
1575		pg = m->current_pg;
1576	else
1577		/*
1578		 * We don't know which pg will be used at next mapping time.
 1579		 * We don't call __choose_pgpath() here to avoid triggering
1580		 * pg_init just by busy checking.
1581		 * So we don't know whether underlying devices we will be using
1582		 * at next mapping time are busy or not. Just try mapping.
1583		 */
1584		goto out;
1585
1586	/*
1587	 * If there is one non-busy active path at least, the path selector
1588	 * will be able to select it. So we consider such a pg as not busy.
1589	 */
1590	busy = 1;
1591	list_for_each_entry(pgpath, &pg->pgpaths, list)
1592		if (pgpath->is_active) {
1593			has_active = 1;
1594
1595			if (!__pgpath_busy(pgpath)) {
1596				busy = 0;
1597				break;
1598			}
1599		}
1600
1601	if (!has_active)
1602		/*
1603		 * No active path in this pg, so this pg won't be used and
1604		 * the current_pg will be changed at next mapping time.
1605		 * We need to try mapping to determine it.
1606		 */
1607		busy = 0;
1608
1609out:
1610	spin_unlock_irqrestore(&m->lock, flags);
1611
1612	return busy;
1613}
1614
1615/*-----------------------------------------------------------------
1616 * Module setup
1617 *---------------------------------------------------------------*/
1618static struct target_type multipath_target = {
1619	.name = "multipath",
1620	.version = {1, 3, 0},
1621	.module = THIS_MODULE,
1622	.ctr = multipath_ctr,
1623	.dtr = multipath_dtr,
1624	.map_rq = multipath_map,
1625	.rq_end_io = multipath_end_io,
1626	.presuspend = multipath_presuspend,
1627	.postsuspend = multipath_postsuspend,
1628	.resume = multipath_resume,
1629	.status = multipath_status,
1630	.message = multipath_message,
1631	.ioctl  = multipath_ioctl,
1632	.iterate_devices = multipath_iterate_devices,
1633	.busy = multipath_busy,
1634};
1635
1636static int __init dm_multipath_init(void)
1637{
1638	int r;
1639
1640	/* allocate a slab for the dm_ios */
1641	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
1642	if (!_mpio_cache)
1643		return -ENOMEM;
1644
1645	r = dm_register_target(&multipath_target);
1646	if (r < 0) {
1647		DMERR("register failed %d", r);
1648		kmem_cache_destroy(_mpio_cache);
1649		return -EINVAL;
1650	}
1651
1652	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1653	if (!kmultipathd) {
1654		DMERR("failed to create workqueue kmpathd");
1655		dm_unregister_target(&multipath_target);
1656		kmem_cache_destroy(_mpio_cache);
1657		return -ENOMEM;
1658	}
1659
1660	/*
1661	 * A separate workqueue is used to handle the device handlers
 1662	 * to avoid overloading the existing workqueue. Overloading the
1663	 * old workqueue would also create a bottleneck in the
1664	 * path of the storage hardware device activation.
1665	 */
1666	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1667						  WQ_MEM_RECLAIM);
1668	if (!kmpath_handlerd) {
1669		DMERR("failed to create workqueue kmpath_handlerd");
1670		destroy_workqueue(kmultipathd);
1671		dm_unregister_target(&multipath_target);
1672		kmem_cache_destroy(_mpio_cache);
1673		return -ENOMEM;
1674	}
1675
1676	DMINFO("version %u.%u.%u loaded",
1677	       multipath_target.version[0], multipath_target.version[1],
1678	       multipath_target.version[2]);
1679
1680	return r;
1681}
1682
1683static void __exit dm_multipath_exit(void)
1684{
1685	destroy_workqueue(kmpath_handlerd);
1686	destroy_workqueue(kmultipathd);
1687
1688	dm_unregister_target(&multipath_target);
1689	kmem_cache_destroy(_mpio_cache);
1690}
1691
1692module_init(dm_multipath_init);
1693module_exit(dm_multipath_exit);
1694
1695MODULE_DESCRIPTION(DM_NAME " multipath target");
1696MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1697MODULE_LICENSE("GPL");
Linux v4.6: drivers/md/dm-mpath.c
   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9
  10#include "dm.h"
  11#include "dm-path-selector.h"
  12#include "dm-uevent.h"
  13
  14#include <linux/blkdev.h>
  15#include <linux/ctype.h>
  16#include <linux/init.h>
  17#include <linux/mempool.h>
  18#include <linux/module.h>
  19#include <linux/pagemap.h>
  20#include <linux/slab.h>
  21#include <linux/time.h>
  22#include <linux/workqueue.h>
  23#include <linux/delay.h>
  24#include <scsi/scsi_dh.h>
  25#include <linux/atomic.h>
  26#include <linux/blk-mq.h>
  27
  28#define DM_MSG_PREFIX "multipath"
  29#define DM_PG_INIT_DELAY_MSECS 2000
  30#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
  31
  32/* Path properties */
  33struct pgpath {
  34	struct list_head list;
  35
  36	struct priority_group *pg;	/* Owning PG */
  37	unsigned fail_count;		/* Cumulative failure count */
  38
  39	struct dm_path path;
  40	struct delayed_work activate_path;
  41
  42	bool is_active:1;		/* Path status */
  43};
  44
  45#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  46
  47/*
  48 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  49 * Each has a path selector which controls which path gets used.
  50 */
  51struct priority_group {
  52	struct list_head list;
  53
  54	struct multipath *m;		/* Owning multipath instance */
  55	struct path_selector ps;
  56
  57	unsigned pg_num;		/* Reference number */
  58	unsigned nr_pgpaths;		/* Number of paths in PG */
  59	struct list_head pgpaths;
  60
  61	bool bypassed:1;		/* Temporarily bypass this PG? */
  62};
  63
  64/* Multipath context */
  65struct multipath {
  66	struct list_head list;
  67	struct dm_target *ti;
  68
  69	const char *hw_handler_name;
  70	char *hw_handler_params;
  71
  72	spinlock_t lock;
  73
  74	unsigned nr_priority_groups;
  75	struct list_head priority_groups;
  76
  77	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  78
  79	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
  80
  81	unsigned nr_valid_paths;	/* Total number of usable paths */
  82	struct pgpath *current_pgpath;
  83	struct priority_group *current_pg;
  84	struct priority_group *next_pg;	/* Switch to this PG if set */
  85
  86	bool queue_io:1;		/* Must we queue all I/O? */
  87	bool queue_if_no_path:1;	/* Queue I/O if last path fails? */
  88	bool saved_queue_if_no_path:1;	/* Saved state during suspension */
  89	bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
  90	bool pg_init_disabled:1;	/* pg_init is not currently allowed */
  91	bool pg_init_required:1;	/* pg_init needs calling? */
  92	bool pg_init_delay_retry:1;	/* Delay pg_init retry? */
  93
  94	unsigned pg_init_retries;	/* Number of times to retry pg_init */
  95	unsigned pg_init_count;		/* Number of times pg_init called */
  96	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  97
  98	struct work_struct trigger_event;
  99
 100	/*
 101	 * We must use a mempool of dm_mpath_io structs so that we
 102	 * can resubmit bios on error.
 103	 */
 104	mempool_t *mpio_pool;
 105
 106	struct mutex work_mutex;
 107};
 108
 109/*
 110 * Context information attached to each bio we process.
 111 */
 112struct dm_mpath_io {
 113	struct pgpath *pgpath;
 114	size_t nr_bytes;
 115};
 116
 117typedef int (*action_fn) (struct pgpath *pgpath);
 118
 119static struct kmem_cache *_mpio_cache;
 120
 121static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 122static void trigger_event(struct work_struct *work);
 123static void activate_path(struct work_struct *work);
 124
 125
 126/*-----------------------------------------------
 127 * Allocation routines
 128 *-----------------------------------------------*/
 129
 130static struct pgpath *alloc_pgpath(void)
 131{
 132	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 133
 134	if (pgpath) {
 135		pgpath->is_active = true;
 136		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
 137	}
 138
 139	return pgpath;
 140}
 141
 142static void free_pgpath(struct pgpath *pgpath)
 143{
 144	kfree(pgpath);
 145}
 146
 147static struct priority_group *alloc_priority_group(void)
 148{
 149	struct priority_group *pg;
 150
 151	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 152
 153	if (pg)
 154		INIT_LIST_HEAD(&pg->pgpaths);
 155
 156	return pg;
 157}
 158
 159static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 160{
 161	struct pgpath *pgpath, *tmp;
 162
 163	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 164		list_del(&pgpath->list);
 165		dm_put_device(ti, pgpath->path.dev);
 166		free_pgpath(pgpath);
 167	}
 168}
 169
 170static void free_priority_group(struct priority_group *pg,
 171				struct dm_target *ti)
 172{
 173	struct path_selector *ps = &pg->ps;
 174
 175	if (ps->type) {
 176		ps->type->destroy(ps);
 177		dm_put_path_selector(ps->type);
 178	}
 179
 180	free_pgpaths(&pg->pgpaths, ti);
 181	kfree(pg);
 182}
 183
 184static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
 185{
 186	struct multipath *m;
 187
 188	m = kzalloc(sizeof(*m), GFP_KERNEL);
 189	if (m) {
 190		INIT_LIST_HEAD(&m->priority_groups);
 191		spin_lock_init(&m->lock);
 192		m->queue_io = true;
 193		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 194		INIT_WORK(&m->trigger_event, trigger_event);
 195		init_waitqueue_head(&m->pg_init_wait);
 196		mutex_init(&m->work_mutex);
 197
 198		m->mpio_pool = NULL;
 199		if (!use_blk_mq) {
 200			unsigned min_ios = dm_get_reserved_rq_based_ios();
 201
 202			m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
 203			if (!m->mpio_pool) {
 204				kfree(m);
 205				return NULL;
 206			}
 207		}
 208
 209		m->ti = ti;
 210		ti->private = m;
 211	}
 212
 213	return m;
 214}
 215
 216static void free_multipath(struct multipath *m)
 217{
 218	struct priority_group *pg, *tmp;
 219
 220	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 221		list_del(&pg->list);
 222		free_priority_group(pg, m->ti);
 223	}
 224
 225	kfree(m->hw_handler_name);
 226	kfree(m->hw_handler_params);
 227	mempool_destroy(m->mpio_pool);
 228	kfree(m);
 229}
 230
 231static struct dm_mpath_io *get_mpio(union map_info *info)
 232{
 233	return info->ptr;
 234}
 235
 236static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
 237{
 238	struct dm_mpath_io *mpio;
 239
 240	if (!m->mpio_pool) {
 241		/* Use blk-mq pdu memory requested via per_io_data_size */
 242		mpio = get_mpio(info);
 243		memset(mpio, 0, sizeof(*mpio));
 244		return mpio;
 245	}
 246
 247	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
 248	if (!mpio)
 249		return NULL;
 250
 251	memset(mpio, 0, sizeof(*mpio));
 252	info->ptr = mpio;
 253
 254	return mpio;
 255}
 256
 257static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
 258{
 259	/* Only needed for non blk-mq (.request_fn) multipath */
 260	if (m->mpio_pool) {
 261		struct dm_mpath_io *mpio = info->ptr;
 262
 263		info->ptr = NULL;
 264		mempool_free(mpio, m->mpio_pool);
 265	}
 266}
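/*
 * The helpers above cover both request paths in this version: with
 * blk-mq the dm_mpath_io lives in the request's PDU (sized via
 * per_io_data_size), so only the old .request_fn path allocates from,
 * and frees back to, m->mpio_pool.
 */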
 267
 268/*-----------------------------------------------
 269 * Path selection
 270 *-----------------------------------------------*/
 271
 272static int __pg_init_all_paths(struct multipath *m)
 273{
 274	struct pgpath *pgpath;
 275	unsigned long pg_init_delay = 0;
 276
 277	if (m->pg_init_in_progress || m->pg_init_disabled)
 278		return 0;
 279
 280	m->pg_init_count++;
 281	m->pg_init_required = false;
 282
 283	/* Check here to reset pg_init_required */
 284	if (!m->current_pg)
 285		return 0;
 286
 287	if (m->pg_init_delay_retry)
 288		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 289						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 290	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 291		/* Skip failed paths */
 292		if (!pgpath->is_active)
 293			continue;
 294		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 295				       pg_init_delay))
 296			m->pg_init_in_progress++;
 297	}
 298	return m->pg_init_in_progress;
 299}
 300
 301static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 302{
 303	m->current_pg = pgpath->pg;
 304
 305	/* Must we initialise the PG first, and queue I/O till it's ready? */
 306	if (m->hw_handler_name) {
 307		m->pg_init_required = true;
 308		m->queue_io = true;
 309	} else {
 310		m->pg_init_required = false;
 311		m->queue_io = false;
 312	}
 313
 314	m->pg_init_count = 0;
 315}
 316
 317static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
 318			       size_t nr_bytes)
 319{
 320	struct dm_path *path;
 321
 322	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 323	if (!path)
 324		return -ENXIO;
 325
 326	m->current_pgpath = path_to_pgpath(path);
 327
 328	if (m->current_pg != pg)
 329		__switch_pg(m, m->current_pgpath);
 330
 331	return 0;
 332}
 333
 334static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 335{
 336	struct priority_group *pg;
 337	bool bypassed = true;
 338
 339	if (!m->nr_valid_paths) {
 340		m->queue_io = false;
 341		goto failed;
 342	}
 343
 344	/* Were we instructed to switch PG? */
 345	if (m->next_pg) {
 346		pg = m->next_pg;
 347		m->next_pg = NULL;
 348		if (!__choose_path_in_pg(m, pg, nr_bytes))
 349			return;
 350	}
 351
 352	/* Don't change PG until it has no remaining paths */
 353	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
 354		return;
 355
 356	/*
 357	 * Loop through priority groups until we find a valid path.
 358	 * First time we skip PGs marked 'bypassed'.
 359	 * Second time we only try the ones we skipped, but set
 360	 * pg_init_delay_retry so we do not hammer controllers.
 361	 */
 362	do {
 363		list_for_each_entry(pg, &m->priority_groups, list) {
 364			if (pg->bypassed == bypassed)
 365				continue;
 366			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
 367				if (!bypassed)
 368					m->pg_init_delay_retry = true;
 369				return;
 370			}
 371		}
 372	} while (bypassed--);
 373
 374failed:
 375	m->current_pgpath = NULL;
 376	m->current_pg = NULL;
 377}
 378
 379/*
 380 * Check whether bios must be queued in the device-mapper core rather
 381 * than here in the target.
 382 *
 383 * m->lock must be held on entry.
 384 *
 385 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 386 * same value then we are not between multipath_presuspend()
 387 * and multipath_resume() calls and we have no need to check
 388 * for the DMF_NOFLUSH_SUSPENDING flag.
 389 */
 390static int __must_push_back(struct multipath *m)
 391{
 392	return (m->queue_if_no_path ||
 393		(m->queue_if_no_path != m->saved_queue_if_no_path &&
 394		 dm_noflush_suspending(m->ti)));
 395}
 396
 397/*
 398 * Map cloned requests
 399 */
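/*
 * Common helper for both request-based interfaces: the old .request_fn
 * path passes a pre-allocated @clone, while the blk-mq path passes the
 * original @rq and receives a newly allocated clone back via @__clone.
 */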
 400static int __multipath_map(struct dm_target *ti, struct request *clone,
 401			   union map_info *map_context,
 402			   struct request *rq, struct request **__clone)
 403{
 404	struct multipath *m = ti->private;
 405	int r = DM_MAPIO_REQUEUE;
 406	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
 407	struct pgpath *pgpath;
 408	struct block_device *bdev;
 409	struct dm_mpath_io *mpio;
 410
 411	spin_lock_irq(&m->lock);
 412
 413	/* Do we need to select a new pgpath? */
  414	if (!m->current_pgpath || !m->queue_io)
  415		__choose_pgpath(m, nr_bytes);
 416
 417	pgpath = m->current_pgpath;
 418
 419	if (!pgpath) {
 420		if (!__must_push_back(m))
 421			r = -EIO;	/* Failed */
 422		goto out_unlock;
 423	} else if (m->queue_io || m->pg_init_required) {
 424		__pg_init_all_paths(m);
 425		goto out_unlock;
 426	}
 427
 428	mpio = set_mpio(m, map_context);
 429	if (!mpio)
 430		/* ENOMEM, requeue */
  431		goto out_unlock;
  432
 433	mpio->pgpath = pgpath;
 434	mpio->nr_bytes = nr_bytes;
 435
 436	bdev = pgpath->path.dev->bdev;
 437
 438	spin_unlock_irq(&m->lock);
 439
 440	if (clone) {
 441		/*
 442		 * Old request-based interface: allocated clone is passed in.
 443		 * Used by: .request_fn stacked on .request_fn path(s).
 444		 */
 445		clone->q = bdev_get_queue(bdev);
 446		clone->rq_disk = bdev->bd_disk;
 447		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 448	} else {
 449		/*
 450		 * blk-mq request-based interface; used by both:
 451		 * .request_fn stacked on blk-mq path(s) and
 452		 * blk-mq stacked on blk-mq path(s).
 453		 */
 454		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
 455						rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
 456		if (IS_ERR(*__clone)) {
 457			/* ENOMEM, requeue */
 458			clear_request_fn_mpio(m, map_context);
 459			return r;
 460		}
 461		(*__clone)->bio = (*__clone)->biotail = NULL;
 462		(*__clone)->rq_disk = bdev->bd_disk;
 463		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 464	}
 465
 466	if (pgpath->pg->ps.type->start_io)
 467		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 468					      &pgpath->path,
 469					      nr_bytes);
 470	return DM_MAPIO_REMAPPED;
 471
 472out_unlock:
 473	spin_unlock_irq(&m->lock);
 474
 475	return r;
 476}
 477
 478static int multipath_map(struct dm_target *ti, struct request *clone,
 479			 union map_info *map_context)
 480{
 481	return __multipath_map(ti, clone, map_context, NULL, NULL);
 482}
 483
 484static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 485				   union map_info *map_context,
 486				   struct request **clone)
 487{
 488	return __multipath_map(ti, NULL, map_context, rq, clone);
 489}
 490
 491static void multipath_release_clone(struct request *clone)
 492{
 493	blk_mq_free_request(clone);
 494}
 495
 496/*
 497 * If we run out of usable paths, should we queue I/O or error it?
 498 */
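/*
 * save_old_value is set by multipath_presuspend() so that the
 * pre-suspend setting can be restored by multipath_resume(); message
 * handlers and the constructor pass false and overwrite the saved
 * value as well.
 */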
 499static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 500			    bool save_old_value)
 501{
 502	unsigned long flags;
 503
 504	spin_lock_irqsave(&m->lock, flags);
 505
 506	if (save_old_value)
 507		m->saved_queue_if_no_path = m->queue_if_no_path;
 508	else
 509		m->saved_queue_if_no_path = queue_if_no_path;
  510	m->queue_if_no_path = queue_if_no_path;
  511	spin_unlock_irqrestore(&m->lock, flags);
 512
 513	if (!queue_if_no_path)
  514		dm_table_run_md_queue_async(m->ti->table);
  515
  516	return 0;
  517}
 518
 519/*
 520 * An event is triggered whenever a path is taken out of use.
 521 * Includes path failure and PG bypass.
 522 */
 523static void trigger_event(struct work_struct *work)
 524{
 525	struct multipath *m =
 526		container_of(work, struct multipath, trigger_event);
 527
 528	dm_table_event(m->ti->table);
 529}
 530
 531/*-----------------------------------------------------------------
 532 * Constructor/argument parsing:
 533 * <#multipath feature args> [<arg>]*
 534 * <#hw_handler args> [hw_handler [<arg>]*]
 535 * <#priority groups>
 536 * <initial priority group>
 537 *     [<selector> <#selector args> [<arg>]*
 538 *      <#paths> <#per-path selector args>
 539 *         [<path> [<arg>]* ]+ ]+
 540 *---------------------------------------------------------------*/
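/*
 * Illustrative table line (hypothetical devices) for two paths in a
 * single round-robin PG with queueing on path loss:
 *   0 1024 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 */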
 541static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 542			       struct dm_target *ti)
 543{
 544	int r;
 545	struct path_selector_type *pst;
 546	unsigned ps_argc;
 547
 548	static struct dm_arg _args[] = {
 549		{0, 1024, "invalid number of path selector args"},
 550	};
 551
 552	pst = dm_get_path_selector(dm_shift_arg(as));
 553	if (!pst) {
 554		ti->error = "unknown path selector type";
 555		return -EINVAL;
 556	}
 557
 558	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 559	if (r) {
 560		dm_put_path_selector(pst);
 561		return -EINVAL;
 562	}
 563
 564	r = pst->create(&pg->ps, ps_argc, as->argv);
 565	if (r) {
 566		dm_put_path_selector(pst);
 567		ti->error = "path selector constructor failed";
 568		return r;
 569	}
 570
 571	pg->ps.type = pst;
 572	dm_consume_args(as, ps_argc);
 573
 574	return 0;
 575}
 576
 577static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 578			       struct dm_target *ti)
 579{
 580	int r;
 581	struct pgpath *p;
 582	struct multipath *m = ti->private;
 583	struct request_queue *q = NULL;
 584	const char *attached_handler_name;
 585
 586	/* we need at least a path arg */
 587	if (as->argc < 1) {
 588		ti->error = "no device given";
 589		return ERR_PTR(-EINVAL);
 590	}
 591
 592	p = alloc_pgpath();
 593	if (!p)
 594		return ERR_PTR(-ENOMEM);
 595
 596	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 597			  &p->path.dev);
 598	if (r) {
 599		ti->error = "error getting device";
 600		goto bad;
 601	}
 602
 603	if (m->retain_attached_hw_handler || m->hw_handler_name)
 604		q = bdev_get_queue(p->path.dev->bdev);
 605
 606	if (m->retain_attached_hw_handler) {
 607retain:
 608		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 609		if (attached_handler_name) {
 610			/*
 611			 * Reset hw_handler_name to match the attached handler
 612			 * and clear any hw_handler_params associated with the
 613			 * ignored handler.
 614			 *
 615			 * NB. This modifies the table line to show the actual
 616			 * handler instead of the original table passed in.
 617			 */
 618			kfree(m->hw_handler_name);
 619			m->hw_handler_name = attached_handler_name;
 620
 621			kfree(m->hw_handler_params);
 622			m->hw_handler_params = NULL;
 623		}
 624	}
 625
 626	if (m->hw_handler_name) {
 627		r = scsi_dh_attach(q, m->hw_handler_name);
 628		if (r == -EBUSY) {
 629			char b[BDEVNAME_SIZE];
 630
 631			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
 632				bdevname(p->path.dev->bdev, b));
 633			goto retain;
 634		}
 635		if (r < 0) {
 636			ti->error = "error attaching hardware handler";
 637			dm_put_device(ti, p->path.dev);
 638			goto bad;
 639		}
 640
 641		if (m->hw_handler_params) {
 642			r = scsi_dh_set_params(q, m->hw_handler_params);
 643			if (r < 0) {
  644				ti->error = "unable to set hardware handler parameters";
 646				dm_put_device(ti, p->path.dev);
 647				goto bad;
 648			}
 649		}
 650	}
 651
 652	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 653	if (r) {
 654		dm_put_device(ti, p->path.dev);
 655		goto bad;
 656	}
 657
 658	return p;
 659
 660 bad:
 661	free_pgpath(p);
 662	return ERR_PTR(r);
 663}
 664
 665static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 666						   struct multipath *m)
 667{
 668	static struct dm_arg _args[] = {
 669		{1, 1024, "invalid number of paths"},
 670		{0, 1024, "invalid number of selector args"}
 671	};
 672
 673	int r;
 674	unsigned i, nr_selector_args, nr_args;
 675	struct priority_group *pg;
 676	struct dm_target *ti = m->ti;
 677
 678	if (as->argc < 2) {
 679		as->argc = 0;
 680		ti->error = "not enough priority group arguments";
 681		return ERR_PTR(-EINVAL);
 682	}
 683
 684	pg = alloc_priority_group();
 685	if (!pg) {
 686		ti->error = "couldn't allocate priority group";
 687		return ERR_PTR(-ENOMEM);
 688	}
 689	pg->m = m;
 690
 691	r = parse_path_selector(as, pg, ti);
 692	if (r)
 693		goto bad;
 694
 695	/*
 696	 * read the paths
 697	 */
 698	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 699	if (r)
 700		goto bad;
 701
 702	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 703	if (r)
 704		goto bad;
 705
 706	nr_args = 1 + nr_selector_args;
 707	for (i = 0; i < pg->nr_pgpaths; i++) {
 708		struct pgpath *pgpath;
 709		struct dm_arg_set path_args;
 710
 711		if (as->argc < nr_args) {
 712			ti->error = "not enough path parameters";
 713			r = -EINVAL;
 714			goto bad;
 715		}
 716
 717		path_args.argc = nr_args;
 718		path_args.argv = as->argv;
 719
 720		pgpath = parse_path(&path_args, &pg->ps, ti);
 721		if (IS_ERR(pgpath)) {
 722			r = PTR_ERR(pgpath);
 723			goto bad;
 724		}
 725
 726		pgpath->pg = pg;
 727		list_add_tail(&pgpath->list, &pg->pgpaths);
 728		dm_consume_args(as, nr_args);
 729	}
 730
 731	return pg;
 732
 733 bad:
 734	free_priority_group(pg, ti);
 735	return ERR_PTR(r);
 736}
 737
 738static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 739{
 740	unsigned hw_argc;
 741	int ret;
 742	struct dm_target *ti = m->ti;
 743
 744	static struct dm_arg _args[] = {
 745		{0, 1024, "invalid number of hardware handler args"},
 746	};
 747
 748	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
 749		return -EINVAL;
 750
 751	if (!hw_argc)
 752		return 0;
 753
  754	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name) {
		ti->error = "memory allocation failed";
		return -ENOMEM;
	}
 755
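	/*
	 * Flatten the remaining handler arguments into hw_handler_params
	 * as NUL-separated tokens, led by the argument count.
	 */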
 756	if (hw_argc > 1) {
 757		char *p;
 758		int i, j, len = 4;
 759
 760		for (i = 0; i <= hw_argc - 2; i++)
 761			len += strlen(as->argv[i]) + 1;
 762		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
 763		if (!p) {
 764			ti->error = "memory allocation failed";
 765			ret = -ENOMEM;
 766			goto fail;
 767		}
 768		j = sprintf(p, "%d", hw_argc - 1);
  769		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
 770			j = sprintf(p, "%s", as->argv[i]);
 771	}
 772	dm_consume_args(as, hw_argc - 1);
 773
 774	return 0;
 775fail:
 776	kfree(m->hw_handler_name);
 777	m->hw_handler_name = NULL;
 778	return ret;
 779}
 780
 781static int parse_features(struct dm_arg_set *as, struct multipath *m)
 782{
 783	int r;
 784	unsigned argc;
 785	struct dm_target *ti = m->ti;
 786	const char *arg_name;
 787
 788	static struct dm_arg _args[] = {
 789		{0, 6, "invalid number of feature args"},
 790		{1, 50, "pg_init_retries must be between 1 and 50"},
 791		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
 792	};
 793
 794	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 795	if (r)
 796		return -EINVAL;
 797
 798	if (!argc)
 799		return 0;
 800
 801	do {
 802		arg_name = dm_shift_arg(as);
 803		argc--;
 804
 805		if (!strcasecmp(arg_name, "queue_if_no_path")) {
 806			r = queue_if_no_path(m, true, false);
 807			continue;
 808		}
 809
 810		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
 811			m->retain_attached_hw_handler = true;
 812			continue;
 813		}
 814
 815		if (!strcasecmp(arg_name, "pg_init_retries") &&
 816		    (argc >= 1)) {
 817			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
 818			argc--;
 819			continue;
 820		}
 821
 822		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
 823		    (argc >= 1)) {
 824			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
 825			argc--;
 826			continue;
 827		}
 828
 829		ti->error = "Unrecognised multipath feature request";
 830		r = -EINVAL;
 831	} while (argc && !r);
 832
 833	return r;
 834}
 835
 836static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 837			 char **argv)
 838{
 839	/* target arguments */
 840	static struct dm_arg _args[] = {
 841		{0, 1024, "invalid number of priority groups"},
 842		{0, 1024, "invalid initial priority group number"},
 843	};
 844
 845	int r;
 846	struct multipath *m;
 847	struct dm_arg_set as;
 848	unsigned pg_count = 0;
 849	unsigned next_pg_num;
 850	bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
 851
 852	as.argc = argc;
 853	as.argv = argv;
 854
 855	m = alloc_multipath(ti, use_blk_mq);
 856	if (!m) {
 857		ti->error = "can't allocate multipath";
 858		return -EINVAL;
 859	}
 860
 861	r = parse_features(&as, m);
 862	if (r)
 863		goto bad;
 864
 865	r = parse_hw_handler(&as, m);
 866	if (r)
 867		goto bad;
 868
 869	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
 870	if (r)
 871		goto bad;
 872
 873	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
 874	if (r)
 875		goto bad;
 876
 877	if ((!m->nr_priority_groups && next_pg_num) ||
 878	    (m->nr_priority_groups && !next_pg_num)) {
 879		ti->error = "invalid initial priority group";
 880		r = -EINVAL;
 881		goto bad;
 882	}
 883
 884	/* parse the priority groups */
 885	while (as.argc) {
 886		struct priority_group *pg;
 887
 888		pg = parse_priority_group(&as, m);
 889		if (IS_ERR(pg)) {
 890			r = PTR_ERR(pg);
 891			goto bad;
 892		}
 893
 894		m->nr_valid_paths += pg->nr_pgpaths;
 895		list_add_tail(&pg->list, &m->priority_groups);
 896		pg_count++;
 897		pg->pg_num = pg_count;
 898		if (!--next_pg_num)
 899			m->next_pg = pg;
 900	}
 901
 902	if (pg_count != m->nr_priority_groups) {
 903		ti->error = "priority group count mismatch";
 904		r = -EINVAL;
 905		goto bad;
 906	}
 907
 908	ti->num_flush_bios = 1;
 909	ti->num_discard_bios = 1;
 910	ti->num_write_same_bios = 1;
 911	if (use_blk_mq)
 912		ti->per_io_data_size = sizeof(struct dm_mpath_io);
 913
 914	return 0;
 915
 916 bad:
 917	free_multipath(m);
 918	return r;
 919}
 920
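/*
 * Open-coded wait_event(): sleep until pg_init_in_progress drops to
 * zero, rechecking under m->lock after each wakeup from pg_init_done().
 */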
 921static void multipath_wait_for_pg_init_completion(struct multipath *m)
 922{
 923	DECLARE_WAITQUEUE(wait, current);
 924	unsigned long flags;
 925
 926	add_wait_queue(&m->pg_init_wait, &wait);
 927
 928	while (1) {
 929		set_current_state(TASK_UNINTERRUPTIBLE);
 930
 931		spin_lock_irqsave(&m->lock, flags);
 932		if (!m->pg_init_in_progress) {
 933			spin_unlock_irqrestore(&m->lock, flags);
 934			break;
 935		}
 936		spin_unlock_irqrestore(&m->lock, flags);
 937
 938		io_schedule();
 939	}
 940	set_current_state(TASK_RUNNING);
 941
 942	remove_wait_queue(&m->pg_init_wait, &wait);
 943}
 944
 945static void flush_multipath_work(struct multipath *m)
 946{
 947	unsigned long flags;
 948
 949	spin_lock_irqsave(&m->lock, flags);
 950	m->pg_init_disabled = true;
 951	spin_unlock_irqrestore(&m->lock, flags);
 952
 953	flush_workqueue(kmpath_handlerd);
 954	multipath_wait_for_pg_init_completion(m);
 955	flush_workqueue(kmultipathd);
 956	flush_work(&m->trigger_event);
 957
 958	spin_lock_irqsave(&m->lock, flags);
 959	m->pg_init_disabled = false;
 960	spin_unlock_irqrestore(&m->lock, flags);
 961}
 962
 963static void multipath_dtr(struct dm_target *ti)
 964{
 965	struct multipath *m = ti->private;
 966
 967	flush_multipath_work(m);
 968	free_multipath(m);
 969}
 970
  971/*
  972 * Take a path out of use.
 973 */
 974static int fail_path(struct pgpath *pgpath)
 975{
 976	unsigned long flags;
 977	struct multipath *m = pgpath->pg->m;
 978
 979	spin_lock_irqsave(&m->lock, flags);
 980
 981	if (!pgpath->is_active)
 982		goto out;
 983
 984	DMWARN("Failing path %s.", pgpath->path.dev->name);
 985
 986	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
 987	pgpath->is_active = false;
 988	pgpath->fail_count++;
 989
 990	m->nr_valid_paths--;
 991
 992	if (pgpath == m->current_pgpath)
 993		m->current_pgpath = NULL;
 994
 995	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 996		      pgpath->path.dev->name, m->nr_valid_paths);
 997
 998	schedule_work(&m->trigger_event);
 999
1000out:
1001	spin_unlock_irqrestore(&m->lock, flags);
1002
1003	return 0;
1004}
1005
1006/*
1007 * Reinstate a previously-failed path
1008 */
1009static int reinstate_path(struct pgpath *pgpath)
1010{
1011	int r = 0, run_queue = 0;
1012	unsigned long flags;
1013	struct multipath *m = pgpath->pg->m;
1014
1015	spin_lock_irqsave(&m->lock, flags);
1016
1017	if (pgpath->is_active)
1018		goto out;
1019
 1020	DMWARN("Reinstating path %s.", pgpath->path.dev->name);
 1021
1022	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1023	if (r)
1024		goto out;
1025
1026	pgpath->is_active = true;
1027
1028	if (!m->nr_valid_paths++) {
1029		m->current_pgpath = NULL;
1030		run_queue = 1;
1031	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1032		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1033			m->pg_init_in_progress++;
1034	}
1035
1036	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1037		      pgpath->path.dev->name, m->nr_valid_paths);
1038
1039	schedule_work(&m->trigger_event);
1040
1041out:
1042	spin_unlock_irqrestore(&m->lock, flags);
1043	if (run_queue)
1044		dm_table_run_md_queue_async(m->ti->table);
1045
1046	return r;
1047}
1048
1049/*
1050 * Fail or reinstate all paths that match the provided struct dm_dev.
1051 */
1052static int action_dev(struct multipath *m, struct dm_dev *dev,
1053		      action_fn action)
1054{
1055	int r = -EINVAL;
1056	struct pgpath *pgpath;
1057	struct priority_group *pg;
1058
1059	list_for_each_entry(pg, &m->priority_groups, list) {
1060		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1061			if (pgpath->path.dev == dev)
1062				r = action(pgpath);
1063		}
1064	}
1065
1066	return r;
1067}
1068
1069/*
1070 * Temporarily try to avoid having to use the specified PG
1071 */
1072static void bypass_pg(struct multipath *m, struct priority_group *pg,
1073		      bool bypassed)
1074{
1075	unsigned long flags;
1076
1077	spin_lock_irqsave(&m->lock, flags);
1078
1079	pg->bypassed = bypassed;
1080	m->current_pgpath = NULL;
1081	m->current_pg = NULL;
1082
1083	spin_unlock_irqrestore(&m->lock, flags);
1084
1085	schedule_work(&m->trigger_event);
1086}
1087
1088/*
1089 * Switch to using the specified PG from the next I/O that gets mapped
1090 */
1091static int switch_pg_num(struct multipath *m, const char *pgstr)
1092{
1093	struct priority_group *pg;
1094	unsigned pgnum;
1095	unsigned long flags;
1096	char dummy;
1097
1098	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1099	    (pgnum > m->nr_priority_groups)) {
1100		DMWARN("invalid PG number supplied to switch_pg_num");
1101		return -EINVAL;
1102	}
1103
1104	spin_lock_irqsave(&m->lock, flags);
1105	list_for_each_entry(pg, &m->priority_groups, list) {
1106		pg->bypassed = false;
1107		if (--pgnum)
1108			continue;
1109
1110		m->current_pgpath = NULL;
1111		m->current_pg = NULL;
1112		m->next_pg = pg;
1113	}
1114	spin_unlock_irqrestore(&m->lock, flags);
1115
1116	schedule_work(&m->trigger_event);
1117	return 0;
1118}
1119
1120/*
1121 * Set/clear bypassed status of a PG.
1122 * PGs are numbered upwards from 1 in the order they were declared.
1123 */
1124static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1125{
1126	struct priority_group *pg;
1127	unsigned pgnum;
1128	char dummy;
1129
1130	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1131	    (pgnum > m->nr_priority_groups)) {
1132		DMWARN("invalid PG number supplied to bypass_pg");
1133		return -EINVAL;
1134	}
1135
1136	list_for_each_entry(pg, &m->priority_groups, list) {
1137		if (!--pgnum)
1138			break;
1139	}
1140
1141	bypass_pg(m, pg, bypassed);
1142	return 0;
1143}
1144
1145/*
1146 * Should we retry pg_init immediately?
1147 */
1148static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1149{
1150	unsigned long flags;
1151	bool limit_reached = false;
1152
1153	spin_lock_irqsave(&m->lock, flags);
1154
1155	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
1156		m->pg_init_required = true;
1157	else
1158		limit_reached = true;
1159
1160	spin_unlock_irqrestore(&m->lock, flags);
1161
1162	return limit_reached;
1163}
1164
1165static void pg_init_done(void *data, int errors)
1166{
1167	struct pgpath *pgpath = data;
1168	struct priority_group *pg = pgpath->pg;
1169	struct multipath *m = pg->m;
1170	unsigned long flags;
1171	bool delay_retry = false;
1172
1173	/* device or driver problems */
1174	switch (errors) {
1175	case SCSI_DH_OK:
1176		break;
1177	case SCSI_DH_NOSYS:
1178		if (!m->hw_handler_name) {
1179			errors = 0;
1180			break;
1181		}
1182		DMERR("Could not failover the device: Handler scsi_dh_%s "
1183		      "Error %d.", m->hw_handler_name, errors);
1184		/*
1185		 * Fail path for now, so we do not ping pong
1186		 */
1187		fail_path(pgpath);
1188		break;
1189	case SCSI_DH_DEV_TEMP_BUSY:
1190		/*
1191		 * Probably doing something like FW upgrade on the
1192		 * controller so try the other pg.
1193		 */
1194		bypass_pg(m, pg, true);
1195		break;
1196	case SCSI_DH_RETRY:
1197		/* Wait before retrying. */
 1198		delay_retry = true;
		/* fall through */
1199	case SCSI_DH_IMM_RETRY:
1200	case SCSI_DH_RES_TEMP_UNAVAIL:
1201		if (pg_init_limit_reached(m, pgpath))
1202			fail_path(pgpath);
1203		errors = 0;
1204		break;
1205	case SCSI_DH_DEV_OFFLINED:
1206	default:
1207		/*
1208		 * We probably do not want to fail the path for a device
1209		 * error, but this is what the old dm did. In future
1210		 * patches we can do more advanced handling.
1211		 */
1212		fail_path(pgpath);
1213	}
1214
1215	spin_lock_irqsave(&m->lock, flags);
1216	if (errors) {
1217		if (pgpath == m->current_pgpath) {
1218			DMERR("Could not failover device. Error %d.", errors);
1219			m->current_pgpath = NULL;
1220			m->current_pg = NULL;
1221		}
1222	} else if (!m->pg_init_required)
1223		pg->bypassed = false;
1224
1225	if (--m->pg_init_in_progress)
 1226		/* Activations of other paths are still ongoing */
1227		goto out;
1228
1229	if (m->pg_init_required) {
1230		m->pg_init_delay_retry = delay_retry;
1231		if (__pg_init_all_paths(m))
1232			goto out;
1233	}
1234	m->queue_io = false;
1235
1236	/*
1237	 * Wake up any thread waiting to suspend.
1238	 */
1239	wake_up(&m->pg_init_wait);
1240
1241out:
1242	spin_unlock_irqrestore(&m->lock, flags);
1243}
1244
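/*
 * Work item queued by __pg_init_all_paths(); invokes the SCSI device
 * handler's activation and reports the result through pg_init_done().
 */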
1245static void activate_path(struct work_struct *work)
1246{
1247	struct pgpath *pgpath =
1248		container_of(work, struct pgpath, activate_path.work);
1249
1250	if (pgpath->is_active)
1251		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1252				 pg_init_done, pgpath);
1253	else
1254		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1255}
1256
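/*
 * Errors that indicate a problem with the request itself rather than
 * with the transport, so retrying on another path cannot help.
 */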
1257static int noretry_error(int error)
1258{
1259	switch (error) {
1260	case -EOPNOTSUPP:
1261	case -EREMOTEIO:
1262	case -EILSEQ:
1263	case -ENODATA:
1264	case -ENOSPC:
1265		return 1;
1266	}
1267
1268	/* Anything else could be a path failure, so should be retried */
1269	return 0;
1270}
1271
1272/*
1273 * end_io handling
1274 */
1275static int do_end_io(struct multipath *m, struct request *clone,
1276		     int error, struct dm_mpath_io *mpio)
1277{
1278	/*
1279	 * We don't queue any clone request inside the multipath target
1280	 * during end I/O handling, since those clone requests don't have
1281	 * bio clones.  If we queue them inside the multipath target,
1282	 * we need to make bio clones, that requires memory allocation.
1283	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1284	 *  don't have bio clones.)
1285	 * Instead of queueing the clone request here, we queue the original
1286	 * request into dm core, which will remake a clone request and
1287	 * clone bios for it and resubmit it later.
1288	 */
1289	int r = DM_ENDIO_REQUEUE;
1290	unsigned long flags;
1291
1292	if (!error && !clone->errors)
1293		return 0;	/* I/O complete */
1294
1295	if (noretry_error(error))
1296		return error;
1297
1298	if (mpio->pgpath)
1299		fail_path(mpio->pgpath);
1300
1301	spin_lock_irqsave(&m->lock, flags);
1302	if (!m->nr_valid_paths) {
1303		if (!m->queue_if_no_path) {
1304			if (!__must_push_back(m))
1305				r = -EIO;
1306		} else {
1307			if (error == -EBADE)
1308				r = error;
1309		}
1310	}
1311	spin_unlock_irqrestore(&m->lock, flags);
1312
1313	return r;
1314}
1315
1316static int multipath_end_io(struct dm_target *ti, struct request *clone,
1317			    int error, union map_info *map_context)
1318{
1319	struct multipath *m = ti->private;
1320	struct dm_mpath_io *mpio = get_mpio(map_context);
1321	struct pgpath *pgpath;
1322	struct path_selector *ps;
1323	int r;
1324
1325	BUG_ON(!mpio);
1326
1327	r = do_end_io(m, clone, error, mpio);
1328	pgpath = mpio->pgpath;
1329	if (pgpath) {
1330		ps = &pgpath->pg->ps;
1331		if (ps->type->end_io)
1332			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1333	}
1334	clear_request_fn_mpio(m, map_context);
1335
1336	return r;
1337}
1338
1339/*
1340 * Suspend can't complete until all the I/O is processed so if
1341 * the last path fails we must error any remaining I/O.
 1342 * Note that if freeze_bdev fails while suspending, the
1343 * queue_if_no_path state is lost - userspace should reset it.
1344 */
1345static void multipath_presuspend(struct dm_target *ti)
1346{
1347	struct multipath *m = ti->private;
1348
1349	queue_if_no_path(m, false, true);
1350}
1351
1352static void multipath_postsuspend(struct dm_target *ti)
1353{
1354	struct multipath *m = ti->private;
1355
1356	mutex_lock(&m->work_mutex);
1357	flush_multipath_work(m);
1358	mutex_unlock(&m->work_mutex);
1359}
1360
1361/*
1362 * Restore the queue_if_no_path setting.
1363 */
1364static void multipath_resume(struct dm_target *ti)
1365{
1366	struct multipath *m = ti->private;
1367	unsigned long flags;
1368
1369	spin_lock_irqsave(&m->lock, flags);
1370	m->queue_if_no_path = m->saved_queue_if_no_path;
1371	spin_unlock_irqrestore(&m->lock, flags);
1372}
1373
1374/*
1375 * Info output has the following format:
1376 * num_multipath_feature_args [multipath_feature_args]*
1377 * num_handler_status_args [handler_status_args]*
1378 * num_groups init_group_number
1379 *            [A|D|E num_ps_status_args [ps_status_args]*
1380 *             num_paths num_selector_args
1381 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1382 *
1383 * Table output has the following format (identical to the constructor string):
1384 * num_feature_args [features_args]*
1385 * num_handler_args hw_handler [hw_handler_args]*
1386 * num_groups init_group_number
1387 *     [priority selector-name num_ps_args [ps_args]*
1388 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1389 */
1390static void multipath_status(struct dm_target *ti, status_type_t type,
1391			     unsigned status_flags, char *result, unsigned maxlen)
1392{
1393	int sz = 0;
1394	unsigned long flags;
1395	struct multipath *m = ti->private;
1396	struct priority_group *pg;
1397	struct pgpath *p;
1398	unsigned pg_num;
1399	char state;
1400
1401	spin_lock_irqsave(&m->lock, flags);
1402
1403	/* Features */
1404	if (type == STATUSTYPE_INFO)
1405		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
1406	else {
1407		DMEMIT("%u ", m->queue_if_no_path +
1408			      (m->pg_init_retries > 0) * 2 +
1409			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1410			      m->retain_attached_hw_handler);
1411		if (m->queue_if_no_path)
1412			DMEMIT("queue_if_no_path ");
1413		if (m->pg_init_retries)
1414			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1415		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1416			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1417		if (m->retain_attached_hw_handler)
1418			DMEMIT("retain_attached_hw_handler ");
1419	}
1420
1421	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1422		DMEMIT("0 ");
1423	else
1424		DMEMIT("1 %s ", m->hw_handler_name);
1425
1426	DMEMIT("%u ", m->nr_priority_groups);
1427
1428	if (m->next_pg)
1429		pg_num = m->next_pg->pg_num;
1430	else if (m->current_pg)
1431		pg_num = m->current_pg->pg_num;
1432	else
1433		pg_num = (m->nr_priority_groups ? 1 : 0);
1434
1435	DMEMIT("%u ", pg_num);
1436
1437	switch (type) {
1438	case STATUSTYPE_INFO:
1439		list_for_each_entry(pg, &m->priority_groups, list) {
1440			if (pg->bypassed)
1441				state = 'D';	/* Disabled */
1442			else if (pg == m->current_pg)
1443				state = 'A';	/* Currently Active */
1444			else
1445				state = 'E';	/* Enabled */
1446
1447			DMEMIT("%c ", state);
1448
1449			if (pg->ps.type->status)
1450				sz += pg->ps.type->status(&pg->ps, NULL, type,
1451							  result + sz,
1452							  maxlen - sz);
1453			else
1454				DMEMIT("0 ");
1455
1456			DMEMIT("%u %u ", pg->nr_pgpaths,
1457			       pg->ps.type->info_args);
1458
1459			list_for_each_entry(p, &pg->pgpaths, list) {
1460				DMEMIT("%s %s %u ", p->path.dev->name,
1461				       p->is_active ? "A" : "F",
1462				       p->fail_count);
1463				if (pg->ps.type->status)
1464					sz += pg->ps.type->status(&pg->ps,
1465					      &p->path, type, result + sz,
1466					      maxlen - sz);
1467			}
1468		}
1469		break;
1470
1471	case STATUSTYPE_TABLE:
1472		list_for_each_entry(pg, &m->priority_groups, list) {
1473			DMEMIT("%s ", pg->ps.type->name);
1474
1475			if (pg->ps.type->status)
1476				sz += pg->ps.type->status(&pg->ps, NULL, type,
1477							  result + sz,
1478							  maxlen - sz);
1479			else
1480				DMEMIT("0 ");
1481
1482			DMEMIT("%u %u ", pg->nr_pgpaths,
1483			       pg->ps.type->table_args);
1484
1485			list_for_each_entry(p, &pg->pgpaths, list) {
1486				DMEMIT("%s ", p->path.dev->name);
1487				if (pg->ps.type->status)
1488					sz += pg->ps.type->status(&pg->ps,
1489					      &p->path, type, result + sz,
1490					      maxlen - sz);
1491			}
1492		}
1493		break;
1494	}
1495
 1496	spin_unlock_irqrestore(&m->lock, flags);
 1497}
1498
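/*
 * Runtime control via the message interface, e.g. (illustrative):
 *   dmsetup message <mapname> 0 fail_path <device>
 *   dmsetup message <mapname> 0 switch_group <pg#>
 * One-argument messages: queue_if_no_path, fail_if_no_path.
 * Two-argument messages: disable_group, enable_group, switch_group,
 * reinstate_path, fail_path.
 */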
1499static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1500{
1501	int r = -EINVAL;
1502	struct dm_dev *dev;
1503	struct multipath *m = ti->private;
1504	action_fn action;
1505
1506	mutex_lock(&m->work_mutex);
1507
1508	if (dm_suspended(ti)) {
1509		r = -EBUSY;
1510		goto out;
1511	}
1512
1513	if (argc == 1) {
1514		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1515			r = queue_if_no_path(m, true, false);
1516			goto out;
1517		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1518			r = queue_if_no_path(m, false, false);
1519			goto out;
1520		}
1521	}
1522
1523	if (argc != 2) {
1524		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1525		goto out;
1526	}
1527
1528	if (!strcasecmp(argv[0], "disable_group")) {
1529		r = bypass_pg_num(m, argv[1], true);
1530		goto out;
1531	} else if (!strcasecmp(argv[0], "enable_group")) {
1532		r = bypass_pg_num(m, argv[1], false);
1533		goto out;
1534	} else if (!strcasecmp(argv[0], "switch_group")) {
1535		r = switch_pg_num(m, argv[1]);
1536		goto out;
1537	} else if (!strcasecmp(argv[0], "reinstate_path"))
1538		action = reinstate_path;
1539	else if (!strcasecmp(argv[0], "fail_path"))
1540		action = fail_path;
1541	else {
1542		DMWARN("Unrecognised multipath message received: %s", argv[0]);
1543		goto out;
1544	}
1545
1546	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1547	if (r) {
1548		DMWARN("message: error getting device %s",
1549		       argv[1]);
1550		goto out;
1551	}
1552
1553	r = action_dev(m, dev, action);
1554
1555	dm_put_device(ti, dev);
1556
1557out:
1558	mutex_unlock(&m->work_mutex);
1559	return r;
1560}
1561
1562static int multipath_prepare_ioctl(struct dm_target *ti,
1563		struct block_device **bdev, fmode_t *mode)
1564{
 1565	struct multipath *m = ti->private;
 1566	unsigned long flags;
1567	int r;
1568
1569	spin_lock_irqsave(&m->lock, flags);
1570
1571	if (!m->current_pgpath)
1572		__choose_pgpath(m, 0);
1573
1574	if (m->current_pgpath) {
1575		if (!m->queue_io) {
1576			*bdev = m->current_pgpath->path.dev->bdev;
1577			*mode = m->current_pgpath->path.dev->mode;
1578			r = 0;
1579		} else {
1580			/* pg_init has not started or completed */
1581			r = -ENOTCONN;
1582		}
1583	} else {
1584		/* No path is available */
1585		if (m->queue_if_no_path)
1586			r = -ENOTCONN;
1587		else
1588			r = -EIO;
1589	}
 1590
 1591	spin_unlock_irqrestore(&m->lock, flags);
1592
1593	if (r == -ENOTCONN) {
1594		spin_lock_irqsave(&m->lock, flags);
1595		if (!m->current_pg) {
1596			/* Path status changed, redo selection */
1597			__choose_pgpath(m, 0);
1598		}
1599		if (m->pg_init_required)
1600			__pg_init_all_paths(m);
1601		spin_unlock_irqrestore(&m->lock, flags);
1602		dm_table_run_md_queue_async(m->ti->table);
1603	}
1604
1605	/*
1606	 * Only pass ioctls through if the device sizes match exactly.
1607	 */
1608	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1609		return 1;
1610	return r;
1611}
1612
1613static int multipath_iterate_devices(struct dm_target *ti,
1614				     iterate_devices_callout_fn fn, void *data)
1615{
1616	struct multipath *m = ti->private;
1617	struct priority_group *pg;
1618	struct pgpath *p;
1619	int ret = 0;
1620
1621	list_for_each_entry(pg, &m->priority_groups, list) {
1622		list_for_each_entry(p, &pg->pgpaths, list) {
1623			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1624			if (ret)
1625				goto out;
1626		}
1627	}
1628
1629out:
1630	return ret;
1631}
1632
1633static int pgpath_busy(struct pgpath *pgpath)
1634{
1635	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1636
1637	return blk_lld_busy(q);
1638}
1639
1640/*
1641 * We return "busy", only when we can map I/Os but underlying devices
1642 * are busy (so even if we map I/Os now, the I/Os will wait on
1643 * the underlying queue).
1644 * In other words, if we want to kill I/Os or queue them inside us
1645 * due to map unavailability, we don't return "busy".  Otherwise,
1646 * dm core won't give us the I/Os and we can't do what we want.
1647 */
1648static int multipath_busy(struct dm_target *ti)
1649{
1650	bool busy = false, has_active = false;
1651	struct multipath *m = ti->private;
1652	struct priority_group *pg;
1653	struct pgpath *pgpath;
1654	unsigned long flags;
1655
1656	spin_lock_irqsave(&m->lock, flags);
1657
1658	/* pg_init in progress or no paths available */
1659	if (m->pg_init_in_progress ||
1660	    (!m->nr_valid_paths && m->queue_if_no_path)) {
1661		busy = true;
1662		goto out;
1663	}
1664	/* Guess which priority_group will be used at next mapping time */
1665	if (unlikely(!m->current_pgpath && m->next_pg))
1666		pg = m->next_pg;
1667	else if (likely(m->current_pg))
1668		pg = m->current_pg;
1669	else
1670		/*
1671		 * We don't know which pg will be used at next mapping time.
1672		 * We don't call __choose_pgpath() here to avoid to trigger
1673		 * pg_init just by busy checking.
1674		 * So we don't know whether underlying devices we will be using
1675		 * at next mapping time are busy or not. Just try mapping.
1676		 */
1677		goto out;
1678
1679	/*
1680	 * If there is one non-busy active path at least, the path selector
1681	 * will be able to select it. So we consider such a pg as not busy.
1682	 */
1683	busy = true;
1684	list_for_each_entry(pgpath, &pg->pgpaths, list)
1685		if (pgpath->is_active) {
1686			has_active = true;
1687			if (!pgpath_busy(pgpath)) {
 1688				busy = false;
 1689				break;
1690			}
1691		}
1692
1693	if (!has_active)
1694		/*
1695		 * No active path in this pg, so this pg won't be used and
1696		 * the current_pg will be changed at next mapping time.
1697		 * We need to try mapping to determine it.
1698		 */
1699		busy = false;
1700
1701out:
1702	spin_unlock_irqrestore(&m->lock, flags);
1703
1704	return busy;
1705}
1706
1707/*-----------------------------------------------------------------
1708 * Module setup
1709 *---------------------------------------------------------------*/
1710static struct target_type multipath_target = {
1711	.name = "multipath",
1712	.version = {1, 11, 0},
1713	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
1714	.module = THIS_MODULE,
1715	.ctr = multipath_ctr,
1716	.dtr = multipath_dtr,
1717	.map_rq = multipath_map,
1718	.clone_and_map_rq = multipath_clone_and_map,
1719	.release_clone_rq = multipath_release_clone,
1720	.rq_end_io = multipath_end_io,
1721	.presuspend = multipath_presuspend,
1722	.postsuspend = multipath_postsuspend,
1723	.resume = multipath_resume,
1724	.status = multipath_status,
1725	.message = multipath_message,
1726	.prepare_ioctl = multipath_prepare_ioctl,
1727	.iterate_devices = multipath_iterate_devices,
1728	.busy = multipath_busy,
1729};
1730
1731static int __init dm_multipath_init(void)
1732{
1733	int r;
1734
 1735	/* allocate a slab for the dm_mpath_io structs */
1736	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
1737	if (!_mpio_cache)
1738		return -ENOMEM;
1739
1740	r = dm_register_target(&multipath_target);
1741	if (r < 0) {
1742		DMERR("register failed %d", r);
1743		r = -EINVAL;
1744		goto bad_register_target;
1745	}
1746
1747	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1748	if (!kmultipathd) {
1749		DMERR("failed to create workqueue kmpathd");
1750		r = -ENOMEM;
 1751		goto bad_alloc_kmultipathd;
 1752	}
1753
1754	/*
1755	 * A separate workqueue is used to handle the device handlers
1756	 * to avoid overloading existing workqueue. Overloading the
1757	 * old workqueue would also create a bottleneck in the
1758	 * path of the storage hardware device activation.
1759	 */
1760	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1761						  WQ_MEM_RECLAIM);
1762	if (!kmpath_handlerd) {
1763		DMERR("failed to create workqueue kmpath_handlerd");
1764		r = -ENOMEM;
 1765		goto bad_alloc_kmpath_handlerd;
 1766	}
1767
1768	DMINFO("version %u.%u.%u loaded",
1769	       multipath_target.version[0], multipath_target.version[1],
1770	       multipath_target.version[2]);
1771
1772	return 0;
1773
1774bad_alloc_kmpath_handlerd:
1775	destroy_workqueue(kmultipathd);
1776bad_alloc_kmultipathd:
1777	dm_unregister_target(&multipath_target);
1778bad_register_target:
1779	kmem_cache_destroy(_mpio_cache);
1780
1781	return r;
1782}
1783
1784static void __exit dm_multipath_exit(void)
1785{
1786	destroy_workqueue(kmpath_handlerd);
1787	destroy_workqueue(kmultipathd);
1788
1789	dm_unregister_target(&multipath_target);
1790	kmem_cache_destroy(_mpio_cache);
1791}
1792
1793module_init(dm_multipath_init);
1794module_exit(dm_multipath_exit);
1795
1796MODULE_DESCRIPTION(DM_NAME " multipath target");
1797MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1798MODULE_LICENSE("GPL");