drivers/md/dm-mpath.c (Linux v3.5.6)
   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9
  10#include "dm-path-selector.h"
  11#include "dm-uevent.h"
  12
  13#include <linux/ctype.h>
  14#include <linux/init.h>
  15#include <linux/mempool.h>
  16#include <linux/module.h>
  17#include <linux/pagemap.h>
  18#include <linux/slab.h>
  19#include <linux/time.h>
  20#include <linux/workqueue.h>
  21#include <linux/delay.h>
  22#include <scsi/scsi_dh.h>
  23#include <linux/atomic.h>
  24
  25#define DM_MSG_PREFIX "multipath"
  26#define DM_PG_INIT_DELAY_MSECS 2000
  27#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
  28
  29/* Path properties */
  30struct pgpath {
  31	struct list_head list;
  32
  33	struct priority_group *pg;	/* Owning PG */
  34	unsigned is_active;		/* Path status */
  35	unsigned fail_count;		/* Cumulative failure count */
  36
  37	struct dm_path path;
  38	struct delayed_work activate_path;
  39};
  40
  41#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  42
  43/*
  44 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  45 * Each has a path selector which controls which path gets used.
  46 */
  47struct priority_group {
  48	struct list_head list;
  49
  50	struct multipath *m;		/* Owning multipath instance */
  51	struct path_selector ps;
  52
  53	unsigned pg_num;		/* Reference number */
  54	unsigned bypassed;		/* Temporarily bypass this PG? */
  55
  56	unsigned nr_pgpaths;		/* Number of paths in PG */
  57	struct list_head pgpaths;
  58};
  59
  60/* Multipath context */
  61struct multipath {
  62	struct list_head list;
  63	struct dm_target *ti;
  64
  65	const char *hw_handler_name;
  66	char *hw_handler_params;
  67
  68	spinlock_t lock;
  69
  70	unsigned nr_priority_groups;
  71	struct list_head priority_groups;
  72
  73	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  74
  75	unsigned pg_init_required;	/* pg_init needs calling? */
  76	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
  77	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */
  78
  79	unsigned nr_valid_paths;	/* Total number of usable paths */
  80	struct pgpath *current_pgpath;
  81	struct priority_group *current_pg;
  82	struct priority_group *next_pg;	/* Switch to this PG if set */
  83	unsigned repeat_count;		/* I/Os left before calling PS again */
  84
  85	unsigned queue_io:1;		/* Must we queue all I/O? */
  86	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
  87	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
  88
  89	unsigned pg_init_retries;	/* Number of times to retry pg_init */
  90	unsigned pg_init_count;		/* Number of times pg_init called */
  91	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  92
  93	unsigned queue_size;
  94	struct work_struct process_queued_ios;
  95	struct list_head queued_ios;
  96
  97	struct work_struct trigger_event;
  98
  99	/*
 100	 * We must use a mempool of dm_mpath_io structs so that we
 101	 * can resubmit bios on error.
 102	 */
 103	mempool_t *mpio_pool;
 104
 105	struct mutex work_mutex;
 106};
 107
 108/*
 109 * Context information attached to each bio we process.
 110 */
 111struct dm_mpath_io {
 112	struct pgpath *pgpath;
 113	size_t nr_bytes;
 114};
 115
 116typedef int (*action_fn) (struct pgpath *pgpath);
 117
 118#define MIN_IOS 256	/* Mempool size */
 119
 120static struct kmem_cache *_mpio_cache;
 121
 122static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 123static void process_queued_ios(struct work_struct *work);
 124static void trigger_event(struct work_struct *work);
 125static void activate_path(struct work_struct *work);
 126
 127
 128/*-----------------------------------------------
 129 * Allocation routines
 130 *-----------------------------------------------*/
 131
 132static struct pgpath *alloc_pgpath(void)
 133{
 134	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 135
 136	if (pgpath) {
 137		pgpath->is_active = 1;
 138		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
 139	}
 140
 141	return pgpath;
 142}
 143
 144static void free_pgpath(struct pgpath *pgpath)
 145{
 146	kfree(pgpath);
 147}
 148
 149static struct priority_group *alloc_priority_group(void)
 150{
 151	struct priority_group *pg;
 152
 153	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 154
 155	if (pg)
 156		INIT_LIST_HEAD(&pg->pgpaths);
 157
 158	return pg;
 159}
 160
 161static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 162{
 163	struct pgpath *pgpath, *tmp;
 164	struct multipath *m = ti->private;
 165
 166	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 167		list_del(&pgpath->list);
 168		if (m->hw_handler_name)
 169			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 170		dm_put_device(ti, pgpath->path.dev);
 171		free_pgpath(pgpath);
 172	}
 173}
 174
 175static void free_priority_group(struct priority_group *pg,
 176				struct dm_target *ti)
 177{
 178	struct path_selector *ps = &pg->ps;
 179
 180	if (ps->type) {
 181		ps->type->destroy(ps);
 182		dm_put_path_selector(ps->type);
 183	}
 184
 185	free_pgpaths(&pg->pgpaths, ti);
 186	kfree(pg);
 187}
 188
 189static struct multipath *alloc_multipath(struct dm_target *ti)
 190{
 191	struct multipath *m;
 192
 193	m = kzalloc(sizeof(*m), GFP_KERNEL);
 194	if (m) {
 195		INIT_LIST_HEAD(&m->priority_groups);
 196		INIT_LIST_HEAD(&m->queued_ios);
 197		spin_lock_init(&m->lock);
 198		m->queue_io = 1;
 199		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 200		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 201		INIT_WORK(&m->trigger_event, trigger_event);
 202		init_waitqueue_head(&m->pg_init_wait);
 203		mutex_init(&m->work_mutex);
 204		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
 205		if (!m->mpio_pool) {
 206			kfree(m);
 207			return NULL;
 208		}
 209		m->ti = ti;
 210		ti->private = m;
 211	}
 212
 213	return m;
 214}
 215
 216static void free_multipath(struct multipath *m)
 217{
 218	struct priority_group *pg, *tmp;
 219
 220	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 221		list_del(&pg->list);
 222		free_priority_group(pg, m->ti);
 223	}
 224
 225	kfree(m->hw_handler_name);
 226	kfree(m->hw_handler_params);
 227	mempool_destroy(m->mpio_pool);
 228	kfree(m);
 229}
 230
 231static int set_mapinfo(struct multipath *m, union map_info *info)
 232{
 233	struct dm_mpath_io *mpio;
 234
 235	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
 236	if (!mpio)
 237		return -ENOMEM;
 238
 239	memset(mpio, 0, sizeof(*mpio));
 240	info->ptr = mpio;
 241
 242	return 0;
 243}
 244
 245static void clear_mapinfo(struct multipath *m, union map_info *info)
 246{
 247	struct dm_mpath_io *mpio = info->ptr;
 248
 249	info->ptr = NULL;
 250	mempool_free(mpio, m->mpio_pool);
 251}
 252
 253/*-----------------------------------------------
 254 * Path selection
 255 *-----------------------------------------------*/
 256
 257static void __pg_init_all_paths(struct multipath *m)
 258{
 259	struct pgpath *pgpath;
 260	unsigned long pg_init_delay = 0;
 261
 262	m->pg_init_count++;
 263	m->pg_init_required = 0;
 264	if (m->pg_init_delay_retry)
 265		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 266						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 267	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 268		/* Skip failed paths */
 269		if (!pgpath->is_active)
 270			continue;
 271		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 272				       pg_init_delay))
 273			m->pg_init_in_progress++;
 274	}
 275}
 276
 277static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 278{
 279	m->current_pg = pgpath->pg;
 280
 281	/* Must we initialise the PG first, and queue I/O till it's ready? */
 282	if (m->hw_handler_name) {
 283		m->pg_init_required = 1;
 284		m->queue_io = 1;
 285	} else {
 286		m->pg_init_required = 0;
 287		m->queue_io = 0;
 288	}
 289
 290	m->pg_init_count = 0;
 291}
 292
 293static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
 294			       size_t nr_bytes)
 295{
 296	struct dm_path *path;
 297
 298	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
 299	if (!path)
 300		return -ENXIO;
 301
 302	m->current_pgpath = path_to_pgpath(path);
 303
 304	if (m->current_pg != pg)
 305		__switch_pg(m, m->current_pgpath);
 306
 307	return 0;
 308}
 309
 310static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 311{
 312	struct priority_group *pg;
 313	unsigned bypassed = 1;
 314
 315	if (!m->nr_valid_paths)
 316		goto failed;
 317
 318	/* Were we instructed to switch PG? */
 319	if (m->next_pg) {
 320		pg = m->next_pg;
 321		m->next_pg = NULL;
 322		if (!__choose_path_in_pg(m, pg, nr_bytes))
 323			return;
 324	}
 325
 326	/* Don't change PG until it has no remaining paths */
 327	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
 328		return;
 329
 330	/*
 331	 * Loop through priority groups until we find a valid path.
 332	 * First time we skip PGs marked 'bypassed'.
 333	 * Second time we only try the ones we skipped, but set
 334	 * pg_init_delay_retry so we do not hammer controllers.
 335	 */
 336	do {
 337		list_for_each_entry(pg, &m->priority_groups, list) {
 338			if (pg->bypassed == bypassed)
 339				continue;
 340			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
 341				if (!bypassed)
 342					m->pg_init_delay_retry = 1;
 343				return;
 344			}
 345		}
 346	} while (bypassed--);
 347
 348failed:
 349	m->current_pgpath = NULL;
 350	m->current_pg = NULL;
 351}
 352
 353/*
 354 * Check whether bios must be queued in the device-mapper core rather
 355 * than here in the target.
 356 *
 357 * m->lock must be held on entry.
 358 *
 359 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 360 * same value then we are not between multipath_presuspend()
 361 * and multipath_resume() calls and we have no need to check
 362 * for the DMF_NOFLUSH_SUSPENDING flag.
 363 */
 364static int __must_push_back(struct multipath *m)
 365{
 366	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
 367		dm_noflush_suspending(m->ti));
 368}
 369
 370static int map_io(struct multipath *m, struct request *clone,
 371		  union map_info *map_context, unsigned was_queued)
 372{
 373	int r = DM_MAPIO_REMAPPED;
 374	size_t nr_bytes = blk_rq_bytes(clone);
 375	unsigned long flags;
 376	struct pgpath *pgpath;
 377	struct block_device *bdev;
 378	struct dm_mpath_io *mpio = map_context->ptr;
 379
 380	spin_lock_irqsave(&m->lock, flags);
 381
 382	/* Do we need to select a new pgpath? */
 383	if (!m->current_pgpath ||
 384	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
 385		__choose_pgpath(m, nr_bytes);
 386
 387	pgpath = m->current_pgpath;
 388
 389	if (was_queued)
 390		m->queue_size--;
 391
 392	if ((pgpath && m->queue_io) ||
 393	    (!pgpath && m->queue_if_no_path)) {
 394		/* Queue for the daemon to resubmit */
 395		list_add_tail(&clone->queuelist, &m->queued_ios);
 396		m->queue_size++;
 397		if ((m->pg_init_required && !m->pg_init_in_progress) ||
 398		    !m->queue_io)
 399			queue_work(kmultipathd, &m->process_queued_ios);
 400		pgpath = NULL;
 401		r = DM_MAPIO_SUBMITTED;
 402	} else if (pgpath) {
 403		bdev = pgpath->path.dev->bdev;
 404		clone->q = bdev_get_queue(bdev);
 405		clone->rq_disk = bdev->bd_disk;
 406	} else if (__must_push_back(m))
 407		r = DM_MAPIO_REQUEUE;
 408	else
 409		r = -EIO;	/* Failed */
 410
 411	mpio->pgpath = pgpath;
 412	mpio->nr_bytes = nr_bytes;
 413
 414	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
 415		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
 416					      nr_bytes);
 417
 418	spin_unlock_irqrestore(&m->lock, flags);
 419
 420	return r;
 421}
 422
 423/*
 424 * If we run out of usable paths, should we queue I/O or error it?
 425 */
 426static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 427			    unsigned save_old_value)
 428{
 429	unsigned long flags;
 430
 431	spin_lock_irqsave(&m->lock, flags);
 432
 433	if (save_old_value)
 434		m->saved_queue_if_no_path = m->queue_if_no_path;
 435	else
 436		m->saved_queue_if_no_path = queue_if_no_path;
 437	m->queue_if_no_path = queue_if_no_path;
 438	if (!m->queue_if_no_path && m->queue_size)
 439		queue_work(kmultipathd, &m->process_queued_ios);
 440
 441	spin_unlock_irqrestore(&m->lock, flags);
 442
 443	return 0;
 444}
 445
 446/*-----------------------------------------------------------------
 447 * The multipath daemon is responsible for resubmitting queued ios.
 448 *---------------------------------------------------------------*/
 449
 450static void dispatch_queued_ios(struct multipath *m)
 451{
 452	int r;
 453	unsigned long flags;
 454	union map_info *info;
 455	struct request *clone, *n;
 456	LIST_HEAD(cl);
 457
 458	spin_lock_irqsave(&m->lock, flags);
 459	list_splice_init(&m->queued_ios, &cl);
 460	spin_unlock_irqrestore(&m->lock, flags);
 461
 462	list_for_each_entry_safe(clone, n, &cl, queuelist) {
 463		list_del_init(&clone->queuelist);
 464
 465		info = dm_get_rq_mapinfo(clone);
 466
 467		r = map_io(m, clone, info, 1);
 468		if (r < 0) {
 469			clear_mapinfo(m, info);
 470			dm_kill_unmapped_request(clone, r);
 471		} else if (r == DM_MAPIO_REMAPPED)
 472			dm_dispatch_request(clone);
 473		else if (r == DM_MAPIO_REQUEUE) {
 474			clear_mapinfo(m, info);
 475			dm_requeue_unmapped_request(clone);
 476		}
 477	}
 478}
 479
 480static void process_queued_ios(struct work_struct *work)
 481{
 482	struct multipath *m =
 483		container_of(work, struct multipath, process_queued_ios);
 484	struct pgpath *pgpath = NULL;
 485	unsigned must_queue = 1;
 486	unsigned long flags;
 487
 488	spin_lock_irqsave(&m->lock, flags);
 489
 490	if (!m->current_pgpath)
 491		__choose_pgpath(m, 0);
 492
 493	pgpath = m->current_pgpath;
 494
 495	if ((pgpath && !m->queue_io) ||
 496	    (!pgpath && !m->queue_if_no_path))
 497		must_queue = 0;
 498
 499	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
 500		__pg_init_all_paths(m);
 501
 502	spin_unlock_irqrestore(&m->lock, flags);
 503	if (!must_queue)
 504		dispatch_queued_ios(m);
 505}
 506
 507/*
 508 * An event is triggered whenever a path is taken out of use.
 509 * Includes path failure and PG bypass.
 510 */
 511static void trigger_event(struct work_struct *work)
 512{
 513	struct multipath *m =
 514		container_of(work, struct multipath, trigger_event);
 515
 516	dm_table_event(m->ti->table);
 517}
 518
 519/*-----------------------------------------------------------------
 520 * Constructor/argument parsing:
 521 * <#multipath feature args> [<arg>]*
 522 * <#hw_handler args> [hw_handler [<arg>]*]
 523 * <#priority groups>
 524 * <initial priority group>
 525 *     [<selector> <#selector args> [<arg>]*
 526 *      <#paths> <#per-path selector args>
 527 *         [<path> [<arg>]* ]+ ]+
 528 *---------------------------------------------------------------*/
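/*
 * Illustrative example only (not part of the original source; device
 * numbers are made up): a two-path map using the round-robin selector,
 * with no feature args and no hardware handler, could be loaded with a
 * table line such as
 *
 *     0 10240000 multipath 0 0 1 1 round-robin 0 2 1 8:16 100 8:32 100
 *
 * i.e. 0 feature args, 0 hw_handler args, 1 priority group, initial
 * group 1, selector "round-robin" with 0 selector args, then 2 paths
 * with 1 per-path selector arg each (here the round-robin repeat count).
 */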
 529static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 530			       struct dm_target *ti)
 531{
 532	int r;
 533	struct path_selector_type *pst;
 534	unsigned ps_argc;
 535
 536	static struct dm_arg _args[] = {
 537		{0, 1024, "invalid number of path selector args"},
 538	};
 539
 540	pst = dm_get_path_selector(dm_shift_arg(as));
 541	if (!pst) {
 542		ti->error = "unknown path selector type";
 543		return -EINVAL;
 544	}
 545
 546	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 547	if (r) {
 548		dm_put_path_selector(pst);
 549		return -EINVAL;
 550	}
 551
 552	r = pst->create(&pg->ps, ps_argc, as->argv);
 553	if (r) {
 554		dm_put_path_selector(pst);
 555		ti->error = "path selector constructor failed";
 556		return r;
 557	}
 558
 559	pg->ps.type = pst;
 560	dm_consume_args(as, ps_argc);
 561
 562	return 0;
 563}
 564
 565static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 566			       struct dm_target *ti)
 567{
 568	int r;
 569	struct pgpath *p;
 570	struct multipath *m = ti->private;
 571
 572	/* we need at least a path arg */
 573	if (as->argc < 1) {
 574		ti->error = "no device given";
 575		return ERR_PTR(-EINVAL);
 576	}
 577
 578	p = alloc_pgpath();
 579	if (!p)
 580		return ERR_PTR(-ENOMEM);
 581
 582	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 583			  &p->path.dev);
 584	if (r) {
 585		ti->error = "error getting device";
 586		goto bad;
 587	}
 588
 589	if (m->hw_handler_name) {
 590		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
 591
 592		r = scsi_dh_attach(q, m->hw_handler_name);
 593		if (r == -EBUSY) {
 594			/*
 595			 * Already attached to different hw_handler,
 596			 * try to reattach with correct one.
 597			 */
 598			scsi_dh_detach(q);
 599			r = scsi_dh_attach(q, m->hw_handler_name);
 600		}
 601
 602		if (r < 0) {
 603			ti->error = "error attaching hardware handler";
 604			dm_put_device(ti, p->path.dev);
 605			goto bad;
 606		}
 607
 608		if (m->hw_handler_params) {
 609			r = scsi_dh_set_params(q, m->hw_handler_params);
 610			if (r < 0) {
 611				ti->error = "unable to set hardware "
 612							"handler parameters";
 613				scsi_dh_detach(q);
 614				dm_put_device(ti, p->path.dev);
 615				goto bad;
 616			}
 617		}
 618	}
 619
 620	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 621	if (r) {
 622		dm_put_device(ti, p->path.dev);
 623		goto bad;
 624	}
 625
 626	return p;
 627
 628 bad:
 629	free_pgpath(p);
 630	return ERR_PTR(r);
 631}
 632
 633static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 634						   struct multipath *m)
 635{
 636	static struct dm_arg _args[] = {
 637		{1, 1024, "invalid number of paths"},
 638		{0, 1024, "invalid number of selector args"}
 639	};
 640
 641	int r;
 642	unsigned i, nr_selector_args, nr_args;
 643	struct priority_group *pg;
 644	struct dm_target *ti = m->ti;
 645
 646	if (as->argc < 2) {
 647		as->argc = 0;
 648		ti->error = "not enough priority group arguments";
 649		return ERR_PTR(-EINVAL);
 650	}
 651
 652	pg = alloc_priority_group();
 653	if (!pg) {
 654		ti->error = "couldn't allocate priority group";
 655		return ERR_PTR(-ENOMEM);
 656	}
 657	pg->m = m;
 658
 659	r = parse_path_selector(as, pg, ti);
 660	if (r)
 661		goto bad;
 662
 663	/*
 664	 * read the paths
 665	 */
 666	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 667	if (r)
 668		goto bad;
 669
 670	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 671	if (r)
 672		goto bad;
 673
 674	nr_args = 1 + nr_selector_args;
 675	for (i = 0; i < pg->nr_pgpaths; i++) {
 676		struct pgpath *pgpath;
 677		struct dm_arg_set path_args;
 678
 679		if (as->argc < nr_args) {
 680			ti->error = "not enough path parameters";
 681			r = -EINVAL;
 682			goto bad;
 683		}
 684
 685		path_args.argc = nr_args;
 686		path_args.argv = as->argv;
 687
 688		pgpath = parse_path(&path_args, &pg->ps, ti);
 689		if (IS_ERR(pgpath)) {
 690			r = PTR_ERR(pgpath);
 691			goto bad;
 692		}
 693
 694		pgpath->pg = pg;
 695		list_add_tail(&pgpath->list, &pg->pgpaths);
 696		dm_consume_args(as, nr_args);
 697	}
 698
 699	return pg;
 700
 701 bad:
 702	free_priority_group(pg, ti);
 703	return ERR_PTR(r);
 704}
 705
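/*
 * Hardware handler argument block, consumed by parse_hw_handler()
 * below.  Illustrative examples (not from the original source): "0"
 * requests no handler, while "1 alua" asks for the scsi_dh_alua device
 * handler with no extra parameters; any additional args are saved in
 * m->hw_handler_params and later passed to scsi_dh_set_params().
 */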
 706static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 707{
 708	unsigned hw_argc;
 709	int ret;
 710	struct dm_target *ti = m->ti;
 711
 712	static struct dm_arg _args[] = {
 713		{0, 1024, "invalid number of hardware handler args"},
 714	};
 715
 716	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
 717		return -EINVAL;
 718
 719	if (!hw_argc)
 720		return 0;
 721
 722	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
 723	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
 724				     "scsi_dh_%s", m->hw_handler_name)) {
 725		ti->error = "unknown hardware handler type";
 726		ret = -EINVAL;
 727		goto fail;
 728	}
 729
 730	if (hw_argc > 1) {
 731		char *p;
 732		int i, j, len = 4;
 733
 734		for (i = 0; i <= hw_argc - 2; i++)
 735			len += strlen(as->argv[i]) + 1;
 736		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
 737		if (!p) {
 738			ti->error = "memory allocation failed";
 739			ret = -ENOMEM;
 740			goto fail;
 741		}
 742		j = sprintf(p, "%d", hw_argc - 1);
 743		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
 744			j = sprintf(p, "%s", as->argv[i]);
 745	}
 746	dm_consume_args(as, hw_argc - 1);
 747
 748	return 0;
 749fail:
 750	kfree(m->hw_handler_name);
 751	m->hw_handler_name = NULL;
 752	return ret;
 753}
 754
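/*
 * Feature argument block, consumed by parse_features() below.
 * Illustrative examples (not from the original source): "0" for no
 * features, "1 queue_if_no_path", or "3 queue_if_no_path
 * pg_init_retries 5" (the leading count covers both keywords and
 * their values).
 */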
 755static int parse_features(struct dm_arg_set *as, struct multipath *m)
 756{
 757	int r;
 758	unsigned argc;
 759	struct dm_target *ti = m->ti;
 760	const char *arg_name;
 761
 762	static struct dm_arg _args[] = {
 763		{0, 5, "invalid number of feature args"},
 764		{1, 50, "pg_init_retries must be between 1 and 50"},
 765		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
 766	};
 767
 768	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 769	if (r)
 770		return -EINVAL;
 771
 772	if (!argc)
 773		return 0;
 774
 775	do {
 776		arg_name = dm_shift_arg(as);
 777		argc--;
 778
 779		if (!strcasecmp(arg_name, "queue_if_no_path")) {
 780			r = queue_if_no_path(m, 1, 0);
 781			continue;
 782		}
 783
 784		if (!strcasecmp(arg_name, "pg_init_retries") &&
 785		    (argc >= 1)) {
 786			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
 787			argc--;
 788			continue;
 789		}
 790
 791		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
 792		    (argc >= 1)) {
 793			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
 794			argc--;
 795			continue;
 796		}
 797
 798		ti->error = "Unrecognised multipath feature request";
 799		r = -EINVAL;
 800	} while (argc && !r);
 801
 802	return r;
 803}
 804
 805static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 806			 char **argv)
 807{
 808	/* target arguments */
 809	static struct dm_arg _args[] = {
 810		{0, 1024, "invalid number of priority groups"},
 811		{0, 1024, "invalid initial priority group number"},
 812	};
 813
 814	int r;
 815	struct multipath *m;
 816	struct dm_arg_set as;
 817	unsigned pg_count = 0;
 818	unsigned next_pg_num;
 819
 820	as.argc = argc;
 821	as.argv = argv;
 822
 823	m = alloc_multipath(ti);
 824	if (!m) {
 825		ti->error = "can't allocate multipath";
 826		return -EINVAL;
 827	}
 828
 829	r = parse_features(&as, m);
 830	if (r)
 831		goto bad;
 832
 833	r = parse_hw_handler(&as, m);
 834	if (r)
 835		goto bad;
 836
 837	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
 838	if (r)
 839		goto bad;
 840
 841	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
 842	if (r)
 843		goto bad;
 844
 845	if ((!m->nr_priority_groups && next_pg_num) ||
 846	    (m->nr_priority_groups && !next_pg_num)) {
 847		ti->error = "invalid initial priority group";
 848		r = -EINVAL;
 849		goto bad;
 850	}
 851
 852	/* parse the priority groups */
 853	while (as.argc) {
 854		struct priority_group *pg;
 855
 856		pg = parse_priority_group(&as, m);
 857		if (IS_ERR(pg)) {
 858			r = PTR_ERR(pg);
 859			goto bad;
 860		}
 861
 862		m->nr_valid_paths += pg->nr_pgpaths;
 863		list_add_tail(&pg->list, &m->priority_groups);
 864		pg_count++;
 865		pg->pg_num = pg_count;
 866		if (!--next_pg_num)
 867			m->next_pg = pg;
 868	}
 869
 870	if (pg_count != m->nr_priority_groups) {
 871		ti->error = "priority group count mismatch";
 872		r = -EINVAL;
 873		goto bad;
 874	}
 875
 876	ti->num_flush_requests = 1;
 877	ti->num_discard_requests = 1;
 878
 879	return 0;
 880
 881 bad:
 882	free_multipath(m);
 883	return r;
 884}
 885
 886static void multipath_wait_for_pg_init_completion(struct multipath *m)
 887{
 888	DECLARE_WAITQUEUE(wait, current);
 889	unsigned long flags;
 890
 891	add_wait_queue(&m->pg_init_wait, &wait);
 892
 893	while (1) {
 894		set_current_state(TASK_UNINTERRUPTIBLE);
 895
 896		spin_lock_irqsave(&m->lock, flags);
 897		if (!m->pg_init_in_progress) {
 898			spin_unlock_irqrestore(&m->lock, flags);
 899			break;
 900		}
 901		spin_unlock_irqrestore(&m->lock, flags);
 902
 903		io_schedule();
 904	}
 905	set_current_state(TASK_RUNNING);
 906
 907	remove_wait_queue(&m->pg_init_wait, &wait);
 908}
 909
 910static void flush_multipath_work(struct multipath *m)
 911{
 912	flush_workqueue(kmpath_handlerd);
 913	multipath_wait_for_pg_init_completion(m);
 914	flush_workqueue(kmultipathd);
 915	flush_work_sync(&m->trigger_event);
 916}
 917
 918static void multipath_dtr(struct dm_target *ti)
 919{
 920	struct multipath *m = ti->private;
 921
 922	flush_multipath_work(m);
 923	free_multipath(m);
 924}
 925
 926/*
 927 * Map cloned requests
 928 */
 929static int multipath_map(struct dm_target *ti, struct request *clone,
 930			 union map_info *map_context)
 931{
 932	int r;
 933	struct multipath *m = (struct multipath *) ti->private;
 934
 935	if (set_mapinfo(m, map_context) < 0)
 936		/* ENOMEM, requeue */
 937		return DM_MAPIO_REQUEUE;
 938
 939	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 940	r = map_io(m, clone, map_context, 0);
 941	if (r < 0 || r == DM_MAPIO_REQUEUE)
 942		clear_mapinfo(m, map_context);
 943
 944	return r;
 945}
 946
 947/*
 948 * Take a path out of use.
 949 */
 950static int fail_path(struct pgpath *pgpath)
 951{
 952	unsigned long flags;
 953	struct multipath *m = pgpath->pg->m;
 954
 955	spin_lock_irqsave(&m->lock, flags);
 956
 957	if (!pgpath->is_active)
 958		goto out;
 959
 960	DMWARN("Failing path %s.", pgpath->path.dev->name);
 961
 962	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
 963	pgpath->is_active = 0;
 964	pgpath->fail_count++;
 965
 966	m->nr_valid_paths--;
 967
 968	if (pgpath == m->current_pgpath)
 969		m->current_pgpath = NULL;
 970
 971	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 972		      pgpath->path.dev->name, m->nr_valid_paths);
 973
 974	schedule_work(&m->trigger_event);
 975
 976out:
 977	spin_unlock_irqrestore(&m->lock, flags);
 978
 979	return 0;
 980}
 981
 982/*
 983 * Reinstate a previously-failed path
 984 */
 985static int reinstate_path(struct pgpath *pgpath)
 986{
 987	int r = 0;
 988	unsigned long flags;
 989	struct multipath *m = pgpath->pg->m;
 990
 991	spin_lock_irqsave(&m->lock, flags);
 992
 993	if (pgpath->is_active)
 994		goto out;
 995
 996	if (!pgpath->pg->ps.type->reinstate_path) {
 997		DMWARN("Reinstate path not supported by path selector %s",
 998		       pgpath->pg->ps.type->name);
 999		r = -EINVAL;
1000		goto out;
1001	}
1002
1003	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1004	if (r)
1005		goto out;
1006
1007	pgpath->is_active = 1;
1008
1009	if (!m->nr_valid_paths++ && m->queue_size) {
1010		m->current_pgpath = NULL;
1011		queue_work(kmultipathd, &m->process_queued_ios);
1012	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1013		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1014			m->pg_init_in_progress++;
1015	}
1016
1017	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1018		      pgpath->path.dev->name, m->nr_valid_paths);
1019
1020	schedule_work(&m->trigger_event);
1021
1022out:
1023	spin_unlock_irqrestore(&m->lock, flags);
1024
1025	return r;
1026}
1027
1028/*
1029 * Fail or reinstate all paths that match the provided struct dm_dev.
1030 */
1031static int action_dev(struct multipath *m, struct dm_dev *dev,
1032		      action_fn action)
1033{
1034	int r = -EINVAL;
1035	struct pgpath *pgpath;
1036	struct priority_group *pg;
1037
1038	list_for_each_entry(pg, &m->priority_groups, list) {
1039		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1040			if (pgpath->path.dev == dev)
1041				r = action(pgpath);
1042		}
1043	}
1044
1045	return r;
1046}
1047
1048/*
1049 * Temporarily try to avoid having to use the specified PG
1050 */
1051static void bypass_pg(struct multipath *m, struct priority_group *pg,
1052		      int bypassed)
1053{
1054	unsigned long flags;
1055
1056	spin_lock_irqsave(&m->lock, flags);
1057
1058	pg->bypassed = bypassed;
1059	m->current_pgpath = NULL;
1060	m->current_pg = NULL;
1061
1062	spin_unlock_irqrestore(&m->lock, flags);
1063
1064	schedule_work(&m->trigger_event);
1065}
1066
1067/*
1068 * Switch to using the specified PG from the next I/O that gets mapped
1069 */
1070static int switch_pg_num(struct multipath *m, const char *pgstr)
1071{
1072	struct priority_group *pg;
1073	unsigned pgnum;
1074	unsigned long flags;
1075	char dummy;
1076
1077	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1078	    (pgnum > m->nr_priority_groups)) {
1079		DMWARN("invalid PG number supplied to switch_pg_num");
1080		return -EINVAL;
1081	}
1082
1083	spin_lock_irqsave(&m->lock, flags);
1084	list_for_each_entry(pg, &m->priority_groups, list) {
1085		pg->bypassed = 0;
1086		if (--pgnum)
1087			continue;
1088
1089		m->current_pgpath = NULL;
1090		m->current_pg = NULL;
1091		m->next_pg = pg;
1092	}
1093	spin_unlock_irqrestore(&m->lock, flags);
1094
1095	schedule_work(&m->trigger_event);
1096	return 0;
1097}
1098
1099/*
1100 * Set/clear bypassed status of a PG.
1101 * PGs are numbered upwards from 1 in the order they were declared.
1102 */
1103static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
1104{
1105	struct priority_group *pg;
1106	unsigned pgnum;
1107	char dummy;
1108
1109	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1110	    (pgnum > m->nr_priority_groups)) {
1111		DMWARN("invalid PG number supplied to bypass_pg");
1112		return -EINVAL;
1113	}
1114
1115	list_for_each_entry(pg, &m->priority_groups, list) {
1116		if (!--pgnum)
1117			break;
1118	}
1119
1120	bypass_pg(m, pg, bypassed);
1121	return 0;
1122}
1123
1124/*
1125 * Should we retry pg_init immediately?
1126 */
1127static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1128{
1129	unsigned long flags;
1130	int limit_reached = 0;
1131
1132	spin_lock_irqsave(&m->lock, flags);
1133
1134	if (m->pg_init_count <= m->pg_init_retries)
1135		m->pg_init_required = 1;
1136	else
1137		limit_reached = 1;
1138
1139	spin_unlock_irqrestore(&m->lock, flags);
1140
1141	return limit_reached;
1142}
1143
1144static void pg_init_done(void *data, int errors)
1145{
1146	struct pgpath *pgpath = data;
1147	struct priority_group *pg = pgpath->pg;
1148	struct multipath *m = pg->m;
1149	unsigned long flags;
1150	unsigned delay_retry = 0;
1151
1152	/* device or driver problems */
1153	switch (errors) {
1154	case SCSI_DH_OK:
1155		break;
1156	case SCSI_DH_NOSYS:
1157		if (!m->hw_handler_name) {
1158			errors = 0;
1159			break;
1160		}
1161		DMERR("Could not failover the device: Handler scsi_dh_%s "
1162		      "Error %d.", m->hw_handler_name, errors);
1163		/*
1164		 * Fail path for now, so we do not ping pong
1165		 */
1166		fail_path(pgpath);
1167		break;
1168	case SCSI_DH_DEV_TEMP_BUSY:
1169		/*
1170		 * Probably doing something like FW upgrade on the
1171		 * controller so try the other pg.
1172		 */
1173		bypass_pg(m, pg, 1);
1174		break;
1175	case SCSI_DH_RETRY:
1176		/* Wait before retrying. */
1177		delay_retry = 1;
1178	case SCSI_DH_IMM_RETRY:
1179	case SCSI_DH_RES_TEMP_UNAVAIL:
1180		if (pg_init_limit_reached(m, pgpath))
1181			fail_path(pgpath);
1182		errors = 0;
1183		break;
1184	default:
1185		/*
1186		 * We probably do not want to fail the path for a device
1187		 * error, but this is what the old dm did. In future
1188		 * patches we can do more advanced handling.
1189		 */
1190		fail_path(pgpath);
1191	}
1192
1193	spin_lock_irqsave(&m->lock, flags);
1194	if (errors) {
1195		if (pgpath == m->current_pgpath) {
1196			DMERR("Could not failover device. Error %d.", errors);
1197			m->current_pgpath = NULL;
1198			m->current_pg = NULL;
1199		}
1200	} else if (!m->pg_init_required)
1201		pg->bypassed = 0;
1202
1203	if (--m->pg_init_in_progress)
 1204		/* Activations of other paths are still ongoing */
1205		goto out;
1206
1207	if (!m->pg_init_required)
1208		m->queue_io = 0;
1209
1210	m->pg_init_delay_retry = delay_retry;
1211	queue_work(kmultipathd, &m->process_queued_ios);
1212
1213	/*
1214	 * Wake up any thread waiting to suspend.
1215	 */
1216	wake_up(&m->pg_init_wait);
1217
1218out:
1219	spin_unlock_irqrestore(&m->lock, flags);
1220}
1221
1222static void activate_path(struct work_struct *work)
1223{
1224	struct pgpath *pgpath =
1225		container_of(work, struct pgpath, activate_path.work);
1226
1227	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1228				pg_init_done, pgpath);
1229}
1230
1231/*
1232 * end_io handling
1233 */
1234static int do_end_io(struct multipath *m, struct request *clone,
1235		     int error, struct dm_mpath_io *mpio)
1236{
1237	/*
1238	 * We don't queue any clone request inside the multipath target
1239	 * during end I/O handling, since those clone requests don't have
1240	 * bio clones.  If we queue them inside the multipath target,
1241	 * we need to make bio clones, that requires memory allocation.
1242	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1243	 *  don't have bio clones.)
1244	 * Instead of queueing the clone request here, we queue the original
1245	 * request into dm core, which will remake a clone request and
1246	 * clone bios for it and resubmit it later.
1247	 */
1248	int r = DM_ENDIO_REQUEUE;
1249	unsigned long flags;
1250
1251	if (!error && !clone->errors)
1252		return 0;	/* I/O complete */
1253
1254	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
1255		return error;
1256
1257	if (mpio->pgpath)
1258		fail_path(mpio->pgpath);
1259
1260	spin_lock_irqsave(&m->lock, flags);
1261	if (!m->nr_valid_paths) {
1262		if (!m->queue_if_no_path) {
1263			if (!__must_push_back(m))
1264				r = -EIO;
1265		} else {
1266			if (error == -EBADE)
1267				r = error;
1268		}
1269	}
1270	spin_unlock_irqrestore(&m->lock, flags);
1271
1272	return r;
1273}
1274
1275static int multipath_end_io(struct dm_target *ti, struct request *clone,
1276			    int error, union map_info *map_context)
1277{
1278	struct multipath *m = ti->private;
1279	struct dm_mpath_io *mpio = map_context->ptr;
1280	struct pgpath *pgpath = mpio->pgpath;
1281	struct path_selector *ps;
1282	int r;
1283
1284	BUG_ON(!mpio);
1285
1286	r  = do_end_io(m, clone, error, mpio);
1287	if (pgpath) {
1288		ps = &pgpath->pg->ps;
1289		if (ps->type->end_io)
1290			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1291	}
1292	clear_mapinfo(m, map_context);
1293
1294	return r;
1295}
1296
1297/*
1298 * Suspend can't complete until all the I/O is processed so if
1299 * the last path fails we must error any remaining I/O.
1300 * Note that if the freeze_bdev fails while suspending, the
1301 * queue_if_no_path state is lost - userspace should reset it.
1302 */
1303static void multipath_presuspend(struct dm_target *ti)
1304{
1305	struct multipath *m = (struct multipath *) ti->private;
1306
1307	queue_if_no_path(m, 0, 1);
1308}
1309
1310static void multipath_postsuspend(struct dm_target *ti)
1311{
1312	struct multipath *m = ti->private;
1313
1314	mutex_lock(&m->work_mutex);
1315	flush_multipath_work(m);
1316	mutex_unlock(&m->work_mutex);
1317}
1318
1319/*
1320 * Restore the queue_if_no_path setting.
1321 */
1322static void multipath_resume(struct dm_target *ti)
1323{
1324	struct multipath *m = (struct multipath *) ti->private;
1325	unsigned long flags;
1326
1327	spin_lock_irqsave(&m->lock, flags);
1328	m->queue_if_no_path = m->saved_queue_if_no_path;
1329	spin_unlock_irqrestore(&m->lock, flags);
1330}
1331
1332/*
1333 * Info output has the following format:
1334 * num_multipath_feature_args [multipath_feature_args]*
1335 * num_handler_status_args [handler_status_args]*
1336 * num_groups init_group_number
1337 *            [A|D|E num_ps_status_args [ps_status_args]*
1338 *             num_paths num_selector_args
1339 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1340 *
1341 * Table output has the following format (identical to the constructor string):
1342 * num_feature_args [features_args]*
1343 * num_handler_args hw_handler [hw_handler_args]*
1344 * num_groups init_group_number
1345 *     [priority selector-name num_ps_args [ps_args]*
1346 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1347 */
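/*
 * Illustrative example only (not from the original source): for the
 * two-path round-robin map sketched before parse_path_selector() above,
 * STATUSTYPE_TABLE output simply repeats the constructor arguments
 * ("0 0 1 1 round-robin 0 2 1 8:16 100 8:32 100"), while
 * STATUSTYPE_INFO reports "2 <queue_size> <pg_init_count>" as the
 * feature block, the group state as A/D/E, and each path as
 * "<dev> A|F <fail_count>" plus any selector-specific status.
 */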
1348static int multipath_status(struct dm_target *ti, status_type_t type,
1349			    char *result, unsigned int maxlen)
1350{
1351	int sz = 0;
1352	unsigned long flags;
1353	struct multipath *m = (struct multipath *) ti->private;
1354	struct priority_group *pg;
1355	struct pgpath *p;
1356	unsigned pg_num;
1357	char state;
1358
1359	spin_lock_irqsave(&m->lock, flags);
1360
1361	/* Features */
1362	if (type == STATUSTYPE_INFO)
1363		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
1364	else {
1365		DMEMIT("%u ", m->queue_if_no_path +
1366			      (m->pg_init_retries > 0) * 2 +
1367			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
1368		if (m->queue_if_no_path)
1369			DMEMIT("queue_if_no_path ");
1370		if (m->pg_init_retries)
1371			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1372		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1373			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1374	}
1375
1376	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1377		DMEMIT("0 ");
1378	else
1379		DMEMIT("1 %s ", m->hw_handler_name);
1380
1381	DMEMIT("%u ", m->nr_priority_groups);
1382
1383	if (m->next_pg)
1384		pg_num = m->next_pg->pg_num;
1385	else if (m->current_pg)
1386		pg_num = m->current_pg->pg_num;
1387	else
1388		pg_num = (m->nr_priority_groups ? 1 : 0);
1389
1390	DMEMIT("%u ", pg_num);
1391
1392	switch (type) {
1393	case STATUSTYPE_INFO:
1394		list_for_each_entry(pg, &m->priority_groups, list) {
1395			if (pg->bypassed)
1396				state = 'D';	/* Disabled */
1397			else if (pg == m->current_pg)
1398				state = 'A';	/* Currently Active */
1399			else
1400				state = 'E';	/* Enabled */
1401
1402			DMEMIT("%c ", state);
1403
1404			if (pg->ps.type->status)
1405				sz += pg->ps.type->status(&pg->ps, NULL, type,
1406							  result + sz,
1407							  maxlen - sz);
1408			else
1409				DMEMIT("0 ");
1410
1411			DMEMIT("%u %u ", pg->nr_pgpaths,
1412			       pg->ps.type->info_args);
1413
1414			list_for_each_entry(p, &pg->pgpaths, list) {
1415				DMEMIT("%s %s %u ", p->path.dev->name,
1416				       p->is_active ? "A" : "F",
1417				       p->fail_count);
1418				if (pg->ps.type->status)
1419					sz += pg->ps.type->status(&pg->ps,
1420					      &p->path, type, result + sz,
1421					      maxlen - sz);
1422			}
1423		}
1424		break;
1425
1426	case STATUSTYPE_TABLE:
1427		list_for_each_entry(pg, &m->priority_groups, list) {
1428			DMEMIT("%s ", pg->ps.type->name);
1429
1430			if (pg->ps.type->status)
1431				sz += pg->ps.type->status(&pg->ps, NULL, type,
1432							  result + sz,
1433							  maxlen - sz);
1434			else
1435				DMEMIT("0 ");
1436
1437			DMEMIT("%u %u ", pg->nr_pgpaths,
1438			       pg->ps.type->table_args);
1439
1440			list_for_each_entry(p, &pg->pgpaths, list) {
1441				DMEMIT("%s ", p->path.dev->name);
1442				if (pg->ps.type->status)
1443					sz += pg->ps.type->status(&pg->ps,
1444					      &p->path, type, result + sz,
1445					      maxlen - sz);
1446			}
1447		}
1448		break;
1449	}
1450
1451	spin_unlock_irqrestore(&m->lock, flags);
1452
1453	return 0;
1454}
1455
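/*
 * Runtime control messages, delivered through the device-mapper
 * message interface.  Illustrative usage (map name and path are made
 * up):
 *
 *     dmsetup message mpatha 0 queue_if_no_path
 *     dmsetup message mpatha 0 fail_path 8:32
 *     dmsetup message mpatha 0 switch_group 2
 */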
1456static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1457{
1458	int r = -EINVAL;
1459	struct dm_dev *dev;
1460	struct multipath *m = (struct multipath *) ti->private;
1461	action_fn action;
1462
1463	mutex_lock(&m->work_mutex);
1464
1465	if (dm_suspended(ti)) {
1466		r = -EBUSY;
1467		goto out;
1468	}
1469
1470	if (argc == 1) {
1471		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1472			r = queue_if_no_path(m, 1, 0);
1473			goto out;
1474		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1475			r = queue_if_no_path(m, 0, 0);
1476			goto out;
1477		}
1478	}
1479
1480	if (argc != 2) {
1481		DMWARN("Unrecognised multipath message received.");
1482		goto out;
1483	}
1484
1485	if (!strcasecmp(argv[0], "disable_group")) {
1486		r = bypass_pg_num(m, argv[1], 1);
1487		goto out;
1488	} else if (!strcasecmp(argv[0], "enable_group")) {
1489		r = bypass_pg_num(m, argv[1], 0);
1490		goto out;
1491	} else if (!strcasecmp(argv[0], "switch_group")) {
1492		r = switch_pg_num(m, argv[1]);
1493		goto out;
1494	} else if (!strcasecmp(argv[0], "reinstate_path"))
1495		action = reinstate_path;
1496	else if (!strcasecmp(argv[0], "fail_path"))
1497		action = fail_path;
1498	else {
1499		DMWARN("Unrecognised multipath message received.");
1500		goto out;
1501	}
1502
1503	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1504	if (r) {
1505		DMWARN("message: error getting device %s",
1506		       argv[1]);
1507		goto out;
1508	}
1509
1510	r = action_dev(m, dev, action);
1511
1512	dm_put_device(ti, dev);
1513
1514out:
1515	mutex_unlock(&m->work_mutex);
1516	return r;
1517}
1518
1519static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1520			   unsigned long arg)
1521{
1522	struct multipath *m = ti->private;
1523	struct pgpath *pgpath;
1524	struct block_device *bdev;
1525	fmode_t mode;
1526	unsigned long flags;
1527	int r;
1528
1529again:
1530	bdev = NULL;
1531	mode = 0;
1532	r = 0;
1533
1534	spin_lock_irqsave(&m->lock, flags);
1535
1536	if (!m->current_pgpath)
1537		__choose_pgpath(m, 0);
1538
1539	pgpath = m->current_pgpath;
1540
1541	if (pgpath) {
1542		bdev = pgpath->path.dev->bdev;
1543		mode = pgpath->path.dev->mode;
1544	}
1545
1546	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1547		r = -EAGAIN;
1548	else if (!bdev)
1549		r = -EIO;
1550
1551	spin_unlock_irqrestore(&m->lock, flags);
1552
1553	/*
1554	 * Only pass ioctls through if the device sizes match exactly.
1555	 */
1556	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1557		r = scsi_verify_blk_ioctl(NULL, cmd);
1558
1559	if (r == -EAGAIN && !fatal_signal_pending(current)) {
1560		queue_work(kmultipathd, &m->process_queued_ios);
1561		msleep(10);
1562		goto again;
1563	}
1564
1565	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1566}
1567
1568static int multipath_iterate_devices(struct dm_target *ti,
1569				     iterate_devices_callout_fn fn, void *data)
1570{
1571	struct multipath *m = ti->private;
1572	struct priority_group *pg;
1573	struct pgpath *p;
1574	int ret = 0;
1575
1576	list_for_each_entry(pg, &m->priority_groups, list) {
1577		list_for_each_entry(p, &pg->pgpaths, list) {
1578			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1579			if (ret)
1580				goto out;
1581		}
1582	}
1583
1584out:
1585	return ret;
1586}
1587
1588static int __pgpath_busy(struct pgpath *pgpath)
1589{
1590	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1591
1592	return dm_underlying_device_busy(q);
1593}
1594
1595/*
1596 * We return "busy", only when we can map I/Os but underlying devices
1597 * are busy (so even if we map I/Os now, the I/Os will wait on
1598 * the underlying queue).
1599 * In other words, if we want to kill I/Os or queue them inside us
1600 * due to map unavailability, we don't return "busy".  Otherwise,
1601 * dm core won't give us the I/Os and we can't do what we want.
1602 */
1603static int multipath_busy(struct dm_target *ti)
1604{
1605	int busy = 0, has_active = 0;
1606	struct multipath *m = ti->private;
1607	struct priority_group *pg;
1608	struct pgpath *pgpath;
1609	unsigned long flags;
1610
1611	spin_lock_irqsave(&m->lock, flags);
1612
1613	/* Guess which priority_group will be used at next mapping time */
1614	if (unlikely(!m->current_pgpath && m->next_pg))
1615		pg = m->next_pg;
1616	else if (likely(m->current_pg))
1617		pg = m->current_pg;
1618	else
1619		/*
1620		 * We don't know which pg will be used at next mapping time.
1621		 * We don't call __choose_pgpath() here to avoid to trigger
1622		 * pg_init just by busy checking.
1623		 * So we don't know whether underlying devices we will be using
1624		 * at next mapping time are busy or not. Just try mapping.
1625		 */
1626		goto out;
1627
1628	/*
1629	 * If there is one non-busy active path at least, the path selector
1630	 * will be able to select it. So we consider such a pg as not busy.
1631	 */
1632	busy = 1;
1633	list_for_each_entry(pgpath, &pg->pgpaths, list)
1634		if (pgpath->is_active) {
1635			has_active = 1;
1636
1637			if (!__pgpath_busy(pgpath)) {
1638				busy = 0;
1639				break;
1640			}
1641		}
1642
1643	if (!has_active)
1644		/*
1645		 * No active path in this pg, so this pg won't be used and
1646		 * the current_pg will be changed at next mapping time.
1647		 * We need to try mapping to determine it.
1648		 */
1649		busy = 0;
1650
1651out:
1652	spin_unlock_irqrestore(&m->lock, flags);
1653
1654	return busy;
1655}
1656
1657/*-----------------------------------------------------------------
1658 * Module setup
1659 *---------------------------------------------------------------*/
1660static struct target_type multipath_target = {
1661	.name = "multipath",
1662	.version = {1, 4, 0},
1663	.module = THIS_MODULE,
1664	.ctr = multipath_ctr,
1665	.dtr = multipath_dtr,
1666	.map_rq = multipath_map,
1667	.rq_end_io = multipath_end_io,
1668	.presuspend = multipath_presuspend,
1669	.postsuspend = multipath_postsuspend,
1670	.resume = multipath_resume,
1671	.status = multipath_status,
1672	.message = multipath_message,
1673	.ioctl  = multipath_ioctl,
1674	.iterate_devices = multipath_iterate_devices,
1675	.busy = multipath_busy,
1676};
1677
1678static int __init dm_multipath_init(void)
1679{
1680	int r;
1681
1682	/* allocate a slab for the dm_ios */
1683	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
1684	if (!_mpio_cache)
1685		return -ENOMEM;
1686
1687	r = dm_register_target(&multipath_target);
1688	if (r < 0) {
1689		DMERR("register failed %d", r);
1690		kmem_cache_destroy(_mpio_cache);
1691		return -EINVAL;
1692	}
1693
1694	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1695	if (!kmultipathd) {
1696		DMERR("failed to create workqueue kmpathd");
1697		dm_unregister_target(&multipath_target);
1698		kmem_cache_destroy(_mpio_cache);
1699		return -ENOMEM;
1700	}
1701
1702	/*
1703	 * A separate workqueue is used to handle the device handlers
1704	 * to avoid overloading existing workqueue. Overloading the
1705	 * old workqueue would also create a bottleneck in the
1706	 * path of the storage hardware device activation.
1707	 */
1708	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1709						  WQ_MEM_RECLAIM);
1710	if (!kmpath_handlerd) {
1711		DMERR("failed to create workqueue kmpath_handlerd");
1712		destroy_workqueue(kmultipathd);
1713		dm_unregister_target(&multipath_target);
1714		kmem_cache_destroy(_mpio_cache);
1715		return -ENOMEM;
1716	}
1717
1718	DMINFO("version %u.%u.%u loaded",
1719	       multipath_target.version[0], multipath_target.version[1],
1720	       multipath_target.version[2]);
1721
1722	return r;
1723}
1724
1725static void __exit dm_multipath_exit(void)
1726{
1727	destroy_workqueue(kmpath_handlerd);
1728	destroy_workqueue(kmultipathd);
1729
1730	dm_unregister_target(&multipath_target);
1731	kmem_cache_destroy(_mpio_cache);
1732}
1733
1734module_init(dm_multipath_init);
1735module_exit(dm_multipath_exit);
1736
1737MODULE_DESCRIPTION(DM_NAME " multipath target");
1738MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1739MODULE_LICENSE("GPL");
drivers/md/dm-mpath.c (Linux v3.15)
   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include <linux/device-mapper.h>
   9
  10#include "dm.h"
  11#include "dm-path-selector.h"
  12#include "dm-uevent.h"
  13
  14#include <linux/ctype.h>
  15#include <linux/init.h>
  16#include <linux/mempool.h>
  17#include <linux/module.h>
  18#include <linux/pagemap.h>
  19#include <linux/slab.h>
  20#include <linux/time.h>
  21#include <linux/workqueue.h>
  22#include <linux/delay.h>
  23#include <scsi/scsi_dh.h>
  24#include <linux/atomic.h>
  25
  26#define DM_MSG_PREFIX "multipath"
  27#define DM_PG_INIT_DELAY_MSECS 2000
  28#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
  29
  30/* Path properties */
  31struct pgpath {
  32	struct list_head list;
  33
  34	struct priority_group *pg;	/* Owning PG */
  35	unsigned is_active;		/* Path status */
  36	unsigned fail_count;		/* Cumulative failure count */
  37
  38	struct dm_path path;
  39	struct delayed_work activate_path;
  40};
  41
  42#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  43
  44/*
  45 * Paths are grouped into Priority Groups and numbered from 1 upwards.
  46 * Each has a path selector which controls which path gets used.
  47 */
  48struct priority_group {
  49	struct list_head list;
  50
  51	struct multipath *m;		/* Owning multipath instance */
  52	struct path_selector ps;
  53
  54	unsigned pg_num;		/* Reference number */
  55	unsigned bypassed;		/* Temporarily bypass this PG? */
  56
  57	unsigned nr_pgpaths;		/* Number of paths in PG */
  58	struct list_head pgpaths;
  59};
  60
  61/* Multipath context */
  62struct multipath {
  63	struct list_head list;
  64	struct dm_target *ti;
  65
  66	const char *hw_handler_name;
  67	char *hw_handler_params;
  68
  69	spinlock_t lock;
  70
  71	unsigned nr_priority_groups;
  72	struct list_head priority_groups;
  73
  74	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
  75
  76	unsigned pg_init_required;	/* pg_init needs calling? */
  77	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
  78	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */
  79
  80	unsigned nr_valid_paths;	/* Total number of usable paths */
  81	struct pgpath *current_pgpath;
  82	struct priority_group *current_pg;
  83	struct priority_group *next_pg;	/* Switch to this PG if set */
  84	unsigned repeat_count;		/* I/Os left before calling PS again */
  85
  86	unsigned queue_io:1;		/* Must we queue all I/O? */
  87	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
  88	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
  89	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
  90	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */
  91
  92	unsigned pg_init_retries;	/* Number of times to retry pg_init */
  93	unsigned pg_init_count;		/* Number of times pg_init called */
  94	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
  95
  96	struct work_struct trigger_event;
  97
  98	/*
  99	 * We must use a mempool of dm_mpath_io structs so that we
 100	 * can resubmit bios on error.
 101	 */
 102	mempool_t *mpio_pool;
 103
 104	struct mutex work_mutex;
 105};
 106
 107/*
 108 * Context information attached to each bio we process.
 109 */
 110struct dm_mpath_io {
 111	struct pgpath *pgpath;
 112	size_t nr_bytes;
 113};
 114
 115typedef int (*action_fn) (struct pgpath *pgpath);
 116
 117static struct kmem_cache *_mpio_cache;
 118
 119static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 120static void trigger_event(struct work_struct *work);
 121static void activate_path(struct work_struct *work);
 122static int __pgpath_busy(struct pgpath *pgpath);
 123
 124
 125/*-----------------------------------------------
 126 * Allocation routines
 127 *-----------------------------------------------*/
 128
 129static struct pgpath *alloc_pgpath(void)
 130{
 131	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 132
 133	if (pgpath) {
 134		pgpath->is_active = 1;
 135		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
 136	}
 137
 138	return pgpath;
 139}
 140
 141static void free_pgpath(struct pgpath *pgpath)
 142{
 143	kfree(pgpath);
 144}
 145
 146static struct priority_group *alloc_priority_group(void)
 147{
 148	struct priority_group *pg;
 149
 150	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 151
 152	if (pg)
 153		INIT_LIST_HEAD(&pg->pgpaths);
 154
 155	return pg;
 156}
 157
 158static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 159{
 160	struct pgpath *pgpath, *tmp;
 161	struct multipath *m = ti->private;
 162
 163	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 164		list_del(&pgpath->list);
 165		if (m->hw_handler_name)
 166			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
 167		dm_put_device(ti, pgpath->path.dev);
 168		free_pgpath(pgpath);
 169	}
 170}
 171
 172static void free_priority_group(struct priority_group *pg,
 173				struct dm_target *ti)
 174{
 175	struct path_selector *ps = &pg->ps;
 176
 177	if (ps->type) {
 178		ps->type->destroy(ps);
 179		dm_put_path_selector(ps->type);
 180	}
 181
 182	free_pgpaths(&pg->pgpaths, ti);
 183	kfree(pg);
 184}
 185
 186static struct multipath *alloc_multipath(struct dm_target *ti)
 187{
 188	struct multipath *m;
 189	unsigned min_ios = dm_get_reserved_rq_based_ios();
 190
 191	m = kzalloc(sizeof(*m), GFP_KERNEL);
 192	if (m) {
 193		INIT_LIST_HEAD(&m->priority_groups);
 194		spin_lock_init(&m->lock);
 195		m->queue_io = 1;
 196		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 197		INIT_WORK(&m->trigger_event, trigger_event);
 198		init_waitqueue_head(&m->pg_init_wait);
 199		mutex_init(&m->work_mutex);
 200		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
 201		if (!m->mpio_pool) {
 202			kfree(m);
 203			return NULL;
 204		}
 205		m->ti = ti;
 206		ti->private = m;
 207	}
 208
 209	return m;
 210}
 211
 212static void free_multipath(struct multipath *m)
 213{
 214	struct priority_group *pg, *tmp;
 215
 216	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 217		list_del(&pg->list);
 218		free_priority_group(pg, m->ti);
 219	}
 220
 221	kfree(m->hw_handler_name);
 222	kfree(m->hw_handler_params);
 223	mempool_destroy(m->mpio_pool);
 224	kfree(m);
 225}
 226
 227static int set_mapinfo(struct multipath *m, union map_info *info)
 228{
 229	struct dm_mpath_io *mpio;
 230
 231	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
 232	if (!mpio)
 233		return -ENOMEM;
 234
 235	memset(mpio, 0, sizeof(*mpio));
 236	info->ptr = mpio;
 237
 238	return 0;
 239}
 240
 241static void clear_mapinfo(struct multipath *m, union map_info *info)
 242{
 243	struct dm_mpath_io *mpio = info->ptr;
 244
 245	info->ptr = NULL;
 246	mempool_free(mpio, m->mpio_pool);
 247}
 248
 249/*-----------------------------------------------
 250 * Path selection
 251 *-----------------------------------------------*/
 252
 253static int __pg_init_all_paths(struct multipath *m)
 254{
 255	struct pgpath *pgpath;
 256	unsigned long pg_init_delay = 0;
 257
 258	if (m->pg_init_in_progress || m->pg_init_disabled)
 259		return 0;
 260
 261	m->pg_init_count++;
 262	m->pg_init_required = 0;
 263
 264	/* Check here to reset pg_init_required */
 265	if (!m->current_pg)
 266		return 0;
 267
 268	if (m->pg_init_delay_retry)
 269		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 270						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 271	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 272		/* Skip failed paths */
 273		if (!pgpath->is_active)
 274			continue;
 275		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 276				       pg_init_delay))
 277			m->pg_init_in_progress++;
 278	}
 279	return m->pg_init_in_progress;
 280}
 281
 282static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 283{
 284	m->current_pg = pgpath->pg;
 285
 286	/* Must we initialise the PG first, and queue I/O till it's ready? */
 287	if (m->hw_handler_name) {
 288		m->pg_init_required = 1;
 289		m->queue_io = 1;
 290	} else {
 291		m->pg_init_required = 0;
 292		m->queue_io = 0;
 293	}
 294
 295	m->pg_init_count = 0;
 296}
 297
 298static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
 299			       size_t nr_bytes)
 300{
 301	struct dm_path *path;
 302
 303	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
 304	if (!path)
 305		return -ENXIO;
 306
 307	m->current_pgpath = path_to_pgpath(path);
 308
 309	if (m->current_pg != pg)
 310		__switch_pg(m, m->current_pgpath);
 311
 312	return 0;
 313}
 314
 315static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 316{
 317	struct priority_group *pg;
 318	unsigned bypassed = 1;
 319
 320	if (!m->nr_valid_paths)
 321		goto failed;
 322
 323	/* Were we instructed to switch PG? */
 324	if (m->next_pg) {
 325		pg = m->next_pg;
 326		m->next_pg = NULL;
 327		if (!__choose_path_in_pg(m, pg, nr_bytes))
 328			return;
 329	}
 330
 331	/* Don't change PG until it has no remaining paths */
 332	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
 333		return;
 334
 335	/*
 336	 * Loop through priority groups until we find a valid path.
 337	 * First time we skip PGs marked 'bypassed'.
 338	 * Second time we only try the ones we skipped, but set
 339	 * pg_init_delay_retry so we do not hammer controllers.
 340	 */
 341	do {
 342		list_for_each_entry(pg, &m->priority_groups, list) {
 343			if (pg->bypassed == bypassed)
 344				continue;
 345			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
 346				if (!bypassed)
 347					m->pg_init_delay_retry = 1;
 348				return;
 349			}
 350		}
 351	} while (bypassed--);
 352
 353failed:
 354	m->current_pgpath = NULL;
 355	m->current_pg = NULL;
 356}
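/*
 * Illustration (hypothetical scenario, not taken from this file): with PG1
 * marked bypassed and every path in PG2 failed, the first pass of the loop
 * above skips PG1 and finds no usable path in PG2; the second pass then
 * tries only the bypassed PG1, and if a path is found there
 * pg_init_delay_retry is set so the retried initialisation is delayed
 * rather than hammering the controller.
 */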
 357
 358/*
 359 * Check whether bios must be queued in the device-mapper core rather
 360 * than here in the target.
 361 *
 362 * m->lock must be held on entry.
 363 *
 364 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 365 * same value then we are not between multipath_presuspend()
 366 * and multipath_resume() calls and we have no need to check
 367 * for the DMF_NOFLUSH_SUSPENDING flag.
 368 */
 369static int __must_push_back(struct multipath *m)
 370{
 371	return (m->queue_if_no_path ||
 372		(m->queue_if_no_path != m->saved_queue_if_no_path &&
 373		 dm_noflush_suspending(m->ti)));
 374}
 375
 376#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required)
 377
 378/*
 379 * Map cloned requests
 380 */
 381static int multipath_map(struct dm_target *ti, struct request *clone,
 382			 union map_info *map_context)
 383{
 384	struct multipath *m = (struct multipath *) ti->private;
 385	int r = DM_MAPIO_REQUEUE;
 386	size_t nr_bytes = blk_rq_bytes(clone);
 387	unsigned long flags;
 388	struct pgpath *pgpath;
 389	struct block_device *bdev;
 390	struct dm_mpath_io *mpio;
 391
 392	spin_lock_irqsave(&m->lock, flags);
 393
 394	/* Do we need to select a new pgpath? */
 395	if (!m->current_pgpath ||
 396	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
 397		__choose_pgpath(m, nr_bytes);
 398
 399	pgpath = m->current_pgpath;
 400
 401	if (!pgpath) {
 402		if (!__must_push_back(m))
 403			r = -EIO;	/* Failed */
 404		goto out_unlock;
 405	}
 406	if (!pg_ready(m)) {
 407		__pg_init_all_paths(m);
 408		goto out_unlock;
 409	}
 410	if (set_mapinfo(m, map_context) < 0)
 411		/* ENOMEM, requeue */
 412		goto out_unlock;
 413
 414	bdev = pgpath->path.dev->bdev;
 415	clone->q = bdev_get_queue(bdev);
 416	clone->rq_disk = bdev->bd_disk;
 417	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 418	mpio = map_context->ptr;
 419	mpio->pgpath = pgpath;
 420	mpio->nr_bytes = nr_bytes;
 421	if (pgpath->pg->ps.type->start_io)
 422		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 423					      &pgpath->path,
 424					      nr_bytes);
 425	r = DM_MAPIO_REMAPPED;
 426
 427out_unlock:
 428	spin_unlock_irqrestore(&m->lock, flags);
 429
 430	return r;
 431}
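/*
 * Return-value summary (as implemented above): multipath_map() returns
 * DM_MAPIO_REMAPPED once the clone has been bound to a usable path,
 * DM_MAPIO_REQUEUE when the request should be retried later (mpio
 * allocation failed, pg_init is still outstanding, or no path is usable
 * but the request must be pushed back), and -EIO when no path is usable
 * and queueing is not allowed.
 */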
 432
 433/*
 434 * If we run out of usable paths, should we queue I/O or error it?
 435 */
 436static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 437			    unsigned save_old_value)
 438{
 439	unsigned long flags;
 440
 441	spin_lock_irqsave(&m->lock, flags);
 442
 443	if (save_old_value)
 444		m->saved_queue_if_no_path = m->queue_if_no_path;
 445	else
 446		m->saved_queue_if_no_path = queue_if_no_path;
 447	m->queue_if_no_path = queue_if_no_path;
 448	spin_unlock_irqrestore(&m->lock, flags);
 449
 450	if (!queue_if_no_path)
 451		dm_table_run_md_queue_async(m->ti->table);
 452
 453	return 0;
 454}
 455
 456/*
 457 * An event is triggered whenever a path is taken out of use.
 458 * This includes path failure and PG bypass.
 459 */
 460static void trigger_event(struct work_struct *work)
 461{
 462	struct multipath *m =
 463		container_of(work, struct multipath, trigger_event);
 464
 465	dm_table_event(m->ti->table);
 466}
 467
 468/*-----------------------------------------------------------------
 469 * Constructor/argument parsing:
 470 * <#multipath feature args> [<arg>]*
 471 * <#hw_handler args> [hw_handler [<arg>]*]
 472 * <#priority groups>
 473 * <initial priority group>
 474 *     [<selector> <#selector args> [<arg>]*
 475 *      <#paths> <#per-path selector args>
 476 *         [<path> [<arg>]* ]+ ]+
 477 *---------------------------------------------------------------*/
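/*
 * For illustration only (device numbers and counts are hypothetical): a map
 * matching the grammar above could be loaded with a dmsetup table entry such
 * as
 *
 *   0 2097152 multipath 1 queue_if_no_path 0 2 1
 *       round-robin 0 2 1 8:16 1000 8:32 1000
 *       round-robin 0 1 1 8:48 1000
 *
 * (wrapped here for readability; a real table entry is a single line), i.e.
 * one feature arg, no hardware handler, two round-robin priority groups with
 * one per-path selector arg each, and group 1 tried first.
 */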
 478static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 479			       struct dm_target *ti)
 480{
 481	int r;
 482	struct path_selector_type *pst;
 483	unsigned ps_argc;
 484
 485	static struct dm_arg _args[] = {
 486		{0, 1024, "invalid number of path selector args"},
 487	};
 488
 489	pst = dm_get_path_selector(dm_shift_arg(as));
 490	if (!pst) {
 491		ti->error = "unknown path selector type";
 492		return -EINVAL;
 493	}
 494
 495	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 496	if (r) {
 497		dm_put_path_selector(pst);
 498		return -EINVAL;
 499	}
 500
 501	r = pst->create(&pg->ps, ps_argc, as->argv);
 502	if (r) {
 503		dm_put_path_selector(pst);
 504		ti->error = "path selector constructor failed";
 505		return r;
 506	}
 507
 508	pg->ps.type = pst;
 509	dm_consume_args(as, ps_argc);
 510
 511	return 0;
 512}
 513
 514static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 515			       struct dm_target *ti)
 516{
 517	int r;
 518	struct pgpath *p;
 519	struct multipath *m = ti->private;
 520	struct request_queue *q = NULL;
 521	const char *attached_handler_name;
 522
 523	/* we need at least a path arg */
 524	if (as->argc < 1) {
 525		ti->error = "no device given";
 526		return ERR_PTR(-EINVAL);
 527	}
 528
 529	p = alloc_pgpath();
 530	if (!p)
 531		return ERR_PTR(-ENOMEM);
 532
 533	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 534			  &p->path.dev);
 535	if (r) {
 536		ti->error = "error getting device";
 537		goto bad;
 538	}
 539
 540	if (m->retain_attached_hw_handler || m->hw_handler_name)
 541		q = bdev_get_queue(p->path.dev->bdev);
 542
 543	if (m->retain_attached_hw_handler) {
 544		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 545		if (attached_handler_name) {
 546			/*
 547			 * Reset hw_handler_name to match the attached handler
 548			 * and clear any hw_handler_params associated with the
 549			 * ignored handler.
 550			 *
 551			 * NB. This modifies the table line to show the actual
 552			 * handler instead of the one specified in the original table.
 553			 */
 554			kfree(m->hw_handler_name);
 555			m->hw_handler_name = attached_handler_name;
 556
 557			kfree(m->hw_handler_params);
 558			m->hw_handler_params = NULL;
 559		}
 560	}
 561
 562	if (m->hw_handler_name) {
 563		/*
 564		 * Increments scsi_dh reference, even when using an
 565		 * already-attached handler.
 566		 */
 567		r = scsi_dh_attach(q, m->hw_handler_name);
 568		if (r == -EBUSY) {
 569			/*
 570			 * Already attached to a different hw_handler:
 571			 * try to reattach with the correct one.
 572			 */
 573			scsi_dh_detach(q);
 574			r = scsi_dh_attach(q, m->hw_handler_name);
 575		}
 576
 577		if (r < 0) {
 578			ti->error = "error attaching hardware handler";
 579			dm_put_device(ti, p->path.dev);
 580			goto bad;
 581		}
 582
 583		if (m->hw_handler_params) {
 584			r = scsi_dh_set_params(q, m->hw_handler_params);
 585			if (r < 0) {
 586				ti->error = "unable to set hardware "
 587							"handler parameters";
 588				scsi_dh_detach(q);
 589				dm_put_device(ti, p->path.dev);
 590				goto bad;
 591			}
 592		}
 593	}
 594
 595	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 596	if (r) {
 597		dm_put_device(ti, p->path.dev);
 598		goto bad;
 599	}
 600
 601	return p;
 602
 603 bad:
 604	free_pgpath(p);
 605	return ERR_PTR(r);
 606}
 607
 608static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 609						   struct multipath *m)
 610{
 611	static struct dm_arg _args[] = {
 612		{1, 1024, "invalid number of paths"},
 613		{0, 1024, "invalid number of selector args"}
 614	};
 615
 616	int r;
 617	unsigned i, nr_selector_args, nr_args;
 618	struct priority_group *pg;
 619	struct dm_target *ti = m->ti;
 620
 621	if (as->argc < 2) {
 622		as->argc = 0;
 623		ti->error = "not enough priority group arguments";
 624		return ERR_PTR(-EINVAL);
 625	}
 626
 627	pg = alloc_priority_group();
 628	if (!pg) {
 629		ti->error = "couldn't allocate priority group";
 630		return ERR_PTR(-ENOMEM);
 631	}
 632	pg->m = m;
 633
 634	r = parse_path_selector(as, pg, ti);
 635	if (r)
 636		goto bad;
 637
 638	/*
 639	 * read the paths
 640	 */
 641	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 642	if (r)
 643		goto bad;
 644
 645	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 646	if (r)
 647		goto bad;
 648
 649	nr_args = 1 + nr_selector_args;
 650	for (i = 0; i < pg->nr_pgpaths; i++) {
 651		struct pgpath *pgpath;
 652		struct dm_arg_set path_args;
 653
 654		if (as->argc < nr_args) {
 655			ti->error = "not enough path parameters";
 656			r = -EINVAL;
 657			goto bad;
 658		}
 659
 660		path_args.argc = nr_args;
 661		path_args.argv = as->argv;
 662
 663		pgpath = parse_path(&path_args, &pg->ps, ti);
 664		if (IS_ERR(pgpath)) {
 665			r = PTR_ERR(pgpath);
 666			goto bad;
 667		}
 668
 669		pgpath->pg = pg;
 670		list_add_tail(&pgpath->list, &pg->pgpaths);
 671		dm_consume_args(as, nr_args);
 672	}
 673
 674	return pg;
 675
 676 bad:
 677	free_priority_group(pg, ti);
 678	return ERR_PTR(r);
 679}
 680
 681static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 682{
 683	unsigned hw_argc;
 684	int ret;
 685	struct dm_target *ti = m->ti;
 686
 687	static struct dm_arg _args[] = {
 688		{0, 1024, "invalid number of hardware handler args"},
 689	};
 690
 691	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
 692		return -EINVAL;
 693
 694	if (!hw_argc)
 695		return 0;
 696
 697	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
 698	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
 699				     "scsi_dh_%s", m->hw_handler_name)) {
 700		ti->error = "unknown hardware handler type";
 701		ret = -EINVAL;
 702		goto fail;
 703	}
 704
 705	if (hw_argc > 1) {
 706		char *p;
 707		int i, j, len = 4;
 708
 709		for (i = 0; i <= hw_argc - 2; i++)
 710			len += strlen(as->argv[i]) + 1;
 711		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
 712		if (!p) {
 713			ti->error = "memory allocation failed";
 714			ret = -ENOMEM;
 715			goto fail;
 716		}
 717		j = sprintf(p, "%d", hw_argc - 1);
 718		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
 719			j = sprintf(p, "%s", as->argv[i]);
 720	}
 721	dm_consume_args(as, hw_argc - 1);
 722
 723	return 0;
 724fail:
 725	kfree(m->hw_handler_name);
 726	m->hw_handler_name = NULL;
 727	return ret;
 728}
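/*
 * For illustration only (values hypothetical): the hw_handler block of the
 * table names the handler and, optionally, its parameters, e.g. "1 alua"
 * attaches scsi_dh_alua with no extra arguments; any further arguments are
 * packed into hw_handler_params above and later handed to
 * scsi_dh_set_params() in parse_path().
 */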
 729
 730static int parse_features(struct dm_arg_set *as, struct multipath *m)
 731{
 732	int r;
 733	unsigned argc;
 734	struct dm_target *ti = m->ti;
 735	const char *arg_name;
 736
 737	static struct dm_arg _args[] = {
 738		{0, 6, "invalid number of feature args"},
 739		{1, 50, "pg_init_retries must be between 1 and 50"},
 740		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
 741	};
 742
 743	r = dm_read_arg_group(_args, as, &argc, &ti->error);
 744	if (r)
 745		return -EINVAL;
 746
 747	if (!argc)
 748		return 0;
 749
 750	do {
 751		arg_name = dm_shift_arg(as);
 752		argc--;
 753
 754		if (!strcasecmp(arg_name, "queue_if_no_path")) {
 755			r = queue_if_no_path(m, 1, 0);
 756			continue;
 757		}
 758
 759		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
 760			m->retain_attached_hw_handler = 1;
 761			continue;
 762		}
 763
 764		if (!strcasecmp(arg_name, "pg_init_retries") &&
 765		    (argc >= 1)) {
 766			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
 767			argc--;
 768			continue;
 769		}
 770
 771		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
 772		    (argc >= 1)) {
 773			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
 774			argc--;
 775			continue;
 776		}
 777
 778		ti->error = "Unrecognised multipath feature request";
 779		r = -EINVAL;
 780	} while (argc && !r);
 781
 782	return r;
 783}
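/*
 * Illustrative feature block (values hypothetical):
 *
 *   4 queue_if_no_path pg_init_retries 5 retain_attached_hw_handler
 *
 * declares four feature arguments: queue I/O while no path is usable,
 * retry pg_init up to 5 times, and keep a hardware handler that is
 * already attached to the device.
 */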
 784
 785static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 786			 char **argv)
 787{
 788	/* target arguments */
 789	static struct dm_arg _args[] = {
 790		{0, 1024, "invalid number of priority groups"},
 791		{0, 1024, "invalid initial priority group number"},
 792	};
 793
 794	int r;
 795	struct multipath *m;
 796	struct dm_arg_set as;
 797	unsigned pg_count = 0;
 798	unsigned next_pg_num;
 799
 800	as.argc = argc;
 801	as.argv = argv;
 802
 803	m = alloc_multipath(ti);
 804	if (!m) {
 805		ti->error = "can't allocate multipath";
 806		return -EINVAL;
 807	}
 808
 809	r = parse_features(&as, m);
 810	if (r)
 811		goto bad;
 812
 813	r = parse_hw_handler(&as, m);
 814	if (r)
 815		goto bad;
 816
 817	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
 818	if (r)
 819		goto bad;
 820
 821	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
 822	if (r)
 823		goto bad;
 824
 825	if ((!m->nr_priority_groups && next_pg_num) ||
 826	    (m->nr_priority_groups && !next_pg_num)) {
 827		ti->error = "invalid initial priority group";
 828		r = -EINVAL;
 829		goto bad;
 830	}
 831
 832	/* parse the priority groups */
 833	while (as.argc) {
 834		struct priority_group *pg;
 835
 836		pg = parse_priority_group(&as, m);
 837		if (IS_ERR(pg)) {
 838			r = PTR_ERR(pg);
 839			goto bad;
 840		}
 841
 842		m->nr_valid_paths += pg->nr_pgpaths;
 843		list_add_tail(&pg->list, &m->priority_groups);
 844		pg_count++;
 845		pg->pg_num = pg_count;
 846		if (!--next_pg_num)
 847			m->next_pg = pg;
 848	}
 849
 850	if (pg_count != m->nr_priority_groups) {
 851		ti->error = "priority group count mismatch";
 852		r = -EINVAL;
 853		goto bad;
 854	}
 855
 856	ti->num_flush_bios = 1;
 857	ti->num_discard_bios = 1;
 858	ti->num_write_same_bios = 1;
 859
 860	return 0;
 861
 862 bad:
 863	free_multipath(m);
 864	return r;
 865}
 866
 867static void multipath_wait_for_pg_init_completion(struct multipath *m)
 868{
 869	DECLARE_WAITQUEUE(wait, current);
 870	unsigned long flags;
 871
 872	add_wait_queue(&m->pg_init_wait, &wait);
 873
 874	while (1) {
 875		set_current_state(TASK_UNINTERRUPTIBLE);
 876
 877		spin_lock_irqsave(&m->lock, flags);
 878		if (!m->pg_init_in_progress) {
 879			spin_unlock_irqrestore(&m->lock, flags);
 880			break;
 881		}
 882		spin_unlock_irqrestore(&m->lock, flags);
 883
 884		io_schedule();
 885	}
 886	set_current_state(TASK_RUNNING);
 887
 888	remove_wait_queue(&m->pg_init_wait, &wait);
 889}
 890
 891static void flush_multipath_work(struct multipath *m)
 892{
 893	unsigned long flags;
 894
 895	spin_lock_irqsave(&m->lock, flags);
 896	m->pg_init_disabled = 1;
 897	spin_unlock_irqrestore(&m->lock, flags);
 898
 899	flush_workqueue(kmpath_handlerd);
 900	multipath_wait_for_pg_init_completion(m);
 901	flush_workqueue(kmultipathd);
 902	flush_work(&m->trigger_event);
 903
 904	spin_lock_irqsave(&m->lock, flags);
 905	m->pg_init_disabled = 0;
 906	spin_unlock_irqrestore(&m->lock, flags);
 907}
 908
 909static void multipath_dtr(struct dm_target *ti)
 910{
 911	struct multipath *m = ti->private;
 912
 913	flush_multipath_work(m);
 914	free_multipath(m);
 915}
 916
 917/*
 918 * Take a path out of use.
 919 */
 920static int fail_path(struct pgpath *pgpath)
 921{
 922	unsigned long flags;
 923	struct multipath *m = pgpath->pg->m;
 924
 925	spin_lock_irqsave(&m->lock, flags);
 926
 927	if (!pgpath->is_active)
 928		goto out;
 929
 930	DMWARN("Failing path %s.", pgpath->path.dev->name);
 931
 932	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
 933	pgpath->is_active = 0;
 934	pgpath->fail_count++;
 935
 936	m->nr_valid_paths--;
 937
 938	if (pgpath == m->current_pgpath)
 939		m->current_pgpath = NULL;
 940
 941	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 942		      pgpath->path.dev->name, m->nr_valid_paths);
 943
 944	schedule_work(&m->trigger_event);
 945
 946out:
 947	spin_unlock_irqrestore(&m->lock, flags);
 948
 949	return 0;
 950}
 951
 952/*
 953 * Reinstate a previously-failed path
 954 */
 955static int reinstate_path(struct pgpath *pgpath)
 956{
 957	int r = 0, run_queue = 0;
 958	unsigned long flags;
 959	struct multipath *m = pgpath->pg->m;
 960
 961	spin_lock_irqsave(&m->lock, flags);
 962
 963	if (pgpath->is_active)
 964		goto out;
 965
 966	if (!pgpath->pg->ps.type->reinstate_path) {
 967		DMWARN("Reinstate path not supported by path selector %s",
 968		       pgpath->pg->ps.type->name);
 969		r = -EINVAL;
 970		goto out;
 971	}
 972
 973	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
 974	if (r)
 975		goto out;
 976
 977	pgpath->is_active = 1;
 978
 979	if (!m->nr_valid_paths++) {
 980		m->current_pgpath = NULL;
 981		run_queue = 1;
 982	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 983		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 984			m->pg_init_in_progress++;
 985	}
 986
 987	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
 988		      pgpath->path.dev->name, m->nr_valid_paths);
 989
 990	schedule_work(&m->trigger_event);
 991
 992out:
 993	spin_unlock_irqrestore(&m->lock, flags);
 994	if (run_queue)
 995		dm_table_run_md_queue_async(m->ti->table);
 996
 997	return r;
 998}
 999
1000/*
1001 * Fail or reinstate all paths that match the provided struct dm_dev.
1002 */
1003static int action_dev(struct multipath *m, struct dm_dev *dev,
1004		      action_fn action)
1005{
1006	int r = -EINVAL;
1007	struct pgpath *pgpath;
1008	struct priority_group *pg;
1009
1010	list_for_each_entry(pg, &m->priority_groups, list) {
1011		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1012			if (pgpath->path.dev == dev)
1013				r = action(pgpath);
1014		}
1015	}
1016
1017	return r;
1018}
1019
1020/*
1021 * Temporarily try to avoid having to use the specified PG
1022 */
1023static void bypass_pg(struct multipath *m, struct priority_group *pg,
1024		      int bypassed)
1025{
1026	unsigned long flags;
1027
1028	spin_lock_irqsave(&m->lock, flags);
1029
1030	pg->bypassed = bypassed;
1031	m->current_pgpath = NULL;
1032	m->current_pg = NULL;
1033
1034	spin_unlock_irqrestore(&m->lock, flags);
1035
1036	schedule_work(&m->trigger_event);
1037}
1038
1039/*
1040 * Switch to using the specified PG from the next I/O that gets mapped
1041 */
1042static int switch_pg_num(struct multipath *m, const char *pgstr)
1043{
1044	struct priority_group *pg;
1045	unsigned pgnum;
1046	unsigned long flags;
1047	char dummy;
1048
1049	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1050	    (pgnum > m->nr_priority_groups)) {
1051		DMWARN("invalid PG number supplied to switch_pg_num");
1052		return -EINVAL;
1053	}
1054
1055	spin_lock_irqsave(&m->lock, flags);
1056	list_for_each_entry(pg, &m->priority_groups, list) {
1057		pg->bypassed = 0;
1058		if (--pgnum)
1059			continue;
1060
1061		m->current_pgpath = NULL;
1062		m->current_pg = NULL;
1063		m->next_pg = pg;
1064	}
1065	spin_unlock_irqrestore(&m->lock, flags);
1066
1067	schedule_work(&m->trigger_event);
1068	return 0;
1069}
1070
1071/*
1072 * Set/clear bypassed status of a PG.
1073 * PGs are numbered upwards from 1 in the order they were declared.
1074 */
1075static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
1076{
1077	struct priority_group *pg;
1078	unsigned pgnum;
1079	char dummy;
1080
1081	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1082	    (pgnum > m->nr_priority_groups)) {
1083		DMWARN("invalid PG number supplied to bypass_pg");
1084		return -EINVAL;
1085	}
1086
1087	list_for_each_entry(pg, &m->priority_groups, list) {
1088		if (!--pgnum)
1089			break;
1090	}
1091
1092	bypass_pg(m, pg, bypassed);
1093	return 0;
1094}
1095
1096/*
1097 * Should we retry pg_init immediately?
1098 */
1099static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1100{
1101	unsigned long flags;
1102	int limit_reached = 0;
1103
1104	spin_lock_irqsave(&m->lock, flags);
1105
1106	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
1107		m->pg_init_required = 1;
1108	else
1109		limit_reached = 1;
1110
1111	spin_unlock_irqrestore(&m->lock, flags);
1112
1113	return limit_reached;
1114}
1115
1116static void pg_init_done(void *data, int errors)
1117{
1118	struct pgpath *pgpath = data;
1119	struct priority_group *pg = pgpath->pg;
1120	struct multipath *m = pg->m;
1121	unsigned long flags;
1122	unsigned delay_retry = 0;
1123
1124	/* device or driver problems */
1125	switch (errors) {
1126	case SCSI_DH_OK:
1127		break;
1128	case SCSI_DH_NOSYS:
1129		if (!m->hw_handler_name) {
1130			errors = 0;
1131			break;
1132		}
1133		DMERR("Could not failover the device: Handler scsi_dh_%s "
1134		      "Error %d.", m->hw_handler_name, errors);
1135		/*
1136		 * Fail path for now, so we do not ping pong
1137		 */
1138		fail_path(pgpath);
1139		break;
1140	case SCSI_DH_DEV_TEMP_BUSY:
1141		/*
1142		 * Probably doing something like FW upgrade on the
1143		 * controller, so try the other pg.
1144		 */
1145		bypass_pg(m, pg, 1);
1146		break;
1147	case SCSI_DH_RETRY:
1148		/* Wait before retrying. */
1149		delay_retry = 1;
1150	case SCSI_DH_IMM_RETRY:
1151	case SCSI_DH_RES_TEMP_UNAVAIL:
1152		if (pg_init_limit_reached(m, pgpath))
1153			fail_path(pgpath);
1154		errors = 0;
1155		break;
1156	default:
1157		/*
1158		 * We probably do not want to fail the path for a device
1159		 * error, but this is what the old dm did. In future
1160		 * patches we can do more advanced handling.
1161		 */
1162		fail_path(pgpath);
1163	}
1164
1165	spin_lock_irqsave(&m->lock, flags);
1166	if (errors) {
1167		if (pgpath == m->current_pgpath) {
1168			DMERR("Could not failover device. Error %d.", errors);
1169			m->current_pgpath = NULL;
1170			m->current_pg = NULL;
1171		}
1172	} else if (!m->pg_init_required)
1173		pg->bypassed = 0;
1174
1175	if (--m->pg_init_in_progress)
1176		/* Activations of other paths are still ongoing */
1177		goto out;
1178
1179	if (m->pg_init_required) {
1180		m->pg_init_delay_retry = delay_retry;
1181		if (__pg_init_all_paths(m))
1182			goto out;
1183	}
1184	m->queue_io = 0;
1185
1186	/*
1187	 * Wake up any thread waiting to suspend.
1188	 */
1189	wake_up(&m->pg_init_wait);
1190
1191out:
1192	spin_unlock_irqrestore(&m->lock, flags);
1193}
1194
1195static void activate_path(struct work_struct *work)
1196{
1197	struct pgpath *pgpath =
1198		container_of(work, struct pgpath, activate_path.work);
1199
1200	if (pgpath->is_active)
1201		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1202				 pg_init_done, pgpath);
1203	else
1204		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1205}
1206
1207static int noretry_error(int error)
1208{
1209	switch (error) {
1210	case -EOPNOTSUPP:
1211	case -EREMOTEIO:
1212	case -EILSEQ:
1213	case -ENODATA:
1214	case -ENOSPC:
1215		return 1;
1216	}
1217
1218	/* Anything else could be a path failure, so should be retried */
1219	return 0;
1220}
1221
1222/*
1223 * end_io handling
1224 */
1225static int do_end_io(struct multipath *m, struct request *clone,
1226		     int error, struct dm_mpath_io *mpio)
1227{
1228	/*
1229	 * We don't queue any clone request inside the multipath target
1230	 * during end I/O handling, since those clone requests don't have
1231	 * bio clones.  If we queued them inside the multipath target,
1232	 * we would need to make bio clones, which requires memory allocation.
1233	 * (See drivers/md/dm.c:end_clone_bio() for why the clone requests
1234	 *  don't have bio clones.)
1235	 * Instead of queueing the clone request here, we queue the original
1236	 * request into dm core, which will remake a clone request and
1237	 * clone bios for it and resubmit it later.
1238	 */
1239	int r = DM_ENDIO_REQUEUE;
1240	unsigned long flags;
1241
1242	if (!error && !clone->errors)
1243		return 0;	/* I/O complete */
1244
1245	if (noretry_error(error)) {
1246		if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1247		    !clone->q->limits.max_write_same_sectors) {
1248			struct queue_limits *limits;
1249
1250			/* device doesn't really support WRITE SAME, disable it */
1251			limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1252			limits->max_write_same_sectors = 0;
1253		}
1254		return error;
1255	}
1256
1257	if (mpio->pgpath)
1258		fail_path(mpio->pgpath);
1259
1260	spin_lock_irqsave(&m->lock, flags);
1261	if (!m->nr_valid_paths) {
1262		if (!m->queue_if_no_path) {
1263			if (!__must_push_back(m))
1264				r = -EIO;
1265		} else {
1266			if (error == -EBADE)
1267				r = error;
1268		}
1269	}
1270	spin_unlock_irqrestore(&m->lock, flags);
1271
1272	return r;
1273}
1274
1275static int multipath_end_io(struct dm_target *ti, struct request *clone,
1276			    int error, union map_info *map_context)
1277{
1278	struct multipath *m = ti->private;
1279	struct dm_mpath_io *mpio = map_context->ptr;
1280	struct pgpath *pgpath;
1281	struct path_selector *ps;
1282	int r;
1283
1284	BUG_ON(!mpio);
1285
1286	r  = do_end_io(m, clone, error, mpio);
1287	pgpath = mpio->pgpath;
1288	if (pgpath) {
1289		ps = &pgpath->pg->ps;
1290		if (ps->type->end_io)
1291			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1292	}
1293	clear_mapinfo(m, map_context);
1294
1295	return r;
1296}
1297
1298/*
1299 * Suspend can't complete until all the I/O is processed, so if
1300 * the last path fails we must error any remaining I/O.
1301 * Note that if freeze_bdev fails while suspending, the
1302 * queue_if_no_path state is lost - userspace should reset it.
1303 */
1304static void multipath_presuspend(struct dm_target *ti)
1305{
1306	struct multipath *m = (struct multipath *) ti->private;
1307
1308	queue_if_no_path(m, 0, 1);
1309}
1310
1311static void multipath_postsuspend(struct dm_target *ti)
1312{
1313	struct multipath *m = ti->private;
1314
1315	mutex_lock(&m->work_mutex);
1316	flush_multipath_work(m);
1317	mutex_unlock(&m->work_mutex);
1318}
1319
1320/*
1321 * Restore the queue_if_no_path setting.
1322 */
1323static void multipath_resume(struct dm_target *ti)
1324{
1325	struct multipath *m = (struct multipath *) ti->private;
1326	unsigned long flags;
1327
1328	spin_lock_irqsave(&m->lock, flags);
1329	m->queue_if_no_path = m->saved_queue_if_no_path;
1330	spin_unlock_irqrestore(&m->lock, flags);
1331}
1332
1333/*
1334 * Info output has the following format:
1335 * num_multipath_feature_args [multipath_feature_args]*
1336 * num_handler_status_args [handler_status_args]*
1337 * num_groups init_group_number
1338 *            [A|D|E num_ps_status_args [ps_status_args]*
1339 *             num_paths num_selector_args
1340 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1341 *
1342 * Table output has the following format (identical to the constructor string):
1343 * num_feature_args [features_args]*
1344 * num_handler_args hw_handler [hw_handler_args]*
1345 * num_groups init_group_number
1346 *     [priority selector-name num_ps_args [ps_args]*
1347 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1348 */
1349static void multipath_status(struct dm_target *ti, status_type_t type,
1350			     unsigned status_flags, char *result, unsigned maxlen)
1351{
1352	int sz = 0;
1353	unsigned long flags;
1354	struct multipath *m = (struct multipath *) ti->private;
1355	struct priority_group *pg;
1356	struct pgpath *p;
1357	unsigned pg_num;
1358	char state;
1359
1360	spin_lock_irqsave(&m->lock, flags);
1361
1362	/* Features */
1363	if (type == STATUSTYPE_INFO)
1364		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
1365	else {
1366		DMEMIT("%u ", m->queue_if_no_path +
1367			      (m->pg_init_retries > 0) * 2 +
1368			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1369			      m->retain_attached_hw_handler);
1370		if (m->queue_if_no_path)
1371			DMEMIT("queue_if_no_path ");
1372		if (m->pg_init_retries)
1373			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1374		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1375			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1376		if (m->retain_attached_hw_handler)
1377			DMEMIT("retain_attached_hw_handler ");
1378	}
1379
1380	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1381		DMEMIT("0 ");
1382	else
1383		DMEMIT("1 %s ", m->hw_handler_name);
1384
1385	DMEMIT("%u ", m->nr_priority_groups);
1386
1387	if (m->next_pg)
1388		pg_num = m->next_pg->pg_num;
1389	else if (m->current_pg)
1390		pg_num = m->current_pg->pg_num;
1391	else
1392		pg_num = (m->nr_priority_groups ? 1 : 0);
1393
1394	DMEMIT("%u ", pg_num);
1395
1396	switch (type) {
1397	case STATUSTYPE_INFO:
1398		list_for_each_entry(pg, &m->priority_groups, list) {
1399			if (pg->bypassed)
1400				state = 'D';	/* Disabled */
1401			else if (pg == m->current_pg)
1402				state = 'A';	/* Currently Active */
1403			else
1404				state = 'E';	/* Enabled */
1405
1406			DMEMIT("%c ", state);
1407
1408			if (pg->ps.type->status)
1409				sz += pg->ps.type->status(&pg->ps, NULL, type,
1410							  result + sz,
1411							  maxlen - sz);
1412			else
1413				DMEMIT("0 ");
1414
1415			DMEMIT("%u %u ", pg->nr_pgpaths,
1416			       pg->ps.type->info_args);
1417
1418			list_for_each_entry(p, &pg->pgpaths, list) {
1419				DMEMIT("%s %s %u ", p->path.dev->name,
1420				       p->is_active ? "A" : "F",
1421				       p->fail_count);
1422				if (pg->ps.type->status)
1423					sz += pg->ps.type->status(&pg->ps,
1424					      &p->path, type, result + sz,
1425					      maxlen - sz);
1426			}
1427		}
1428		break;
1429
1430	case STATUSTYPE_TABLE:
1431		list_for_each_entry(pg, &m->priority_groups, list) {
1432			DMEMIT("%s ", pg->ps.type->name);
1433
1434			if (pg->ps.type->status)
1435				sz += pg->ps.type->status(&pg->ps, NULL, type,
1436							  result + sz,
1437							  maxlen - sz);
1438			else
1439				DMEMIT("0 ");
1440
1441			DMEMIT("%u %u ", pg->nr_pgpaths,
1442			       pg->ps.type->table_args);
1443
1444			list_for_each_entry(p, &pg->pgpaths, list) {
1445				DMEMIT("%s ", p->path.dev->name);
1446				if (pg->ps.type->status)
1447					sz += pg->ps.type->status(&pg->ps,
1448					      &p->path, type, result + sz,
1449					      maxlen - sz);
1450			}
1451		}
1452		break;
1453	}
1454
1455	spin_unlock_irqrestore(&m->lock, flags);
1456}
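/*
 * Illustrative STATUSTYPE_INFO output (values and device numbers are
 * hypothetical, and assume the round-robin selector, which reports "0"
 * group status and no per-path info args): a healthy single-group,
 * two-path map might report
 *
 *   2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *
 * i.e. two feature-status values (queue_io 0, pg_init_count 0), no handler
 * status, one group, initial group 1, group Active, two paths, both active
 * with a fail_count of 0.
 */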
1457
1458static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1459{
1460	int r = -EINVAL;
1461	struct dm_dev *dev;
1462	struct multipath *m = (struct multipath *) ti->private;
1463	action_fn action;
1464
1465	mutex_lock(&m->work_mutex);
1466
1467	if (dm_suspended(ti)) {
1468		r = -EBUSY;
1469		goto out;
1470	}
1471
1472	if (argc == 1) {
1473		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1474			r = queue_if_no_path(m, 1, 0);
1475			goto out;
1476		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1477			r = queue_if_no_path(m, 0, 0);
1478			goto out;
1479		}
1480	}
1481
1482	if (argc != 2) {
1483		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1484		goto out;
1485	}
1486
1487	if (!strcasecmp(argv[0], "disable_group")) {
1488		r = bypass_pg_num(m, argv[1], 1);
1489		goto out;
1490	} else if (!strcasecmp(argv[0], "enable_group")) {
1491		r = bypass_pg_num(m, argv[1], 0);
1492		goto out;
1493	} else if (!strcasecmp(argv[0], "switch_group")) {
1494		r = switch_pg_num(m, argv[1]);
1495		goto out;
1496	} else if (!strcasecmp(argv[0], "reinstate_path"))
1497		action = reinstate_path;
1498	else if (!strcasecmp(argv[0], "fail_path"))
1499		action = fail_path;
1500	else {
1501		DMWARN("Unrecognised multipath message received: %s", argv[0]);
1502		goto out;
1503	}
1504
1505	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1506	if (r) {
1507		DMWARN("message: error getting device %s",
1508		       argv[1]);
1509		goto out;
1510	}
1511
1512	r = action_dev(m, dev, action);
1513
1514	dm_put_device(ti, dev);
1515
1516out:
1517	mutex_unlock(&m->work_mutex);
1518	return r;
1519}
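/*
 * Typical usage (illustrative): these messages are normally sent with
 * "dmsetup message", e.g.
 *
 *   dmsetup message <mapname> 0 fail_path /dev/sdb
 *   dmsetup message <mapname> 0 switch_group 2
 *   dmsetup message <mapname> 0 queue_if_no_path
 *
 * where the path argument must resolve to a device used by the map.
 */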
1520
1521static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1522			   unsigned long arg)
1523{
1524	struct multipath *m = ti->private;
1525	struct pgpath *pgpath;
1526	struct block_device *bdev;
1527	fmode_t mode;
1528	unsigned long flags;
1529	int r;
1530
1531	bdev = NULL;
1532	mode = 0;
1533	r = 0;
1534
1535	spin_lock_irqsave(&m->lock, flags);
1536
1537	if (!m->current_pgpath)
1538		__choose_pgpath(m, 0);
1539
1540	pgpath = m->current_pgpath;
1541
1542	if (pgpath) {
1543		bdev = pgpath->path.dev->bdev;
1544		mode = pgpath->path.dev->mode;
1545	}
1546
1547	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1548		r = -ENOTCONN;
1549	else if (!bdev)
1550		r = -EIO;
1551
1552	spin_unlock_irqrestore(&m->lock, flags);
1553
1554	/*
1555	 * Only pass ioctls through if the device sizes match exactly.
1556	 */
1557	if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
1558		int err = scsi_verify_blk_ioctl(NULL, cmd);
1559		if (err)
1560			r = err;
1561	}
1562
1563	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
1564		spin_lock_irqsave(&m->lock, flags);
1565		if (!m->current_pg) {
1566			/* Path status changed, redo selection */
1567			__choose_pgpath(m, 0);
1568		}
1569		if (m->pg_init_required)
1570			__pg_init_all_paths(m);
1571		spin_unlock_irqrestore(&m->lock, flags);
1572		dm_table_run_md_queue_async(m->ti->table);
1573	}
1574
1575	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1576}
1577
1578static int multipath_iterate_devices(struct dm_target *ti,
1579				     iterate_devices_callout_fn fn, void *data)
1580{
1581	struct multipath *m = ti->private;
1582	struct priority_group *pg;
1583	struct pgpath *p;
1584	int ret = 0;
1585
1586	list_for_each_entry(pg, &m->priority_groups, list) {
1587		list_for_each_entry(p, &pg->pgpaths, list) {
1588			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1589			if (ret)
1590				goto out;
1591		}
1592	}
1593
1594out:
1595	return ret;
1596}
1597
1598static int __pgpath_busy(struct pgpath *pgpath)
1599{
1600	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1601
1602	return dm_underlying_device_busy(q);
1603}
1604
1605/*
1606 * We return "busy" only when we can map I/Os but the underlying devices
1607 * are busy (so even if we mapped I/Os now, they would just wait on
1608 * the underlying queue).
1609 * In other words, if we want to error I/Os or queue them inside the
1610 * target because mapping is unavailable, we don't return "busy";
1611 * otherwise dm core won't pass us the I/Os and we can't do what we want.
1612 */
1613static int multipath_busy(struct dm_target *ti)
1614{
1615	int busy = 0, has_active = 0;
1616	struct multipath *m = ti->private;
1617	struct priority_group *pg;
1618	struct pgpath *pgpath;
1619	unsigned long flags;
1620
1621	spin_lock_irqsave(&m->lock, flags);
1622
1623	/* pg_init in progress, requeue until done */
1624	if (!pg_ready(m)) {
1625		busy = 1;
1626		goto out;
1627	}
1628	/* Guess which priority_group will be used at next mapping time */
1629	if (unlikely(!m->current_pgpath && m->next_pg))
1630		pg = m->next_pg;
1631	else if (likely(m->current_pg))
1632		pg = m->current_pg;
1633	else
1634		/*
1635		 * We don't know which pg will be used at the next mapping time.
1636		 * We don't call __choose_pgpath() here to avoid triggering
1637		 * pg_init just from a busy check, so we don't know whether the
1638		 * underlying devices we would be using at the next mapping time
1639		 * are busy or not.  Just try mapping.
1640		 */
1641		goto out;
1642
1643	/*
1644	 * If at least one active path is not busy, the path selector
1645	 * will be able to select it, so we consider such a pg not busy.
1646	 */
1647	busy = 1;
1648	list_for_each_entry(pgpath, &pg->pgpaths, list)
1649		if (pgpath->is_active) {
1650			has_active = 1;
1651
1652			if (!__pgpath_busy(pgpath)) {
1653				busy = 0;
1654				break;
1655			}
1656		}
1657
1658	if (!has_active)
1659		/*
1660		 * No active path in this pg, so this pg won't be used and
1661		 * current_pg will change at the next mapping time; we have to
1662		 * attempt a mapping to find out which pg that will be.
1663		 */
1664		busy = 0;
1665
1666out:
1667	spin_unlock_irqrestore(&m->lock, flags);
1668
1669	return busy;
1670}
1671
1672/*-----------------------------------------------------------------
1673 * Module setup
1674 *---------------------------------------------------------------*/
1675static struct target_type multipath_target = {
1676	.name = "multipath",
1677	.version = {1, 7, 0},
1678	.module = THIS_MODULE,
1679	.ctr = multipath_ctr,
1680	.dtr = multipath_dtr,
1681	.map_rq = multipath_map,
1682	.rq_end_io = multipath_end_io,
1683	.presuspend = multipath_presuspend,
1684	.postsuspend = multipath_postsuspend,
1685	.resume = multipath_resume,
1686	.status = multipath_status,
1687	.message = multipath_message,
1688	.ioctl  = multipath_ioctl,
1689	.iterate_devices = multipath_iterate_devices,
1690	.busy = multipath_busy,
1691};
1692
1693static int __init dm_multipath_init(void)
1694{
1695	int r;
1696
1697	/* allocate a slab for the dm_mpath_io structs */
1698	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
1699	if (!_mpio_cache)
1700		return -ENOMEM;
1701
1702	r = dm_register_target(&multipath_target);
1703	if (r < 0) {
1704		DMERR("register failed %d", r);
1705		kmem_cache_destroy(_mpio_cache);
1706		return -EINVAL;
1707	}
1708
1709	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1710	if (!kmultipathd) {
1711		DMERR("failed to create workqueue kmpathd");
1712		dm_unregister_target(&multipath_target);
1713		kmem_cache_destroy(_mpio_cache);
1714		return -ENOMEM;
1715	}
1716
1717	/*
1718	 * A separate workqueue is used to handle the device handlers,
1719	 * to avoid overloading the existing workqueue.  Overloading it
1720	 * would also create a bottleneck in the storage hardware device
1721	 * activation path.
1722	 */
1723	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
1724						  WQ_MEM_RECLAIM);
1725	if (!kmpath_handlerd) {
1726		DMERR("failed to create workqueue kmpath_handlerd");
1727		destroy_workqueue(kmultipathd);
1728		dm_unregister_target(&multipath_target);
1729		kmem_cache_destroy(_mpio_cache);
1730		return -ENOMEM;
1731	}
1732
1733	DMINFO("version %u.%u.%u loaded",
1734	       multipath_target.version[0], multipath_target.version[1],
1735	       multipath_target.version[2]);
1736
1737	return r;
1738}
1739
1740static void __exit dm_multipath_exit(void)
1741{
1742	destroy_workqueue(kmpath_handlerd);
1743	destroy_workqueue(kmultipathd);
1744
1745	dm_unregister_target(&multipath_target);
1746	kmem_cache_destroy(_mpio_cache);
1747}
1748
1749module_init(dm_multipath_init);
1750module_exit(dm_multipath_exit);
1751
1752MODULE_DESCRIPTION(DM_NAME " multipath target");
1753MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
1754MODULE_LICENSE("GPL");