/* drivers/md/dm-mpath.c, Linux v3.5.6 */

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	unsigned queue_size;
	struct work_struct process_queued_ios;
	struct list_head queued_ios;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each I/O we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256	/* Mempool size */

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		INIT_LIST_HEAD(&m->queued_ios);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

static int set_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return -ENOMEM;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return 0;
}

static void clear_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio = info->ptr;

	info->ptr = NULL;
	mempool_free(mpio, m->mpio_pool);
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	m->pg_init_count++;
	m->pg_init_required = 0;
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

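/*
 * Illustrative walk-through of the loop above: with PG1 bypassed and
 * PG2 usable, the first pass (bypassed == 1) skips PG1 and selects a
 * path in PG2; only if every non-bypassed PG has no usable path does
 * the second pass (bypassed == 0) try PG1, and then it sets
 * pg_init_delay_retry so the controller is not hammered.
 */
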
/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}

static int map_io(struct multipath *m, struct request *clone,
		  union map_info *map_context, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = map_context->ptr;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);

		r = map_io(m, clone, info, 1);
		if (r < 0) {
			clear_mapinfo(m, info);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			clear_mapinfo(m, info);
			dm_requeue_unmapped_request(clone);
		}
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);

	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
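/*
 * Illustrative example (hypothetical device numbers): the table line
 *
 *   0 1024 multipath 0 0 2 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *                        round-robin 0 1 1 8:48 1000
 *
 * requests no feature args and no hardware handler, and defines two
 * priority groups (initially PG 1), the first with two round-robin
 * paths and the second with one.
 */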
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
			       struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->hw_handler_name) {
		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);

		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/*
			 * Already attached to different hw_handler,
			 * try to reattach with correct one.
			 */
			scsi_dh_detach(q);
			r = scsi_dh_attach(q, m->hw_handler_name);
		}

		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
							"handler parameters";
				scsi_dh_detach(q);
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!m->hw_handler_name)	/* kstrdup() can fail under memory pressure */
		return -EINVAL;

	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
				     "scsi_dh_%s", m->hw_handler_name)) {
		ti->error = "unknown hardware handler type";
		ret = -EINVAL;
		goto fail;
	}

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}
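
/*
 * Note on the buffer built above: hw_handler_params is a NUL-separated
 * sequence of strings. For two handler args "A" and "B" it holds
 * "2\0A\0B" - the argument count first, then each argument in turn.
 */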

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 5, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work_sync(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Map cloned requests
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	int r;
	struct multipath *m = (struct multipath *) ti->private;

	if (set_mapinfo(m, map_context) < 0)
		/* ENOMEM, requeue */
		return DM_MAPIO_REQUEUE;

	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	r = map_io(m, clone, map_context, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		clear_mapinfo(m, map_context);

	return r;
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		      pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		      pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
		/* fall through */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still ongoing */
		goto out;

	if (!m->pg_init_required)
		m->queue_io = 0;

	m->pg_init_delay_retry = delay_retry;
	queue_work(kmultipathd, &m->process_queued_ios);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				pg_init_done, pgpath);
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones.  If we queue them inside the multipath target,
	 * we need to make bio clones, which requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath;
	struct path_selector *ps;
	int r;

	BUG_ON(!mpio);	/* check before dereferencing mpio below */

	pgpath = mpio->pgpath;

	r = do_end_io(m, clone, error, mpio);
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	clear_mapinfo(m, map_context);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;

	queue_if_no_path(m, 0, 1);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
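/*
 * For example (illustrative; a one-group map with two active paths and
 * a selector that emits no status args), STATUSTYPE_INFO could yield:
 *
 *   2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 */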
static int multipath_status(struct dm_target *ti, status_type_t type,
			    char *result, unsigned int maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = (struct multipath *) ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
	else {
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

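/*
 * These messages are normally sent through the device-mapper message
 * interface, e.g. (illustrative device and map names):
 *
 *   dmsetup message mpath0 0 fail_path 8:32
 *   dmsetup message mpath0 0 queue_if_no_path
 */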
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = (struct multipath *) ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strcasecmp(argv[0], "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			goto out;
		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	if (!strcasecmp(argv[0], "disable_group")) {
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
	} else if (!strcasecmp(argv[0], "enable_group")) {
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
	} else if (!strcasecmp(argv[0], "switch_group")) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strcasecmp(argv[0], "reinstate_path"))
		action = reinstate_path;
	else if (!strcasecmp(argv[0], "fail_path"))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
			   unsigned long arg)
{
	struct multipath *m = ti->private;
	struct pgpath *pgpath;
	struct block_device *bdev;
	fmode_t mode;
	unsigned long flags;
	int r;

again:
	bdev = NULL;
	mode = 0;
	r = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		mode = pgpath->path.dev->mode;
	}

	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
		r = -EAGAIN;
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	if (r == -EAGAIN && !fatal_signal_pending(current)) {
		queue_work(kmultipathd, &m->process_queued_ios);
		msleep(10);
		goto again;
	}

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return dm_underlying_device_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid triggering
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is at least one non-busy active path, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 4, 0},
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.ioctl  = multipath_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_mpio_cache);
		return -EINVAL;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		destroy_workqueue(kmultipathd);
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");

/* drivers/md/dm-mpath.c, Linux v4.17 */

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-rq.h"
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;

	bool is_active:1;		/* Path status */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;

	bool bypassed:1;		/* Temporarily bypass this PG? */
};

/* Multipath context */
struct multipath {
	unsigned long flags;		/* Multipath state flags */

	spinlock_t lock;
	enum dm_queue_mode queue_mode;

	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */

	atomic_t nr_valid_paths;	/* Total number of usable paths */
	unsigned nr_priority_groups;
	struct list_head priority_groups;

	const char *hw_handler_name;
	char *hw_handler_params;
	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
	atomic_t pg_init_count;		/* Number of times pg_init called */

	struct mutex work_mutex;
	struct work_struct trigger_event;
	struct dm_target *ti;

	struct work_struct process_queued_bios;
	struct bio_list queued_bios;
};

/*
 * Context information attached to each I/O we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);

/*-----------------------------------------------
 * Multipath state flags.
 *-----------------------------------------------*/

#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */

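/*
 * These single-bit flags live in m->flags and are only manipulated via
 * the atomic bitops, e.g. set_bit(MPATHF_QUEUE_IO, &m->flags) and
 * test_bit(MPATHF_QUEUE_IO, &m->flags), which is what lets most fast
 * paths in this version avoid taking m->lock.
 */
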
/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;

	pgpath->is_active = true;

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		spin_lock_init(&m->lock);
		atomic_set(&m->nr_valid_paths, 0);
		INIT_WORK(&m->trigger_event, trigger_event);
		mutex_init(&m->work_mutex);

		m->queue_mode = DM_TYPE_NONE;

		m->ti = ti;
		ti->private = m;
	}

	return m;
}

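/*
 * Initialisation is deliberately two-stage: alloc_multipath() above
 * sets up only mode-independent state, and alloc_multipath_stage2()
 * below finishes the job, presumably because queue_mode (chosen during
 * feature parsing, which is not shown in this excerpt) decides which
 * fields and work items are needed.
 */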
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
	if (m->queue_mode == DM_TYPE_NONE) {
		/*
		 * Default to request-based.
		 */
		if (dm_use_blk_mq(dm_table_get_md(ti->table)))
			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
		else
			m->queue_mode = DM_TYPE_REQUEST_BASED;

	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
		INIT_WORK(&m->process_queued_bios, process_queued_bios);
		/*
		 * bio-based doesn't support any direct scsi_dh management;
		 * it just discovers if a scsi_dh is attached.
		 */
		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
	}

	dm_table_set_type(ti->table, m->queue_mode);

	/*
	 * Init fields that are only used when a scsi_dh is attached
	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
	 */
	set_bit(MPATHF_QUEUE_IO, &m->flags);
	atomic_set(&m->pg_init_in_progress, 0);
	atomic_set(&m->pg_init_count, 0);
	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
	init_waitqueue_head(&m->pg_init_wait);

	return 0;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mutex_destroy(&m->work_mutex);
	kfree(m);
}

static struct dm_mpath_io *get_mpio(union map_info *info)
{
	return info->ptr;
}

static size_t multipath_per_bio_data_size(void)
{
	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
}

static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
{
	return dm_per_bio_data(bio, multipath_per_bio_data_size());
}

static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
	/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
	void *bio_details = mpio + 1;
	return bio_details;
}

static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
	struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
	struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);

	mpio->nr_bytes = bio->bi_iter.bi_size;
	mpio->pgpath = NULL;
	*mpio_p = mpio;

	dm_bio_record(bio_details, bio);
}

 287/*-----------------------------------------------
 288 * Path selection
 289 *-----------------------------------------------*/
 290
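    /*
     * Queue activation work for every active path in the current PG.
     * Returns the number of path activations now in flight; 0 if pg_init
     * is disabled, already in progress, or there is no current PG.
     */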
 291static int __pg_init_all_paths(struct multipath *m)
 292{
 293	struct pgpath *pgpath;
 294	unsigned long pg_init_delay = 0;
 295
 296	lockdep_assert_held(&m->lock);
 297
 298	if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 299		return 0;
 300
 301	atomic_inc(&m->pg_init_count);
 302	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 303
 304	/* Checked here so that MPATHF_PG_INIT_REQUIRED is reset even if there is no current PG */
 305	if (!m->current_pg)
 306		return 0;
 307
 308	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 309		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 310						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 311	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 312		/* Skip failed paths */
 313		if (!pgpath->is_active)
 314			continue;
 315		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 316				       pg_init_delay))
 317			atomic_inc(&m->pg_init_in_progress);
 318	}
 319	return atomic_read(&m->pg_init_in_progress);
 320}
 321
 322static int pg_init_all_paths(struct multipath *m)
 323{
 324	int ret;
 325	unsigned long flags;
 326
 327	spin_lock_irqsave(&m->lock, flags);
 328	ret = __pg_init_all_paths(m);
 329	spin_unlock_irqrestore(&m->lock, flags);
 330
 331	return ret;
 332}
 333
 334static void __switch_pg(struct multipath *m, struct priority_group *pg)
 335{
 336	m->current_pg = pg;
 337
 338	/* Must we initialise the PG first, and queue I/O till it's ready? */
 339	if (m->hw_handler_name) {
 340		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 341		set_bit(MPATHF_QUEUE_IO, &m->flags);
 342	} else {
 343		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 344		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 345	}
 346
 347	atomic_set(&m->pg_init_count, 0);
 348}
 349
 350static struct pgpath *choose_path_in_pg(struct multipath *m,
 351					struct priority_group *pg,
 352					size_t nr_bytes)
 353{
 354	unsigned long flags;
 355	struct dm_path *path;
 356	struct pgpath *pgpath;
 357
 358	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 359	if (!path)
 360		return ERR_PTR(-ENXIO);
 361
 362	pgpath = path_to_pgpath(path);
 363
 364	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 365		/* Only update current_pgpath if pg changed */
 366		spin_lock_irqsave(&m->lock, flags);
 367		m->current_pgpath = pgpath;
 368		__switch_pg(m, pg);
 369		spin_unlock_irqrestore(&m->lock, flags);
 370	}
 371
 372	return pgpath;
 373}
 374
 375static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 376{
 377	unsigned long flags;
 378	struct priority_group *pg;
 379	struct pgpath *pgpath;
 380	unsigned bypassed = 1;
 381
 382	if (!atomic_read(&m->nr_valid_paths)) {
 383		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 384		goto failed;
 385	}
 386
 387	/* Were we instructed to switch PG? */
 388	if (READ_ONCE(m->next_pg)) {
 389		spin_lock_irqsave(&m->lock, flags);
 390		pg = m->next_pg;
 391		if (!pg) {
 392			spin_unlock_irqrestore(&m->lock, flags);
 393			goto check_current_pg;
 394		}
 395		m->next_pg = NULL;
 396		spin_unlock_irqrestore(&m->lock, flags);
 397		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 398		if (!IS_ERR_OR_NULL(pgpath))
 399			return pgpath;
 400	}
 401
 402	/* Don't change PG until it has no remaining paths */
 403check_current_pg:
 404	pg = READ_ONCE(m->current_pg);
 405	if (pg) {
 406		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 407		if (!IS_ERR_OR_NULL(pgpath))
 408			return pgpath;
 409	}
 410
 411	/*
 412	 * Loop through priority groups until we find a valid path.
 413	 * First time we skip PGs marked 'bypassed'.
 414	 * Second time we only try the ones we skipped, but set
 415	 * pg_init_delay_retry so we do not hammer controllers.
 416	 */
 417	do {
 418		list_for_each_entry(pg, &m->priority_groups, list) {
 419			if (pg->bypassed == !!bypassed)
 420				continue;
 421			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 422			if (!IS_ERR_OR_NULL(pgpath)) {
 423				if (!bypassed)
 424					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 425				return pgpath;
 426			}
 427		}
 428	} while (bypassed--);
 429
 430failed:
 431	spin_lock_irqsave(&m->lock, flags);
 432	m->current_pgpath = NULL;
 433	m->current_pg = NULL;
 434	spin_unlock_irqrestore(&m->lock, flags);
 435
 436	return NULL;
 437}
 438
 439/*
 440 * dm_report_EIO() is a macro instead of a function so that pr_debug()
 441 * reports the name and line number of the call site from which it was
 442 * invoked.
 443 */
 444#define dm_report_EIO(m)						\
 445do {									\
 446	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
 447									\
 448	pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
 449		 dm_device_name(md),					\
 450		 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),	\
 451		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),	\
 452		 dm_noflush_suspending((m)->ti));			\
 453} while (0)
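    /*
     * With dynamic debug enabled this produces a line like the following
     * (hypothetical device name and flag values):
     *   mpath0: returning EIO; QIFNP = 0; SQIFNP = 0; DNFS = 0
     */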
 454
 455/*
 456 * Check whether bios must be queued in the device-mapper core rather
 457 * than here in the target.
 458 *
 459 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
 460 * the same value then we are not between multipath_presuspend()
 461 * and multipath_resume() calls and we have no need to check
 462 * for the DMF_NOFLUSH_SUSPENDING flag.
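     * If they differ, a presuspend has changed the setting, and bios are
     * pushed back only while a noflush suspend is in progress.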
 463 */
 464static bool __must_push_back(struct multipath *m, unsigned long flags)
 465{
 466	return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
 467		 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
 468		dm_noflush_suspending(m->ti));
 469}
 470
 471/*
 472 * The following functions use READ_ONCE to read all of m->flags
 473 * atomically without taking the spinlock.
 474 */
 475static bool must_push_back_rq(struct multipath *m)
 476{
 477	unsigned long flags = READ_ONCE(m->flags);
 478	return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
 479}
 480
 481static bool must_push_back_bio(struct multipath *m)
 482{
 483	unsigned long flags = READ_ONCE(m->flags);
 484	return __must_push_back(m, flags);
 485}
 486
 487/*
 488 * Map cloned requests (request-based multipath)
 489 */
 490static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 491				   union map_info *map_context,
 492				   struct request **__clone)
 493{
 494	struct multipath *m = ti->private;
 495	size_t nr_bytes = blk_rq_bytes(rq);
 496	struct pgpath *pgpath;
 497	struct block_device *bdev;
 498	struct dm_mpath_io *mpio = get_mpio(map_context);
 499	struct request_queue *q;
 500	struct request *clone;
 501
 502	/* Do we need to select a new pgpath? */
 503	pgpath = READ_ONCE(m->current_pgpath);
 504	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 505		pgpath = choose_pgpath(m, nr_bytes);
 506
 507	if (!pgpath) {
 508		if (must_push_back_rq(m))
 509			return DM_MAPIO_DELAY_REQUEUE;
 510		dm_report_EIO(m);	/* Failed */
 511		return DM_MAPIO_KILL;
 512	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 513		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 514		pg_init_all_paths(m);
 515		return DM_MAPIO_DELAY_REQUEUE;
 516	}
 517
 518	mpio->pgpath = pgpath;
 519	mpio->nr_bytes = nr_bytes;
 520
 521	bdev = pgpath->path.dev->bdev;
 522	q = bdev_get_queue(bdev);
 523	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
 524	if (IS_ERR(clone)) {
 525		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 526		if (blk_queue_dying(q)) {
 527			atomic_inc(&m->pg_init_in_progress);
 528			activate_or_offline_path(pgpath);
 529			return DM_MAPIO_DELAY_REQUEUE;
 530		}
 531
 532		/*
 533		 * blk-mq's SCHED_RESTART can cover this requeue, so we
 534		 * needn't deal with it by DELAY_REQUEUE. More importantly,
 535		 * we have to return DM_MAPIO_REQUEUE so that blk-mq can
 536		 * get the queue busy feedback (via BLK_STS_RESOURCE),
 537		 * otherwise I/O merging can suffer.
 538		 */
 539		if (q->mq_ops)
 540			return DM_MAPIO_REQUEUE;
 541		else
 542			return DM_MAPIO_DELAY_REQUEUE;
 543	}
 544	clone->bio = clone->biotail = NULL;
 545	clone->rq_disk = bdev->bd_disk;
 546	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 547	*__clone = clone;
 548
 549	if (pgpath->pg->ps.type->start_io)
 550		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 551					      &pgpath->path,
 552					      nr_bytes);
 553	return DM_MAPIO_REMAPPED;
 554}
 555
 556static void multipath_release_clone(struct request *clone)
 557{
 558	blk_put_request(clone);
 559}
 560
 561/*
 562 * Map cloned bios (bio-based multipath)
 563 */
 564
 565static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 566{
 567	struct pgpath *pgpath;
 568	unsigned long flags;
 569	bool queue_io;
 570
 571	/* Do we need to select a new pgpath? */
 572	pgpath = READ_ONCE(m->current_pgpath);
 573	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 574	if (!pgpath || !queue_io)
 575		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 576
 577	if ((pgpath && queue_io) ||
 578	    (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 579		/* Queue for the daemon to resubmit */
 580		spin_lock_irqsave(&m->lock, flags);
 581		bio_list_add(&m->queued_bios, bio);
 582		spin_unlock_irqrestore(&m->lock, flags);
 583
 584		/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
 585		if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 586			pg_init_all_paths(m);
 587		else if (!queue_io)
 588			queue_work(kmultipathd, &m->process_queued_bios);
 589
 590		return ERR_PTR(-EAGAIN);
 591	}
 592
 593	return pgpath;
 594}
 595
 596static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 597{
 598	struct pgpath *pgpath;
 599	unsigned long flags;
 600
 601	/* Do we need to select a new pgpath? */
 602	/*
 603	 * FIXME: currently only switching path if no path (due to failure, etc)
 604	 * - which negates the point of using a path selector
 605	 */
 606	pgpath = READ_ONCE(m->current_pgpath);
 607	if (!pgpath)
 608		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 609
 610	if (!pgpath) {
 611		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 612			/* Queue for the daemon to resubmit */
 613			spin_lock_irqsave(&m->lock, flags);
 614			bio_list_add(&m->queued_bios, bio);
 615			spin_unlock_irqrestore(&m->lock, flags);
 616			queue_work(kmultipathd, &m->process_queued_bios);
 617
 618			return ERR_PTR(-EAGAIN);
 619		}
 620		return NULL;
 621	}
 622
 623	return pgpath;
 624}
 625
 626static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 627			       struct dm_mpath_io *mpio)
 628{
 629	struct pgpath *pgpath;
 630
 631	if (!m->hw_handler_name)
 632		pgpath = __map_bio_fast(m, bio);
 633	else
 634		pgpath = __map_bio(m, bio);
 635
 636	if (IS_ERR(pgpath))
 637		return DM_MAPIO_SUBMITTED;
 638
 639	if (!pgpath) {
 640		if (must_push_back_bio(m))
 641			return DM_MAPIO_REQUEUE;
 642		dm_report_EIO(m);
 643		return DM_MAPIO_KILL;
 644	}
 645
 646	mpio->pgpath = pgpath;
 647
 648	bio->bi_status = 0;
 649	bio_set_dev(bio, pgpath->path.dev->bdev);
 650	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 651
 652	if (pgpath->pg->ps.type->start_io)
 653		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 654					      &pgpath->path,
 655					      mpio->nr_bytes);
 656	return DM_MAPIO_REMAPPED;
 657}
 658
 659static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 660{
 661	struct multipath *m = ti->private;
 662	struct dm_mpath_io *mpio = NULL;
 663
 664	multipath_init_per_bio_data(bio, &mpio);
 665	return __multipath_map_bio(m, bio, mpio);
 666}
 667
 668static void process_queued_io_list(struct multipath *m)
 669{
 670	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 671		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
 672	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 673		queue_work(kmultipathd, &m->process_queued_bios);
 674}
 675
 676static void process_queued_bios(struct work_struct *work)
 677{
 678	int r;
 679	unsigned long flags;
 680	struct bio *bio;
 681	struct bio_list bios;
 682	struct blk_plug plug;
 683	struct multipath *m =
 684		container_of(work, struct multipath, process_queued_bios);
 685
 686	bio_list_init(&bios);
 687
 688	spin_lock_irqsave(&m->lock, flags);
 689
 690	if (bio_list_empty(&m->queued_bios)) {
 691		spin_unlock_irqrestore(&m->lock, flags);
 692		return;
 693	}
 694
 695	bio_list_merge(&bios, &m->queued_bios);
 696	bio_list_init(&m->queued_bios);
 697
 698	spin_unlock_irqrestore(&m->lock, flags);
 699
 700	blk_start_plug(&plug);
 701	while ((bio = bio_list_pop(&bios))) {
 702		struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 703		dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
 704		r = __multipath_map_bio(m, bio, mpio);
 705		switch (r) {
 706		case DM_MAPIO_KILL:
 707			bio->bi_status = BLK_STS_IOERR;
 708			bio_endio(bio);
 709			break;
 710		case DM_MAPIO_REQUEUE:
 711			bio->bi_status = BLK_STS_DM_REQUEUE;
 712			bio_endio(bio);
 713			break;
 714		case DM_MAPIO_REMAPPED:
 715			generic_make_request(bio);
 716			break;
 717		case DM_MAPIO_SUBMITTED:
 718			break;
 719		default:
 720			WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
 721		}
 722	}
 723	blk_finish_plug(&plug);
 724}
 725
 726/*
 727 * If we run out of usable paths, should we queue I/O or error it?
 728 */
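    /*
     * save_old_value is true only when called from multipath_presuspend(),
     * so that the previous setting can be restored by multipath_resume().
     */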
 729static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 730			    bool save_old_value)
 731{
 732	unsigned long flags;
 733
 734	spin_lock_irqsave(&m->lock, flags);
 735	assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
 736		   (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
 737		   (!save_old_value && queue_if_no_path));
 738	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
 739	spin_unlock_irqrestore(&m->lock, flags);
 740
 741	if (!queue_if_no_path) {
 742		dm_table_run_md_queue_async(m->ti->table);
 743		process_queued_io_list(m);
 744	}
 745
 746	return 0;
 747}
 748
 749/*
 750 * An event is triggered whenever a path is taken out of use.
 751 * Includes path failure and PG bypass.
 752 */
 753static void trigger_event(struct work_struct *work)
 754{
 755	struct multipath *m =
 756		container_of(work, struct multipath, trigger_event);
 757
 758	dm_table_event(m->ti->table);
 759}
 760
 761/*-----------------------------------------------------------------
 762 * Constructor/argument parsing:
 763 * <#multipath feature args> [<arg>]*
 764 * <#hw_handler args> [hw_handler [<arg>]*]
 765 * <#priority groups>
 766 * <initial priority group>
 767 *     [<selector> <#selector args> [<arg>]*
 768 *      <#paths> <#per-path selector args>
 769 *         [<path> [<arg>]* ]+ ]+
 770 *---------------------------------------------------------------*/
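    /*
     * For illustration (hypothetical device numbers), a minimal
     * request-based map with one round-robin PG of two paths, no feature
     * args and no hardware handler would be constructed from:
     *
     *   0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
     *
     * i.e. 0 feature args, 0 hw_handler args, 1 PG with initial PG 1,
     * selector "round-robin" taking 0 selector args, 2 paths with 1
     * per-path selector arg each (the repeat count).
     */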
 771static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 772			       struct dm_target *ti)
 773{
 774	int r;
 775	struct path_selector_type *pst;
 776	unsigned ps_argc;
 777
 778	static const struct dm_arg _args[] = {
 779		{0, 1024, "invalid number of path selector args"},
 780	};
 781
 782	pst = dm_get_path_selector(dm_shift_arg(as));
 783	if (!pst) {
 784		ti->error = "unknown path selector type";
 785		return -EINVAL;
 786	}
 787
 788	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 789	if (r) {
 790		dm_put_path_selector(pst);
 791		return -EINVAL;
 792	}
 793
 794	r = pst->create(&pg->ps, ps_argc, as->argv);
 795	if (r) {
 796		dm_put_path_selector(pst);
 797		ti->error = "path selector constructor failed";
 798		return r;
 799	}
 800
 801	pg->ps.type = pst;
 802	dm_consume_args(as, ps_argc);
 803
 804	return 0;
 805}
 806
 807static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 808			 const char *attached_handler_name, char **error)
 809{
 810	struct request_queue *q = bdev_get_queue(bdev);
 811	int r;
 812
 813	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 814retain:
 815		if (attached_handler_name) {
 816			/*
 817			 * Clear any hw_handler_params associated with a
 818			 * handler that isn't already attached.
 819			 */
 820			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
 821				kfree(m->hw_handler_params);
 822				m->hw_handler_params = NULL;
 823			}
 824
 825			/*
 826			 * Reset hw_handler_name to match the attached handler
 827			 *
 828			 * NB. This modifies the table line to show the actual
 829			 * handler instead of the original table passed in.
 830			 */
 831			kfree(m->hw_handler_name);
 832			m->hw_handler_name = attached_handler_name;
 833		}
 834	}
 835
 836	if (m->hw_handler_name) {
 837		r = scsi_dh_attach(q, m->hw_handler_name);
 838		if (r == -EBUSY) {
 839			char b[BDEVNAME_SIZE];
 840
 841			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
 842			       bdevname(bdev, b));
 843			goto retain;
 844		}
 845		if (r < 0) {
 846			*error = "error attaching hardware handler";
 847			return r;
 848		}
 849
 850		if (m->hw_handler_params) {
 851			r = scsi_dh_set_params(q, m->hw_handler_params);
 852			if (r < 0) {
 853				*error = "unable to set hardware handler parameters";
 854				return r;
 855			}
 856		}
 857	}
 858
 859	return 0;
 860}
 861
 862static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 863				 struct dm_target *ti)
 864{
 865	int r;
 866	struct pgpath *p;
 867	struct multipath *m = ti->private;
 868	struct request_queue *q;
 869	const char *attached_handler_name;
 870
 871	/* we need at least a path arg */
 872	if (as->argc < 1) {
 873		ti->error = "no device given";
 874		return ERR_PTR(-EINVAL);
 875	}
 876
 877	p = alloc_pgpath();
 878	if (!p)
 879		return ERR_PTR(-ENOMEM);
 880
 881	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 882			  &p->path.dev);
 883	if (r) {
 884		ti->error = "error getting device";
 885		goto bad;
 886	}
 887
 888	q = bdev_get_queue(p->path.dev->bdev);
 889	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 890	if (attached_handler_name || m->hw_handler_name) {
 891		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 892		r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
 893		if (r) {
 894			dm_put_device(ti, p->path.dev);
 895			goto bad;
 896		}
 897	}
 898
 899	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 900	if (r) {
 901		dm_put_device(ti, p->path.dev);
 902		goto bad;
 903	}
 904
 905	return p;
 906 bad:
 907	free_pgpath(p);
 908	return ERR_PTR(r);
 909}
 910
 911static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 912						   struct multipath *m)
 913{
 914	static const struct dm_arg _args[] = {
 915		{1, 1024, "invalid number of paths"},
 916		{0, 1024, "invalid number of selector args"}
 917	};
 918
 919	int r;
 920	unsigned i, nr_selector_args, nr_args;
 921	struct priority_group *pg;
 922	struct dm_target *ti = m->ti;
 923
 924	if (as->argc < 2) {
 925		as->argc = 0;
 926		ti->error = "not enough priority group arguments";
 927		return ERR_PTR(-EINVAL);
 928	}
 929
 930	pg = alloc_priority_group();
 931	if (!pg) {
 932		ti->error = "couldn't allocate priority group";
 933		return ERR_PTR(-ENOMEM);
 934	}
 935	pg->m = m;
 936
 937	r = parse_path_selector(as, pg, ti);
 938	if (r)
 939		goto bad;
 940
 941	/*
 942	 * read the paths
 943	 */
 944	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 945	if (r)
 946		goto bad;
 947
 948	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 949	if (r)
 950		goto bad;
 951
 952	nr_args = 1 + nr_selector_args;
 953	for (i = 0; i < pg->nr_pgpaths; i++) {
 954		struct pgpath *pgpath;
 955		struct dm_arg_set path_args;
 956
 957		if (as->argc < nr_args) {
 958			ti->error = "not enough path parameters";
 959			r = -EINVAL;
 960			goto bad;
 961		}
 962
 963		path_args.argc = nr_args;
 964		path_args.argv = as->argv;
 965
 966		pgpath = parse_path(&path_args, &pg->ps, ti);
 967		if (IS_ERR(pgpath)) {
 968			r = PTR_ERR(pgpath);
 969			goto bad;
 970		}
 971
 972		pgpath->pg = pg;
 973		list_add_tail(&pgpath->list, &pg->pgpaths);
 974		dm_consume_args(as, nr_args);
 975	}
 976
 977	return pg;
 978
 979 bad:
 980	free_priority_group(pg, ti);
 981	return ERR_PTR(r);
 982}
 983
 984static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 985{
 986	unsigned hw_argc;
 987	int ret;
 988	struct dm_target *ti = m->ti;
 989
 990	static const struct dm_arg _args[] = {
 991		{0, 1024, "invalid number of hardware handler args"},
 992	};
 993
 994	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
 995		return -EINVAL;
 996
 997	if (!hw_argc)
 998		return 0;
 999
1000	if (m->queue_mode == DM_TYPE_BIO_BASED) {
1001		dm_consume_args(as, hw_argc);
1002		DMERR("bio-based multipath doesn't allow hardware handler args");
1003		return 0;
1004	}
1005
1006	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
1007	if (!m->hw_handler_name)
1008		return -EINVAL;
1009
1010	if (hw_argc > 1) {
1011		char *p;
1012		int i, j, len = 4;
1013
1014		for (i = 0; i <= hw_argc - 2; i++)
1015			len += strlen(as->argv[i]) + 1;
1016		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
1017		if (!p) {
1018			ti->error = "memory allocation failed";
1019			ret = -ENOMEM;
1020			goto fail;
1021		}
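    		/*
    		 * Pack the params as NUL-separated strings: the argument
    		 * count first, then each argument (p is advanced past the
    		 * terminating NUL that sprintf() writes).
    		 */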
1022		j = sprintf(p, "%d", hw_argc - 1);
1023		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
1024			j = sprintf(p, "%s", as->argv[i]);
1025	}
1026	dm_consume_args(as, hw_argc - 1);
1027
1028	return 0;
1029fail:
1030	kfree(m->hw_handler_name);
1031	m->hw_handler_name = NULL;
1032	return ret;
1033}
1034
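    /*
     * Example feature section (hypothetical):
     *
     *   4 queue_if_no_path pg_init_retries 5 retain_attached_hw_handler
     *
     * i.e. four words follow the count: one boolean feature, one
     * key/value pair and another boolean feature.
     */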
1035static int parse_features(struct dm_arg_set *as, struct multipath *m)
1036{
1037	int r;
1038	unsigned argc;
1039	struct dm_target *ti = m->ti;
1040	const char *arg_name;
1041
1042	static const struct dm_arg _args[] = {
1043		{0, 8, "invalid number of feature args"},
1044		{1, 50, "pg_init_retries must be between 1 and 50"},
1045		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1046	};
1047
1048	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1049	if (r)
1050		return -EINVAL;
1051
1052	if (!argc)
1053		return 0;
1054
1055	do {
1056		arg_name = dm_shift_arg(as);
1057		argc--;
1058
1059		if (!strcasecmp(arg_name, "queue_if_no_path")) {
1060			r = queue_if_no_path(m, true, false);
1061			continue;
1062		}
1063
1064		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1065			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1066			continue;
1067		}
1068
1069		if (!strcasecmp(arg_name, "pg_init_retries") &&
1070		    (argc >= 1)) {
1071			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1072			argc--;
1073			continue;
1074		}
1075
1076		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1077		    (argc >= 1)) {
1078			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1079			argc--;
1080			continue;
1081		}
1082
1083		if (!strcasecmp(arg_name, "queue_mode") &&
1084		    (argc >= 1)) {
1085			const char *queue_mode_name = dm_shift_arg(as);
1086
1087			if (!strcasecmp(queue_mode_name, "bio"))
1088				m->queue_mode = DM_TYPE_BIO_BASED;
1089			else if (!strcasecmp(queue_mode_name, "rq"))
1090				m->queue_mode = DM_TYPE_REQUEST_BASED;
1091			else if (!strcasecmp(queue_mode_name, "mq"))
1092				m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
1093			else {
1094				ti->error = "Unknown 'queue_mode' requested";
1095				r = -EINVAL;
1096			}
1097			argc--;
1098			continue;
1099		}
1100
1101		ti->error = "Unrecognised multipath feature request";
1102		r = -EINVAL;
1103	} while (argc && !r);
1104
1105	return r;
1106}
1107
1108static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1109{
1110	/* target arguments */
1111	static const struct dm_arg _args[] = {
1112		{0, 1024, "invalid number of priority groups"},
1113		{0, 1024, "invalid initial priority group number"},
1114	};
1115
1116	int r;
1117	struct multipath *m;
1118	struct dm_arg_set as;
1119	unsigned pg_count = 0;
1120	unsigned next_pg_num;
1121
1122	as.argc = argc;
1123	as.argv = argv;
1124
1125	m = alloc_multipath(ti);
1126	if (!m) {
1127		ti->error = "can't allocate multipath";
1128		return -EINVAL;
1129	}
1130
1131	r = parse_features(&as, m);
1132	if (r)
1133		goto bad;
1134
1135	r = alloc_multipath_stage2(ti, m);
1136	if (r)
1137		goto bad;
1138
1139	r = parse_hw_handler(&as, m);
1140	if (r)
1141		goto bad;
1142
1143	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1144	if (r)
1145		goto bad;
1146
1147	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1148	if (r)
1149		goto bad;
1150
1151	if ((!m->nr_priority_groups && next_pg_num) ||
1152	    (m->nr_priority_groups && !next_pg_num)) {
1153		ti->error = "invalid initial priority group";
1154		r = -EINVAL;
1155		goto bad;
1156	}
1157
1158	/* parse the priority groups */
1159	while (as.argc) {
1160		struct priority_group *pg;
1161		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1162
1163		pg = parse_priority_group(&as, m);
1164		if (IS_ERR(pg)) {
1165			r = PTR_ERR(pg);
1166			goto bad;
1167		}
1168
1169		nr_valid_paths += pg->nr_pgpaths;
1170		atomic_set(&m->nr_valid_paths, nr_valid_paths);
1171
1172		list_add_tail(&pg->list, &m->priority_groups);
1173		pg_count++;
1174		pg->pg_num = pg_count;
1175		if (!--next_pg_num)
1176			m->next_pg = pg;
1177	}
1178
1179	if (pg_count != m->nr_priority_groups) {
1180		ti->error = "priority group count mismatch";
1181		r = -EINVAL;
1182		goto bad;
1183	}
1184
1185	ti->num_flush_bios = 1;
1186	ti->num_discard_bios = 1;
1187	ti->num_write_same_bios = 1;
1188	ti->num_write_zeroes_bios = 1;
1189	if (m->queue_mode == DM_TYPE_BIO_BASED)
1190		ti->per_io_data_size = multipath_per_bio_data_size();
1191	else
1192		ti->per_io_data_size = sizeof(struct dm_mpath_io);
1193
1194	return 0;
1195
1196 bad:
1197	free_multipath(m);
1198	return r;
1199}
1200
1201static void multipath_wait_for_pg_init_completion(struct multipath *m)
1202{
1203	DEFINE_WAIT(wait);
1204
1205	while (1) {
1206		prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1207
1208		if (!atomic_read(&m->pg_init_in_progress))
1209			break;
1210
1211		io_schedule();
1212	}
1213	finish_wait(&m->pg_init_wait, &wait);
1214}
1215
1216static void flush_multipath_work(struct multipath *m)
1217{
1218	if (m->hw_handler_name) {
1219		set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1220		smp_mb__after_atomic();
1221
1222		flush_workqueue(kmpath_handlerd);
1223		multipath_wait_for_pg_init_completion(m);
1224
1225		clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1226		smp_mb__after_atomic();
1227	}
1228
1229	flush_workqueue(kmultipathd);
1230	flush_work(&m->trigger_event);
1231}
1232
1233static void multipath_dtr(struct dm_target *ti)
1234{
1235	struct multipath *m = ti->private;
1236
1237	flush_multipath_work(m);
1238	free_multipath(m);
1239}
1240
1241/*
1242 * Take a path out of use.
1243 */
1244static int fail_path(struct pgpath *pgpath)
1245{
1246	unsigned long flags;
1247	struct multipath *m = pgpath->pg->m;
1248
1249	spin_lock_irqsave(&m->lock, flags);
1250
1251	if (!pgpath->is_active)
1252		goto out;
1253
1254	DMWARN("Failing path %s.", pgpath->path.dev->name);
1255
1256	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1257	pgpath->is_active = false;
1258	pgpath->fail_count++;
1259
1260	atomic_dec(&m->nr_valid_paths);
1261
1262	if (pgpath == m->current_pgpath)
1263		m->current_pgpath = NULL;
1264
1265	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1266		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1267
1268	schedule_work(&m->trigger_event);
1269
1270out:
1271	spin_unlock_irqrestore(&m->lock, flags);
1272
1273	return 0;
1274}
1275
1276/*
1277 * Reinstate a previously-failed path
1278 */
1279static int reinstate_path(struct pgpath *pgpath)
1280{
1281	int r = 0, run_queue = 0;
1282	unsigned long flags;
1283	struct multipath *m = pgpath->pg->m;
1284	unsigned nr_valid_paths;
1285
1286	spin_lock_irqsave(&m->lock, flags);
1287
1288	if (pgpath->is_active)
1289		goto out;
1290
1291	DMWARN("Reinstating path %s.", pgpath->path.dev->name);
1292
1293	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1294	if (r)
1295		goto out;
1296
1297	pgpath->is_active = true;
1298
1299	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1300	if (nr_valid_paths == 1) {
1301		m->current_pgpath = NULL;
1302		run_queue = 1;
1303	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1304		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1305			atomic_inc(&m->pg_init_in_progress);
1306	}
1307
1308	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1309		       pgpath->path.dev->name, nr_valid_paths);
1310
1311	schedule_work(&m->trigger_event);
1312
1313out:
1314	spin_unlock_irqrestore(&m->lock, flags);
1315	if (run_queue) {
1316		dm_table_run_md_queue_async(m->ti->table);
1317		process_queued_io_list(m);
1318	}
1319
1320	return r;
1321}
1322
1323/*
1324 * Fail or reinstate all paths that match the provided struct dm_dev.
1325 */
1326static int action_dev(struct multipath *m, struct dm_dev *dev,
1327		      action_fn action)
1328{
1329	int r = -EINVAL;
1330	struct pgpath *pgpath;
1331	struct priority_group *pg;
1332
1333	list_for_each_entry(pg, &m->priority_groups, list) {
1334		list_for_each_entry(pgpath, &pg->pgpaths, list) {
1335			if (pgpath->path.dev == dev)
1336				r = action(pgpath);
1337		}
1338	}
1339
1340	return r;
1341}
1342
1343/*
1344 * Temporarily try to avoid having to use the specified PG
1345 */
1346static void bypass_pg(struct multipath *m, struct priority_group *pg,
1347		      bool bypassed)
1348{
1349	unsigned long flags;
1350
1351	spin_lock_irqsave(&m->lock, flags);
1352
1353	pg->bypassed = bypassed;
1354	m->current_pgpath = NULL;
1355	m->current_pg = NULL;
1356
1357	spin_unlock_irqrestore(&m->lock, flags);
1358
1359	schedule_work(&m->trigger_event);
1360}
1361
1362/*
1363 * Switch to using the specified PG from the next I/O that gets mapped
1364 */
1365static int switch_pg_num(struct multipath *m, const char *pgstr)
1366{
1367	struct priority_group *pg;
1368	unsigned pgnum;
1369	unsigned long flags;
1370	char dummy;
1371
1372	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1373	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1374		DMWARN("invalid PG number supplied to switch_pg_num");
1375		return -EINVAL;
1376	}
1377
1378	spin_lock_irqsave(&m->lock, flags);
1379	list_for_each_entry(pg, &m->priority_groups, list) {
1380		pg->bypassed = false;
1381		if (--pgnum)
1382			continue;
1383
1384		m->current_pgpath = NULL;
1385		m->current_pg = NULL;
1386		m->next_pg = pg;
1387	}
1388	spin_unlock_irqrestore(&m->lock, flags);
1389
1390	schedule_work(&m->trigger_event);
1391	return 0;
1392}
1393
1394/*
1395 * Set/clear bypassed status of a PG.
1396 * PGs are numbered upwards from 1 in the order they were declared.
1397 */
1398static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1399{
1400	struct priority_group *pg;
1401	unsigned pgnum;
1402	char dummy;
1403
1404	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1405	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1406		DMWARN("invalid PG number supplied to bypass_pg");
1407		return -EINVAL;
1408	}
1409
1410	list_for_each_entry(pg, &m->priority_groups, list) {
1411		if (!--pgnum)
1412			break;
1413	}
1414
1415	bypass_pg(m, pg, bypassed);
1416	return 0;
1417}
1418
1419/*
1420 * Should we retry pg_init immediately?
1421 */
1422static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1423{
1424	unsigned long flags;
1425	bool limit_reached = false;
1426
1427	spin_lock_irqsave(&m->lock, flags);
1428
1429	if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1430	    !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1431		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1432	else
1433		limit_reached = true;
1434
1435	spin_unlock_irqrestore(&m->lock, flags);
1436
1437	return limit_reached;
1438}
1439
1440static void pg_init_done(void *data, int errors)
1441{
1442	struct pgpath *pgpath = data;
1443	struct priority_group *pg = pgpath->pg;
1444	struct multipath *m = pg->m;
1445	unsigned long flags;
1446	bool delay_retry = false;
1447
1448	/* device or driver problems */
1449	switch (errors) {
1450	case SCSI_DH_OK:
1451		break;
1452	case SCSI_DH_NOSYS:
1453		if (!m->hw_handler_name) {
1454			errors = 0;
1455			break;
1456		}
1457		DMERR("Could not failover the device: Handler scsi_dh_%s "
1458		      "Error %d.", m->hw_handler_name, errors);
1459		/*
1460		 * Fail path for now, so we do not ping pong
1461		 */
1462		fail_path(pgpath);
1463		break;
1464	case SCSI_DH_DEV_TEMP_BUSY:
1465		/*
1466		 * Probably doing something like FW upgrade on the
1467		 * controller, so try the other pg.
1468		 */
1469		bypass_pg(m, pg, true);
1470		break;
1471	case SCSI_DH_RETRY:
1472		/* Wait before retrying. */
1473		delay_retry = true;
1474		/* fall through */
1475	case SCSI_DH_IMM_RETRY:
1476	case SCSI_DH_RES_TEMP_UNAVAIL:
1477		if (pg_init_limit_reached(m, pgpath))
1478			fail_path(pgpath);
1479		errors = 0;
1480		break;
1481	case SCSI_DH_DEV_OFFLINED:
1482	default:
1483		/*
1484		 * We probably do not want to fail the path for a device
1485		 * error, but this is what the old dm did. In future
1486		 * patches we can do more advanced handling.
1487		 */
1488		fail_path(pgpath);
1489	}
1490
1491	spin_lock_irqsave(&m->lock, flags);
1492	if (errors) {
1493		if (pgpath == m->current_pgpath) {
1494			DMERR("Could not failover device. Error %d.", errors);
1495			m->current_pgpath = NULL;
1496			m->current_pg = NULL;
1497		}
1498	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1499		pg->bypassed = false;
1500
1501	if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1502		/* Activations of other paths are still ongoing */
1503		goto out;
1504
1505	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1506		if (delay_retry)
1507			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1508		else
1509			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1510
1511		if (__pg_init_all_paths(m))
1512			goto out;
1513	}
1514	clear_bit(MPATHF_QUEUE_IO, &m->flags);
1515
1516	process_queued_io_list(m);
1517
1518	/*
1519	 * Wake up any thread waiting to suspend.
1520	 */
1521	wake_up(&m->pg_init_wait);
1522
1523out:
1524	spin_unlock_irqrestore(&m->lock, flags);
1525}
1526
1527static void activate_or_offline_path(struct pgpath *pgpath)
1528{
1529	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1530
1531	if (pgpath->is_active && !blk_queue_dying(q))
1532		scsi_dh_activate(q, pg_init_done, pgpath);
1533	else
1534		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1535}
1536
1537static void activate_path_work(struct work_struct *work)
1538{
1539	struct pgpath *pgpath =
1540		container_of(work, struct pgpath, activate_path.work);
1541
1542	activate_or_offline_path(pgpath);
1543}
1544
1545static int multipath_end_io(struct dm_target *ti, struct request *clone,
1546			    blk_status_t error, union map_info *map_context)
1547{
1548	struct dm_mpath_io *mpio = get_mpio(map_context);
1549	struct pgpath *pgpath = mpio->pgpath;
1550	int r = DM_ENDIO_DONE;
1551
1552	/*
1553	 * We don't queue any clone request inside the multipath target
1554	 * during end I/O handling, since those clone requests don't have
1555		 * bio clones.  If we queued them inside the multipath target,
1556		 * we would need to make bio clones, which requires memory allocation.
1557	 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
1558	 *  don't have bio clones.)
1559	 * Instead of queueing the clone request here, we queue the original
1560	 * request into dm core, which will remake a clone request and
1561	 * clone bios for it and resubmit it later.
1562	 */
1563	if (error && blk_path_error(error)) {
1564		struct multipath *m = ti->private;
1565
1566		if (error == BLK_STS_RESOURCE)
1567			r = DM_ENDIO_DELAY_REQUEUE;
1568		else
1569			r = DM_ENDIO_REQUEUE;
1570
1571		if (pgpath)
1572			fail_path(pgpath);
1573
1574		if (atomic_read(&m->nr_valid_paths) == 0 &&
1575		    !must_push_back_rq(m)) {
1576			if (error == BLK_STS_IOERR)
1577				dm_report_EIO(m);
1578			/* complete with the original error */
1579			r = DM_ENDIO_DONE;
1580		}
1581	}
1582
1583	if (pgpath) {
1584		struct path_selector *ps = &pgpath->pg->ps;
1585
1586		if (ps->type->end_io)
1587			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1588	}
1589
1590	return r;
1591}
1592
1593static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1594				blk_status_t *error)
1595{
1596	struct multipath *m = ti->private;
1597	struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1598	struct pgpath *pgpath = mpio->pgpath;
1599	unsigned long flags;
1600	int r = DM_ENDIO_DONE;
1601
1602	if (!*error || !blk_path_error(*error))
1603		goto done;
1604
1605	if (pgpath)
1606		fail_path(pgpath);
1607
1608	if (atomic_read(&m->nr_valid_paths) == 0 &&
1609	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1610		if (must_push_back_bio(m)) {
1611			r = DM_ENDIO_REQUEUE;
1612		} else {
1613			dm_report_EIO(m);
1614			*error = BLK_STS_IOERR;
1615		}
1616		goto done;
1617	}
1618
1619	spin_lock_irqsave(&m->lock, flags);
1620	bio_list_add(&m->queued_bios, clone);
1621	spin_unlock_irqrestore(&m->lock, flags);
1622	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1623		queue_work(kmultipathd, &m->process_queued_bios);
1624
1625	r = DM_ENDIO_INCOMPLETE;
1626done:
1627	if (pgpath) {
1628		struct path_selector *ps = &pgpath->pg->ps;
1629
1630		if (ps->type->end_io)
1631			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1632	}
1633
1634	return r;
1635}
1636
1637/*
1638 * Suspend can't complete until all the I/O is processed, so if
1639 * the last path fails we must error any remaining I/O.
1640 * Note that if freeze_bdev() fails while suspending, the
1641 * queue_if_no_path state is lost - userspace should reset it.
1642 */
1643static void multipath_presuspend(struct dm_target *ti)
1644{
1645	struct multipath *m = ti->private;
1646
1647	queue_if_no_path(m, false, true);
1648}
1649
1650static void multipath_postsuspend(struct dm_target *ti)
1651{
1652	struct multipath *m = ti->private;
1653
1654	mutex_lock(&m->work_mutex);
1655	flush_multipath_work(m);
1656	mutex_unlock(&m->work_mutex);
1657}
1658
1659/*
1660 * Restore the queue_if_no_path setting.
1661 */
1662static void multipath_resume(struct dm_target *ti)
1663{
1664	struct multipath *m = ti->private;
1665	unsigned long flags;
1666
1667	spin_lock_irqsave(&m->lock, flags);
1668	assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
1669		   test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1670	spin_unlock_irqrestore(&m->lock, flags);
1671}
1672
1673/*
1674 * Info output has the following format:
1675 * num_multipath_feature_args [multipath_feature_args]*
1676 * num_handler_status_args [handler_status_args]*
1677 * num_groups init_group_number
1678 *            [A|D|E num_ps_status_args [ps_status_args]*
1679 *             num_paths num_selector_args
1680 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1681 *
1682 * Table output has the following format (identical to the constructor string):
1683 * num_feature_args [features_args]*
1684 * num_handler_args hw_handler [hw_handler_args]*
1685 * num_groups init_group_number
1686 *     [priority selector-name num_ps_args [ps_args]*
1687 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
1688 */
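    /*
     * For the hypothetical map from the constructor example above, the
     * table output would echo the constructor string:
     *   0 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
     */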
1689static void multipath_status(struct dm_target *ti, status_type_t type,
1690			     unsigned status_flags, char *result, unsigned maxlen)
1691{
1692	int sz = 0;
1693	unsigned long flags;
1694	struct multipath *m = ti->private;
1695	struct priority_group *pg;
1696	struct pgpath *p;
1697	unsigned pg_num;
1698	char state;
1699
1700	spin_lock_irqsave(&m->lock, flags);
1701
1702	/* Features */
1703	if (type == STATUSTYPE_INFO)
1704		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1705		       atomic_read(&m->pg_init_count));
1706	else {
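    		/*
    		 * Emit the number of feature words that follow: boolean
    		 * features contribute one word, key/value features two.
    		 */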
1707		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1708			      (m->pg_init_retries > 0) * 2 +
1709			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1710			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1711			      (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1712
1713		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1714			DMEMIT("queue_if_no_path ");
1715		if (m->pg_init_retries)
1716			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1717		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1718			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1719		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1720			DMEMIT("retain_attached_hw_handler ");
1721		if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1722			switch (m->queue_mode) {
1723			case DM_TYPE_BIO_BASED:
1724				DMEMIT("queue_mode bio ");
1725				break;
1726			case DM_TYPE_MQ_REQUEST_BASED:
1727				DMEMIT("queue_mode mq ");
1728				break;
1729			default:
1730				WARN_ON_ONCE(true);
1731				break;
1732			}
1733		}
1734	}
1735
1736	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1737		DMEMIT("0 ");
1738	else
1739		DMEMIT("1 %s ", m->hw_handler_name);
1740
1741	DMEMIT("%u ", m->nr_priority_groups);
1742
1743	if (m->next_pg)
1744		pg_num = m->next_pg->pg_num;
1745	else if (m->current_pg)
1746		pg_num = m->current_pg->pg_num;
1747	else
1748		pg_num = (m->nr_priority_groups ? 1 : 0);
1749
1750	DMEMIT("%u ", pg_num);
1751
1752	switch (type) {
1753	case STATUSTYPE_INFO:
1754		list_for_each_entry(pg, &m->priority_groups, list) {
1755			if (pg->bypassed)
1756				state = 'D';	/* Disabled */
1757			else if (pg == m->current_pg)
1758				state = 'A';	/* Currently Active */
1759			else
1760				state = 'E';	/* Enabled */
1761
1762			DMEMIT("%c ", state);
1763
1764			if (pg->ps.type->status)
1765				sz += pg->ps.type->status(&pg->ps, NULL, type,
1766							  result + sz,
1767							  maxlen - sz);
1768			else
1769				DMEMIT("0 ");
1770
1771			DMEMIT("%u %u ", pg->nr_pgpaths,
1772			       pg->ps.type->info_args);
1773
1774			list_for_each_entry(p, &pg->pgpaths, list) {
1775				DMEMIT("%s %s %u ", p->path.dev->name,
1776				       p->is_active ? "A" : "F",
1777				       p->fail_count);
1778				if (pg->ps.type->status)
1779					sz += pg->ps.type->status(&pg->ps,
1780					      &p->path, type, result + sz,
1781					      maxlen - sz);
1782			}
1783		}
1784		break;
1785
1786	case STATUSTYPE_TABLE:
1787		list_for_each_entry(pg, &m->priority_groups, list) {
1788			DMEMIT("%s ", pg->ps.type->name);
1789
1790			if (pg->ps.type->status)
1791				sz += pg->ps.type->status(&pg->ps, NULL, type,
1792							  result + sz,
1793							  maxlen - sz);
1794			else
1795				DMEMIT("0 ");
1796
1797			DMEMIT("%u %u ", pg->nr_pgpaths,
1798			       pg->ps.type->table_args);
1799
1800			list_for_each_entry(p, &pg->pgpaths, list) {
1801				DMEMIT("%s ", p->path.dev->name);
1802				if (pg->ps.type->status)
1803					sz += pg->ps.type->status(&pg->ps,
1804					      &p->path, type, result + sz,
1805					      maxlen - sz);
1806			}
1807		}
1808		break;
1809	}
1810
1811	spin_unlock_irqrestore(&m->lock, flags);
1812}
1813
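    /*
     * Process messages sent via the DM message interface, e.g.
     * (hypothetical device name):
     *   dmsetup message mpath0 0 fail_path 8:16
     *   dmsetup message mpath0 0 switch_group 2
     *   dmsetup message mpath0 0 queue_if_no_path
     */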
1814static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
1815			     char *result, unsigned maxlen)
1816{
1817	int r = -EINVAL;
1818	struct dm_dev *dev;
1819	struct multipath *m = ti->private;
1820	action_fn action;
1821
1822	mutex_lock(&m->work_mutex);
1823
1824	if (dm_suspended(ti)) {
1825		r = -EBUSY;
1826		goto out;
1827	}
1828
1829	if (argc == 1) {
1830		if (!strcasecmp(argv[0], "queue_if_no_path")) {
1831			r = queue_if_no_path(m, true, false);
1832			goto out;
1833		} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1834			r = queue_if_no_path(m, false, false);
1835			goto out;
1836		}
1837	}
1838
1839	if (argc != 2) {
1840		DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
1841		goto out;
1842	}
1843
1844	if (!strcasecmp(argv[0], "disable_group")) {
1845		r = bypass_pg_num(m, argv[1], true);
1846		goto out;
1847	} else if (!strcasecmp(argv[0], "enable_group")) {
1848		r = bypass_pg_num(m, argv[1], false);
1849		goto out;
1850	} else if (!strcasecmp(argv[0], "switch_group")) {
1851		r = switch_pg_num(m, argv[1]);
1852		goto out;
1853	} else if (!strcasecmp(argv[0], "reinstate_path"))
1854		action = reinstate_path;
1855	else if (!strcasecmp(argv[0], "fail_path"))
1856		action = fail_path;
1857	else {
1858		DMWARN("Unrecognised multipath message received: %s", argv[0]);
1859		goto out;
1860	}
1861
1862	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1863	if (r) {
1864		DMWARN("message: error getting device %s",
1865		       argv[1]);
1866		goto out;
1867	}
1868
1869	r = action_dev(m, dev, action);
1870
1871	dm_put_device(ti, dev);
1872
1873out:
1874	mutex_unlock(&m->work_mutex);
1875	return r;
1876}
1877
1878static int multipath_prepare_ioctl(struct dm_target *ti,
1879				   struct block_device **bdev)
1880{
1881	struct multipath *m = ti->private;
1882	struct pgpath *current_pgpath;
1883	int r;
1884
1885	current_pgpath = READ_ONCE(m->current_pgpath);
1886	if (!current_pgpath)
1887		current_pgpath = choose_pgpath(m, 0);
1888
1889	if (current_pgpath) {
1890		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
1891			*bdev = current_pgpath->path.dev->bdev;
1892			r = 0;
1893		} else {
1894			/* pg_init has not started or completed */
1895			r = -ENOTCONN;
1896		}
1897	} else {
1898		/* No path is available */
1899		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1900			r = -ENOTCONN;
1901		else
1902			r = -EIO;
1903	}
1904
1905	if (r == -ENOTCONN) {
1906		if (!READ_ONCE(m->current_pg)) {
1907			/* Path status changed, redo selection */
1908			(void) choose_pgpath(m, 0);
1909		}
1910		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1911			pg_init_all_paths(m);
1912		dm_table_run_md_queue_async(m->ti->table);
1913		process_queued_io_list(m);
1914	}
1915
1916	/*
1917	 * Only pass ioctls through if the device sizes match exactly.
1918	 */
1919	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1920		return 1;
1921	return r;
1922}
1923
1924static int multipath_iterate_devices(struct dm_target *ti,
1925				     iterate_devices_callout_fn fn, void *data)
1926{
1927	struct multipath *m = ti->private;
1928	struct priority_group *pg;
1929	struct pgpath *p;
1930	int ret = 0;
1931
1932	list_for_each_entry(pg, &m->priority_groups, list) {
1933		list_for_each_entry(p, &pg->pgpaths, list) {
1934			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1935			if (ret)
1936				goto out;
1937		}
1938	}
1939
1940out:
1941	return ret;
1942}
1943
1944static int pgpath_busy(struct pgpath *pgpath)
1945{
1946	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1947
1948	return blk_lld_busy(q);
1949}
1950
1951/*
1952 * We return "busy" only when we can map I/Os but the underlying
1953 * devices are busy (so even if we map I/Os now, the I/Os will wait
1954 * on the underlying queue).
1955 * In other words, if we want to kill I/Os or queue them inside us
1956 * due to map unavailability, we don't return "busy".  Otherwise,
1957 * dm core won't give us the I/Os and we can't do what we want.
1958 */
1959static int multipath_busy(struct dm_target *ti)
1960{
1961	bool busy = false, has_active = false;
1962	struct multipath *m = ti->private;
1963	struct priority_group *pg, *next_pg;
1964	struct pgpath *pgpath;
1965
1966	/* pg_init in progress */
1967	if (atomic_read(&m->pg_init_in_progress))
1968		return true;
1969
1970	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
1971	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1972		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
1973
1974	/* Guess which priority_group will be used at next mapping time */
1975	pg = READ_ONCE(m->current_pg);
1976	next_pg = READ_ONCE(m->next_pg);
1977	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
1978		pg = next_pg;
1979
1980	if (!pg) {
1981		/*
1982		 * We don't know which pg will be used at the next mapping time.
1983		 * We don't call choose_pgpath() here to avoid triggering
1984		 * pg_init just by a busy check.
1985		 * So we don't know whether the underlying devices we will be
1986		 * using at the next mapping time are busy or not. Just try mapping.
1987		 */
1988		return busy;
1989	}
1990
1991	/*
1992	 * If there is one non-busy active path at least, the path selector
1993	 * will be able to select it. So we consider such a pg as not busy.
1994	 */
1995	busy = true;
1996	list_for_each_entry(pgpath, &pg->pgpaths, list) {
1997		if (pgpath->is_active) {
1998			has_active = true;
1999			if (!pgpath_busy(pgpath)) {
2000				busy = false;
2001				break;
2002			}
2003		}
2004	}
2005
2006	if (!has_active) {
2007		/*
2008		 * No active path in this pg, so this pg won't be used and
2009		 * the current_pg will be changed at next mapping time.
2010		 * We need to try mapping to determine it.
2011		 */
2012		busy = false;
2013	}
2014
2015	return busy;
2016}
2017
2018/*-----------------------------------------------------------------
2019 * Module setup
2020 *---------------------------------------------------------------*/
2021static struct target_type multipath_target = {
2022	.name = "multipath",
2023	.version = {1, 13, 0},
2024	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2025		    DM_TARGET_PASSES_INTEGRITY,
2026	.module = THIS_MODULE,
2027	.ctr = multipath_ctr,
2028	.dtr = multipath_dtr,
2029	.clone_and_map_rq = multipath_clone_and_map,
2030	.release_clone_rq = multipath_release_clone,
2031	.rq_end_io = multipath_end_io,
2032	.map = multipath_map_bio,
2033	.end_io = multipath_end_io_bio,
2034	.presuspend = multipath_presuspend,
2035	.postsuspend = multipath_postsuspend,
2036	.resume = multipath_resume,
2037	.status = multipath_status,
2038	.message = multipath_message,
2039	.prepare_ioctl = multipath_prepare_ioctl,
2040	.iterate_devices = multipath_iterate_devices,
2041	.busy = multipath_busy,
2042};
2043
2044static int __init dm_multipath_init(void)
2045{
2046	int r;
2047
2048	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2049	if (!kmultipathd) {
2050		DMERR("failed to create workqueue kmpathd");
2051		r = -ENOMEM;
2052		goto bad_alloc_kmultipathd;
2053	}
2054
2055	/*
2056	 * A separate workqueue is used to handle the device handlers,
2057	 * to avoid overloading the existing workqueue. Overloading it
2058	 * would also create a bottleneck in the path of storage
2059	 * hardware device activation.
2060	 */
2061	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2062						  WQ_MEM_RECLAIM);
2063	if (!kmpath_handlerd) {
2064		DMERR("failed to create workqueue kmpath_handlerd");
2065		r = -ENOMEM;
2066		goto bad_alloc_kmpath_handlerd;
2067	}
2068
2069	r = dm_register_target(&multipath_target);
2070	if (r < 0) {
2071		DMERR("request-based register failed %d", r);
2072		r = -EINVAL;
2073		goto bad_register_target;
2074	}
2075
2076	return 0;
2077
2078bad_register_target:
2079	destroy_workqueue(kmpath_handlerd);
2080bad_alloc_kmpath_handlerd:
2081	destroy_workqueue(kmultipathd);
2082bad_alloc_kmultipathd:
2083	return r;
2084}
2085
2086static void __exit dm_multipath_exit(void)
2087{
2088	destroy_workqueue(kmpath_handlerd);
2089	destroy_workqueue(kmultipathd);
2090
2091	dm_unregister_target(&multipath_target);
2092}
2093
2094module_init(dm_multipath_init);
2095module_exit(dm_multipath_exit);
2096
2097MODULE_DESCRIPTION(DM_NAME " multipath target");
2098MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2099MODULE_LICENSE("GPL");