   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-bio-record.h"
   9
  10#include <linux/init.h>
  11#include <linux/mempool.h>
  12#include <linux/module.h>
  13#include <linux/pagemap.h>
  14#include <linux/slab.h>
  15#include <linux/workqueue.h>
  16#include <linux/device-mapper.h>
  17#include <linux/dm-io.h>
  18#include <linux/dm-dirty-log.h>
  19#include <linux/dm-kcopyd.h>
  20#include <linux/dm-region-hash.h>
  21
  22#define DM_MSG_PREFIX "raid1"
  23
  24#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
  25
  26#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)
  27
  28#define DM_RAID1_HANDLE_ERRORS	0x01
  29#define DM_RAID1_KEEP_LOG	0x02
  30#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
  31#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)
  32
  33static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
  34
  35/*-----------------------------------------------------------------
  36 * Mirror set structures.
  37 *---------------------------------------------------------------*/
  38enum dm_raid1_error {
  39	DM_RAID1_WRITE_ERROR,
  40	DM_RAID1_FLUSH_ERROR,
  41	DM_RAID1_SYNC_ERROR,
  42	DM_RAID1_READ_ERROR
  43};
  44
  45struct mirror {
  46	struct mirror_set *ms;
  47	atomic_t error_count;
  48	unsigned long error_type;
  49	struct dm_dev *dev;
  50	sector_t offset;
  51};
  52
  53struct mirror_set {
  54	struct dm_target *ti;
  55	struct list_head list;
  56
  57	uint64_t features;
  58
  59	spinlock_t lock;	/* protects the lists */
  60	struct bio_list reads;
  61	struct bio_list writes;
  62	struct bio_list failures;
  63	struct bio_list holds;	/* bios are waiting until suspend */
  64
  65	struct dm_region_hash *rh;
  66	struct dm_kcopyd_client *kcopyd_client;
  67	struct dm_io_client *io_client;
  68
  69	/* recovery */
  70	region_t nr_regions;
  71	int in_sync;
  72	int log_failure;
  73	int leg_failure;
  74	atomic_t suspend;
  75
  76	atomic_t default_mirror;	/* Default mirror */
  77
  78	struct workqueue_struct *kmirrord_wq;
  79	struct work_struct kmirrord_work;
  80	struct timer_list timer;
  81	unsigned long timer_pending;
  82
  83	struct work_struct trigger_event;
  84
  85	unsigned nr_mirrors;
  86	struct mirror mirror[];
  87};
  88
  89DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
  90		"A percentage of time allocated for raid resynchronization");
  91
  92static void wakeup_mirrord(void *context)
  93{
  94	struct mirror_set *ms = context;
  95
  96	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
  97}
  98
  99static void delayed_wake_fn(struct timer_list *t)
 100{
 101	struct mirror_set *ms = from_timer(ms, t, timer);
 102
 103	clear_bit(0, &ms->timer_pending);
 104	wakeup_mirrord(ms);
 105}
 106
 107static void delayed_wake(struct mirror_set *ms)
 108{
 109	if (test_and_set_bit(0, &ms->timer_pending))
 110		return;
 111
 112	ms->timer.expires = jiffies + HZ / 5;
 113	add_timer(&ms->timer);
 114}
 115
 116static void wakeup_all_recovery_waiters(void *context)
 117{
 118	wake_up_all(&_kmirrord_recovery_stopped);
 119}
 120
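/*
 * Add a bio to the read or write queue and wake kmirrord, but only if
 * the list was empty beforehand; a non-empty list means a wakeup is
 * already pending.
 */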
 121static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
 122{
 123	unsigned long flags;
 124	int should_wake = 0;
 125	struct bio_list *bl;
 126
 127	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
 128	spin_lock_irqsave(&ms->lock, flags);
 129	should_wake = !(bl->head);
 130	bio_list_add(bl, bio);
 131	spin_unlock_irqrestore(&ms->lock, flags);
 132
 133	if (should_wake)
 134		wakeup_mirrord(ms);
 135}
 136
 137static void dispatch_bios(void *context, struct bio_list *bio_list)
 138{
 139	struct mirror_set *ms = context;
 140	struct bio *bio;
 141
 142	while ((bio = bio_list_pop(bio_list)))
 143		queue_bio(ms, bio, WRITE);
 144}
 145
 146struct dm_raid1_bio_record {
 147	struct mirror *m;
 148	/* if details->bi_bdev == NULL, details were not saved */
 149	struct dm_bio_details details;
 150	region_t write_region;
 151};
 152
 153/*
 154 * Every mirror should look like this one.
 155 */
 156#define DEFAULT_MIRROR 0
 157
 158/*
 159 * This is yucky.  We squirrel the mirror struct away inside
  160 * bi_next for read/write buffers.  This is safe since the bio
  161 * doesn't get submitted to the lower levels of the block layer.
 162 */
 163static struct mirror *bio_get_m(struct bio *bio)
 164{
 165	return (struct mirror *) bio->bi_next;
 166}
 167
 168static void bio_set_m(struct bio *bio, struct mirror *m)
 169{
 170	bio->bi_next = (struct bio *) m;
 171}
 172
 173static struct mirror *get_default_mirror(struct mirror_set *ms)
 174{
 175	return &ms->mirror[atomic_read(&ms->default_mirror)];
 176}
 177
 178static void set_default_mirror(struct mirror *m)
 179{
 180	struct mirror_set *ms = m->ms;
 181	struct mirror *m0 = &(ms->mirror[0]);
 182
 183	atomic_set(&ms->default_mirror, m - m0);
 184}
 185
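/*
 * Return the first mirror leg that has not recorded any errors, or
 * NULL if every leg has failed.
 */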
 186static struct mirror *get_valid_mirror(struct mirror_set *ms)
 187{
 188	struct mirror *m;
 189
 190	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
 191		if (!atomic_read(&m->error_count))
 192			return m;
 193
 194	return NULL;
 195}
 196
 197/* fail_mirror
 198 * @m: mirror device to fail
  199 * @error_type: one of the enum dm_raid1_error values, DM_RAID1_*_ERROR
 200 *
 201 * If errors are being handled, record the type of
 202 * error encountered for this device.  If this type
 203 * of error has already been recorded, we can return;
 204 * otherwise, we must signal userspace by triggering
 205 * an event.  Additionally, if the device is the
 206 * primary device, we must choose a new primary, but
 207 * only if the mirror is in-sync.
 208 *
 209 * This function must not block.
 210 */
 211static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
 212{
 213	struct mirror_set *ms = m->ms;
 214	struct mirror *new;
 215
 216	ms->leg_failure = 1;
 217
 218	/*
 219	 * error_count is used for nothing more than a
 220	 * simple way to tell if a device has encountered
 221	 * errors.
 222	 */
 223	atomic_inc(&m->error_count);
 224
 225	if (test_and_set_bit(error_type, &m->error_type))
 226		return;
 227
 228	if (!errors_handled(ms))
 229		return;
 230
 231	if (m != get_default_mirror(ms))
 232		goto out;
 233
 234	if (!ms->in_sync && !keep_log(ms)) {
 235		/*
 236		 * Better to issue requests to same failing device
 237		 * than to risk returning corrupt data.
 238		 */
 239		DMERR("Primary mirror (%s) failed while out-of-sync: "
 240		      "Reads may fail.", m->dev->name);
 241		goto out;
 242	}
 243
 244	new = get_valid_mirror(ms);
 245	if (new)
 246		set_default_mirror(new);
 247	else
 248		DMWARN("All sides of mirror have failed.");
 249
 250out:
 251	schedule_work(&ms->trigger_event);
 252}
 253
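/*
 * Issue an empty flush (REQ_PREFLUSH) synchronously to every leg via
 * dm-io.  Legs that fail the flush are marked with DM_RAID1_FLUSH_ERROR
 * and -EIO is returned.  This is also used as the dirty log's flush
 * callback (see create_dirty_log()).
 */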
 254static int mirror_flush(struct dm_target *ti)
 255{
 256	struct mirror_set *ms = ti->private;
 257	unsigned long error_bits;
 258
 259	unsigned int i;
 260	struct dm_io_region io[MAX_NR_MIRRORS];
 261	struct mirror *m;
 262	struct dm_io_request io_req = {
  263		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
  264		.mem.type = DM_IO_KMEM,
 265		.mem.ptr.addr = NULL,
 266		.client = ms->io_client,
 267	};
 268
 269	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
 270		io[i].bdev = m->dev->bdev;
 271		io[i].sector = 0;
 272		io[i].count = 0;
 273	}
 274
 275	error_bits = -1;
 276	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
 277	if (unlikely(error_bits != 0)) {
 278		for (i = 0; i < ms->nr_mirrors; i++)
 279			if (test_bit(i, &error_bits))
 280				fail_mirror(ms->mirror + i,
 281					    DM_RAID1_FLUSH_ERROR);
 282		return -EIO;
 283	}
 284
 285	return 0;
 286}
 287
 288/*-----------------------------------------------------------------
 289 * Recovery.
 290 *
 291 * When a mirror is first activated we may find that some regions
 292 * are in the no-sync state.  We have to recover these by
 293 * recopying from the default mirror to all the others.
 294 *---------------------------------------------------------------*/
 295static void recovery_complete(int read_err, unsigned long write_err,
 296			      void *context)
 297{
 298	struct dm_region *reg = context;
 299	struct mirror_set *ms = dm_rh_region_context(reg);
 300	int m, bit = 0;
 301
 302	if (read_err) {
 303		/* Read error means the failure of default mirror. */
 304		DMERR_LIMIT("Unable to read primary mirror during recovery");
 305		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
 306	}
 307
 308	if (write_err) {
 309		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
 310			    write_err);
 311		/*
 312		 * Bits correspond to devices (excluding default mirror).
 313		 * The default mirror cannot change during recovery.
 314		 */
 315		for (m = 0; m < ms->nr_mirrors; m++) {
 316			if (&ms->mirror[m] == get_default_mirror(ms))
 317				continue;
 318			if (test_bit(bit, &write_err))
 319				fail_mirror(ms->mirror + m,
 320					    DM_RAID1_SYNC_ERROR);
 321			bit++;
 322		}
 323	}
 324
 325	dm_rh_recovery_end(reg, !(read_err || write_err));
 326}
 327
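/*
 * Kick off a kcopyd copy of one region from the default mirror to every
 * other leg.  The final region may be shorter than region_size.
 * recovery_complete() runs when the copy finishes.
 */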
 328static void recover(struct mirror_set *ms, struct dm_region *reg)
 329{
 330	unsigned i;
 331	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
 332	struct mirror *m;
 333	unsigned long flags = 0;
 334	region_t key = dm_rh_get_region_key(reg);
 335	sector_t region_size = dm_rh_get_region_size(ms->rh);
 336
 337	/* fill in the source */
 338	m = get_default_mirror(ms);
 339	from.bdev = m->dev->bdev;
 340	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
 341	if (key == (ms->nr_regions - 1)) {
 342		/*
 343		 * The final region may be smaller than
 344		 * region_size.
 345		 */
 346		from.count = ms->ti->len & (region_size - 1);
 347		if (!from.count)
 348			from.count = region_size;
 349	} else
 350		from.count = region_size;
 351
 352	/* fill in the destinations */
 353	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
 354		if (&ms->mirror[i] == get_default_mirror(ms))
 355			continue;
 356
 357		m = ms->mirror + i;
 358		dest->bdev = m->dev->bdev;
 359		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
 360		dest->count = from.count;
 361		dest++;
 362	}
 363
 364	/* hand to kcopyd */
 365	if (!errors_handled(ms))
 366		flags |= BIT(DM_KCOPYD_IGNORE_ERROR);
 367
 368	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
 369		       flags, recovery_complete, reg);
 370}
 371
 372static void reset_ms_flags(struct mirror_set *ms)
 373{
 374	unsigned int m;
 375
 376	ms->leg_failure = 0;
 377	for (m = 0; m < ms->nr_mirrors; m++) {
 378		atomic_set(&(ms->mirror[m].error_count), 0);
 379		ms->mirror[m].error_type = 0;
 380	}
 381}
 382
 383static void do_recovery(struct mirror_set *ms)
 384{
 385	struct dm_region *reg;
 386	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 387
 388	/*
 389	 * Start quiescing some regions.
 390	 */
 391	dm_rh_recovery_prepare(ms->rh);
 392
 393	/*
 394	 * Copy any already quiesced regions.
 395	 */
 396	while ((reg = dm_rh_recovery_start(ms->rh)))
 397		recover(ms, reg);
 398
 399	/*
 400	 * Update the in sync flag.
 401	 */
 402	if (!ms->in_sync &&
 403	    (log->type->get_sync_count(log) == ms->nr_regions)) {
 404		/* the sync is complete */
 405		dm_table_event(ms->ti->table);
 406		ms->in_sync = 1;
 407		reset_ms_flags(ms);
 408	}
 409}
 410
 411/*-----------------------------------------------------------------
 412 * Reads
 413 *---------------------------------------------------------------*/
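/*
 * Walk backwards (wrapping) from the default mirror and return the
 * first leg with no recorded errors, or NULL if every leg has failed.
 * The sector argument is currently unused, so reads are not really
 * balanced across legs.
 */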
 414static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
 415{
 416	struct mirror *m = get_default_mirror(ms);
 417
 418	do {
 419		if (likely(!atomic_read(&m->error_count)))
 420			return m;
 421
 422		if (m-- == ms->mirror)
 423			m += ms->nr_mirrors;
 424	} while (m != get_default_mirror(ms));
 425
 426	return NULL;
 427}
 428
 429static int default_ok(struct mirror *m)
 430{
 431	struct mirror *default_mirror = get_default_mirror(m->ms);
 432
 433	return !atomic_read(&default_mirror->error_count);
 434}
 435
 436static int mirror_available(struct mirror_set *ms, struct bio *bio)
 437{
 438	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 439	region_t region = dm_rh_bio_to_region(ms->rh, bio);
 440
 441	if (log->type->in_sync(log, region, 0))
 442		return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
 443
 444	return 0;
 445}
 446
 447/*
 448 * remap a buffer to a particular mirror.
 449 */
 450static sector_t map_sector(struct mirror *m, struct bio *bio)
 451{
 452	if (unlikely(!bio->bi_iter.bi_size))
 453		return 0;
 454	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 455}
 456
 457static void map_bio(struct mirror *m, struct bio *bio)
 458{
 459	bio_set_dev(bio, m->dev->bdev);
 460	bio->bi_iter.bi_sector = map_sector(m, bio);
 461}
 462
 463static void map_region(struct dm_io_region *io, struct mirror *m,
 464		       struct bio *bio)
 465{
 466	io->bdev = m->dev->bdev;
 467	io->sector = map_sector(m, bio);
 468	io->count = bio_sectors(bio);
 469}
 470
 471static void hold_bio(struct mirror_set *ms, struct bio *bio)
 472{
 473	/*
 474	 * Lock is required to avoid race condition during suspend
 475	 * process.
 476	 */
 477	spin_lock_irq(&ms->lock);
 478
 479	if (atomic_read(&ms->suspend)) {
 480		spin_unlock_irq(&ms->lock);
 481
 482		/*
 483		 * If device is suspended, complete the bio.
 484		 */
 485		if (dm_noflush_suspending(ms->ti))
 486			bio->bi_status = BLK_STS_DM_REQUEUE;
 487		else
 488			bio->bi_status = BLK_STS_IOERR;
 489
 490		bio_endio(bio);
 491		return;
 492	}
 493
 494	/*
 495	 * Hold bio until the suspend is complete.
 496	 */
 497	bio_list_add(&ms->holds, bio);
 498	spin_unlock_irq(&ms->lock);
 499}
 500
 501/*-----------------------------------------------------------------
 502 * Reads
 503 *---------------------------------------------------------------*/
 504static void read_callback(unsigned long error, void *context)
 505{
 506	struct bio *bio = context;
 507	struct mirror *m;
 508
 509	m = bio_get_m(bio);
 510	bio_set_m(bio, NULL);
 511
 512	if (likely(!error)) {
 513		bio_endio(bio);
 514		return;
 515	}
 516
 517	fail_mirror(m, DM_RAID1_READ_ERROR);
 518
 519	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
 520		DMWARN_LIMIT("Read failure on mirror device %s.  "
 521			     "Trying alternative device.",
 522			     m->dev->name);
 523		queue_bio(m->ms, bio, bio_data_dir(bio));
 524		return;
 525	}
 526
 527	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
 528		    m->dev->name);
 529	bio_io_error(bio);
 530}
 531
 532/* Asynchronous read. */
 533static void read_async_bio(struct mirror *m, struct bio *bio)
 534{
 535	struct dm_io_region io;
 536	struct dm_io_request io_req = {
  537		.bi_opf = REQ_OP_READ,
  538		.mem.type = DM_IO_BIO,
 539		.mem.ptr.bio = bio,
 540		.notify.fn = read_callback,
 541		.notify.context = bio,
 542		.client = m->ms->io_client,
 543	};
 544
 545	map_region(&io, m, bio);
 546	bio_set_m(bio, m);
 547	BUG_ON(dm_io(&io_req, 1, &io, NULL));
 548}
 549
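/*
 * A region may only be read from arbitrary legs while it is fully in
 * sync (clean or dirty).  Nosync and recovering regions are restricted
 * to the default mirror.
 */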
 550static inline int region_in_sync(struct mirror_set *ms, region_t region,
 551				 int may_block)
 552{
 553	int state = dm_rh_get_state(ms->rh, region, may_block);
 554	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
 555}
 556
 557static void do_reads(struct mirror_set *ms, struct bio_list *reads)
 558{
 559	region_t region;
 560	struct bio *bio;
 561	struct mirror *m;
 562
 563	while ((bio = bio_list_pop(reads))) {
 564		region = dm_rh_bio_to_region(ms->rh, bio);
 565		m = get_default_mirror(ms);
 566
 567		/*
 568		 * We can only read balance if the region is in sync.
 569		 */
 570		if (likely(region_in_sync(ms, region, 1)))
 571			m = choose_mirror(ms, bio->bi_iter.bi_sector);
 572		else if (m && atomic_read(&m->error_count))
 573			m = NULL;
 574
 575		if (likely(m))
 576			read_async_bio(m, bio);
 577		else
 578			bio_io_error(bio);
 579	}
 580}
 581
 582/*-----------------------------------------------------------------
 583 * Writes.
 584 *
 585 * We do different things with the write io depending on the
 586 * state of the region that it's in:
 587 *
  588 * SYNC: 	increment pending, use dm-io to write to *all* mirrors
 589 * RECOVERING:	delay the io until recovery completes
 590 * NOSYNC:	increment pending, just write to the default mirror
 591 *---------------------------------------------------------------*/
 592
 593
 594static void write_callback(unsigned long error, void *context)
 595{
 596	unsigned i;
 597	struct bio *bio = (struct bio *) context;
 598	struct mirror_set *ms;
 599	int should_wake = 0;
 600	unsigned long flags;
 601
 602	ms = bio_get_m(bio)->ms;
 603	bio_set_m(bio, NULL);
 604
 605	/*
 606	 * NOTE: We don't decrement the pending count here,
 607	 * instead it is done by the targets endio function.
 608	 * This way we handle both writes to SYNC and NOSYNC
 609	 * regions with the same code.
 610	 */
 611	if (likely(!error)) {
 612		bio_endio(bio);
 613		return;
 614	}
 615
 616	/*
 617	 * If the bio is discard, return an error, but do not
 618	 * degrade the array.
 619	 */
 620	if (bio_op(bio) == REQ_OP_DISCARD) {
 621		bio->bi_status = BLK_STS_NOTSUPP;
 622		bio_endio(bio);
 623		return;
 624	}
 625
 626	for (i = 0; i < ms->nr_mirrors; i++)
 627		if (test_bit(i, &error))
 628			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
 629
 630	/*
 631	 * Need to raise event.  Since raising
 632	 * events can block, we need to do it in
 633	 * the main thread.
 634	 */
 635	spin_lock_irqsave(&ms->lock, flags);
 636	if (!ms->failures.head)
 637		should_wake = 1;
 638	bio_list_add(&ms->failures, bio);
 639	spin_unlock_irqrestore(&ms->lock, flags);
 640	if (should_wake)
 641		wakeup_mirrord(ms);
 642}
 643
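/*
 * Issue a write to every leg with a single asynchronous dm-io request;
 * completion is handled in write_callback().  FUA/PREFLUSH flags are
 * preserved and discards are issued as DM_IO_KMEM with a NULL address
 * since they carry no data.  The default mirror is stashed in bi_next
 * only so that write_callback() can get back to the mirror_set.
 */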
 644static void do_write(struct mirror_set *ms, struct bio *bio)
 645{
 646	unsigned int i;
 647	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
 648	struct mirror *m;
 649	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
 650	struct dm_io_request io_req = {
  651		.bi_opf = REQ_OP_WRITE | op_flags,
  652		.mem.type = DM_IO_BIO,
 653		.mem.ptr.bio = bio,
 654		.notify.fn = write_callback,
 655		.notify.context = bio,
 656		.client = ms->io_client,
 657	};
 658
 659	if (bio_op(bio) == REQ_OP_DISCARD) {
 660		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
 661		io_req.mem.type = DM_IO_KMEM;
 662		io_req.mem.ptr.addr = NULL;
 663	}
 664
 665	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
 666		map_region(dest++, m, bio);
 667
 668	/*
 669	 * Use default mirror because we only need it to retrieve the reference
 670	 * to the mirror set in write_callback().
 671	 */
 672	bio_set_m(bio, get_default_mirror(ms));
 673
 674	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
 675}
 676
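/*
 * Classify each queued write by the state of its region: in-sync writes
 * go to all legs, nosync writes go only to the default mirror and
 * writes to recovering regions are delayed until recovery finishes.
 * Writes to regions that are being recovered remotely are requeued, and
 * the dirty log is flushed before anything is dispatched.
 */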
 677static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 678{
 679	int state;
 680	struct bio *bio;
 681	struct bio_list sync, nosync, recover, *this_list = NULL;
 682	struct bio_list requeue;
 683	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 684	region_t region;
 685
 686	if (!writes->head)
 687		return;
 688
 689	/*
 690	 * Classify each write.
 691	 */
 692	bio_list_init(&sync);
 693	bio_list_init(&nosync);
 694	bio_list_init(&recover);
 695	bio_list_init(&requeue);
 696
 697	while ((bio = bio_list_pop(writes))) {
 698		if ((bio->bi_opf & REQ_PREFLUSH) ||
 699		    (bio_op(bio) == REQ_OP_DISCARD)) {
 700			bio_list_add(&sync, bio);
 701			continue;
 702		}
 703
 704		region = dm_rh_bio_to_region(ms->rh, bio);
 705
 706		if (log->type->is_remote_recovering &&
 707		    log->type->is_remote_recovering(log, region)) {
 708			bio_list_add(&requeue, bio);
 709			continue;
 710		}
 711
 712		state = dm_rh_get_state(ms->rh, region, 1);
 713		switch (state) {
 714		case DM_RH_CLEAN:
 715		case DM_RH_DIRTY:
 716			this_list = &sync;
 717			break;
 718
 719		case DM_RH_NOSYNC:
 720			this_list = &nosync;
 721			break;
 722
 723		case DM_RH_RECOVERING:
 724			this_list = &recover;
 725			break;
 726		}
 727
 728		bio_list_add(this_list, bio);
 729	}
 730
 731	/*
 732	 * Add bios that are delayed due to remote recovery
 733	 * back on to the write queue
 734	 */
 735	if (unlikely(requeue.head)) {
 736		spin_lock_irq(&ms->lock);
 737		bio_list_merge(&ms->writes, &requeue);
 738		spin_unlock_irq(&ms->lock);
 739		delayed_wake(ms);
 740	}
 741
 742	/*
 743	 * Increment the pending counts for any regions that will
 744	 * be written to (writes to recover regions are going to
 745	 * be delayed).
 746	 */
 747	dm_rh_inc_pending(ms->rh, &sync);
 748	dm_rh_inc_pending(ms->rh, &nosync);
 749
 750	/*
 751	 * If the flush fails on a previous call and succeeds here,
 752	 * we must not reset the log_failure variable.  We need
 753	 * userspace interaction to do that.
 754	 */
 755	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
 756
 757	/*
 758	 * Dispatch io.
 759	 */
 760	if (unlikely(ms->log_failure) && errors_handled(ms)) {
 761		spin_lock_irq(&ms->lock);
 762		bio_list_merge(&ms->failures, &sync);
 763		spin_unlock_irq(&ms->lock);
 764		wakeup_mirrord(ms);
 765	} else
 766		while ((bio = bio_list_pop(&sync)))
 767			do_write(ms, bio);
 768
 769	while ((bio = bio_list_pop(&recover)))
 770		dm_rh_delay(ms->rh, bio);
 771
 772	while ((bio = bio_list_pop(&nosync))) {
 773		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
 774			spin_lock_irq(&ms->lock);
 775			bio_list_add(&ms->failures, bio);
 776			spin_unlock_irq(&ms->lock);
 777			wakeup_mirrord(ms);
 778		} else {
 779			map_bio(get_default_mirror(ms), bio);
 780			submit_bio_noacct(bio);
 781		}
 782	}
 783}
 784
 785static void do_failures(struct mirror_set *ms, struct bio_list *failures)
 786{
 787	struct bio *bio;
 788
 789	if (likely(!failures->head))
 790		return;
 791
 792	/*
 793	 * If the log has failed, unattempted writes are being
 794	 * put on the holds list.  We can't issue those writes
 795	 * until a log has been marked, so we must store them.
 796	 *
 797	 * If a 'noflush' suspend is in progress, we can requeue
  798 * the I/Os to the core.  This gives userspace a chance
 799	 * to reconfigure the mirror, at which point the core
 800	 * will reissue the writes.  If the 'noflush' flag is
 801	 * not set, we have no choice but to return errors.
 802	 *
 803	 * Some writes on the failures list may have been
 804	 * submitted before the log failure and represent a
 805	 * failure to write to one of the devices.  It is ok
 806	 * for us to treat them the same and requeue them
 807	 * as well.
 808	 */
 809	while ((bio = bio_list_pop(failures))) {
 810		if (!ms->log_failure) {
 811			ms->in_sync = 0;
 812			dm_rh_mark_nosync(ms->rh, bio);
 813		}
 814
 815		/*
 816		 * If all the legs are dead, fail the I/O.
 817		 * If the device has failed and keep_log is enabled,
 818		 * fail the I/O.
 819		 *
 820		 * If we have been told to handle errors, and keep_log
 821		 * isn't enabled, hold the bio and wait for userspace to
 822		 * deal with the problem.
 823		 *
 824		 * Otherwise pretend that the I/O succeeded. (This would
 825		 * be wrong if the failed leg returned after reboot and
 826		 * got replicated back to the good legs.)
 827		 */
 828		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
 829			bio_io_error(bio);
 830		else if (errors_handled(ms) && !keep_log(ms))
 831			hold_bio(ms, bio);
 832		else
 833			bio_endio(bio);
 834	}
 835}
 836
 837static void trigger_event(struct work_struct *work)
 838{
 839	struct mirror_set *ms =
 840		container_of(work, struct mirror_set, trigger_event);
 841
 842	dm_table_event(ms->ti->table);
 843}
 844
 845/*-----------------------------------------------------------------
 846 * kmirrord
 847 *---------------------------------------------------------------*/
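/*
 * The kmirrord work function: snapshot the pending read, write and
 * failure lists under the lock, update region states, then run
 * recovery, reads, writes and failure handling in turn.
 */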
 848static void do_mirror(struct work_struct *work)
 849{
 850	struct mirror_set *ms = container_of(work, struct mirror_set,
 851					     kmirrord_work);
 852	struct bio_list reads, writes, failures;
 853	unsigned long flags;
 854
 855	spin_lock_irqsave(&ms->lock, flags);
 856	reads = ms->reads;
 857	writes = ms->writes;
 858	failures = ms->failures;
 859	bio_list_init(&ms->reads);
 860	bio_list_init(&ms->writes);
 861	bio_list_init(&ms->failures);
 862	spin_unlock_irqrestore(&ms->lock, flags);
 863
 864	dm_rh_update_states(ms->rh, errors_handled(ms));
 865	do_recovery(ms);
 866	do_reads(ms, &reads);
 867	do_writes(ms, &writes);
 868	do_failures(ms, &failures);
 869}
 870
 871/*-----------------------------------------------------------------
 872 * Target functions
 873 *---------------------------------------------------------------*/
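/*
 * Allocate and initialise a mirror_set, including its flexible array
 * of nr_mirrors legs, the dm-io client and the dirty region hash.
 * Returns NULL (with ti->error set) on failure.
 */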
 874static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 875					uint32_t region_size,
 876					struct dm_target *ti,
 877					struct dm_dirty_log *dl)
 878{
 879	struct mirror_set *ms =
 880		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);
 881
 882	if (!ms) {
 883		ti->error = "Cannot allocate mirror context";
 884		return NULL;
 885	}
 886
 887	spin_lock_init(&ms->lock);
 888	bio_list_init(&ms->reads);
 889	bio_list_init(&ms->writes);
 890	bio_list_init(&ms->failures);
 891	bio_list_init(&ms->holds);
 892
 893	ms->ti = ti;
 894	ms->nr_mirrors = nr_mirrors;
 895	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
 896	ms->in_sync = 0;
 897	ms->log_failure = 0;
 898	ms->leg_failure = 0;
 899	atomic_set(&ms->suspend, 0);
 900	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
 901
 902	ms->io_client = dm_io_client_create();
 903	if (IS_ERR(ms->io_client)) {
 904		ti->error = "Error creating dm_io client";
 905		kfree(ms);
  906		return NULL;
 907	}
 908
 909	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
 910				       wakeup_all_recovery_waiters,
 911				       ms->ti->begin, MAX_RECOVERY,
 912				       dl, region_size, ms->nr_regions);
 913	if (IS_ERR(ms->rh)) {
 914		ti->error = "Error creating dirty region hash";
 915		dm_io_client_destroy(ms->io_client);
 916		kfree(ms);
 917		return NULL;
 918	}
 919
 920	return ms;
 921}
 922
 923static void free_context(struct mirror_set *ms, struct dm_target *ti,
 924			 unsigned int m)
 925{
 926	while (m--)
 927		dm_put_device(ti, ms->mirror[m].dev);
 928
 929	dm_io_client_destroy(ms->io_client);
 930	dm_region_hash_destroy(ms->rh);
 931	kfree(ms);
 932}
 933
 934static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
 935		      unsigned int mirror, char **argv)
 936{
 937	unsigned long long offset;
 938	char dummy;
 939	int ret;
 940
 941	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
 942	    offset != (sector_t)offset) {
 943		ti->error = "Invalid offset";
 944		return -EINVAL;
 945	}
 946
 947	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 948			    &ms->mirror[mirror].dev);
 949	if (ret) {
 950		ti->error = "Device lookup failure";
 951		return ret;
 952	}
 953
 954	ms->mirror[mirror].ms = ms;
 955	atomic_set(&(ms->mirror[mirror].error_count), 0);
 956	ms->mirror[mirror].error_type = 0;
 957	ms->mirror[mirror].offset = offset;
 958
 959	return 0;
 960}
 961
 962/*
 963 * Create dirty log: log_type #log_params <log_params>
 964 */
 965static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
 966					     unsigned argc, char **argv,
 967					     unsigned *args_used)
 968{
 969	unsigned param_count;
 970	struct dm_dirty_log *dl;
 971	char dummy;
 972
 973	if (argc < 2) {
 974		ti->error = "Insufficient mirror log arguments";
 975		return NULL;
 976	}
 977
 978	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
 979		ti->error = "Invalid mirror log argument count";
 980		return NULL;
 981	}
 982
 983	*args_used = 2 + param_count;
 984
 985	if (argc < *args_used) {
 986		ti->error = "Insufficient mirror log arguments";
 987		return NULL;
 988	}
 989
 990	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
 991				 argv + 2);
 992	if (!dl) {
 993		ti->error = "Error creating mirror dirty log";
 994		return NULL;
 995	}
 996
 997	return dl;
 998}
 999
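/*
 * Parse the optional feature arguments: [#features <features>].
 * Supported features are "handle_errors" and "keep_log"; keep_log is
 * only valid together with handle_errors.
 */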
1000static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
1001			  unsigned *args_used)
1002{
1003	unsigned num_features;
1004	struct dm_target *ti = ms->ti;
1005	char dummy;
1006	int i;
1007
1008	*args_used = 0;
1009
1010	if (!argc)
1011		return 0;
1012
1013	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
1014		ti->error = "Invalid number of features";
1015		return -EINVAL;
1016	}
1017
1018	argc--;
1019	argv++;
1020	(*args_used)++;
1021
1022	if (num_features > argc) {
1023		ti->error = "Not enough arguments to support feature count";
1024		return -EINVAL;
1025	}
1026
1027	for (i = 0; i < num_features; i++) {
1028		if (!strcmp("handle_errors", argv[0]))
1029			ms->features |= DM_RAID1_HANDLE_ERRORS;
1030		else if (!strcmp("keep_log", argv[0]))
1031			ms->features |= DM_RAID1_KEEP_LOG;
1032		else {
1033			ti->error = "Unrecognised feature requested";
1034			return -EINVAL;
1035		}
1036
1037		argc--;
1038		argv++;
1039		(*args_used)++;
1040	}
1041	if (!errors_handled(ms) && keep_log(ms)) {
1042		ti->error = "keep_log feature requires the handle_errors feature";
1043		return -EINVAL;
1044	}
1045
1046	return 0;
1047}
1048
1049/*
1050 * Construct a mirror mapping:
1051 *
1052 * log_type #log_params <log_params>
1053 * #mirrors [mirror_path offset]{2,}
1054 * [#features <features>]
1055 *
1056 * log_type is "core" or "disk"
1057 * #log_params is between 1 and 3
1058 *
1059 * If present, supported features are "handle_errors" and "keep_log".
1060 */
1061static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1062{
1063	int r;
1064	unsigned int nr_mirrors, m, args_used;
1065	struct mirror_set *ms;
1066	struct dm_dirty_log *dl;
1067	char dummy;
1068
1069	dl = create_dirty_log(ti, argc, argv, &args_used);
1070	if (!dl)
1071		return -EINVAL;
1072
1073	argv += args_used;
1074	argc -= args_used;
1075
1076	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
1077	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
1078		ti->error = "Invalid number of mirrors";
1079		dm_dirty_log_destroy(dl);
1080		return -EINVAL;
1081	}
1082
1083	argv++, argc--;
1084
1085	if (argc < nr_mirrors * 2) {
1086		ti->error = "Too few mirror arguments";
1087		dm_dirty_log_destroy(dl);
1088		return -EINVAL;
1089	}
1090
1091	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1092	if (!ms) {
1093		dm_dirty_log_destroy(dl);
1094		return -ENOMEM;
1095	}
1096
1097	/* Get the mirror parameter sets */
1098	for (m = 0; m < nr_mirrors; m++) {
1099		r = get_mirror(ms, ti, m, argv);
1100		if (r) {
1101			free_context(ms, ti, m);
1102			return r;
1103		}
1104		argv += 2;
1105		argc -= 2;
1106	}
1107
1108	ti->private = ms;
1109
1110	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
1111	if (r)
1112		goto err_free_context;
1113
1114	ti->num_flush_bios = 1;
1115	ti->num_discard_bios = 1;
1116	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
1117
1118	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
1119	if (!ms->kmirrord_wq) {
1120		DMERR("couldn't start kmirrord");
1121		r = -ENOMEM;
1122		goto err_free_context;
1123	}
1124	INIT_WORK(&ms->kmirrord_work, do_mirror);
1125	timer_setup(&ms->timer, delayed_wake_fn, 0);
1126	ms->timer_pending = 0;
1127	INIT_WORK(&ms->trigger_event, trigger_event);
1128
1129	r = parse_features(ms, argc, argv, &args_used);
1130	if (r)
1131		goto err_destroy_wq;
1132
1133	argv += args_used;
1134	argc -= args_used;
1135
1136	/*
1137	 * Any read-balancing addition depends on the
1138	 * DM_RAID1_HANDLE_ERRORS flag being present.
1139	 * This is because the decision to balance depends
1140	 * on the sync state of a region.  If the above
 1141	 * flag is not present, we ignore errors and the
 1142	 * sync state may be inaccurate.
1143	 */
1144
1145	if (argc) {
1146		ti->error = "Too many mirror arguments";
1147		r = -EINVAL;
1148		goto err_destroy_wq;
1149	}
1150
1151	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1152	if (IS_ERR(ms->kcopyd_client)) {
1153		r = PTR_ERR(ms->kcopyd_client);
1154		goto err_destroy_wq;
1155	}
1156
1157	wakeup_mirrord(ms);
1158	return 0;
1159
1160err_destroy_wq:
1161	destroy_workqueue(ms->kmirrord_wq);
1162err_free_context:
1163	free_context(ms, ti, ms->nr_mirrors);
1164	return r;
1165}
1166
1167static void mirror_dtr(struct dm_target *ti)
1168{
1169	struct mirror_set *ms = (struct mirror_set *) ti->private;
1170
1171	del_timer_sync(&ms->timer);
1172	flush_workqueue(ms->kmirrord_wq);
1173	flush_work(&ms->trigger_event);
1174	dm_kcopyd_client_destroy(ms->kcopyd_client);
1175	destroy_workqueue(ms->kmirrord_wq);
1176	free_context(ms, ti, ms->nr_mirrors);
1177}
1178
1179/*
1180 * Mirror mapping function
1181 */
1182static int mirror_map(struct dm_target *ti, struct bio *bio)
1183{
1184	int r, rw = bio_data_dir(bio);
1185	struct mirror *m;
1186	struct mirror_set *ms = ti->private;
1187	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1188	struct dm_raid1_bio_record *bio_record =
1189	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1190
1191	bio_record->details.bi_bdev = NULL;
1192
1193	if (rw == WRITE) {
1194		/* Save region for mirror_end_io() handler */
1195		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
1196		queue_bio(ms, bio, rw);
1197		return DM_MAPIO_SUBMITTED;
1198	}
1199
1200	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1201	if (r < 0 && r != -EWOULDBLOCK)
1202		return DM_MAPIO_KILL;
1203
1204	/*
1205	 * If region is not in-sync queue the bio.
1206	 */
1207	if (!r || (r == -EWOULDBLOCK)) {
1208		if (bio->bi_opf & REQ_RAHEAD)
1209			return DM_MAPIO_KILL;
1210
1211		queue_bio(ms, bio, rw);
1212		return DM_MAPIO_SUBMITTED;
1213	}
1214
1215	/*
1216	 * The region is in-sync and we can perform reads directly.
1217	 * Store enough information so we can retry if it fails.
1218	 */
1219	m = choose_mirror(ms, bio->bi_iter.bi_sector);
1220	if (unlikely(!m))
1221		return DM_MAPIO_KILL;
1222
1223	dm_bio_record(&bio_record->details, bio);
1224	bio_record->m = m;
1225
1226	map_bio(m, bio);
1227
1228	return DM_MAPIO_REMAPPED;
1229}
1230
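/*
 * For writes, drop the region's pending count (the count is only
 * dropped for writes that are neither flushes nor discards).  For
 * failed reads, restore the saved bio details and requeue the bio so
 * it can be retried on an intact leg; if no leg can serve it, the
 * error is passed up.
 */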
1231static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1232		blk_status_t *error)
1233{
1234	int rw = bio_data_dir(bio);
1235	struct mirror_set *ms = (struct mirror_set *) ti->private;
1236	struct mirror *m = NULL;
1237	struct dm_bio_details *bd = NULL;
1238	struct dm_raid1_bio_record *bio_record =
1239	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1240
1241	/*
1242	 * We need to dec pending if this was a write.
1243	 */
1244	if (rw == WRITE) {
1245		if (!(bio->bi_opf & REQ_PREFLUSH) &&
1246		    bio_op(bio) != REQ_OP_DISCARD)
1247			dm_rh_dec(ms->rh, bio_record->write_region);
1248		return DM_ENDIO_DONE;
1249	}
1250
1251	if (*error == BLK_STS_NOTSUPP)
1252		goto out;
1253
1254	if (bio->bi_opf & REQ_RAHEAD)
1255		goto out;
1256
1257	if (unlikely(*error)) {
1258		if (!bio_record->details.bi_bdev) {
1259			/*
1260			 * There wasn't enough memory to record necessary
1261			 * information for a retry or there was no other
1262			 * mirror in-sync.
1263			 */
1264			DMERR_LIMIT("Mirror read failed.");
1265			return DM_ENDIO_DONE;
1266		}
1267
1268		m = bio_record->m;
1269
1270		DMERR("Mirror read failed from %s. Trying alternative device.",
1271		      m->dev->name);
1272
1273		fail_mirror(m, DM_RAID1_READ_ERROR);
1274
1275		/*
1276		 * A failed read is requeued for another attempt using an intact
1277		 * mirror.
1278		 */
1279		if (default_ok(m) || mirror_available(ms, bio)) {
1280			bd = &bio_record->details;
1281
1282			dm_bio_restore(bd, bio);
1283			bio_record->details.bi_bdev = NULL;
1284			bio->bi_status = 0;
1285
1286			queue_bio(ms, bio, rw);
1287			return DM_ENDIO_INCOMPLETE;
1288		}
1289		DMERR("All replicated volumes dead, failing I/O");
1290	}
1291
1292out:
1293	bio_record->details.bi_bdev = NULL;
1294
1295	return DM_ENDIO_DONE;
1296}
1297
1298static void mirror_presuspend(struct dm_target *ti)
1299{
1300	struct mirror_set *ms = (struct mirror_set *) ti->private;
1301	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1302
1303	struct bio_list holds;
1304	struct bio *bio;
1305
1306	atomic_set(&ms->suspend, 1);
1307
1308	/*
 1309	 * Re-process the bios on the hold list: ms->suspend is now
 1310	 * set, so hold_bio() completes them (requeueing them if this
 1311	 * is a noflush suspend) instead of holding them, and no new
 1312	 * bio can be added to the hold list.
1313	 */
1314	spin_lock_irq(&ms->lock);
1315	holds = ms->holds;
1316	bio_list_init(&ms->holds);
1317	spin_unlock_irq(&ms->lock);
1318
1319	while ((bio = bio_list_pop(&holds)))
1320		hold_bio(ms, bio);
1321
1322	/*
1323	 * We must finish up all the work that we've
1324	 * generated (i.e. recovery work).
1325	 */
1326	dm_rh_stop_recovery(ms->rh);
1327
1328	wait_event(_kmirrord_recovery_stopped,
1329		   !dm_rh_recovery_in_flight(ms->rh));
1330
1331	if (log->type->presuspend && log->type->presuspend(log))
1332		/* FIXME: need better error handling */
1333		DMWARN("log presuspend failed");
1334
1335	/*
1336	 * Now that recovery is complete/stopped and the
1337	 * delayed bios are queued, we need to wait for
1338	 * the worker thread to complete.  This way,
1339	 * we know that all of our I/O has been pushed.
1340	 */
1341	flush_workqueue(ms->kmirrord_wq);
1342}
1343
1344static void mirror_postsuspend(struct dm_target *ti)
1345{
1346	struct mirror_set *ms = ti->private;
1347	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1348
1349	if (log->type->postsuspend && log->type->postsuspend(log))
1350		/* FIXME: need better error handling */
1351		DMWARN("log postsuspend failed");
1352}
1353
1354static void mirror_resume(struct dm_target *ti)
1355{
1356	struct mirror_set *ms = ti->private;
1357	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1358
1359	atomic_set(&ms->suspend, 0);
1360	if (log->type->resume && log->type->resume(log))
1361		/* FIXME: need better error handling */
1362		DMWARN("log resume failed");
1363	dm_rh_start_recovery(ms->rh);
1364}
1365
1366/*
1367 * device_status_char
1368 * @m: mirror device/leg we want the status of
1369 *
1370 * We return one character representing the most severe error
1371 * we have encountered.
1372 *    A => Alive - No failures
1373 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 1374 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
1375 *    R => Read - A read failure occurred, mirror data unaffected
1376 *
1377 * Returns: <char>
1378 */
1379static char device_status_char(struct mirror *m)
1380{
1381	if (!atomic_read(&(m->error_count)))
1382		return 'A';
1383
1384	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
1385		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
1386		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
1387		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
1388}
1389
1390
1391static void mirror_status(struct dm_target *ti, status_type_t type,
1392			  unsigned status_flags, char *result, unsigned maxlen)
1393{
1394	unsigned int m, sz = 0;
1395	int num_feature_args = 0;
1396	struct mirror_set *ms = (struct mirror_set *) ti->private;
1397	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1398	char buffer[MAX_NR_MIRRORS + 1];
1399
1400	switch (type) {
1401	case STATUSTYPE_INFO:
1402		DMEMIT("%d ", ms->nr_mirrors);
1403		for (m = 0; m < ms->nr_mirrors; m++) {
1404			DMEMIT("%s ", ms->mirror[m].dev->name);
1405			buffer[m] = device_status_char(&(ms->mirror[m]));
1406		}
1407		buffer[m] = '\0';
1408
1409		DMEMIT("%llu/%llu 1 %s ",
1410		      (unsigned long long)log->type->get_sync_count(log),
1411		      (unsigned long long)ms->nr_regions, buffer);
1412
1413		sz += log->type->status(log, type, result+sz, maxlen-sz);
1414
1415		break;
1416
1417	case STATUSTYPE_TABLE:
1418		sz = log->type->status(log, type, result, maxlen);
1419
1420		DMEMIT("%d", ms->nr_mirrors);
1421		for (m = 0; m < ms->nr_mirrors; m++)
1422			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1423			       (unsigned long long)ms->mirror[m].offset);
1424
1425		num_feature_args += !!errors_handled(ms);
1426		num_feature_args += !!keep_log(ms);
1427		if (num_feature_args) {
1428			DMEMIT(" %d", num_feature_args);
1429			if (errors_handled(ms))
1430				DMEMIT(" handle_errors");
1431			if (keep_log(ms))
1432				DMEMIT(" keep_log");
1433		}
1434
1435		break;
1436
1437	case STATUSTYPE_IMA:
1438		DMEMIT_TARGET_NAME_VERSION(ti->type);
1439		DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
1440		for (m = 0; m < ms->nr_mirrors; m++) {
1441			DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
1442			DMEMIT(",mirror_device_%d_status=%c",
1443			       m, device_status_char(&(ms->mirror[m])));
1444		}
1445
1446		DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
1447		DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');
1448
1449		DMEMIT(",log_type_status=");
1450		sz += log->type->status(log, type, result+sz, maxlen-sz);
1451		DMEMIT(";");
1452		break;
1453	}
1454}
1455
1456static int mirror_iterate_devices(struct dm_target *ti,
1457				  iterate_devices_callout_fn fn, void *data)
1458{
1459	struct mirror_set *ms = ti->private;
1460	int ret = 0;
1461	unsigned i;
1462
1463	for (i = 0; !ret && i < ms->nr_mirrors; i++)
1464		ret = fn(ti, ms->mirror[i].dev,
1465			 ms->mirror[i].offset, ti->len, data);
1466
1467	return ret;
1468}
1469
1470static struct target_type mirror_target = {
1471	.name	 = "mirror",
1472	.version = {1, 14, 0},
1473	.module	 = THIS_MODULE,
1474	.ctr	 = mirror_ctr,
1475	.dtr	 = mirror_dtr,
1476	.map	 = mirror_map,
1477	.end_io	 = mirror_end_io,
1478	.presuspend = mirror_presuspend,
1479	.postsuspend = mirror_postsuspend,
1480	.resume	 = mirror_resume,
1481	.status	 = mirror_status,
1482	.iterate_devices = mirror_iterate_devices,
1483};
1484
1485static int __init dm_mirror_init(void)
1486{
1487	int r;
1488
1489	r = dm_register_target(&mirror_target);
1490	if (r < 0) {
1491		DMERR("Failed to register mirror target");
1492		goto bad_target;
1493	}
1494
1495	return 0;
1496
1497bad_target:
1498	return r;
1499}
1500
1501static void __exit dm_mirror_exit(void)
1502{
1503	dm_unregister_target(&mirror_target);
1504}
1505
1506/* Module hooks */
1507module_init(dm_mirror_init);
1508module_exit(dm_mirror_exit);
1509
1510MODULE_DESCRIPTION(DM_NAME " mirror target");
1511MODULE_AUTHOR("Joe Thornber");
1512MODULE_LICENSE("GPL");
v5.9
   1/*
   2 * Copyright (C) 2003 Sistina Software Limited.
   3 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm-bio-record.h"
   9
  10#include <linux/init.h>
  11#include <linux/mempool.h>
  12#include <linux/module.h>
  13#include <linux/pagemap.h>
  14#include <linux/slab.h>
  15#include <linux/workqueue.h>
  16#include <linux/device-mapper.h>
  17#include <linux/dm-io.h>
  18#include <linux/dm-dirty-log.h>
  19#include <linux/dm-kcopyd.h>
  20#include <linux/dm-region-hash.h>
  21
  22#define DM_MSG_PREFIX "raid1"
  23
  24#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
  25
  26#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)
  27
  28#define DM_RAID1_HANDLE_ERRORS	0x01
  29#define DM_RAID1_KEEP_LOG	0x02
  30#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
  31#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)
  32
  33static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
  34
  35/*-----------------------------------------------------------------
  36 * Mirror set structures.
  37 *---------------------------------------------------------------*/
  38enum dm_raid1_error {
  39	DM_RAID1_WRITE_ERROR,
  40	DM_RAID1_FLUSH_ERROR,
  41	DM_RAID1_SYNC_ERROR,
  42	DM_RAID1_READ_ERROR
  43};
  44
  45struct mirror {
  46	struct mirror_set *ms;
  47	atomic_t error_count;
  48	unsigned long error_type;
  49	struct dm_dev *dev;
  50	sector_t offset;
  51};
  52
  53struct mirror_set {
  54	struct dm_target *ti;
  55	struct list_head list;
  56
  57	uint64_t features;
  58
  59	spinlock_t lock;	/* protects the lists */
  60	struct bio_list reads;
  61	struct bio_list writes;
  62	struct bio_list failures;
  63	struct bio_list holds;	/* bios are waiting until suspend */
  64
  65	struct dm_region_hash *rh;
  66	struct dm_kcopyd_client *kcopyd_client;
  67	struct dm_io_client *io_client;
  68
  69	/* recovery */
  70	region_t nr_regions;
  71	int in_sync;
  72	int log_failure;
  73	int leg_failure;
  74	atomic_t suspend;
  75
  76	atomic_t default_mirror;	/* Default mirror */
  77
  78	struct workqueue_struct *kmirrord_wq;
  79	struct work_struct kmirrord_work;
  80	struct timer_list timer;
  81	unsigned long timer_pending;
  82
  83	struct work_struct trigger_event;
  84
  85	unsigned nr_mirrors;
  86	struct mirror mirror[];
  87};
  88
  89DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
  90		"A percentage of time allocated for raid resynchronization");
  91
  92static void wakeup_mirrord(void *context)
  93{
  94	struct mirror_set *ms = context;
  95
  96	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
  97}
  98
  99static void delayed_wake_fn(struct timer_list *t)
 100{
 101	struct mirror_set *ms = from_timer(ms, t, timer);
 102
 103	clear_bit(0, &ms->timer_pending);
 104	wakeup_mirrord(ms);
 105}
 106
 107static void delayed_wake(struct mirror_set *ms)
 108{
 109	if (test_and_set_bit(0, &ms->timer_pending))
 110		return;
 111
 112	ms->timer.expires = jiffies + HZ / 5;
 113	add_timer(&ms->timer);
 114}
 115
 116static void wakeup_all_recovery_waiters(void *context)
 117{
 118	wake_up_all(&_kmirrord_recovery_stopped);
 119}
 120
 121static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
 122{
 123	unsigned long flags;
 124	int should_wake = 0;
 125	struct bio_list *bl;
 126
 127	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
 128	spin_lock_irqsave(&ms->lock, flags);
 129	should_wake = !(bl->head);
 130	bio_list_add(bl, bio);
 131	spin_unlock_irqrestore(&ms->lock, flags);
 132
 133	if (should_wake)
 134		wakeup_mirrord(ms);
 135}
 136
 137static void dispatch_bios(void *context, struct bio_list *bio_list)
 138{
 139	struct mirror_set *ms = context;
 140	struct bio *bio;
 141
 142	while ((bio = bio_list_pop(bio_list)))
 143		queue_bio(ms, bio, WRITE);
 144}
 145
 146struct dm_raid1_bio_record {
 147	struct mirror *m;
 148	/* if details->bi_disk == NULL, details were not saved */
 149	struct dm_bio_details details;
 150	region_t write_region;
 151};
 152
 153/*
 154 * Every mirror should look like this one.
 155 */
 156#define DEFAULT_MIRROR 0
 157
 158/*
 159 * This is yucky.  We squirrel the mirror struct away inside
 160 * bi_next for read/write buffers.  This is safe since the bh
 161 * doesn't get submitted to the lower levels of block layer.
 162 */
 163static struct mirror *bio_get_m(struct bio *bio)
 164{
 165	return (struct mirror *) bio->bi_next;
 166}
 167
 168static void bio_set_m(struct bio *bio, struct mirror *m)
 169{
 170	bio->bi_next = (struct bio *) m;
 171}
 172
 173static struct mirror *get_default_mirror(struct mirror_set *ms)
 174{
 175	return &ms->mirror[atomic_read(&ms->default_mirror)];
 176}
 177
 178static void set_default_mirror(struct mirror *m)
 179{
 180	struct mirror_set *ms = m->ms;
 181	struct mirror *m0 = &(ms->mirror[0]);
 182
 183	atomic_set(&ms->default_mirror, m - m0);
 184}
 185
 186static struct mirror *get_valid_mirror(struct mirror_set *ms)
 187{
 188	struct mirror *m;
 189
 190	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
 191		if (!atomic_read(&m->error_count))
 192			return m;
 193
 194	return NULL;
 195}
 196
 197/* fail_mirror
 198 * @m: mirror device to fail
 199 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 200 *
 201 * If errors are being handled, record the type of
 202 * error encountered for this device.  If this type
 203 * of error has already been recorded, we can return;
 204 * otherwise, we must signal userspace by triggering
 205 * an event.  Additionally, if the device is the
 206 * primary device, we must choose a new primary, but
 207 * only if the mirror is in-sync.
 208 *
 209 * This function must not block.
 210 */
 211static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
 212{
 213	struct mirror_set *ms = m->ms;
 214	struct mirror *new;
 215
 216	ms->leg_failure = 1;
 217
 218	/*
 219	 * error_count is used for nothing more than a
 220	 * simple way to tell if a device has encountered
 221	 * errors.
 222	 */
 223	atomic_inc(&m->error_count);
 224
 225	if (test_and_set_bit(error_type, &m->error_type))
 226		return;
 227
 228	if (!errors_handled(ms))
 229		return;
 230
 231	if (m != get_default_mirror(ms))
 232		goto out;
 233
 234	if (!ms->in_sync && !keep_log(ms)) {
 235		/*
 236		 * Better to issue requests to same failing device
 237		 * than to risk returning corrupt data.
 238		 */
 239		DMERR("Primary mirror (%s) failed while out-of-sync: "
 240		      "Reads may fail.", m->dev->name);
 241		goto out;
 242	}
 243
 244	new = get_valid_mirror(ms);
 245	if (new)
 246		set_default_mirror(new);
 247	else
 248		DMWARN("All sides of mirror have failed.");
 249
 250out:
 251	schedule_work(&ms->trigger_event);
 252}
 253
 254static int mirror_flush(struct dm_target *ti)
 255{
 256	struct mirror_set *ms = ti->private;
 257	unsigned long error_bits;
 258
 259	unsigned int i;
 260	struct dm_io_region io[MAX_NR_MIRRORS];
 261	struct mirror *m;
 262	struct dm_io_request io_req = {
 263		.bi_op = REQ_OP_WRITE,
 264		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
 265		.mem.type = DM_IO_KMEM,
 266		.mem.ptr.addr = NULL,
 267		.client = ms->io_client,
 268	};
 269
 270	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
 271		io[i].bdev = m->dev->bdev;
 272		io[i].sector = 0;
 273		io[i].count = 0;
 274	}
 275
 276	error_bits = -1;
 277	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
 278	if (unlikely(error_bits != 0)) {
 279		for (i = 0; i < ms->nr_mirrors; i++)
 280			if (test_bit(i, &error_bits))
 281				fail_mirror(ms->mirror + i,
 282					    DM_RAID1_FLUSH_ERROR);
 283		return -EIO;
 284	}
 285
 286	return 0;
 287}
 288
 289/*-----------------------------------------------------------------
 290 * Recovery.
 291 *
 292 * When a mirror is first activated we may find that some regions
 293 * are in the no-sync state.  We have to recover these by
 294 * recopying from the default mirror to all the others.
 295 *---------------------------------------------------------------*/
 296static void recovery_complete(int read_err, unsigned long write_err,
 297			      void *context)
 298{
 299	struct dm_region *reg = context;
 300	struct mirror_set *ms = dm_rh_region_context(reg);
 301	int m, bit = 0;
 302
 303	if (read_err) {
 304		/* Read error means the failure of default mirror. */
 305		DMERR_LIMIT("Unable to read primary mirror during recovery");
 306		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
 307	}
 308
 309	if (write_err) {
 310		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
 311			    write_err);
 312		/*
 313		 * Bits correspond to devices (excluding default mirror).
 314		 * The default mirror cannot change during recovery.
 315		 */
 316		for (m = 0; m < ms->nr_mirrors; m++) {
 317			if (&ms->mirror[m] == get_default_mirror(ms))
 318				continue;
 319			if (test_bit(bit, &write_err))
 320				fail_mirror(ms->mirror + m,
 321					    DM_RAID1_SYNC_ERROR);
 322			bit++;
 323		}
 324	}
 325
 326	dm_rh_recovery_end(reg, !(read_err || write_err));
 327}
 328
 329static void recover(struct mirror_set *ms, struct dm_region *reg)
 330{
 331	unsigned i;
 332	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
 333	struct mirror *m;
 334	unsigned long flags = 0;
 335	region_t key = dm_rh_get_region_key(reg);
 336	sector_t region_size = dm_rh_get_region_size(ms->rh);
 337
 338	/* fill in the source */
 339	m = get_default_mirror(ms);
 340	from.bdev = m->dev->bdev;
 341	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
 342	if (key == (ms->nr_regions - 1)) {
 343		/*
 344		 * The final region may be smaller than
 345		 * region_size.
 346		 */
 347		from.count = ms->ti->len & (region_size - 1);
 348		if (!from.count)
 349			from.count = region_size;
 350	} else
 351		from.count = region_size;
 352
 353	/* fill in the destinations */
 354	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
 355		if (&ms->mirror[i] == get_default_mirror(ms))
 356			continue;
 357
 358		m = ms->mirror + i;
 359		dest->bdev = m->dev->bdev;
 360		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
 361		dest->count = from.count;
 362		dest++;
 363	}
 364
 365	/* hand to kcopyd */
 366	if (!errors_handled(ms))
 367		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
 368
 369	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
 370		       flags, recovery_complete, reg);
 371}
 372
 373static void reset_ms_flags(struct mirror_set *ms)
 374{
 375	unsigned int m;
 376
 377	ms->leg_failure = 0;
 378	for (m = 0; m < ms->nr_mirrors; m++) {
 379		atomic_set(&(ms->mirror[m].error_count), 0);
 380		ms->mirror[m].error_type = 0;
 381	}
 382}
 383
 384static void do_recovery(struct mirror_set *ms)
 385{
 386	struct dm_region *reg;
 387	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 388
 389	/*
 390	 * Start quiescing some regions.
 391	 */
 392	dm_rh_recovery_prepare(ms->rh);
 393
 394	/*
 395	 * Copy any already quiesced regions.
 396	 */
 397	while ((reg = dm_rh_recovery_start(ms->rh)))
 398		recover(ms, reg);
 399
 400	/*
 401	 * Update the in sync flag.
 402	 */
 403	if (!ms->in_sync &&
 404	    (log->type->get_sync_count(log) == ms->nr_regions)) {
 405		/* the sync is complete */
 406		dm_table_event(ms->ti->table);
 407		ms->in_sync = 1;
 408		reset_ms_flags(ms);
 409	}
 410}
 411
 412/*-----------------------------------------------------------------
 413 * Reads
 414 *---------------------------------------------------------------*/
 415static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
 416{
 417	struct mirror *m = get_default_mirror(ms);
 418
 419	do {
 420		if (likely(!atomic_read(&m->error_count)))
 421			return m;
 422
 423		if (m-- == ms->mirror)
 424			m += ms->nr_mirrors;
 425	} while (m != get_default_mirror(ms));
 426
 427	return NULL;
 428}
 429
 430static int default_ok(struct mirror *m)
 431{
 432	struct mirror *default_mirror = get_default_mirror(m->ms);
 433
 434	return !atomic_read(&default_mirror->error_count);
 435}
 436
 437static int mirror_available(struct mirror_set *ms, struct bio *bio)
 438{
 439	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 440	region_t region = dm_rh_bio_to_region(ms->rh, bio);
 441
 442	if (log->type->in_sync(log, region, 0))
 443		return choose_mirror(ms,  bio->bi_iter.bi_sector) ? 1 : 0;
 444
 445	return 0;
 446}
 447
 448/*
 449 * remap a buffer to a particular mirror.
 450 */
 451static sector_t map_sector(struct mirror *m, struct bio *bio)
 452{
 453	if (unlikely(!bio->bi_iter.bi_size))
 454		return 0;
 455	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 456}
 457
 458static void map_bio(struct mirror *m, struct bio *bio)
 459{
 460	bio_set_dev(bio, m->dev->bdev);
 461	bio->bi_iter.bi_sector = map_sector(m, bio);
 462}
 463
 464static void map_region(struct dm_io_region *io, struct mirror *m,
 465		       struct bio *bio)
 466{
 467	io->bdev = m->dev->bdev;
 468	io->sector = map_sector(m, bio);
 469	io->count = bio_sectors(bio);
 470}
 471
 472static void hold_bio(struct mirror_set *ms, struct bio *bio)
 473{
 474	/*
 475	 * Lock is required to avoid race condition during suspend
 476	 * process.
 477	 */
 478	spin_lock_irq(&ms->lock);
 479
 480	if (atomic_read(&ms->suspend)) {
 481		spin_unlock_irq(&ms->lock);
 482
 483		/*
 484		 * If device is suspended, complete the bio.
 485		 */
 486		if (dm_noflush_suspending(ms->ti))
 487			bio->bi_status = BLK_STS_DM_REQUEUE;
 488		else
 489			bio->bi_status = BLK_STS_IOERR;
 490
 491		bio_endio(bio);
 492		return;
 493	}
 494
 495	/*
 496	 * Hold bio until the suspend is complete.
 497	 */
 498	bio_list_add(&ms->holds, bio);
 499	spin_unlock_irq(&ms->lock);
 500}
 501
 502/*-----------------------------------------------------------------
 503 * Reads
 504 *---------------------------------------------------------------*/
 505static void read_callback(unsigned long error, void *context)
 506{
 507	struct bio *bio = context;
 508	struct mirror *m;
 509
 510	m = bio_get_m(bio);
 511	bio_set_m(bio, NULL);
 512
 513	if (likely(!error)) {
 514		bio_endio(bio);
 515		return;
 516	}
 517
 518	fail_mirror(m, DM_RAID1_READ_ERROR);
 519
 520	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
 521		DMWARN_LIMIT("Read failure on mirror device %s.  "
 522			     "Trying alternative device.",
 523			     m->dev->name);
 524		queue_bio(m->ms, bio, bio_data_dir(bio));
 525		return;
 526	}
 527
 528	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
 529		    m->dev->name);
 530	bio_io_error(bio);
 531}
 532
 533/* Asynchronous read. */
 534static void read_async_bio(struct mirror *m, struct bio *bio)
 535{
 536	struct dm_io_region io;
 537	struct dm_io_request io_req = {
 538		.bi_op = REQ_OP_READ,
 539		.bi_op_flags = 0,
 540		.mem.type = DM_IO_BIO,
 541		.mem.ptr.bio = bio,
 542		.notify.fn = read_callback,
 543		.notify.context = bio,
 544		.client = m->ms->io_client,
 545	};
 546
 547	map_region(&io, m, bio);
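	/* Stash the mirror in the bio so read_callback() can retrieve it. */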
 548	bio_set_m(bio, m);
 549	BUG_ON(dm_io(&io_req, 1, &io, NULL));
 550}
 551
 552static inline int region_in_sync(struct mirror_set *ms, region_t region,
 553				 int may_block)
 554{
 555	int state = dm_rh_get_state(ms->rh, region, may_block);
 556	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
 557}
 558
 559static void do_reads(struct mirror_set *ms, struct bio_list *reads)
 560{
 561	region_t region;
 562	struct bio *bio;
 563	struct mirror *m;
 564
 565	while ((bio = bio_list_pop(reads))) {
 566		region = dm_rh_bio_to_region(ms->rh, bio);
 567		m = get_default_mirror(ms);
 568
 569		/*
 570		 * We can only read balance if the region is in sync.
 571		 */
 572		if (likely(region_in_sync(ms, region, 1)))
 573			m = choose_mirror(ms, bio->bi_iter.bi_sector);
 574		else if (m && atomic_read(&m->error_count))
 575			m = NULL;
 576
 577		if (likely(m))
 578			read_async_bio(m, bio);
 579		else
 580			bio_io_error(bio);
 581	}
 582}
 583
 584/*-----------------------------------------------------------------
 585 * Writes.
 586 *
 587 * We do different things with the write I/O depending on the
 588 * state of the region that it's in:
 589 *
 590 * SYNC: 	increment pending, use dm-io to write to *all* mirrors
 591 * RECOVERING:	delay the I/O until recovery completes
 592 * NOSYNC:	increment pending, just write to the default mirror
 593 *---------------------------------------------------------------*/
 594
 595
 596static void write_callback(unsigned long error, void *context)
 597{
 598	unsigned i;
 599	struct bio *bio = (struct bio *) context;
 600	struct mirror_set *ms;
 601	int should_wake = 0;
 602	unsigned long flags;
 603
 604	ms = bio_get_m(bio)->ms;
 605	bio_set_m(bio, NULL);
 606
 607	/*
 608	 * NOTE: We don't decrement the pending count here,
 609	 * instead it is done by the target's end_io function (mirror_end_io).
 610	 * This way we handle both writes to SYNC and NOSYNC
 611	 * regions with the same code.
 612	 */
 613	if (likely(!error)) {
 614		bio_endio(bio);
 615		return;
 616	}
 617
 618	/*
 619	 * If the bio is a discard, return an error but do not
 620	 * degrade the array.
 621	 */
 622	if (bio_op(bio) == REQ_OP_DISCARD) {
 623		bio->bi_status = BLK_STS_NOTSUPP;
 624		bio_endio(bio);
 625		return;
 626	}
 627
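	/*
	 * The dm-io error argument is a bitset with one bit per
	 * destination: bit i is set if the write to mirror i failed.
	 */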
 628	for (i = 0; i < ms->nr_mirrors; i++)
 629		if (test_bit(i, &error))
 630			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
 631
 632	/*
 633	 * Need to raise event.  Since raising
 634	 * events can block, we need to do it in
 635	 * the main thread.
 636	 */
 637	spin_lock_irqsave(&ms->lock, flags);
 638	if (!ms->failures.head)
 639		should_wake = 1;
 640	bio_list_add(&ms->failures, bio);
 641	spin_unlock_irqrestore(&ms->lock, flags);
 642	if (should_wake)
 643		wakeup_mirrord(ms);
 644}
 645
 646static void do_write(struct mirror_set *ms, struct bio *bio)
 647{
 648	unsigned int i;
 649	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
 650	struct mirror *m;
 651	struct dm_io_request io_req = {
 652		.bi_op = REQ_OP_WRITE,
 653		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
 654		.mem.type = DM_IO_BIO,
 655		.mem.ptr.bio = bio,
 656		.notify.fn = write_callback,
 657		.notify.context = bio,
 658		.client = ms->io_client,
 659	};
 660
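	/*
	 * Discards carry no data payload, so issue them through dm-io's
	 * kmem path with a NULL buffer.
	 */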
 661	if (bio_op(bio) == REQ_OP_DISCARD) {
 662		io_req.bi_op = REQ_OP_DISCARD;
 663		io_req.mem.type = DM_IO_KMEM;
 664		io_req.mem.ptr.addr = NULL;
 665	}
 666
 667	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
 668		map_region(dest++, m, bio);
 669
 670	/*
 671	 * Use default mirror because we only need it to retrieve the reference
 672	 * to the mirror set in write_callback().
 673	 */
 674	bio_set_m(bio, get_default_mirror(ms));
 675
 676	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
 677}
 678
 679static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 680{
 681	int state;
 682	struct bio *bio;
 683	struct bio_list sync, nosync, recover, *this_list = NULL;
 684	struct bio_list requeue;
 685	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
 686	region_t region;
 687
 688	if (!writes->head)
 689		return;
 690
 691	/*
 692	 * Classify each write.
 693	 */
 694	bio_list_init(&sync);
 695	bio_list_init(&nosync);
 696	bio_list_init(&recover);
 697	bio_list_init(&requeue);
 698
 699	while ((bio = bio_list_pop(writes))) {
 700		if ((bio->bi_opf & REQ_PREFLUSH) ||
 701		    (bio_op(bio) == REQ_OP_DISCARD)) {
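			/*
			 * Flushes and discards must reach every mirror, so
			 * treat them like writes to in-sync regions.
			 */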
 702			bio_list_add(&sync, bio);
 703			continue;
 704		}
 705
 706		region = dm_rh_bio_to_region(ms->rh, bio);
 707
 708		if (log->type->is_remote_recovering &&
 709		    log->type->is_remote_recovering(log, region)) {
 710			bio_list_add(&requeue, bio);
 711			continue;
 712		}
 713
 714		state = dm_rh_get_state(ms->rh, region, 1);
 715		switch (state) {
 716		case DM_RH_CLEAN:
 717		case DM_RH_DIRTY:
 718			this_list = &sync;
 719			break;
 720
 721		case DM_RH_NOSYNC:
 722			this_list = &nosync;
 723			break;
 724
 725		case DM_RH_RECOVERING:
 726			this_list = &recover;
 727			break;
 728		}
 729
 730		bio_list_add(this_list, bio);
 731	}
 732
 733	/*
 734	 * Add bios that are delayed due to remote recovery
 735	 * back onto the write queue.
 736	 */
 737	if (unlikely(requeue.head)) {
 738		spin_lock_irq(&ms->lock);
 739		bio_list_merge(&ms->writes, &requeue);
 740		spin_unlock_irq(&ms->lock);
 741		delayed_wake(ms);
 742	}
 743
 744	/*
 745	 * Increment the pending counts for any regions that will
 746	 * be written to (writes to recovering regions are going to
 747	 * be delayed).
 748	 */
 749	dm_rh_inc_pending(ms->rh, &sync);
 750	dm_rh_inc_pending(ms->rh, &nosync);
 751
 752	/*
 753	 * If the flush fails on a previous call and succeeds here,
 754	 * we must not reset the log_failure variable.  We need
 755	 * userspace interaction to do that.
 756	 */
 757	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
 758
 759	/*
 760	 * Dispatch io.
 761	 */
 762	if (unlikely(ms->log_failure) && errors_handled(ms)) {
 763		spin_lock_irq(&ms->lock);
 764		bio_list_merge(&ms->failures, &sync);
 765		spin_unlock_irq(&ms->lock);
 766		wakeup_mirrord(ms);
 767	} else
 768		while ((bio = bio_list_pop(&sync)))
 769			do_write(ms, bio);
 770
 771	while ((bio = bio_list_pop(&recover)))
 772		dm_rh_delay(ms->rh, bio);
 773
 774	while ((bio = bio_list_pop(&nosync))) {
 775		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
 776			spin_lock_irq(&ms->lock);
 777			bio_list_add(&ms->failures, bio);
 778			spin_unlock_irq(&ms->lock);
 779			wakeup_mirrord(ms);
 780		} else {
 781			map_bio(get_default_mirror(ms), bio);
 782			submit_bio_noacct(bio);
 783		}
 784	}
 785}
 786
 787static void do_failures(struct mirror_set *ms, struct bio_list *failures)
 788{
 789	struct bio *bio;
 790
 791	if (likely(!failures->head))
 792		return;
 793
 794	/*
 795	 * If the log has failed, unattempted writes are being
 796	 * put on the holds list.  We can't issue those writes
 797	 * until a log has been marked, so we must store them.
 798	 *
 799	 * If a 'noflush' suspend is in progress, we can requeue
 800	 * the I/Os to the core.  This gives userspace a chance
 801	 * to reconfigure the mirror, at which point the core
 802	 * will reissue the writes.  If the 'noflush' flag is
 803	 * not set, we have no choice but to return errors.
 804	 *
 805	 * Some writes on the failures list may have been
 806	 * submitted before the log failure and represent a
 807	 * failure to write to one of the devices.  It is ok
 808	 * for us to treat them the same and requeue them
 809	 * as well.
 810	 */
 811	while ((bio = bio_list_pop(failures))) {
 812		if (!ms->log_failure) {
 813			ms->in_sync = 0;
 814			dm_rh_mark_nosync(ms->rh, bio);
 815		}
 816
 817		/*
 818		 * If all the legs are dead, fail the I/O.
 819		 * If the log device has failed and keep_log is enabled,
 820		 * fail the I/O.
 821		 *
 822		 * If we have been told to handle errors, and keep_log
 823		 * isn't enabled, hold the bio and wait for userspace to
 824		 * deal with the problem.
 825		 *
 826		 * Otherwise pretend that the I/O succeeded. (This would
 827		 * be wrong if the failed leg returned after reboot and
 828		 * got replicated back to the good legs.)
 829		 */
 830		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
 831			bio_io_error(bio);
 832		else if (errors_handled(ms) && !keep_log(ms))
 833			hold_bio(ms, bio);
 834		else
 835			bio_endio(bio);
 836	}
 837}
 838
 839static void trigger_event(struct work_struct *work)
 840{
 841	struct mirror_set *ms =
 842		container_of(work, struct mirror_set, trigger_event);
 843
 844	dm_table_event(ms->ti->table);
 845}
 846
 847/*-----------------------------------------------------------------
 848 * kmirrord
 849 *---------------------------------------------------------------*/
 850static void do_mirror(struct work_struct *work)
 851{
 852	struct mirror_set *ms = container_of(work, struct mirror_set,
 853					     kmirrord_work);
 854	struct bio_list reads, writes, failures;
 855	unsigned long flags;
 856
 857	spin_lock_irqsave(&ms->lock, flags);
 858	reads = ms->reads;
 859	writes = ms->writes;
 860	failures = ms->failures;
 861	bio_list_init(&ms->reads);
 862	bio_list_init(&ms->writes);
 863	bio_list_init(&ms->failures);
 864	spin_unlock_irqrestore(&ms->lock, flags);
 865
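	/*
	 * Fold pending region state changes into the region hash and dirty
	 * log before kicking recovery and dispatching the queued reads,
	 * writes and failures.
	 */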
 866	dm_rh_update_states(ms->rh, errors_handled(ms));
 867	do_recovery(ms);
 868	do_reads(ms, &reads);
 869	do_writes(ms, &writes);
 870	do_failures(ms, &failures);
 871}
 872
 873/*-----------------------------------------------------------------
 874 * Target functions
 875 *---------------------------------------------------------------*/
 876static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 877					uint32_t region_size,
 878					struct dm_target *ti,
 879					struct dm_dirty_log *dl)
 880{
 881	struct mirror_set *ms =
 882		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);
 883
 884	if (!ms) {
 885		ti->error = "Cannot allocate mirror context";
 886		return NULL;
 887	}
 888
 889	spin_lock_init(&ms->lock);
 890	bio_list_init(&ms->reads);
 891	bio_list_init(&ms->writes);
 892	bio_list_init(&ms->failures);
 893	bio_list_init(&ms->holds);
 894
 895	ms->ti = ti;
 896	ms->nr_mirrors = nr_mirrors;
 897	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
 898	ms->in_sync = 0;
 899	ms->log_failure = 0;
 900	ms->leg_failure = 0;
 901	atomic_set(&ms->suspend, 0);
 902	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
 903
 904	ms->io_client = dm_io_client_create();
 905	if (IS_ERR(ms->io_client)) {
 906		ti->error = "Error creating dm_io client";
 907		kfree(ms);
 908		return NULL;
 909	}
 910
 911	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
 912				       wakeup_all_recovery_waiters,
 913				       ms->ti->begin, MAX_RECOVERY,
 914				       dl, region_size, ms->nr_regions);
 915	if (IS_ERR(ms->rh)) {
 916		ti->error = "Error creating dirty region hash";
 917		dm_io_client_destroy(ms->io_client);
 918		kfree(ms);
 919		return NULL;
 920	}
 921
 922	return ms;
 923}
 924
 925static void free_context(struct mirror_set *ms, struct dm_target *ti,
 926			 unsigned int m)
 927{
 928	while (m--)
 929		dm_put_device(ti, ms->mirror[m].dev);
 930
 931	dm_io_client_destroy(ms->io_client);
 932	dm_region_hash_destroy(ms->rh);
 933	kfree(ms);
 934}
 935
 936static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
 937		      unsigned int mirror, char **argv)
 938{
 939	unsigned long long offset;
 940	char dummy;
 941	int ret;
 942
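	/* Reject offsets that do not fit in sector_t on this architecture. */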
 943	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
 944	    offset != (sector_t)offset) {
 945		ti->error = "Invalid offset";
 946		return -EINVAL;
 947	}
 948
 949	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 950			    &ms->mirror[mirror].dev);
 951	if (ret) {
 952		ti->error = "Device lookup failure";
 953		return ret;
 954	}
 955
 956	ms->mirror[mirror].ms = ms;
 957	atomic_set(&(ms->mirror[mirror].error_count), 0);
 958	ms->mirror[mirror].error_type = 0;
 959	ms->mirror[mirror].offset = offset;
 960
 961	return 0;
 962}
 963
 964/*
 965 * Create dirty log: log_type #log_params <log_params>
 966 */
 967static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
 968					     unsigned argc, char **argv,
 969					     unsigned *args_used)
 970{
 971	unsigned param_count;
 972	struct dm_dirty_log *dl;
 973	char dummy;
 974
 975	if (argc < 2) {
 976		ti->error = "Insufficient mirror log arguments";
 977		return NULL;
 978	}
 979
 980	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
 981		ti->error = "Invalid mirror log argument count";
 982		return NULL;
 983	}
 984
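	/* log_type and #log_params themselves, plus the log parameters. */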
 985	*args_used = 2 + param_count;
 986
 987	if (argc < *args_used) {
 988		ti->error = "Insufficient mirror log arguments";
 989		return NULL;
 990	}
 991
 992	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
 993				 argv + 2);
 994	if (!dl) {
 995		ti->error = "Error creating mirror dirty log";
 996		return NULL;
 997	}
 998
 999	return dl;
1000}
1001
1002static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
1003			  unsigned *args_used)
1004{
1005	unsigned num_features;
1006	struct dm_target *ti = ms->ti;
1007	char dummy;
1008	int i;
1009
1010	*args_used = 0;
1011
1012	if (!argc)
1013		return 0;
1014
1015	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
1016		ti->error = "Invalid number of features";
1017		return -EINVAL;
1018	}
1019
1020	argc--;
1021	argv++;
1022	(*args_used)++;
1023
1024	if (num_features > argc) {
1025		ti->error = "Not enough arguments to support feature count";
1026		return -EINVAL;
1027	}
1028
1029	for (i = 0; i < num_features; i++) {
1030		if (!strcmp("handle_errors", argv[0]))
1031			ms->features |= DM_RAID1_HANDLE_ERRORS;
1032		else if (!strcmp("keep_log", argv[0]))
1033			ms->features |= DM_RAID1_KEEP_LOG;
1034		else {
1035			ti->error = "Unrecognised feature requested";
1036			return -EINVAL;
1037		}
1038
1039		argc--;
1040		argv++;
1041		(*args_used)++;
1042	}
1043	if (!errors_handled(ms) && keep_log(ms)) {
1044		ti->error = "keep_log feature requires the handle_errors feature";
1045		return -EINVAL;
1046	}
1047
1048	return 0;
1049}
1050
1051/*
1052 * Construct a mirror mapping:
1053 *
1054 * log_type #log_params <log_params>
1055 * #mirrors [mirror_path offset]{2,}
1056 * [#features <features>]
1057 *
1058 * log_type is "core" or "disk"
1059 * #log_params is between 1 and 3
1060 *
1061 * If present, supported features are "handle_errors" and "keep_log".
1062 */
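/*
 * Illustrative table line (hypothetical devices and sizes, in sectors):
 * a two-leg mirror over a "core" log with a single parameter (the region
 * size) and the handle_errors feature enabled:
 *
 *   0 819200 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors
 */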
1063static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1064{
1065	int r;
1066	unsigned int nr_mirrors, m, args_used;
1067	struct mirror_set *ms;
1068	struct dm_dirty_log *dl;
1069	char dummy;
1070
1071	dl = create_dirty_log(ti, argc, argv, &args_used);
1072	if (!dl)
1073		return -EINVAL;
1074
1075	argv += args_used;
1076	argc -= args_used;
1077
1078	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
1079	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
1080		ti->error = "Invalid number of mirrors";
1081		dm_dirty_log_destroy(dl);
1082		return -EINVAL;
1083	}
1084
1085	argv++, argc--;
1086
1087	if (argc < nr_mirrors * 2) {
1088		ti->error = "Too few mirror arguments";
1089		dm_dirty_log_destroy(dl);
1090		return -EINVAL;
1091	}
1092
1093	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
1094	if (!ms) {
1095		dm_dirty_log_destroy(dl);
1096		return -ENOMEM;
1097	}
1098
1099	/* Get the mirror parameter sets */
1100	for (m = 0; m < nr_mirrors; m++) {
1101		r = get_mirror(ms, ti, m, argv);
1102		if (r) {
1103			free_context(ms, ti, m);
1104			return r;
1105		}
1106		argv += 2;
1107		argc -= 2;
1108	}
1109
1110	ti->private = ms;
1111
1112	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
1113	if (r)
1114		goto err_free_context;
1115
1116	ti->num_flush_bios = 1;
1117	ti->num_discard_bios = 1;
1118	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
1119
1120	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
1121	if (!ms->kmirrord_wq) {
1122		DMERR("couldn't start kmirrord");
1123		r = -ENOMEM;
1124		goto err_free_context;
1125	}
1126	INIT_WORK(&ms->kmirrord_work, do_mirror);
1127	timer_setup(&ms->timer, delayed_wake_fn, 0);
1128	ms->timer_pending = 0;
1129	INIT_WORK(&ms->trigger_event, trigger_event);
1130
1131	r = parse_features(ms, argc, argv, &args_used);
1132	if (r)
1133		goto err_destroy_wq;
1134
1135	argv += args_used;
1136	argc -= args_used;
1137
1138	/*
1139	 * Any read-balancing addition depends on the
1140	 * DM_RAID1_HANDLE_ERRORS flag being present.
1141	 * This is because the decision to balance depends
1142	 * on the sync state of a region.  If the flag is
1143	 * not present, errors are ignored and the sync
1144	 * state may therefore be inaccurate.
1145	 */
1146
1147	if (argc) {
1148		ti->error = "Too many mirror arguments";
1149		r = -EINVAL;
1150		goto err_destroy_wq;
1151	}
1152
1153	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1154	if (IS_ERR(ms->kcopyd_client)) {
1155		r = PTR_ERR(ms->kcopyd_client);
1156		goto err_destroy_wq;
1157	}
1158
1159	wakeup_mirrord(ms);
1160	return 0;
1161
1162err_destroy_wq:
1163	destroy_workqueue(ms->kmirrord_wq);
1164err_free_context:
1165	free_context(ms, ti, ms->nr_mirrors);
1166	return r;
1167}
1168
1169static void mirror_dtr(struct dm_target *ti)
1170{
1171	struct mirror_set *ms = (struct mirror_set *) ti->private;
1172
1173	del_timer_sync(&ms->timer);
1174	flush_workqueue(ms->kmirrord_wq);
1175	flush_work(&ms->trigger_event);
1176	dm_kcopyd_client_destroy(ms->kcopyd_client);
1177	destroy_workqueue(ms->kmirrord_wq);
1178	free_context(ms, ti, ms->nr_mirrors);
1179}
1180
1181/*
1182 * Mirror mapping function
1183 */
1184static int mirror_map(struct dm_target *ti, struct bio *bio)
1185{
1186	int r, rw = bio_data_dir(bio);
1187	struct mirror *m;
1188	struct mirror_set *ms = ti->private;
1189	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1190	struct dm_raid1_bio_record *bio_record =
1191	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1192
1193	bio_record->details.bi_disk = NULL;
1194
1195	if (rw == WRITE) {
1196		/* Save region for mirror_end_io() handler */
1197		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
1198		queue_bio(ms, bio, rw);
1199		return DM_MAPIO_SUBMITTED;
1200	}
1201
1202	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1203	if (r < 0 && r != -EWOULDBLOCK)
1204		return DM_MAPIO_KILL;
1205
1206	/*
1207	 * If the region is not in-sync, queue the bio.
1208	 */
1209	if (!r || (r == -EWOULDBLOCK)) {
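		/*
		 * Readahead is purely opportunistic; fail it rather than
		 * wait for the region to come in sync.
		 */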
1210		if (bio->bi_opf & REQ_RAHEAD)
1211			return DM_MAPIO_KILL;
1212
1213		queue_bio(ms, bio, rw);
1214		return DM_MAPIO_SUBMITTED;
1215	}
1216
1217	/*
1218	 * The region is in-sync and we can perform reads directly.
1219	 * Store enough information so we can retry if it fails.
1220	 */
1221	m = choose_mirror(ms, bio->bi_iter.bi_sector);
1222	if (unlikely(!m))
1223		return DM_MAPIO_KILL;
1224
1225	dm_bio_record(&bio_record->details, bio);
1226	bio_record->m = m;
1227
1228	map_bio(m, bio);
1229
1230	return DM_MAPIO_REMAPPED;
1231}
1232
1233static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1234		blk_status_t *error)
1235{
1236	int rw = bio_data_dir(bio);
1237	struct mirror_set *ms = (struct mirror_set *) ti->private;
1238	struct mirror *m = NULL;
1239	struct dm_bio_details *bd = NULL;
1240	struct dm_raid1_bio_record *bio_record =
1241	  dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
1242
1243	/*
1244	 * We need to decrement the pending count if this was a write.
1245	 */
1246	if (rw == WRITE) {
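		/*
		 * Flush and discard writes were never accounted against a
		 * region by dm_rh_inc_pending(), so there is nothing to
		 * decrement for them.
		 */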
1247		if (!(bio->bi_opf & REQ_PREFLUSH) &&
1248		    bio_op(bio) != REQ_OP_DISCARD)
1249			dm_rh_dec(ms->rh, bio_record->write_region);
1250		return DM_ENDIO_DONE;
1251	}
1252
1253	if (*error == BLK_STS_NOTSUPP)
1254		goto out;
1255
1256	if (bio->bi_opf & REQ_RAHEAD)
1257		goto out;
1258
1259	if (unlikely(*error)) {
1260		if (!bio_record->details.bi_disk) {
1261			/*
1262			 * There wasn't enough memory to record necessary
1263			 * information for a retry or there was no other
1264			 * mirror in-sync.
1265			 */
1266			DMERR_LIMIT("Mirror read failed.");
1267			return DM_ENDIO_DONE;
1268		}
1269
1270		m = bio_record->m;
1271
1272		DMERR("Mirror read failed from %s. Trying alternative device.",
1273		      m->dev->name);
1274
1275		fail_mirror(m, DM_RAID1_READ_ERROR);
1276
1277		/*
1278		 * A failed read is requeued for another attempt using an intact
1279		 * mirror.
1280		 */
1281		if (default_ok(m) || mirror_available(ms, bio)) {
1282			bd = &bio_record->details;
1283
1284			dm_bio_restore(bd, bio);
1285			bio_record->details.bi_disk = NULL;
1286			bio->bi_status = 0;
1287
1288			queue_bio(ms, bio, rw);
1289			return DM_ENDIO_INCOMPLETE;
1290		}
1291		DMERR("All replicated volumes dead, failing I/O");
1292	}
1293
1294out:
1295	bio_record->details.bi_disk = NULL;
1296
1297	return DM_ENDIO_DONE;
1298}
1299
1300static void mirror_presuspend(struct dm_target *ti)
1301{
1302	struct mirror_set *ms = (struct mirror_set *) ti->private;
1303	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1304
1305	struct bio_list holds;
1306	struct bio *bio;
1307
1308	atomic_set(&ms->suspend, 1);
1309
1310	/*
1311	 * Process bios in the hold list so that recovery waiting
1312	 * on them can make progress. After this, no bio has a
1313	 * chance to be added to the hold list because ms->suspend
1314	 * is set.
1315	 */
1316	spin_lock_irq(&ms->lock);
1317	holds = ms->holds;
1318	bio_list_init(&ms->holds);
1319	spin_unlock_irq(&ms->lock);
1320
1321	while ((bio = bio_list_pop(&holds)))
1322		hold_bio(ms, bio);
1323
1324	/*
1325	 * We must finish up all the work that we've
1326	 * generated (i.e. recovery work).
1327	 */
1328	dm_rh_stop_recovery(ms->rh);
1329
1330	wait_event(_kmirrord_recovery_stopped,
1331		   !dm_rh_recovery_in_flight(ms->rh));
1332
1333	if (log->type->presuspend && log->type->presuspend(log))
1334		/* FIXME: need better error handling */
1335		DMWARN("log presuspend failed");
1336
1337	/*
1338	 * Now that recovery is complete/stopped and the
1339	 * delayed bios are queued, we need to wait for
1340	 * the worker thread to complete.  This way,
1341	 * we know that all of our I/O has been pushed.
1342	 */
1343	flush_workqueue(ms->kmirrord_wq);
1344}
1345
1346static void mirror_postsuspend(struct dm_target *ti)
1347{
1348	struct mirror_set *ms = ti->private;
1349	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1350
1351	if (log->type->postsuspend && log->type->postsuspend(log))
1352		/* FIXME: need better error handling */
1353		DMWARN("log postsuspend failed");
1354}
1355
1356static void mirror_resume(struct dm_target *ti)
1357{
1358	struct mirror_set *ms = ti->private;
1359	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1360
1361	atomic_set(&ms->suspend, 0);
1362	if (log->type->resume && log->type->resume(log))
1363		/* FIXME: need better error handling */
1364		DMWARN("log resume failed");
1365	dm_rh_start_recovery(ms->rh);
1366}
1367
1368/*
1369 * device_status_char
1370 * @m: mirror device/leg we want the status of
1371 *
1372 * We return one character representing the most severe error
1373 * we have encountered.
1374 *    A => Alive - No failures
1375 *    D => Dead - A write failure occurred leaving mirror out-of-sync
1376 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
1377 *    R => Read - A read failure occurred, mirror data unaffected
1378 *
1379 * Returns: <char>
1380 */
1381static char device_status_char(struct mirror *m)
1382{
1383	if (!atomic_read(&(m->error_count)))
1384		return 'A';
1385
1386	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
1387		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
1388		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
1389		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
1390}
1391
1392
1393static void mirror_status(struct dm_target *ti, status_type_t type,
1394			  unsigned status_flags, char *result, unsigned maxlen)
1395{
1396	unsigned int m, sz = 0;
1397	int num_feature_args = 0;
1398	struct mirror_set *ms = (struct mirror_set *) ti->private;
1399	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1400	char buffer[MAX_NR_MIRRORS + 1];
1401
1402	switch (type) {
1403	case STATUSTYPE_INFO:
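		/*
		 * Illustrative INFO output (hypothetical 253:x devices):
		 *   "2 253:4 253:5 500/500 1 AA <dirty log status>"
		 * i.e. leg count, each leg, recovered/total regions, a
		 * literal '1', one health character per leg, then the
		 * dirty log's own status.
		 */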
1404		DMEMIT("%d ", ms->nr_mirrors);
1405		for (m = 0; m < ms->nr_mirrors; m++) {
1406			DMEMIT("%s ", ms->mirror[m].dev->name);
1407			buffer[m] = device_status_char(&(ms->mirror[m]));
1408		}
1409		buffer[m] = '\0';
1410
1411		DMEMIT("%llu/%llu 1 %s ",
1412		      (unsigned long long)log->type->get_sync_count(log),
1413		      (unsigned long long)ms->nr_regions, buffer);
1414
1415		sz += log->type->status(log, type, result+sz, maxlen-sz);
1416
1417		break;
1418
1419	case STATUSTYPE_TABLE:
1420		sz = log->type->status(log, type, result, maxlen);
1421
1422		DMEMIT("%d", ms->nr_mirrors);
1423		for (m = 0; m < ms->nr_mirrors; m++)
1424			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1425			       (unsigned long long)ms->mirror[m].offset);
1426
1427		num_feature_args += !!errors_handled(ms);
1428		num_feature_args += !!keep_log(ms);
1429		if (num_feature_args) {
1430			DMEMIT(" %d", num_feature_args);
1431			if (errors_handled(ms))
1432				DMEMIT(" handle_errors");
1433			if (keep_log(ms))
1434				DMEMIT(" keep_log");
1435		}
1436
1437		break;
1438	}
1439}
1440
1441static int mirror_iterate_devices(struct dm_target *ti,
1442				  iterate_devices_callout_fn fn, void *data)
1443{
1444	struct mirror_set *ms = ti->private;
1445	int ret = 0;
1446	unsigned i;
1447
1448	for (i = 0; !ret && i < ms->nr_mirrors; i++)
1449		ret = fn(ti, ms->mirror[i].dev,
1450			 ms->mirror[i].offset, ti->len, data);
1451
1452	return ret;
1453}
1454
1455static struct target_type mirror_target = {
1456	.name	 = "mirror",
1457	.version = {1, 14, 0},
1458	.module	 = THIS_MODULE,
1459	.ctr	 = mirror_ctr,
1460	.dtr	 = mirror_dtr,
1461	.map	 = mirror_map,
1462	.end_io	 = mirror_end_io,
1463	.presuspend = mirror_presuspend,
1464	.postsuspend = mirror_postsuspend,
1465	.resume	 = mirror_resume,
1466	.status	 = mirror_status,
1467	.iterate_devices = mirror_iterate_devices,
1468};
1469
1470static int __init dm_mirror_init(void)
1471{
1472	int r;
1473
1474	r = dm_register_target(&mirror_target);
1475	if (r < 0) {
1476		DMERR("Failed to register mirror target");
1477		goto bad_target;
1478	}
1479
1480	return 0;
1481
1482bad_target:
1483	return r;
1484}
1485
1486static void __exit dm_mirror_exit(void)
1487{
1488	dm_unregister_target(&mirror_target);
1489}
1490
1491/* Module hooks */
1492module_init(dm_mirror_init);
1493module_exit(dm_mirror_exit);
1494
1495MODULE_DESCRIPTION(DM_NAME " mirror target");
1496MODULE_AUTHOR("Joe Thornber");
1497MODULE_LICENSE("GPL");