v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * raid1.c : Multiple Devices driver for Linux
   4 *
   5 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
   6 *
   7 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   8 *
   9 * RAID-1 management functions.
  10 *
  11 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  12 *
  13 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
  14 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  15 *
  16 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
  17 * bitmapped intelligence in resync:
  18 *
  19 *      - bitmap marked during normal i/o
  20 *      - bitmap used to skip nondirty blocks during sync
  21 *
  22 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
  23 * - persistent bitmap code
  24 */
  25
  26#include <linux/slab.h>
  27#include <linux/delay.h>
  28#include <linux/blkdev.h>
  29#include <linux/module.h>
  30#include <linux/seq_file.h>
  31#include <linux/ratelimit.h>
  32#include <linux/interval_tree_generic.h>
  33
  34#include <trace/events/block.h>
  35
  36#include "md.h"
  37#include "raid1.h"
  38#include "md-bitmap.h"
  39
  40#define UNSUPPORTED_MDDEV_FLAGS		\
  41	((1L << MD_HAS_JOURNAL) |	\
  42	 (1L << MD_JOURNAL_CLEAN) |	\
  43	 (1L << MD_HAS_PPL) |		\
  44	 (1L << MD_HAS_MULTIPLE_PPLS))
  45
  46static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
  47static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
  48
  49#define raid1_log(md, fmt, args...)				\
  50	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
  51
  52#include "raid1-10.c"
  53
  54#define START(node) ((node)->start)
  55#define LAST(node) ((node)->last)
  56INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
  57		     START, LAST, static inline, raid1_rb);
  58
  59static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
  60				struct serial_info *si, int idx)
  61{
  62	unsigned long flags;
  63	int ret = 0;
  64	sector_t lo = r1_bio->sector;
  65	sector_t hi = lo + r1_bio->sectors;
  66	struct serial_in_rdev *serial = &rdev->serial[idx];
  67
  68	spin_lock_irqsave(&serial->serial_lock, flags);
  69	/* collision happened */
  70	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
  71		ret = -EBUSY;
  72	else {
  73		si->start = lo;
  74		si->last = hi;
  75		raid1_rb_insert(si, &serial->serial_rb);
  76	}
  77	spin_unlock_irqrestore(&serial->serial_lock, flags);
  78
  79	return ret;
  80}
  81
  82static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
  83{
  84	struct mddev *mddev = rdev->mddev;
  85	struct serial_info *si;
  86	int idx = sector_to_idx(r1_bio->sector);
  87	struct serial_in_rdev *serial = &rdev->serial[idx];
  88
  89	if (WARN_ON(!mddev->serial_info_pool))
  90		return;
  91	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
  92	wait_event(serial->serial_io_wait,
  93		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
  94}
  95
  96static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
  97{
  98	struct serial_info *si;
  99	unsigned long flags;
 100	int found = 0;
 101	struct mddev *mddev = rdev->mddev;
 102	int idx = sector_to_idx(lo);
 103	struct serial_in_rdev *serial = &rdev->serial[idx];
 104
 105	spin_lock_irqsave(&serial->serial_lock, flags);
 106	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
 107	     si; si = raid1_rb_iter_next(si, lo, hi)) {
 108		if (si->start == lo && si->last == hi) {
 109			raid1_rb_remove(si, &serial->serial_rb);
 110			mempool_free(si, mddev->serial_info_pool);
 111			found = 1;
 112			break;
 113		}
 114	}
 115	if (!found)
 116		WARN(1, "The write IO is not recorded for serialization\n");
 117	spin_unlock_irqrestore(&serial->serial_lock, flags);
 118	wake_up(&serial->serial_io_wait);
 119}
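
Taken together, wait_for_serialization() and remove_serial() bracket each serialized write: check_and_add_serial() records the write's sector span and fails with -EBUSY while any recorded span overlaps it, and the completion path removes the span and wakes waiters. Below is a minimal userspace model of just the overlap test (illustrative only; the kernel uses the raid1_rb_* interval tree generated above, with hi = lo + sectors):

#include <assert.h>
#include <stdbool.h>

struct span { unsigned long long start, last; };

/* Mirrors the raid1_rb_iter_first() overlap test on [start, last]. */
static bool spans_overlap(struct span a, struct span b)
{
	return a.start <= b.last && a.last >= b.start;
}

int main(void)
{
	struct span inflight = { .start = 100, .last = 108 };	/* lo, lo + sectors */

	assert(spans_overlap(inflight, (struct span){ 104, 112 }));	/* collision: -EBUSY */
	assert(!spans_overlap(inflight, (struct span){ 200, 208 }));	/* disjoint: insert */
	return 0;
}
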
 120
 121/*
 122 * for resync bio, r1bio pointer can be retrieved from the per-bio
 123 * 'struct resync_pages'.
 124 */
 125static inline struct r1bio *get_resync_r1bio(struct bio *bio)
 126{
 127	return get_resync_pages(bio)->raid_bio;
 128}
 129
 130static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 131{
 132	struct pool_info *pi = data;
 133	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
 134
 135	/* allocate a r1bio with room for raid_disks entries in the bios array */
 136	return kzalloc(size, gfp_flags);
 137}
 138
 139#define RESYNC_DEPTH 32
 140#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 141#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
 142#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
 143#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
 144#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
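
Assuming RESYNC_BLOCK_SIZE is 64 KiB (its value in raid1-10.c in this version), these constants work out to 128 sectors per resync block, a 2 MiB resync window, and a 32 MiB cluster resync window. A standalone sanity check of that arithmetic, under the same assumption:

#include <assert.h>

#define RESYNC_BLOCK_SIZE (64 * 1024)	/* assumed, from raid1-10.c */
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)

int main(void)
{
	assert(RESYNC_SECTORS == 128);			/* 64 KiB in 512-byte sectors */
	assert(RESYNC_WINDOW == 2 * 1024 * 1024);	/* 2 MiB */
	assert(RESYNC_WINDOW_SECTORS == 4096);
	assert(CLUSTER_RESYNC_WINDOW == 32 * 1024 * 1024);	/* 32 MiB */
	return 0;
}
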
 145
 146static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 147{
 148	struct pool_info *pi = data;
 149	struct r1bio *r1_bio;
 150	struct bio *bio;
 151	int need_pages;
 152	int j;
 153	struct resync_pages *rps;
 154
 155	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
 156	if (!r1_bio)
 157		return NULL;
 158
 159	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
 160			    gfp_flags);
 161	if (!rps)
 162		goto out_free_r1bio;
 163
 164	/*
 165	 * Allocate bios : 1 for reading, n-1 for writing
 166	 */
 167	for (j = pi->raid_disks ; j-- ; ) {
 168		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 169		if (!bio)
 170			goto out_free_bio;
 171		r1_bio->bios[j] = bio;
 172	}
 173	/*
 174	 * Allocate RESYNC_PAGES data pages and attach them to
 175	 * the first bio.
 176	 * If this is a user-requested check/repair, allocate
 177	 * RESYNC_PAGES for each bio.
 178	 */
 179	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
 180		need_pages = pi->raid_disks;
 181	else
 182		need_pages = 1;
 183	for (j = 0; j < pi->raid_disks; j++) {
 184		struct resync_pages *rp = &rps[j];
 185
 186		bio = r1_bio->bios[j];
 187
 188		if (j < need_pages) {
 189			if (resync_alloc_pages(rp, gfp_flags))
 190				goto out_free_pages;
 191		} else {
 192			memcpy(rp, &rps[0], sizeof(*rp));
 193			resync_get_all_pages(rp);
 194		}
 195
 196		rp->raid_bio = r1_bio;
 197		bio->bi_private = rp;
 198	}
 199
 200	r1_bio->master_bio = NULL;
 201
 202	return r1_bio;
 203
 204out_free_pages:
 205	while (--j >= 0)
 206		resync_free_pages(&rps[j]);
 207
 208out_free_bio:
 209	while (++j < pi->raid_disks)
 210		bio_put(r1_bio->bios[j]);
 211	kfree(rps);
 212
 213out_free_r1bio:
 214	rbio_pool_free(r1_bio, data);
 215	return NULL;
 216}
 217
 218static void r1buf_pool_free(void *__r1_bio, void *data)
 219{
 220	struct pool_info *pi = data;
 221	int i;
 222	struct r1bio *r1bio = __r1_bio;
 223	struct resync_pages *rp = NULL;
 224
 225	for (i = pi->raid_disks; i--; ) {
 226		rp = get_resync_pages(r1bio->bios[i]);
 227		resync_free_pages(rp);
 228		bio_put(r1bio->bios[i]);
 229	}
 230
 231	/* resync pages array stored in the 1st bio's .bi_private */
 232	kfree(rp);
 233
 234	rbio_pool_free(r1bio, data);
 235}
 236
 237static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
 238{
 239	int i;
 240
 241	for (i = 0; i < conf->raid_disks * 2; i++) {
 242		struct bio **bio = r1_bio->bios + i;
 243		if (!BIO_SPECIAL(*bio))
 244			bio_put(*bio);
 245		*bio = NULL;
 246	}
 247}
 248
 249static void free_r1bio(struct r1bio *r1_bio)
 250{
 251	struct r1conf *conf = r1_bio->mddev->private;
 252
 253	put_all_bios(conf, r1_bio);
 254	mempool_free(r1_bio, &conf->r1bio_pool);
 255}
 256
 257static void put_buf(struct r1bio *r1_bio)
 258{
 259	struct r1conf *conf = r1_bio->mddev->private;
 260	sector_t sect = r1_bio->sector;
 261	int i;
 262
 263	for (i = 0; i < conf->raid_disks * 2; i++) {
 264		struct bio *bio = r1_bio->bios[i];
 265		if (bio->bi_end_io)
 266			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
 267	}
 268
 269	mempool_free(r1_bio, &conf->r1buf_pool);
 270
 271	lower_barrier(conf, sect);
 272}
 273
 274static void reschedule_retry(struct r1bio *r1_bio)
 275{
 276	unsigned long flags;
 277	struct mddev *mddev = r1_bio->mddev;
 278	struct r1conf *conf = mddev->private;
 279	int idx;
 280
 281	idx = sector_to_idx(r1_bio->sector);
 282	spin_lock_irqsave(&conf->device_lock, flags);
 283	list_add(&r1_bio->retry_list, &conf->retry_list);
 284	atomic_inc(&conf->nr_queued[idx]);
 285	spin_unlock_irqrestore(&conf->device_lock, flags);
 286
 287	wake_up(&conf->wait_barrier);
 288	md_wakeup_thread(mddev->thread);
 289}
 290
 291/*
 292 * raid_end_bio_io() is called when we have finished servicing a mirrored
 293 * operation and are ready to return a success/failure code to the buffer
 294 * cache layer.
 295 */
 296static void call_bio_endio(struct r1bio *r1_bio)
 297{
 298	struct bio *bio = r1_bio->master_bio;
 299
 300	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
 301		bio->bi_status = BLK_STS_IOERR;
 302
 303	bio_endio(bio);
 304}
 305
 306static void raid_end_bio_io(struct r1bio *r1_bio)
 307{
 308	struct bio *bio = r1_bio->master_bio;
 309	struct r1conf *conf = r1_bio->mddev->private;
 310
 311	/* if nobody has done the final endio yet, do it now */
 312	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 313		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
 314			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
 315			 (unsigned long long) bio->bi_iter.bi_sector,
 316			 (unsigned long long) bio_end_sector(bio) - 1);
 317
 318		call_bio_endio(r1_bio);
 319	}
 320	/*
 321	 * Wake up any possible resync thread that waits for the device
 322	 * to go idle.  All I/Os, even write-behind writes, are done.
 323	 */
 324	allow_barrier(conf, r1_bio->sector);
 325
 326	free_r1bio(r1_bio);
 327}
 328
 329/*
 330 * Update disk head position estimator based on IRQ completion info.
 331 */
 332static inline void update_head_pos(int disk, struct r1bio *r1_bio)
 333{
 334	struct r1conf *conf = r1_bio->mddev->private;
 335
 336	conf->mirrors[disk].head_position =
 337		r1_bio->sector + (r1_bio->sectors);
 338}
 339
 340/*
 341 * Find the disk number which triggered given bio
 342 */
 343static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 344{
 345	int mirror;
 346	struct r1conf *conf = r1_bio->mddev->private;
 347	int raid_disks = conf->raid_disks;
 348
 349	for (mirror = 0; mirror < raid_disks * 2; mirror++)
 350		if (r1_bio->bios[mirror] == bio)
 351			break;
 352
 353	BUG_ON(mirror == raid_disks * 2);
 354	update_head_pos(mirror, r1_bio);
 355
 356	return mirror;
 357}
 358
 359static void raid1_end_read_request(struct bio *bio)
 360{
 361	int uptodate = !bio->bi_status;
 362	struct r1bio *r1_bio = bio->bi_private;
 363	struct r1conf *conf = r1_bio->mddev->private;
 364	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
 365
 366	/*
 367	 * this branch is our 'one mirror IO has finished' event handler:
 368	 */
 369	update_head_pos(r1_bio->read_disk, r1_bio);
 370
 371	if (uptodate)
 372		set_bit(R1BIO_Uptodate, &r1_bio->state);
 373	else if (test_bit(FailFast, &rdev->flags) &&
 374		 test_bit(R1BIO_FailFast, &r1_bio->state))
 375		/* This was a fail-fast read so we definitely
 376		 * want to retry */
 377		;
 378	else {
 379		/* If all other devices have failed, we want to return
 380		 * the error upwards rather than fail the last device.
 381		 * Here we redefine "uptodate" to mean "Don't want to retry"
 382		 */
 383		unsigned long flags;
 384		spin_lock_irqsave(&conf->device_lock, flags);
 385		if (r1_bio->mddev->degraded == conf->raid_disks ||
 386		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
 387		     test_bit(In_sync, &rdev->flags)))
 388			uptodate = 1;
 389		spin_unlock_irqrestore(&conf->device_lock, flags);
 390	}
 391
 392	if (uptodate) {
 393		raid_end_bio_io(r1_bio);
 394		rdev_dec_pending(rdev, conf->mddev);
 395	} else {
 396		/*
 397		 * oops, read error:
 398		 */
 399		char b[BDEVNAME_SIZE];
 400		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
 401				   mdname(conf->mddev),
 402				   bdevname(rdev->bdev, b),
 403				   (unsigned long long)r1_bio->sector);
 404		set_bit(R1BIO_ReadError, &r1_bio->state);
 405		reschedule_retry(r1_bio);
 406		/* don't drop the reference on read_disk yet */
 407	}
 408}
 409
 410static void close_write(struct r1bio *r1_bio)
 411{
 412	/* it really is the end of this request */
 413	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 414		bio_free_pages(r1_bio->behind_master_bio);
 415		bio_put(r1_bio->behind_master_bio);
 416		r1_bio->behind_master_bio = NULL;
 417	}
 418	/* clear the bitmap if all writes complete successfully */
 419	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
 420			   r1_bio->sectors,
 421			   !test_bit(R1BIO_Degraded, &r1_bio->state),
 422			   test_bit(R1BIO_BehindIO, &r1_bio->state));
 423	md_write_end(r1_bio->mddev);
 424}
 425
 426static void r1_bio_write_done(struct r1bio *r1_bio)
 427{
 428	if (!atomic_dec_and_test(&r1_bio->remaining))
 429		return;
 430
 431	if (test_bit(R1BIO_WriteError, &r1_bio->state))
 432		reschedule_retry(r1_bio);
 433	else {
 434		close_write(r1_bio);
 435		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
 436			reschedule_retry(r1_bio);
 437		else
 438			raid_end_bio_io(r1_bio);
 439	}
 440}
 441
 442static void raid1_end_write_request(struct bio *bio)
 443{
 444	struct r1bio *r1_bio = bio->bi_private;
 445	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 446	struct r1conf *conf = r1_bio->mddev->private;
 447	struct bio *to_put = NULL;
 448	int mirror = find_bio_disk(r1_bio, bio);
 449	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
 450	bool discard_error;
 451	sector_t lo = r1_bio->sector;
 452	sector_t hi = r1_bio->sector + r1_bio->sectors;
 453
 454	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 455
 456	/*
 457	 * 'one mirror IO has finished' event handler:
 458	 */
 459	if (bio->bi_status && !discard_error) {
 460		set_bit(WriteErrorSeen,	&rdev->flags);
 461		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 462			set_bit(MD_RECOVERY_NEEDED, &
 463				conf->mddev->recovery);
 464
 465		if (test_bit(FailFast, &rdev->flags) &&
 466		    (bio->bi_opf & MD_FAILFAST) &&
 467		    /* We never try FailFast to WriteMostly devices */
 468		    !test_bit(WriteMostly, &rdev->flags)) {
 469			md_error(r1_bio->mddev, rdev);
 470		}
 471
 472		/*
 473		 * When the device is faulty, it is not necessary to
 474		 * handle write error.
 475		 * For failfast, this is the only remaining device,
 476		 * so we need to retry the write without FailFast.
 477		 */
 478		if (!test_bit(Faulty, &rdev->flags))
 479			set_bit(R1BIO_WriteError, &r1_bio->state);
 480		else {
 481			/* Finished with this branch */
 482			r1_bio->bios[mirror] = NULL;
 483			to_put = bio;
 484		}
 485	} else {
 486		/*
 487		 * Set R1BIO_Uptodate in our master bio, so that we
 488		 * will return a good error code to the higher
 489		 * levels even if IO on some other mirrored buffer
 490		 * fails.
 491		 *
 492		 * The 'master' represents the composite IO operation
 493		 * to user-side. So if something waits for IO, then it
 494		 * will wait for the 'master' bio.
 495		 */
 496		sector_t first_bad;
 497		int bad_sectors;
 498
 499		r1_bio->bios[mirror] = NULL;
 500		to_put = bio;
 501		/*
 502		 * Do not set R1BIO_Uptodate if the current device is
 503		 * rebuilding or Faulty. This is because we cannot use
 504		 * such device for properly reading the data back (we could
 505		 * potentially use it, if the current write fell before
 506		 * rdev->recovery_offset, but for simplicity we don't
 507		 * check this here.)
 508		 */
 509		if (test_bit(In_sync, &rdev->flags) &&
 510		    !test_bit(Faulty, &rdev->flags))
 511			set_bit(R1BIO_Uptodate, &r1_bio->state);
 512
 513		/* Maybe we can clear some bad blocks. */
 514		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
 515				&first_bad, &bad_sectors) && !discard_error) {
 516			r1_bio->bios[mirror] = IO_MADE_GOOD;
 517			set_bit(R1BIO_MadeGood, &r1_bio->state);
 518		}
 519	}
 520
 521	if (behind) {
 522		if (test_bit(CollisionCheck, &rdev->flags))
 523			remove_serial(rdev, lo, hi);
 524		if (test_bit(WriteMostly, &rdev->flags))
 525			atomic_dec(&r1_bio->behind_remaining);
 526
 527		/*
 528		 * In behind mode, we ACK the master bio once the I/O
 529		 * has safely reached all non-writemostly
 530		 * disks. Setting the Returned bit ensures that this
 531		 * gets done only once -- we don't ever want to return
 532		 * -EIO here, instead we'll wait
 533		 */
 534		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
 535		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
 536			/* Maybe we can return now */
 537			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 538				struct bio *mbio = r1_bio->master_bio;
 539				pr_debug("raid1: behind end write sectors"
 540					 " %llu-%llu\n",
 541					 (unsigned long long) mbio->bi_iter.bi_sector,
 542					 (unsigned long long) bio_end_sector(mbio) - 1);
 543				call_bio_endio(r1_bio);
 544			}
 545		}
 546	} else if (rdev->mddev->serialize_policy)
 547		remove_serial(rdev, lo, hi);
 548	if (r1_bio->bios[mirror] == NULL)
 549		rdev_dec_pending(rdev, conf->mddev);
 550
 551	/*
 552	 * Let's see if all mirrored write operations have finished
 553	 * already.
 554	 */
 555	r1_bio_write_done(r1_bio);
 556
 557	if (to_put)
 558		bio_put(to_put);
 559}
 560
 561static sector_t align_to_barrier_unit_end(sector_t start_sector,
 562					  sector_t sectors)
 563{
 564	sector_t len;
 565
 566	WARN_ON(sectors == 0);
 567	/*
 568	 * len is the number of sectors from start_sector to end of the
 569	 * barrier unit which start_sector belongs to.
 570	 */
 571	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
 572	      start_sector;
 573
 574	if (len > sectors)
 575		len = sectors;
 576
 577	return len;
 578}
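
With BARRIER_UNIT_SECTOR_SIZE of 1 << 17 (its value in raid1.h), this clamps an IO so it never crosses a barrier-unit boundary: an IO starting 8 sectors before a boundary is limited to 8 sectors. A standalone model of the same computation, assuming that constant:

#include <stdio.h>

typedef unsigned long long sector_t;

#define BARRIER_UNIT_SECTOR_SIZE (1ULL << 17)	/* assumed, from raid1.h */
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static sector_t align_to_barrier_unit_end(sector_t start_sector, sector_t sectors)
{
	sector_t len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
		       start_sector;

	return len > sectors ? sectors : len;
}

int main(void)
{
	/* 8 sectors left in this unit: a 64-sector IO is clamped to 8. */
	printf("%llu\n", align_to_barrier_unit_end(BARRIER_UNIT_SECTOR_SIZE - 8, 64));
	/* An IO that fits entirely inside its unit is returned unchanged. */
	printf("%llu\n", align_to_barrier_unit_end(0, 64));
	return 0;
}
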
 579
 580/*
 581 * This routine returns the disk from which the requested read should
 582 * be done. There is a per-array 'next expected sequential IO' sector
 583 * number - if this matches on the next IO then we use the last disk.
 584 * There is also a per-disk 'last known head position' sector that is
 585 * maintained from IRQ contexts, both the normal and the resync IO
 586 * completion handlers update this position correctly. If there is no
 587 * perfect sequential match then we pick the disk whose head is closest.
 588 *
 589 * If there are 2 mirrors in the same 2 devices, performance degrades
 590 * because position is mirror, not device based.
 591 *
 592 * The rdev for the device selected will have nr_pending incremented.
 593 */
 594static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 595{
 596	const sector_t this_sector = r1_bio->sector;
 597	int sectors;
 598	int best_good_sectors;
 599	int best_disk, best_dist_disk, best_pending_disk;
 600	int has_nonrot_disk;
 601	int disk;
 602	sector_t best_dist;
 603	unsigned int min_pending;
 604	struct md_rdev *rdev;
 605	int choose_first;
 606	int choose_next_idle;
 607
 608	rcu_read_lock();
 609	/*
 610	 * Check if we can balance. We can balance on the whole
 611	 * device if no resync is going on, or below the resync window.
 612	 * We take the first readable disk when above the resync window.
 613	 */
 614 retry:
 615	sectors = r1_bio->sectors;
 616	best_disk = -1;
 617	best_dist_disk = -1;
 618	best_dist = MaxSector;
 619	best_pending_disk = -1;
 620	min_pending = UINT_MAX;
 621	best_good_sectors = 0;
 622	has_nonrot_disk = 0;
 623	choose_next_idle = 0;
 624	clear_bit(R1BIO_FailFast, &r1_bio->state);
 625
 626	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 627	    (mddev_is_clustered(conf->mddev) &&
 628	    md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 629		    this_sector + sectors)))
 630		choose_first = 1;
 631	else
 632		choose_first = 0;
 633
 634	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
 635		sector_t dist;
 636		sector_t first_bad;
 637		int bad_sectors;
 638		unsigned int pending;
 639		bool nonrot;
 640
 641		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 642		if (r1_bio->bios[disk] == IO_BLOCKED
 643		    || rdev == NULL
 644		    || test_bit(Faulty, &rdev->flags))
 645			continue;
 646		if (!test_bit(In_sync, &rdev->flags) &&
 647		    rdev->recovery_offset < this_sector + sectors)
 648			continue;
 649		if (test_bit(WriteMostly, &rdev->flags)) {
 650			/* Don't balance among write-mostly, just
 651			 * use the first as a last resort */
 652			if (best_dist_disk < 0) {
 653				if (is_badblock(rdev, this_sector, sectors,
 654						&first_bad, &bad_sectors)) {
 655					if (first_bad <= this_sector)
 656						/* Cannot use this */
 657						continue;
 658					best_good_sectors = first_bad - this_sector;
 659				} else
 660					best_good_sectors = sectors;
 661				best_dist_disk = disk;
 662				best_pending_disk = disk;
 663			}
 664			continue;
 665		}
 666		/* This is a reasonable device to use.  It might
 667		 * even be best.
 668		 */
 669		if (is_badblock(rdev, this_sector, sectors,
 670				&first_bad, &bad_sectors)) {
 671			if (best_dist < MaxSector)
 672				/* already have a better device */
 673				continue;
 674			if (first_bad <= this_sector) {
 675				/* cannot read here. If this is the 'primary'
 676				 * device, then we must not read beyond
 677				 * bad_sectors from another device..
 678				 */
 679				bad_sectors -= (this_sector - first_bad);
 680				if (choose_first && sectors > bad_sectors)
 681					sectors = bad_sectors;
 682				if (best_good_sectors > sectors)
 683					best_good_sectors = sectors;
 684
 685			} else {
 686				sector_t good_sectors = first_bad - this_sector;
 687				if (good_sectors > best_good_sectors) {
 688					best_good_sectors = good_sectors;
 689					best_disk = disk;
 690				}
 691				if (choose_first)
 692					break;
 693			}
 694			continue;
 695		} else {
 696			if ((sectors > best_good_sectors) && (best_disk >= 0))
 697				best_disk = -1;
 698			best_good_sectors = sectors;
 699		}
 700
 701		if (best_disk >= 0)
 702			/* At least two disks to choose from so failfast is OK */
 703			set_bit(R1BIO_FailFast, &r1_bio->state);
 704
 705		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 706		has_nonrot_disk |= nonrot;
 707		pending = atomic_read(&rdev->nr_pending);
 708		dist = abs(this_sector - conf->mirrors[disk].head_position);
 709		if (choose_first) {
 710			best_disk = disk;
 711			break;
 712		}
 713		/* Don't change to another disk for sequential reads */
 714		if (conf->mirrors[disk].next_seq_sect == this_sector
 715		    || dist == 0) {
 716			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
 717			struct raid1_info *mirror = &conf->mirrors[disk];
 718
 719			best_disk = disk;
 720			/*
 721			 * If buffered sequential IO size exceeds optimal
 722			 * iosize, check if there is idle disk. If yes, choose
 723			 * the idle disk. read_balance could already choose an
 724			 * idle disk before noticing it's a sequential IO in
 725			 * this disk. This doesn't matter because this disk
 726			 * will idle, next time it will be utilized after the
 727			 * first disk has IO size exceeds optimal iosize. In
 728			 * this way, iosize of the first disk will be optimal
 729			 * iosize at least. iosize of the second disk might be
 730			 * small, but not a big deal since when the second disk
 731			 * starts IO, the first disk is likely still busy.
 732			 */
 733			if (nonrot && opt_iosize > 0 &&
 734			    mirror->seq_start != MaxSector &&
 735			    mirror->next_seq_sect > opt_iosize &&
 736			    mirror->next_seq_sect - opt_iosize >=
 737			    mirror->seq_start) {
 738				choose_next_idle = 1;
 739				continue;
 740			}
 741			break;
 742		}
 743
 744		if (choose_next_idle)
 745			continue;
 746
 747		if (min_pending > pending) {
 748			min_pending = pending;
 749			best_pending_disk = disk;
 750		}
 751
 752		if (dist < best_dist) {
 753			best_dist = dist;
 754			best_dist_disk = disk;
 755		}
 756	}
 757
 758	/*
 759	 * If all disks are rotational, choose the closest disk. If any disk is
 760	 * non-rotational, choose the disk with the fewest pending requests, even
 761	 * if that disk is rotational, which may or may not be optimal for arrays
 762	 * with mixed rotational/non-rotational disks, depending on the workload.
 763	 */
 764	if (best_disk == -1) {
 765		if (has_nonrot_disk || min_pending == 0)
 766			best_disk = best_pending_disk;
 767		else
 768			best_disk = best_dist_disk;
 769	}
 770
 771	if (best_disk >= 0) {
 772		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
 773		if (!rdev)
 774			goto retry;
 775		atomic_inc(&rdev->nr_pending);
 776		sectors = best_good_sectors;
 777
 778		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
 779			conf->mirrors[best_disk].seq_start = this_sector;
 780
 781		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
 782	}
 783	rcu_read_unlock();
 784	*max_sectors = sectors;
 785
 786	return best_disk;
 787}
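
When no disk wins on sequential locality, the fallback described in the comment above decides: with a non-rotational disk in the mix (or an idle disk available) the least-loaded disk wins, otherwise the one with the shortest head travel. A toy model of just that final decision, with hypothetical values:

#include <stdio.h>

int main(void)
{
	int has_nonrot_disk = 1;	/* at least one SSD among the mirrors */
	unsigned int min_pending = 2;	/* queue depth of best_pending_disk */
	int best_pending_disk = 1;
	int best_dist_disk = 0;
	int best_disk;

	if (has_nonrot_disk || min_pending == 0)
		best_disk = best_pending_disk;	/* least in-flight IO wins */
	else
		best_disk = best_dist_disk;	/* shortest head travel wins */

	printf("read from mirror %d\n", best_disk);
	return 0;
}
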
 788
 789static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 790{
 791	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
 792	md_bitmap_unplug(conf->mddev->bitmap);
 793	wake_up(&conf->wait_barrier);
 794
 795	while (bio) { /* submit pending writes */
 796		struct bio *next = bio->bi_next;
 797		struct md_rdev *rdev = (void *)bio->bi_disk;
 798		bio->bi_next = NULL;
 799		bio_set_dev(bio, rdev->bdev);
 800		if (test_bit(Faulty, &rdev->flags)) {
 801			bio_io_error(bio);
 802		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 803				    !blk_queue_discard(bio->bi_disk->queue)))
 804			/* Just ignore it */
 805			bio_endio(bio);
 806		else
 807			submit_bio_noacct(bio);
 808		bio = next;
 809		cond_resched();
 810	}
 811}
 812
 813static void flush_pending_writes(struct r1conf *conf)
 814{
 815	/* Any writes that have been queued but are awaiting
 816	 * bitmap updates get flushed here.
 817	 */
 818	spin_lock_irq(&conf->device_lock);
 819
 820	if (conf->pending_bio_list.head) {
 821		struct blk_plug plug;
 822		struct bio *bio;
 823
 824		bio = bio_list_get(&conf->pending_bio_list);
 825		conf->pending_count = 0;
 826		spin_unlock_irq(&conf->device_lock);
 827
 828		/*
 829		 * As this is called in a wait_event() loop (see freeze_array),
 830		 * current->state might be TASK_UNINTERRUPTIBLE which will
 831		 * cause a warning when we prepare to wait again.  As it is
 832		 * rare that this path is taken, it is perfectly safe to force
 833		 * us to go around the wait_event() loop again, so the warning
 834		 * is a false-positive.  Silence the warning by resetting
 835		 * thread state
 836		 */
 837		__set_current_state(TASK_RUNNING);
 838		blk_start_plug(&plug);
 839		flush_bio_list(conf, bio);
 840		blk_finish_plug(&plug);
 841	} else
 842		spin_unlock_irq(&conf->device_lock);
 843}
 844
 845/* Barriers....
 846 * Sometimes we need to suspend IO while we do something else,
 847 * either some resync/recovery, or reconfigure the array.
 848 * To do this we raise a 'barrier'.
 849 * The 'barrier' is a counter that can be raised multiple times
 850 * to count how many activities are happening which preclude
 851 * normal IO.
 852 * We can only raise the barrier if there is no pending IO.
 853 * i.e. if nr_pending == 0.
 854 * We choose only to raise the barrier if no-one is waiting for the
 855 * barrier to go down.  This means that as soon as an IO request
 856 * is ready, no other operations which require a barrier will start
 857 * until the IO request has had a chance.
 858 *
 859 * So: regular IO calls 'wait_barrier'.  When that returns there
 860 *    is no background IO happening.  It must arrange to call
 861 *    allow_barrier when it has finished its IO.
 862 * background IO calls must call raise_barrier.  Once that returns
 863 *    there is no normal IO happening.  It must arrange to call
 864 *    lower_barrier when the particular background IO completes.
 865 *
 866 * If resync/recovery is interrupted, returns -EINTR;
 867 * Otherwise, returns 0.
 868 */
 869static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
 870{
 871	int idx = sector_to_idx(sector_nr);
 872
 873	spin_lock_irq(&conf->resync_lock);
 874
 875	/* Wait until no block IO is waiting */
 876	wait_event_lock_irq(conf->wait_barrier,
 877			    !atomic_read(&conf->nr_waiting[idx]),
 878			    conf->resync_lock);
 879
 880	/* block any new IO from starting */
 881	atomic_inc(&conf->barrier[idx]);
 882	/*
 883	 * In raise_barrier() we firstly increase conf->barrier[idx] then
 884	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
 885	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
 886	 * A memory barrier here to make sure conf->nr_pending[idx] won't
 887	 * be fetched before conf->barrier[idx] is increased. Otherwise
 888	 * there will be a race between raise_barrier() and _wait_barrier().
 889	 */
 890	smp_mb__after_atomic();
 891
 892	/* For these conditions we must wait:
 893	 * A: while the array is in frozen state
 894	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
 895	 *    exists in the corresponding I/O barrier bucket.
 896	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
 897	 *    reached the max resync count allowed on the current I/O barrier bucket.
 898	 */
 899	wait_event_lock_irq(conf->wait_barrier,
 900			    (!conf->array_frozen &&
 901			     !atomic_read(&conf->nr_pending[idx]) &&
 902			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
 903				test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
 904			    conf->resync_lock);
 905
 906	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
 907		atomic_dec(&conf->barrier[idx]);
 908		spin_unlock_irq(&conf->resync_lock);
 909		wake_up(&conf->wait_barrier);
 910		return -EINTR;
 911	}
 912
 913	atomic_inc(&conf->nr_sync_pending);
 914	spin_unlock_irq(&conf->resync_lock);
 915
 916	return 0;
 917}
 918
 919static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
 920{
 921	int idx = sector_to_idx(sector_nr);
 922
 923	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
 924
 925	atomic_dec(&conf->barrier[idx]);
 926	atomic_dec(&conf->nr_sync_pending);
 927	wake_up(&conf->wait_barrier);
 928}
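
The barrier state is sharded into BARRIER_BUCKETS_NR buckets (1024 with 4 KiB pages, per raid1.h), indexed by sector_to_idx() from raid1-10.c, so a resync in one bucket only blocks regular IO that hashes to the same bucket. A single-threaded userspace sketch of the caller discipline follows, with assertions standing in for the real locking and wait queues (where the kernel would block, this model asserts):

#include <assert.h>

#define BUCKETS 1024	/* stands in for BARRIER_BUCKETS_NR */

static int barrier[BUCKETS];	/* conf->barrier[]: active resync activities */
static int nr_pending[BUCKETS];	/* conf->nr_pending[]: in-flight regular IO */

static void raise_barrier(int idx) { assert(nr_pending[idx] == 0); barrier[idx]++; }
static void lower_barrier(int idx) { assert(barrier[idx] > 0); barrier[idx]--; }
static void wait_barrier(int idx)  { assert(barrier[idx] == 0); nr_pending[idx]++; }
static void allow_barrier(int idx) { nr_pending[idx]--; }

int main(void)
{
	wait_barrier(3);	/* regular IO enters bucket 3 */
	raise_barrier(7);	/* resync proceeds concurrently in bucket 7 */
	allow_barrier(3);	/* the regular IO completes */
	lower_barrier(7);	/* the resync activity completes */
	return 0;
}
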
 929
 930static void _wait_barrier(struct r1conf *conf, int idx)
 931{
 932	/*
 933	 * We need to increase conf->nr_pending[idx] very early here,
 934	 * then raise_barrier() can be blocked when it waits for
 935	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
 936	 * conf->resync_lock when there is no barrier raised in same
 937	 * barrier unit bucket. Also if the array is frozen, I/O
 938	 * should be blocked until array is unfrozen.
 939	 */
 940	atomic_inc(&conf->nr_pending[idx]);
 941	/*
 942	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
 943	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
 944	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
 945	 * barrier is necessary here to make sure conf->barrier[idx] won't be
 946	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
 947	 * will be a race between _wait_barrier() and raise_barrier().
 948	 */
 949	smp_mb__after_atomic();
 950
 951	/*
 952	 * Don't worry about checking two atomic_t variables at the same
 953	 * time here. If the array gets frozen (conf->array_frozen is 1)
 954	 * while we check conf->barrier[idx], and conf->barrier[idx] is
 955	 * 0, it is safe to return and let the I/O continue. Because the
 956	 * array is frozen, all I/O returned here will eventually complete
 957	 * or be queued, so no race will happen. See the code comment in
 958	 * freeze_array().
 959	 */
 960	if (!READ_ONCE(conf->array_frozen) &&
 961	    !atomic_read(&conf->barrier[idx]))
 962		return;
 963
 964	/*
 965	 * After holding conf->resync_lock, conf->nr_pending[idx]
 966	 * should be decreased before waiting for barrier to drop.
 967	 * Otherwise, we may encounter a race condition because
 968	 * raise_barrier() might be waiting for conf->nr_pending[idx]
 969	 * to be 0 at the same time.
 970	 */
 971	spin_lock_irq(&conf->resync_lock);
 972	atomic_inc(&conf->nr_waiting[idx]);
 973	atomic_dec(&conf->nr_pending[idx]);
 974	/*
 975	 * In case freeze_array() is waiting for
 976	 * get_unqueued_pending() == extra
 977	 */
 978	wake_up(&conf->wait_barrier);
 979	/* Wait for the barrier in same barrier unit bucket to drop. */
 980	wait_event_lock_irq(conf->wait_barrier,
 981			    !conf->array_frozen &&
 982			     !atomic_read(&conf->barrier[idx]),
 983			    conf->resync_lock);
 984	atomic_inc(&conf->nr_pending[idx]);
 985	atomic_dec(&conf->nr_waiting[idx]);
 986	spin_unlock_irq(&conf->resync_lock);
 987}
 988
 989static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
 990{
 991	int idx = sector_to_idx(sector_nr);
 992
 993	/*
 994	 * Very similar to _wait_barrier(). The difference is, for read
 995	 * I/O we don't need to wait for sync I/O, but if the whole array
 996	 * is frozen, the read I/O still has to wait until the array is
 997	 * unfrozen. Since there is no ordering requirement with
 998	 * conf->barrier[idx] here, a memory barrier is unnecessary as well.
 999	 */
1000	atomic_inc(&conf->nr_pending[idx]);
1001
1002	if (!READ_ONCE(conf->array_frozen))
1003		return;
1004
1005	spin_lock_irq(&conf->resync_lock);
1006	atomic_inc(&conf->nr_waiting[idx]);
1007	atomic_dec(&conf->nr_pending[idx]);
1008	/*
1009	 * In case freeze_array() is waiting for
1010	 * get_unqueued_pending() == extra
1011	 */
1012	wake_up(&conf->wait_barrier);
1013	/* Wait for array to be unfrozen */
1014	wait_event_lock_irq(conf->wait_barrier,
1015			    !conf->array_frozen,
1016			    conf->resync_lock);
1017	atomic_inc(&conf->nr_pending[idx]);
1018	atomic_dec(&conf->nr_waiting[idx]);
1019	spin_unlock_irq(&conf->resync_lock);
1020}
1021
1022static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1023{
1024	int idx = sector_to_idx(sector_nr);
1025
1026	_wait_barrier(conf, idx);
1027}
1028
1029static void _allow_barrier(struct r1conf *conf, int idx)
1030{
1031	atomic_dec(&conf->nr_pending[idx]);
1032	wake_up(&conf->wait_barrier);
1033}
1034
1035static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1036{
1037	int idx = sector_to_idx(sector_nr);
1038
1039	_allow_barrier(conf, idx);
1040}
1041
1042/* conf->resync_lock should be held */
1043static int get_unqueued_pending(struct r1conf *conf)
1044{
1045	int idx, ret;
1046
1047	ret = atomic_read(&conf->nr_sync_pending);
1048	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1049		ret += atomic_read(&conf->nr_pending[idx]) -
1050			atomic_read(&conf->nr_queued[idx]);
1051
1052	return ret;
1053}
1054
1055static void freeze_array(struct r1conf *conf, int extra)
1056{
1057	/* Stop sync I/O and normal I/O and wait for everything to
1058	 * go quiet.
1059	 * This is called in two situations:
1060	 * 1) management command handlers (reshape, remove disk, quiesce).
1061	 * 2) one normal I/O request failed.
1062
1063	 * After array_frozen is set to 1, new sync IO will be blocked at
1064	 * raise_barrier(), and new normal I/O will blocked at _wait_barrier()
1065	 * or wait_read_barrier(). The flying I/Os will either complete or be
1066	 * queued. When everything goes quiet, there are only queued I/Os left.
1067
1068	 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
1069	 * barrier bucket index which this I/O request hits. When all sync and
1070	 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
1071	 * of all conf->nr_queued[]. But normal I/O failure is an exception,
1072	 * in handle_read_error(), we may call freeze_array() before trying to
1073	 * fix the read error. In this case, the error read I/O is not queued,
1074	 * so get_unqueued_pending() == 1.
1075	 *
1076	 * Therefore before this function returns, we need to wait until
1077	 * get_unqueued_pending(conf) is equal to extra. In the
1078	 * normal I/O error context extra is 1; in all other situations extra is 0.
1079	 */
1080	spin_lock_irq(&conf->resync_lock);
1081	conf->array_frozen = 1;
1082	raid1_log(conf->mddev, "wait freeze");
1083	wait_event_lock_irq_cmd(
1084		conf->wait_barrier,
1085		get_unqueued_pending(conf) == extra,
1086		conf->resync_lock,
1087		flush_pending_writes(conf));
1088	spin_unlock_irq(&conf->resync_lock);
1089}
1090static void unfreeze_array(struct r1conf *conf)
1091{
1092	/* reverse the effect of the freeze */
1093	spin_lock_irq(&conf->resync_lock);
1094	conf->array_frozen = 0;
1095	spin_unlock_irq(&conf->resync_lock);
1096	wake_up(&conf->wait_barrier);
1097}
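
The 'extra' argument encodes how many in-flight IOs are expected to remain unqueued while frozen: the read-error path passes 1 because the failed read it holds is still pending but was never queued, while management paths pass 0. A userspace model of the accounting freeze_array() waits on (illustrative only):

#include <assert.h>

#define BUCKETS 4	/* small stand-in for BARRIER_BUCKETS_NR */

static int nr_pending[BUCKETS], nr_queued[BUCKETS], nr_sync_pending;

static int get_unqueued_pending(void)
{
	int idx, ret = nr_sync_pending;

	for (idx = 0; idx < BUCKETS; idx++)
		ret += nr_pending[idx] - nr_queued[idx];
	return ret;
}

int main(void)
{
	nr_pending[0] = 1;	/* a failed read: pending but never queued */
	assert(get_unqueued_pending() == 1);	/* freeze_array(conf, 1) can return */

	nr_queued[0] = 1;	/* once raid1d queues it for retry... */
	assert(get_unqueued_pending() == 0);	/* ...a freeze with extra == 0 can too */
	return 0;
}
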
1098
1099static void alloc_behind_master_bio(struct r1bio *r1_bio,
1100					   struct bio *bio)
1101{
1102	int size = bio->bi_iter.bi_size;
1103	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1104	int i = 0;
1105	struct bio *behind_bio = NULL;
1106
1107	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1108	if (!behind_bio)
1109		return;
1110
1111	/* discard op, we don't support writezero/writesame yet */
1112	if (!bio_has_data(bio)) {
1113		behind_bio->bi_iter.bi_size = size;
1114		goto skip_copy;
1115	}
1116
1117	behind_bio->bi_write_hint = bio->bi_write_hint;
1118
1119	while (i < vcnt && size) {
1120		struct page *page;
1121		int len = min_t(int, PAGE_SIZE, size);
1122
1123		page = alloc_page(GFP_NOIO);
1124		if (unlikely(!page))
1125			goto free_pages;
1126
1127		bio_add_page(behind_bio, page, len, 0);
1128
1129		size -= len;
1130		i++;
1131	}
1132
1133	bio_copy_data(behind_bio, bio);
1134skip_copy:
1135	r1_bio->behind_master_bio = behind_bio;
1136	set_bit(R1BIO_BehindIO, &r1_bio->state);
1137
1138	return;
1139
1140free_pages:
1141	pr_debug("%dB behind alloc failed, doing sync I/O\n",
1142		 bio->bi_iter.bi_size);
1143	bio_free_pages(behind_bio);
1144	bio_put(behind_bio);
1145}
1146
1147struct raid1_plug_cb {
1148	struct blk_plug_cb	cb;
1149	struct bio_list		pending;
1150	int			pending_cnt;
1151};
1152
1153static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1154{
1155	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1156						  cb);
1157	struct mddev *mddev = plug->cb.data;
1158	struct r1conf *conf = mddev->private;
1159	struct bio *bio;
1160
1161	if (from_schedule || current->bio_list) {
1162		spin_lock_irq(&conf->device_lock);
1163		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1164		conf->pending_count += plug->pending_cnt;
1165		spin_unlock_irq(&conf->device_lock);
1166		wake_up(&conf->wait_barrier);
1167		md_wakeup_thread(mddev->thread);
1168		kfree(plug);
1169		return;
1170	}
1171
1172	/* we aren't scheduling, so we can do the write-out directly. */
1173	bio = bio_list_get(&plug->pending);
1174	flush_bio_list(conf, bio);
1175	kfree(plug);
1176}
1177
1178static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1179{
1180	r1_bio->master_bio = bio;
1181	r1_bio->sectors = bio_sectors(bio);
1182	r1_bio->state = 0;
1183	r1_bio->mddev = mddev;
1184	r1_bio->sector = bio->bi_iter.bi_sector;
1185}
1186
1187static inline struct r1bio *
1188alloc_r1bio(struct mddev *mddev, struct bio *bio)
1189{
1190	struct r1conf *conf = mddev->private;
1191	struct r1bio *r1_bio;
1192
1193	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1194	/* Ensure no bio records IO_BLOCKED */
1195	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1196	init_r1bio(r1_bio, mddev, bio);
1197	return r1_bio;
1198}
1199
1200static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1201			       int max_read_sectors, struct r1bio *r1_bio)
1202{
1203	struct r1conf *conf = mddev->private;
1204	struct raid1_info *mirror;
1205	struct bio *read_bio;
1206	struct bitmap *bitmap = mddev->bitmap;
1207	const int op = bio_op(bio);
1208	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1209	int max_sectors;
1210	int rdisk;
1211	bool print_msg = !!r1_bio;
1212	char b[BDEVNAME_SIZE];
1213
1214	/*
1215	 * If r1_bio is set, we are blocking the raid1d thread
1216	 * so there is a tiny risk of deadlock.  So ask for
1217	 * emergency memory if needed.
1218	 */
1219	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1220
1221	if (print_msg) {
1222		/* Need to get the block device name carefully */
1223		struct md_rdev *rdev;
1224		rcu_read_lock();
1225		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1226		if (rdev)
1227			bdevname(rdev->bdev, b);
1228		else
1229			strcpy(b, "???");
1230		rcu_read_unlock();
1231	}
1232
1233	/*
1234	 * Still need barrier for READ in case that whole
1235	 * array is frozen.
1236	 */
1237	wait_read_barrier(conf, bio->bi_iter.bi_sector);
1238
1239	if (!r1_bio)
1240		r1_bio = alloc_r1bio(mddev, bio);
1241	else
1242		init_r1bio(r1_bio, mddev, bio);
1243	r1_bio->sectors = max_read_sectors;
1244
1245	/*
1246	 * make_request() can abort the operation when read-ahead is being
1247	 * used and no empty request is available.
1248	 */
1249	rdisk = read_balance(conf, r1_bio, &max_sectors);
1250
1251	if (rdisk < 0) {
1252		/* couldn't find anywhere to read from */
1253		if (print_msg) {
1254			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1255					    mdname(mddev),
1256					    b,
1257					    (unsigned long long)r1_bio->sector);
1258		}
1259		raid_end_bio_io(r1_bio);
1260		return;
1261	}
1262	mirror = conf->mirrors + rdisk;
1263
1264	if (print_msg)
1265		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1266				    mdname(mddev),
1267				    (unsigned long long)r1_bio->sector,
1268				    bdevname(mirror->rdev->bdev, b));
1269
1270	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1271	    bitmap) {
1272		/*
1273		 * Reading from a write-mostly device must take care not to
1274		 * over-take any writes that are 'behind'
1275		 */
1276		raid1_log(mddev, "wait behind writes");
1277		wait_event(bitmap->behind_wait,
1278			   atomic_read(&bitmap->behind_writes) == 0);
1279	}
1280
1281	if (max_sectors < bio_sectors(bio)) {
1282		struct bio *split = bio_split(bio, max_sectors,
1283					      gfp, &conf->bio_split);
1284		bio_chain(split, bio);
1285		submit_bio_noacct(bio);
1286		bio = split;
1287		r1_bio->master_bio = bio;
1288		r1_bio->sectors = max_sectors;
1289	}
1290
1291	r1_bio->read_disk = rdisk;
1292
1293	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1294
1295	r1_bio->bios[rdisk] = read_bio;
1296
1297	read_bio->bi_iter.bi_sector = r1_bio->sector +
1298		mirror->rdev->data_offset;
1299	bio_set_dev(read_bio, mirror->rdev->bdev);
1300	read_bio->bi_end_io = raid1_end_read_request;
1301	bio_set_op_attrs(read_bio, op, do_sync);
1302	if (test_bit(FailFast, &mirror->rdev->flags) &&
1303	    test_bit(R1BIO_FailFast, &r1_bio->state))
1304	        read_bio->bi_opf |= MD_FAILFAST;
1305	read_bio->bi_private = r1_bio;
1306
1307	if (mddev->gendisk)
1308	        trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
1309				disk_devt(mddev->gendisk), r1_bio->sector);
1310
1311	submit_bio_noacct(read_bio);
1312}
1313
1314static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1315				int max_write_sectors)
1316{
1317	struct r1conf *conf = mddev->private;
1318	struct r1bio *r1_bio;
1319	int i, disks;
1320	struct bitmap *bitmap = mddev->bitmap;
1321	unsigned long flags;
1322	struct md_rdev *blocked_rdev;
1323	struct blk_plug_cb *cb;
1324	struct raid1_plug_cb *plug = NULL;
1325	int first_clone;
1326	int max_sectors;
1327
1328	if (mddev_is_clustered(mddev) &&
1329	     md_cluster_ops->area_resyncing(mddev, WRITE,
1330		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1331
1332		DEFINE_WAIT(w);
1333		for (;;) {
1334			prepare_to_wait(&conf->wait_barrier,
1335					&w, TASK_IDLE);
1336			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1337							bio->bi_iter.bi_sector,
1338							bio_end_sector(bio)))
1339				break;
1340			schedule();
1341		}
1342		finish_wait(&conf->wait_barrier, &w);
1343	}
1344
1345	/*
1346	 * Register the new request and wait if the reconstruction
1347	 * thread has put up a bar for new requests.
1348	 * Continue immediately if no resync is active currently.
1349	 */
1350	wait_barrier(conf, bio->bi_iter.bi_sector);
1351
1352	r1_bio = alloc_r1bio(mddev, bio);
1353	r1_bio->sectors = max_write_sectors;
1354
1355	if (conf->pending_count >= max_queued_requests) {
1356		md_wakeup_thread(mddev->thread);
1357		raid1_log(mddev, "wait queued");
1358		wait_event(conf->wait_barrier,
1359			   conf->pending_count < max_queued_requests);
1360	}
1361	/* first select target devices under rcu_lock and
1362	 * inc refcount on their rdev.  Record them by setting
1363	 * bios[x] to bio
1364	 * If there are known/acknowledged bad blocks on any device on
1365	 * which we have seen a write error, we want to avoid writing those
1366	 * blocks.
1367	 * This potentially requires several writes to write around
1368	 * the bad blocks.  Each set of writes gets its own r1bio
1369	 * with a set of bios attached.
1370	 */
1371
1372	disks = conf->raid_disks * 2;
1373 retry_write:
1374	blocked_rdev = NULL;
1375	rcu_read_lock();
1376	max_sectors = r1_bio->sectors;
1377	for (i = 0;  i < disks; i++) {
1378		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1379		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1380			atomic_inc(&rdev->nr_pending);
1381			blocked_rdev = rdev;
1382			break;
1383		}
1384		r1_bio->bios[i] = NULL;
1385		if (!rdev || test_bit(Faulty, &rdev->flags)) {
1386			if (i < conf->raid_disks)
1387				set_bit(R1BIO_Degraded, &r1_bio->state);
1388			continue;
1389		}
1390
1391		atomic_inc(&rdev->nr_pending);
1392		if (test_bit(WriteErrorSeen, &rdev->flags)) {
1393			sector_t first_bad;
1394			int bad_sectors;
1395			int is_bad;
1396
1397			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1398					     &first_bad, &bad_sectors);
1399			if (is_bad < 0) {
1400				/* mustn't write here until the bad block is
1401				 * acknowledged */
1402				set_bit(BlockedBadBlocks, &rdev->flags);
1403				blocked_rdev = rdev;
1404				break;
1405			}
1406			if (is_bad && first_bad <= r1_bio->sector) {
1407				/* Cannot write here at all */
1408				bad_sectors -= (r1_bio->sector - first_bad);
1409				if (bad_sectors < max_sectors)
1410					/* mustn't write more than bad_sectors
1411					 * to other devices yet
1412					 */
1413					max_sectors = bad_sectors;
1414				rdev_dec_pending(rdev, mddev);
1415				/* We don't set R1BIO_Degraded as that
1416				 * only applies if the disk is
1417				 * missing, so it might be re-added,
1418				 * and we want to know to recover this
1419				 * chunk.
1420				 * In this case the device is here,
1421				 * and the fact that this chunk is not
1422				 * in-sync is recorded in the bad
1423				 * block log
1424				 */
1425				continue;
1426			}
1427			if (is_bad) {
1428				int good_sectors = first_bad - r1_bio->sector;
1429				if (good_sectors < max_sectors)
1430					max_sectors = good_sectors;
1431			}
1432		}
1433		r1_bio->bios[i] = bio;
1434	}
1435	rcu_read_unlock();
1436
1437	if (unlikely(blocked_rdev)) {
1438		/* Wait for this device to become unblocked */
1439		int j;
1440
1441		for (j = 0; j < i; j++)
1442			if (r1_bio->bios[j])
1443				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1444		r1_bio->state = 0;
1445		allow_barrier(conf, bio->bi_iter.bi_sector);
1446		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1447		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1448		wait_barrier(conf, bio->bi_iter.bi_sector);
1449		goto retry_write;
1450	}
1451
1452	if (max_sectors < bio_sectors(bio)) {
1453		struct bio *split = bio_split(bio, max_sectors,
1454					      GFP_NOIO, &conf->bio_split);
1455		bio_chain(split, bio);
1456		submit_bio_noacct(bio);
1457		bio = split;
1458		r1_bio->master_bio = bio;
1459		r1_bio->sectors = max_sectors;
1460	}
1461
1462	atomic_set(&r1_bio->remaining, 1);
1463	atomic_set(&r1_bio->behind_remaining, 0);
1464
1465	first_clone = 1;
1466
1467	for (i = 0; i < disks; i++) {
1468		struct bio *mbio = NULL;
1469		struct md_rdev *rdev = conf->mirrors[i].rdev;
1470		if (!r1_bio->bios[i])
1471			continue;
1472
1473		if (first_clone) {
1474			/* do behind I/O ?
1475			 * Not if there are too many, or we cannot
1476			 * allocate memory, or a reader on WriteMostly
1477			 * is waiting for behind writes to flush */
1478			if (bitmap &&
1479			    (atomic_read(&bitmap->behind_writes)
1480			     < mddev->bitmap_info.max_write_behind) &&
1481			    !waitqueue_active(&bitmap->behind_wait)) {
1482				alloc_behind_master_bio(r1_bio, bio);
1483			}
1484
1485			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1486					     test_bit(R1BIO_BehindIO, &r1_bio->state));
1487			first_clone = 0;
1488		}
1489
1490		if (r1_bio->behind_master_bio)
1491			mbio = bio_clone_fast(r1_bio->behind_master_bio,
1492					      GFP_NOIO, &mddev->bio_set);
1493		else
1494			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1495
1496		if (r1_bio->behind_master_bio) {
1497			if (test_bit(CollisionCheck, &rdev->flags))
1498				wait_for_serialization(rdev, r1_bio);
1499			if (test_bit(WriteMostly, &rdev->flags))
1500				atomic_inc(&r1_bio->behind_remaining);
1501		} else if (mddev->serialize_policy)
1502			wait_for_serialization(rdev, r1_bio);
1503
1504		r1_bio->bios[i] = mbio;
1505
1506		mbio->bi_iter.bi_sector	= (r1_bio->sector +
1507				   conf->mirrors[i].rdev->data_offset);
1508		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1509		mbio->bi_end_io	= raid1_end_write_request;
1510		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1511		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1512		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1513		    conf->raid_disks - mddev->degraded > 1)
1514			mbio->bi_opf |= MD_FAILFAST;
1515		mbio->bi_private = r1_bio;
1516
1517		atomic_inc(&r1_bio->remaining);
1518
1519		if (mddev->gendisk)
1520			trace_block_bio_remap(mbio->bi_disk->queue,
1521					      mbio, disk_devt(mddev->gendisk),
1522					      r1_bio->sector);
1523		/* flush_pending_writes() needs access to the rdev so...*/
1524		mbio->bi_disk = (void *)conf->mirrors[i].rdev;
1525
1526		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1527		if (cb)
1528			plug = container_of(cb, struct raid1_plug_cb, cb);
1529		else
1530			plug = NULL;
1531		if (plug) {
1532			bio_list_add(&plug->pending, mbio);
1533			plug->pending_cnt++;
1534		} else {
1535			spin_lock_irqsave(&conf->device_lock, flags);
1536			bio_list_add(&conf->pending_bio_list, mbio);
1537			conf->pending_count++;
1538			spin_unlock_irqrestore(&conf->device_lock, flags);
1539			md_wakeup_thread(mddev->thread);
1540		}
1541	}
1542
1543	r1_bio_write_done(r1_bio);
1544
1545	/* In case raid1d snuck in to freeze_array */
1546	wake_up(&conf->wait_barrier);
1547}
1548
1549static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1550{
1551	sector_t sectors;
1552
1553	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1554	    && md_flush_request(mddev, bio))
1555		return true;
1556
1557	/*
1558	 * There is a limit to the maximum size, but
1559	 * the read/write handler might find a lower limit
1560	 * due to bad blocks.  To avoid multiple splits,
1561	 * we pass the maximum number of sectors down
1562	 * and let the lower level perform the split.
1563	 */
1564	sectors = align_to_barrier_unit_end(
1565		bio->bi_iter.bi_sector, bio_sectors(bio));
1566
1567	if (bio_data_dir(bio) == READ)
1568		raid1_read_request(mddev, bio, sectors, NULL);
1569	else {
1570		if (!md_write_start(mddev,bio))
1571			return false;
1572		raid1_write_request(mddev, bio, sectors);
1573	}
1574	return true;
1575}
1576
1577static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1578{
1579	struct r1conf *conf = mddev->private;
1580	int i;
1581
1582	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1583		   conf->raid_disks - mddev->degraded);
1584	rcu_read_lock();
1585	for (i = 0; i < conf->raid_disks; i++) {
1586		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1587		seq_printf(seq, "%s",
1588			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1589	}
1590	rcu_read_unlock();
1591	seq_printf(seq, "]");
1592}
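
This is what renders the '[2/2] [UU]' portion of a raid1 line in /proc/mdstat; a two-disk mirror with one failed member shows '[2/1] [U_]'. A userspace rendering of the same format:

#include <stdio.h>

int main(void)
{
	int raid_disks = 2, degraded = 1;
	int in_sync[] = { 1, 0 };	/* disk 1 has failed */

	printf(" [%d/%d] [", raid_disks, raid_disks - degraded);
	for (int i = 0; i < raid_disks; i++)
		printf("%s", in_sync[i] ? "U" : "_");
	printf("]\n");	/* -> " [2/1] [U_]" */
	return 0;
}
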
1593
1594static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1595{
1596	char b[BDEVNAME_SIZE];
1597	struct r1conf *conf = mddev->private;
1598	unsigned long flags;
1599
1600	/*
1601	 * If it is not operational, then we have already marked it as dead;
1602	 * else if it is the last working disk with "fail_last_dev == false",
1603	 * ignore the error and let the next level up know;
1604	 * else mark the drive as failed.
1605	 */
1606	spin_lock_irqsave(&conf->device_lock, flags);
1607	if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1608	    && (conf->raid_disks - mddev->degraded) == 1) {
1609		/*
1610		 * Don't fail the drive, act as though we were just a
1611		 * normal single drive.
1612		 * However don't try a recovery from this drive as
1613		 * it is very likely to fail.
1614		 */
1615		conf->recovery_disabled = mddev->recovery_disabled;
1616		spin_unlock_irqrestore(&conf->device_lock, flags);
1617		return;
1618	}
1619	set_bit(Blocked, &rdev->flags);
1620	if (test_and_clear_bit(In_sync, &rdev->flags))
1621		mddev->degraded++;
1622	set_bit(Faulty, &rdev->flags);
1623	spin_unlock_irqrestore(&conf->device_lock, flags);
1624	/*
1625	 * if recovery is running, make sure it aborts.
1626	 */
1627	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1628	set_mask_bits(&mddev->sb_flags, 0,
1629		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1630	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1631		"md/raid1:%s: Operation continuing on %d devices.\n",
1632		mdname(mddev), bdevname(rdev->bdev, b),
1633		mdname(mddev), conf->raid_disks - mddev->degraded);
1634}
1635
1636static void print_conf(struct r1conf *conf)
1637{
1638	int i;
1639
1640	pr_debug("RAID1 conf printout:\n");
1641	if (!conf) {
1642		pr_debug("(!conf)\n");
1643		return;
1644	}
1645	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1646		 conf->raid_disks);
1647
1648	rcu_read_lock();
1649	for (i = 0; i < conf->raid_disks; i++) {
1650		char b[BDEVNAME_SIZE];
1651		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1652		if (rdev)
1653			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1654				 i, !test_bit(In_sync, &rdev->flags),
1655				 !test_bit(Faulty, &rdev->flags),
1656				 bdevname(rdev->bdev,b));
1657	}
1658	rcu_read_unlock();
1659}
1660
1661static void close_sync(struct r1conf *conf)
1662{
1663	int idx;
1664
1665	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1666		_wait_barrier(conf, idx);
1667		_allow_barrier(conf, idx);
1668	}
1669
1670	mempool_exit(&conf->r1buf_pool);
1671}
1672
1673static int raid1_spare_active(struct mddev *mddev)
1674{
1675	int i;
1676	struct r1conf *conf = mddev->private;
1677	int count = 0;
1678	unsigned long flags;
1679
1680	/*
1681	 * Find all failed disks within the RAID1 configuration
1682	 * and mark them readable.
1683	 * Called under mddev lock, so rcu protection not needed.
1684	 * device_lock used to avoid races with raid1_end_read_request
1685	 * which expects 'In_sync' flags and ->degraded to be consistent.
1686	 */
1687	spin_lock_irqsave(&conf->device_lock, flags);
1688	for (i = 0; i < conf->raid_disks; i++) {
1689		struct md_rdev *rdev = conf->mirrors[i].rdev;
1690		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1691		if (repl
1692		    && !test_bit(Candidate, &repl->flags)
1693		    && repl->recovery_offset == MaxSector
1694		    && !test_bit(Faulty, &repl->flags)
1695		    && !test_and_set_bit(In_sync, &repl->flags)) {
1696			/* replacement has just become active */
1697			if (!rdev ||
1698			    !test_and_clear_bit(In_sync, &rdev->flags))
1699				count++;
1700			if (rdev) {
1701				/* Replaced device not technically
1702				 * faulty, but we need to be sure
1703				 * it gets removed and never re-added
1704				 */
1705				set_bit(Faulty, &rdev->flags);
1706				sysfs_notify_dirent_safe(
1707					rdev->sysfs_state);
1708			}
1709		}
1710		if (rdev
1711		    && rdev->recovery_offset == MaxSector
1712		    && !test_bit(Faulty, &rdev->flags)
1713		    && !test_and_set_bit(In_sync, &rdev->flags)) {
1714			count++;
1715			sysfs_notify_dirent_safe(rdev->sysfs_state);
1716		}
1717	}
1718	mddev->degraded -= count;
1719	spin_unlock_irqrestore(&conf->device_lock, flags);
1720
1721	print_conf(conf);
1722	return count;
1723}
1724
1725static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1726{
1727	struct r1conf *conf = mddev->private;
1728	int err = -EEXIST;
1729	int mirror = 0;
1730	struct raid1_info *p;
1731	int first = 0;
1732	int last = conf->raid_disks - 1;
1733
1734	if (mddev->recovery_disabled == conf->recovery_disabled)
1735		return -EBUSY;
1736
1737	if (md_integrity_add_rdev(rdev, mddev))
1738		return -ENXIO;
1739
1740	if (rdev->raid_disk >= 0)
1741		first = last = rdev->raid_disk;
1742
1743	/*
1744	 * find the disk ... but prefer rdev->saved_raid_disk
1745	 * if possible.
1746	 */
1747	if (rdev->saved_raid_disk >= 0 &&
1748	    rdev->saved_raid_disk >= first &&
1749	    rdev->saved_raid_disk < conf->raid_disks &&
1750	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1751		first = last = rdev->saved_raid_disk;
1752
1753	for (mirror = first; mirror <= last; mirror++) {
1754		p = conf->mirrors + mirror;
1755		if (!p->rdev) {
1756			if (mddev->gendisk)
1757				disk_stack_limits(mddev->gendisk, rdev->bdev,
1758						  rdev->data_offset << 9);
1759
1760			p->head_position = 0;
1761			rdev->raid_disk = mirror;
1762			err = 0;
1763			/* As all devices are equivalent, we don't need a full recovery
1764			 * if this device was recently a member of the array
1765			 */
1766			if (rdev->saved_raid_disk < 0)
1767				conf->fullsync = 1;
1768			rcu_assign_pointer(p->rdev, rdev);
1769			break;
1770		}
1771		if (test_bit(WantReplacement, &p->rdev->flags) &&
1772		    p[conf->raid_disks].rdev == NULL) {
1773			/* Add this device as a replacement */
1774			clear_bit(In_sync, &rdev->flags);
1775			set_bit(Replacement, &rdev->flags);
1776			rdev->raid_disk = mirror;
1777			err = 0;
1778			conf->fullsync = 1;
1779			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1780			break;
1781		}
1782	}
1783	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1784		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1785	print_conf(conf);
1786	return err;
1787}
1788
1789static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1790{
1791	struct r1conf *conf = mddev->private;
1792	int err = 0;
1793	int number = rdev->raid_disk;
1794	struct raid1_info *p = conf->mirrors + number;
1795
1796	if (rdev != p->rdev)
1797		p = conf->mirrors + conf->raid_disks + number;
1798
1799	print_conf(conf);
1800	if (rdev == p->rdev) {
1801		if (test_bit(In_sync, &rdev->flags) ||
1802		    atomic_read(&rdev->nr_pending)) {
1803			err = -EBUSY;
1804			goto abort;
1805		}
1806		/* Only remove non-faulty devices if recovery
1807		 * is not possible.
1808		 */
1809		if (!test_bit(Faulty, &rdev->flags) &&
1810		    mddev->recovery_disabled != conf->recovery_disabled &&
1811		    mddev->degraded < conf->raid_disks) {
1812			err = -EBUSY;
1813			goto abort;
1814		}
1815		p->rdev = NULL;
1816		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1817			synchronize_rcu();
1818			if (atomic_read(&rdev->nr_pending)) {
1819				/* lost the race, try later */
1820				err = -EBUSY;
1821				p->rdev = rdev;
1822				goto abort;
1823			}
1824		}
1825		if (conf->mirrors[conf->raid_disks + number].rdev) {
1826			/* We just removed a device that is being replaced.
1827			 * Move down the replacement.  We drain all IO before
1828			 * doing this to avoid confusion.
1829			 */
1830			struct md_rdev *repl =
1831				conf->mirrors[conf->raid_disks + number].rdev;
1832			freeze_array(conf, 0);
1833			if (atomic_read(&repl->nr_pending)) {
1834				/* Some queued IO on the retry_list still
1835				 * holds a reference to repl, so we cannot
1836				 * clear the replacement slot yet; doing so
1837				 * would risk an rdev NULL pointer dereference
1838				 * in sync_request_write and handle_write_finished.
1839				 */
1840				err = -EBUSY;
1841				unfreeze_array(conf);
1842				goto abort;
1843			}
1844			clear_bit(Replacement, &repl->flags);
1845			p->rdev = repl;
1846			conf->mirrors[conf->raid_disks + number].rdev = NULL;
1847			unfreeze_array(conf);
1848		}
1849
1850		clear_bit(WantReplacement, &rdev->flags);
1851		err = md_integrity_register(mddev);
1852	}
1853abort:
1854
1855	print_conf(conf);
1856	return err;
1857}
1858
1859static void end_sync_read(struct bio *bio)
1860{
1861	struct r1bio *r1_bio = get_resync_r1bio(bio);
1862
1863	update_head_pos(r1_bio->read_disk, r1_bio);
1864
1865	/*
1866	 * we have read a block; now it needs to be re-written,
1867	 * or re-read if the read failed.
1868	 * We don't do much here, just schedule handling by raid1d
1869	 */
1870	if (!bio->bi_status)
1871		set_bit(R1BIO_Uptodate, &r1_bio->state);
1872
1873	if (atomic_dec_and_test(&r1_bio->remaining))
1874		reschedule_retry(r1_bio);
1875}
1876
1877static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1878{
1879	sector_t sync_blocks = 0;
1880	sector_t s = r1_bio->sector;
1881	long sectors_to_go = r1_bio->sectors;
1882
1883	/* make sure these bits don't get cleared. */
1884	do {
1885		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1886		s += sync_blocks;
1887		sectors_to_go -= sync_blocks;
1888	} while (sectors_to_go > 0);
1889}
1890
1891static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1892{
1893	if (atomic_dec_and_test(&r1_bio->remaining)) {
1894		struct mddev *mddev = r1_bio->mddev;
1895		int s = r1_bio->sectors;
1896
1897		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1898		    test_bit(R1BIO_WriteError, &r1_bio->state))
1899			reschedule_retry(r1_bio);
1900		else {
1901			put_buf(r1_bio);
1902			md_done_sync(mddev, s, uptodate);
1903		}
1904	}
1905}
1906
1907static void end_sync_write(struct bio *bio)
1908{
1909	int uptodate = !bio->bi_status;
1910	struct r1bio *r1_bio = get_resync_r1bio(bio);
1911	struct mddev *mddev = r1_bio->mddev;
1912	struct r1conf *conf = mddev->private;
1913	sector_t first_bad;
1914	int bad_sectors;
1915	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1916
1917	if (!uptodate) {
1918		abort_sync_write(mddev, r1_bio);
1919		set_bit(WriteErrorSeen, &rdev->flags);
1920		if (!test_and_set_bit(WantReplacement, &rdev->flags))
1921			set_bit(MD_RECOVERY_NEEDED, &
1922				mddev->recovery);
1923		set_bit(R1BIO_WriteError, &r1_bio->state);
1924	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1925			       &first_bad, &bad_sectors) &&
1926		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1927				r1_bio->sector,
1928				r1_bio->sectors,
1929				&first_bad, &bad_sectors)
1930		)
1931		set_bit(R1BIO_MadeGood, &r1_bio->state);
1932
1933	put_sync_write_buf(r1_bio, uptodate);
1934}
1935
1936static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1937			    int sectors, struct page *page, int rw)
1938{
1939	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1940		/* success */
1941		return 1;
1942	if (rw == WRITE) {
1943		set_bit(WriteErrorSeen, &rdev->flags);
1944		if (!test_and_set_bit(WantReplacement,
1945				      &rdev->flags))
1946			set_bit(MD_RECOVERY_NEEDED, &
1947				rdev->mddev->recovery);
1948	}
1949	/* need to record an error - either for the block or the device */
1950	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1951		md_error(rdev->mddev, rdev);
1952	return 0;
1953}
1954
1955static int fix_sync_read_error(struct r1bio *r1_bio)
1956{
1957	/* Try some synchronous reads of other devices to get
1958	 * good data, much like with normal read errors.  Only
1959	 * read into the pages we already have so we don't
1960	 * need to re-issue the read request.
1961	 * We don't need to freeze the array, because being in an
1962	 * active sync request, there is no normal IO, and
1963	 * no overlapping syncs.
1964	 * We don't need to check is_badblock() again as we
1965	 * made sure that anything with a bad block in range
1966	 * will have bi_end_io clear.
1967	 */
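    	/*
    	 * Sketch of the loop below: recovery proceeds one PAGE_SIZE chunk
    	 * at a time.  Each other resync read target is tried until the
    	 * chunk reads cleanly; the recovered chunk is then written back
    	 * to the remaining targets and re-read from them to confirm the
    	 * fix.
    	 */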
1968	struct mddev *mddev = r1_bio->mddev;
1969	struct r1conf *conf = mddev->private;
1970	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1971	struct page **pages = get_resync_pages(bio)->pages;
1972	sector_t sect = r1_bio->sector;
1973	int sectors = r1_bio->sectors;
1974	int idx = 0;
1975	struct md_rdev *rdev;
1976
1977	rdev = conf->mirrors[r1_bio->read_disk].rdev;
1978	if (test_bit(FailFast, &rdev->flags)) {
1979		/* Don't try recovering from here - just fail it
1980		 * ... unless it is the last working device of course */
1981		md_error(mddev, rdev);
1982		if (test_bit(Faulty, &rdev->flags))
1983			/* Don't try to read from here, but make sure
1984			 * put_buf does its thing
1985			 */
1986			bio->bi_end_io = end_sync_write;
1987	}
1988
1989	while (sectors) {
1990		int s = sectors;
1991		int d = r1_bio->read_disk;
1992		int success = 0;
1993		int start;
1994
1995		if (s > (PAGE_SIZE>>9))
1996			s = PAGE_SIZE >> 9;
1997		do {
1998			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1999				/* No rcu protection needed here; devices
2000				 * can only be removed when no resync is
2001				 * active, and resync is currently active
2002				 */
2003				rdev = conf->mirrors[d].rdev;
2004				if (sync_page_io(rdev, sect, s<<9,
2005						 pages[idx],
2006						 REQ_OP_READ, 0, false)) {
2007					success = 1;
2008					break;
2009				}
2010			}
2011			d++;
2012			if (d == conf->raid_disks * 2)
2013				d = 0;
2014		} while (!success && d != r1_bio->read_disk);
2015
2016		if (!success) {
2017			char b[BDEVNAME_SIZE];
2018			int abort = 0;
2019			/* Cannot read from anywhere, this block is lost.
2020			 * Record a bad block on each device.  If that doesn't
2021			 * work just disable and interrupt the recovery.
2022			 * Don't fail devices as that won't really help.
2023			 */
2024			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2025					    mdname(mddev), bio_devname(bio, b),
2026					    (unsigned long long)r1_bio->sector);
2027			for (d = 0; d < conf->raid_disks * 2; d++) {
2028				rdev = conf->mirrors[d].rdev;
2029				if (!rdev || test_bit(Faulty, &rdev->flags))
2030					continue;
2031				if (!rdev_set_badblocks(rdev, sect, s, 0))
2032					abort = 1;
2033			}
2034			if (abort) {
2035				conf->recovery_disabled =
2036					mddev->recovery_disabled;
2037				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2038				md_done_sync(mddev, r1_bio->sectors, 0);
2039				put_buf(r1_bio);
2040				return 0;
2041			}
2042			/* Try next page */
2043			sectors -= s;
2044			sect += s;
2045			idx++;
2046			continue;
2047		}
2048
2049		start = d;
2050		/* write it back and re-read */
2051		while (d != r1_bio->read_disk) {
2052			if (d == 0)
2053				d = conf->raid_disks * 2;
2054			d--;
2055			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2056				continue;
2057			rdev = conf->mirrors[d].rdev;
2058			if (r1_sync_page_io(rdev, sect, s,
2059					    pages[idx],
2060					    WRITE) == 0) {
2061				r1_bio->bios[d]->bi_end_io = NULL;
2062				rdev_dec_pending(rdev, mddev);
2063			}
2064		}
2065		d = start;
2066		while (d != r1_bio->read_disk) {
2067			if (d == 0)
2068				d = conf->raid_disks * 2;
2069			d--;
2070			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2071				continue;
2072			rdev = conf->mirrors[d].rdev;
2073			if (r1_sync_page_io(rdev, sect, s,
2074					    pages[idx],
2075					    READ) != 0)
2076				atomic_add(s, &rdev->corrected_errors);
2077		}
2078		sectors -= s;
2079		sect += s;
2080		idx++;
2081	}
2082	set_bit(R1BIO_Uptodate, &r1_bio->state);
2083	bio->bi_status = 0;
2084	return 1;
2085}
2086
2087static void process_checks(struct r1bio *r1_bio)
2088{
2089	/* We have read all readable devices.  If we haven't
2090	 * got the block, then there is no hope left.
2091	 * If we have, then we want to do a comparison
2092	 * and skip the write if everything is the same.
2093	 * If any blocks failed to read, then we need to
2094	 * attempt an over-write
2095	 */
2096	struct mddev *mddev = r1_bio->mddev;
2097	struct r1conf *conf = mddev->private;
2098	int primary;
2099	int i;
2100	int vcnt;
2101
2102	/* Fix variable parts of all bios */
2103	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
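    	/*
    	 * Illustrative arithmetic (assuming 4KiB pages): for
    	 * r1_bio->sectors = 128 (64KiB) this is
    	 * (128 + 8 - 1) >> 3 = 16 pages to compare per device.
    	 */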
2104	for (i = 0; i < conf->raid_disks * 2; i++) {
2105		blk_status_t status;
2106		struct bio *b = r1_bio->bios[i];
2107		struct resync_pages *rp = get_resync_pages(b);
2108		if (b->bi_end_io != end_sync_read)
2109			continue;
2110		/* fixup the bio for reuse, but preserve bi_status */
2111		status = b->bi_status;
2112		bio_reset(b);
2113		b->bi_status = status;
2114		b->bi_iter.bi_sector = r1_bio->sector +
2115			conf->mirrors[i].rdev->data_offset;
2116		bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2117		b->bi_end_io = end_sync_read;
2118		rp->raid_bio = r1_bio;
2119		b->bi_private = rp;
2120
2121		/* initialize bvec table again */
2122		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2123	}
2124	for (primary = 0; primary < conf->raid_disks * 2; primary++)
2125		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2126		    !r1_bio->bios[primary]->bi_status) {
2127			r1_bio->bios[primary]->bi_end_io = NULL;
2128			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2129			break;
2130		}
2131	r1_bio->read_disk = primary;
2132	for (i = 0; i < conf->raid_disks * 2; i++) {
2133		int j = 0;
2134		struct bio *pbio = r1_bio->bios[primary];
2135		struct bio *sbio = r1_bio->bios[i];
2136		blk_status_t status = sbio->bi_status;
2137		struct page **ppages = get_resync_pages(pbio)->pages;
2138		struct page **spages = get_resync_pages(sbio)->pages;
2139		struct bio_vec *bi;
2140		int page_len[RESYNC_PAGES] = { 0 };
2141		struct bvec_iter_all iter_all;
2142
2143		if (sbio->bi_end_io != end_sync_read)
2144			continue;
2145		/* Now we can 'fixup' the error value */
2146		sbio->bi_status = 0;
2147
2148		bio_for_each_segment_all(bi, sbio, iter_all)
2149			page_len[j++] = bi->bv_len;
2150
2151		if (!status) {
2152			for (j = vcnt; j-- ; ) {
2153				if (memcmp(page_address(ppages[j]),
2154					   page_address(spages[j]),
2155					   page_len[j]))
2156					break;
2157			}
2158		} else
2159			j = 0;
2160		if (j >= 0)
2161			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2162		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2163			      && !status)) {
2164			/* No need to write to this device. */
2165			sbio->bi_end_io = NULL;
2166			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2167			continue;
2168		}
2169
2170		bio_copy_data(sbio, pbio);
2171	}
2172}
2173
2174static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2175{
2176	struct r1conf *conf = mddev->private;
2177	int i;
2178	int disks = conf->raid_disks * 2;
2179	struct bio *wbio;
2180
2181	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2182		/* ouch - failed to read all of that. */
2183		if (!fix_sync_read_error(r1_bio))
2184			return;
2185
2186	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2187		process_checks(r1_bio);
2188
2189	/*
2190	 * schedule writes
2191	 */
2192	atomic_set(&r1_bio->remaining, 1);
2193	for (i = 0; i < disks ; i++) {
2194		wbio = r1_bio->bios[i];
2195		if (wbio->bi_end_io == NULL ||
2196		    (wbio->bi_end_io == end_sync_read &&
2197		     (i == r1_bio->read_disk ||
2198		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2199			continue;
2200		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2201			abort_sync_write(mddev, r1_bio);
2202			continue;
2203		}
2204
2205		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2206		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2207			wbio->bi_opf |= MD_FAILFAST;
2208
2209		wbio->bi_end_io = end_sync_write;
2210		atomic_inc(&r1_bio->remaining);
2211		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2212
2213		submit_bio_noacct(wbio);
2214	}
2215
2216	put_sync_write_buf(r1_bio, 1);
2217}
2218
2219/*
2220 * This is a kernel thread which:
2221 *
2222 *	1.	Retries failed read operations on working mirrors.
2223 *	2.	Updates the raid superblock when problems are encountered.
2224 *	3.	Performs writes following reads for array synchronising.
2225 */
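    /*
     * A rough dispatch map, mirroring the checks in raid1d() below:
     *
     *	R1BIO_IsSync + MadeGood/WriteError  -> handle_sync_write_finished()
     *	R1BIO_IsSync otherwise              -> sync_request_write()
     *	MadeGood/WriteError                 -> handle_write_finished()
     *	R1BIO_ReadError                     -> handle_read_error()
     */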
2226
2227static void fix_read_error(struct r1conf *conf, int read_disk,
2228			   sector_t sect, int sectors)
2229{
2230	struct mddev *mddev = conf->mddev;
2231	while (sectors) {
2232		int s = sectors;
2233		int d = read_disk;
2234		int success = 0;
2235		int start;
2236		struct md_rdev *rdev;
2237
2238		if (s > (PAGE_SIZE>>9))
2239			s = PAGE_SIZE >> 9;
2240
2241		do {
2242			sector_t first_bad;
2243			int bad_sectors;
2244
2245			rcu_read_lock();
2246			rdev = rcu_dereference(conf->mirrors[d].rdev);
2247			if (rdev &&
2248			    (test_bit(In_sync, &rdev->flags) ||
2249			     (!test_bit(Faulty, &rdev->flags) &&
2250			      rdev->recovery_offset >= sect + s)) &&
2251			    is_badblock(rdev, sect, s,
2252					&first_bad, &bad_sectors) == 0) {
2253				atomic_inc(&rdev->nr_pending);
2254				rcu_read_unlock();
2255				if (sync_page_io(rdev, sect, s<<9,
2256					 conf->tmppage, REQ_OP_READ, 0, false))
2257					success = 1;
2258				rdev_dec_pending(rdev, mddev);
2259				if (success)
2260					break;
2261			} else
2262				rcu_read_unlock();
2263			d++;
2264			if (d == conf->raid_disks * 2)
2265				d = 0;
2266		} while (!success && d != read_disk);
2267
2268		if (!success) {
2269			/* Cannot read from anywhere - mark it bad */
2270			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2271			if (!rdev_set_badblocks(rdev, sect, s, 0))
2272				md_error(mddev, rdev);
2273			break;
2274		}
2275		/* write it back and re-read */
2276		start = d;
2277		while (d != read_disk) {
2278			if (d == 0)
2279				d = conf->raid_disks * 2;
2280			d--;
2281			rcu_read_lock();
2282			rdev = rcu_dereference(conf->mirrors[d].rdev);
2283			if (rdev &&
2284			    !test_bit(Faulty, &rdev->flags)) {
2285				atomic_inc(&rdev->nr_pending);
2286				rcu_read_unlock();
2287				r1_sync_page_io(rdev, sect, s,
2288						conf->tmppage, WRITE);
2289				rdev_dec_pending(rdev, mddev);
2290			} else
2291				rcu_read_unlock();
2292		}
2293		d = start;
2294		while (d != read_disk) {
2295			char b[BDEVNAME_SIZE];
2296			if (d == 0)
2297				d = conf->raid_disks * 2;
2298			d--;
2299			rcu_read_lock();
2300			rdev = rcu_dereference(conf->mirrors[d].rdev);
2301			if (rdev &&
2302			    !test_bit(Faulty, &rdev->flags)) {
2303				atomic_inc(&rdev->nr_pending);
2304				rcu_read_unlock();
2305				if (r1_sync_page_io(rdev, sect, s,
2306						    conf->tmppage, READ)) {
2307					atomic_add(s, &rdev->corrected_errors);
2308					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2309						mdname(mddev), s,
2310						(unsigned long long)(sect +
2311								     rdev->data_offset),
2312						bdevname(rdev->bdev, b));
2313				}
2314				rdev_dec_pending(rdev, mddev);
2315			} else
2316				rcu_read_unlock();
2317		}
2318		sectors -= s;
2319		sect += s;
2320	}
2321}
2322
2323static int narrow_write_error(struct r1bio *r1_bio, int i)
2324{
2325	struct mddev *mddev = r1_bio->mddev;
2326	struct r1conf *conf = mddev->private;
2327	struct md_rdev *rdev = conf->mirrors[i].rdev;
2328
2329	/* bio has the data to be written to device 'i' where
2330	 * we just recently had a write error.
2331	 * We repeatedly clone the bio and trim down to one block,
2332	 * then try the write.  Where the write fails we record
2333	 * a bad block.
2334	 * It is conceivable that the bio doesn't exactly align with
2335	 * blocks.  We must handle this somehow.
2336	 *
2337	 * We currently own a reference on the rdev.
2338	 */
2339
2340	int block_sectors;
2341	sector_t sector;
2342	int sectors;
2343	int sect_to_write = r1_bio->sectors;
2344	int ok = 1;
2345
2346	if (rdev->badblocks.shift < 0)
2347		return 0;
2348
2349	block_sectors = roundup(1 << rdev->badblocks.shift,
2350				bdev_logical_block_size(rdev->bdev) >> 9);
2351	sector = r1_bio->sector;
2352	sectors = ((sector + block_sectors)
2353		   & ~(sector_t)(block_sectors - 1))
2354		- sector;
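    	/*
    	 * Worked example with illustrative values: for block_sectors = 8
    	 * and r1_bio->sector = 1003, the first pass is trimmed to
    	 * ((1003 + 8) & ~7) - 1003 = 5 sectors so that it ends on the
    	 * 8-sector boundary; later passes cover whole block_sectors
    	 * chunks.
    	 */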
2355
2356	while (sect_to_write) {
2357		struct bio *wbio;
2358		if (sectors > sect_to_write)
2359			sectors = sect_to_write;
2360		/* Write at 'sector' for 'sectors' */
2361
2362		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2363			wbio = bio_clone_fast(r1_bio->behind_master_bio,
2364					      GFP_NOIO,
2365					      &mddev->bio_set);
2366		} else {
2367			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2368					      &mddev->bio_set);
2369		}
2370
2371		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2372		wbio->bi_iter.bi_sector = r1_bio->sector;
2373		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2374
2375		bio_trim(wbio, sector - r1_bio->sector, sectors);
2376		wbio->bi_iter.bi_sector += rdev->data_offset;
2377		bio_set_dev(wbio, rdev->bdev);
2378
2379		if (submit_bio_wait(wbio) < 0)
2380			/* failure! */
2381			ok = rdev_set_badblocks(rdev, sector,
2382						sectors, 0)
2383				&& ok;
2384
2385		bio_put(wbio);
2386		sect_to_write -= sectors;
2387		sector += sectors;
2388		sectors = block_sectors;
2389	}
2390	return ok;
2391}
2392
2393static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2394{
2395	int m;
2396	int s = r1_bio->sectors;
2397	for (m = 0; m < conf->raid_disks * 2 ; m++) {
2398		struct md_rdev *rdev = conf->mirrors[m].rdev;
2399		struct bio *bio = r1_bio->bios[m];
2400		if (bio->bi_end_io == NULL)
2401			continue;
2402		if (!bio->bi_status &&
2403		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2404			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2405		}
2406		if (bio->bi_status &&
2407		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
2408			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2409				md_error(conf->mddev, rdev);
2410		}
2411	}
2412	put_buf(r1_bio);
2413	md_done_sync(conf->mddev, s, 1);
2414}
2415
2416static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2417{
2418	int m, idx;
2419	bool fail = false;
2420
2421	for (m = 0; m < conf->raid_disks * 2 ; m++)
2422		if (r1_bio->bios[m] == IO_MADE_GOOD) {
2423			struct md_rdev *rdev = conf->mirrors[m].rdev;
2424			rdev_clear_badblocks(rdev,
2425					     r1_bio->sector,
2426					     r1_bio->sectors, 0);
2427			rdev_dec_pending(rdev, conf->mddev);
2428		} else if (r1_bio->bios[m] != NULL) {
2429			/* This drive got a write error.  We need to
2430			 * narrow down and record precise write
2431			 * errors.
2432			 */
2433			fail = true;
2434			if (!narrow_write_error(r1_bio, m)) {
2435				md_error(conf->mddev,
2436					 conf->mirrors[m].rdev);
2437				/* an I/O failed, we can't clear the bitmap */
2438				set_bit(R1BIO_Degraded, &r1_bio->state);
2439			}
2440			rdev_dec_pending(conf->mirrors[m].rdev,
2441					 conf->mddev);
2442		}
2443	if (fail) {
2444		spin_lock_irq(&conf->device_lock);
2445		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2446		idx = sector_to_idx(r1_bio->sector);
2447		atomic_inc(&conf->nr_queued[idx]);
2448		spin_unlock_irq(&conf->device_lock);
2449		/*
2450		 * In case freeze_array() is waiting for condition
2451		 * get_unqueued_pending() == extra to be true.
2452		 */
2453		wake_up(&conf->wait_barrier);
2454		md_wakeup_thread(conf->mddev->thread);
2455	} else {
2456		if (test_bit(R1BIO_WriteError, &r1_bio->state))
2457			close_write(r1_bio);
2458		raid_end_bio_io(r1_bio);
2459	}
2460}
2461
2462static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2463{
2464	struct mddev *mddev = conf->mddev;
2465	struct bio *bio;
2466	struct md_rdev *rdev;
2467
2468	clear_bit(R1BIO_ReadError, &r1_bio->state);
2469	/* we got a read error. Maybe the drive is bad.  Maybe just
2470	 * the block and we can fix it.
2471	 * We freeze all other IO, and try reading the block from
2472	 * other devices.  When we find one, we re-write it
2473	 * and re-read to check that this fixes the read error.
2474	 * This is all done synchronously while the array is
2475	 * frozen.
2476	 */
2477
2478	bio = r1_bio->bios[r1_bio->read_disk];
2479	bio_put(bio);
2480	r1_bio->bios[r1_bio->read_disk] = NULL;
2481
2482	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2483	if (mddev->ro == 0
2484	    && !test_bit(FailFast, &rdev->flags)) {
2485		freeze_array(conf, 1);
2486		fix_read_error(conf, r1_bio->read_disk,
2487			       r1_bio->sector, r1_bio->sectors);
2488		unfreeze_array(conf);
2489	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2490		md_error(mddev, rdev);
2491	} else {
2492		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2493	}
2494
2495	rdev_dec_pending(rdev, conf->mddev);
2496	allow_barrier(conf, r1_bio->sector);
2497	bio = r1_bio->master_bio;
2498
2499	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2500	r1_bio->state = 0;
2501	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2502}
2503
2504static void raid1d(struct md_thread *thread)
2505{
2506	struct mddev *mddev = thread->mddev;
2507	struct r1bio *r1_bio;
2508	unsigned long flags;
2509	struct r1conf *conf = mddev->private;
2510	struct list_head *head = &conf->retry_list;
2511	struct blk_plug plug;
2512	int idx;
2513
2514	md_check_recovery(mddev);
2515
2516	if (!list_empty_careful(&conf->bio_end_io_list) &&
2517	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2518		LIST_HEAD(tmp);
2519		spin_lock_irqsave(&conf->device_lock, flags);
2520		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2521			list_splice_init(&conf->bio_end_io_list, &tmp);
2522		spin_unlock_irqrestore(&conf->device_lock, flags);
2523		while (!list_empty(&tmp)) {
2524			r1_bio = list_first_entry(&tmp, struct r1bio,
2525						  retry_list);
2526			list_del(&r1_bio->retry_list);
2527			idx = sector_to_idx(r1_bio->sector);
2528			atomic_dec(&conf->nr_queued[idx]);
2529			if (mddev->degraded)
2530				set_bit(R1BIO_Degraded, &r1_bio->state);
2531			if (test_bit(R1BIO_WriteError, &r1_bio->state))
2532				close_write(r1_bio);
2533			raid_end_bio_io(r1_bio);
2534		}
2535	}
2536
2537	blk_start_plug(&plug);
2538	for (;;) {
2539
2540		flush_pending_writes(conf);
2541
2542		spin_lock_irqsave(&conf->device_lock, flags);
2543		if (list_empty(head)) {
2544			spin_unlock_irqrestore(&conf->device_lock, flags);
2545			break;
2546		}
2547		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2548		list_del(head->prev);
2549		idx = sector_to_idx(r1_bio->sector);
2550		atomic_dec(&conf->nr_queued[idx]);
2551		spin_unlock_irqrestore(&conf->device_lock, flags);
2552
2553		mddev = r1_bio->mddev;
2554		conf = mddev->private;
2555		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2556			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2557			    test_bit(R1BIO_WriteError, &r1_bio->state))
2558				handle_sync_write_finished(conf, r1_bio);
2559			else
2560				sync_request_write(mddev, r1_bio);
2561		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2562			   test_bit(R1BIO_WriteError, &r1_bio->state))
2563			handle_write_finished(conf, r1_bio);
2564		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2565			handle_read_error(conf, r1_bio);
2566		else
2567			WARN_ON_ONCE(1);
2568
2569		cond_resched();
2570		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2571			md_check_recovery(mddev);
2572	}
2573	blk_finish_plug(&plug);
2574}
2575
2576static int init_resync(struct r1conf *conf)
2577{
2578	int buffs;
2579
2580	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
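    	/*
    	 * Assuming the standard definitions (RESYNC_WINDOW ==
    	 * RESYNC_BLOCK_SIZE * RESYNC_DEPTH), this works out to
    	 * RESYNC_DEPTH (32) in-flight resync buffers.
    	 */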
2581	BUG_ON(mempool_initialized(&conf->r1buf_pool));
2582
2583	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2584			    r1buf_pool_free, conf->poolinfo);
2585}
2586
2587static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2588{
2589	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2590	struct resync_pages *rps;
2591	struct bio *bio;
2592	int i;
2593
2594	for (i = conf->poolinfo->raid_disks; i--; ) {
2595		bio = r1bio->bios[i];
2596		rps = bio->bi_private;
2597		bio_reset(bio);
2598		bio->bi_private = rps;
2599	}
2600	r1bio->master_bio = NULL;
2601	return r1bio;
2602}
2603
2604/*
2605 * perform a "sync" on one "block"
2606 *
2607 * We need to make sure that no normal I/O request - particularly write
2608 * requests - conflicts with active sync requests.
2609 *
2610 * This is achieved by tracking pending requests and a 'barrier' concept
2611 * that can be installed to exclude normal IO requests.
2612 */
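    /*
     * Sketch of that interplay: raise_barrier() blocks until normal I/O
     * over the affected barrier unit has drained, while wait_barrier()
     * on the normal I/O path makes new requests to that unit wait until
     * the resync pass lowers the barrier again.
     */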
2613
2614static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2615				   int *skipped)
2616{
2617	struct r1conf *conf = mddev->private;
2618	struct r1bio *r1_bio;
2619	struct bio *bio;
2620	sector_t max_sector, nr_sectors;
2621	int disk = -1;
2622	int i;
2623	int wonly = -1;
2624	int write_targets = 0, read_targets = 0;
2625	sector_t sync_blocks;
2626	int still_degraded = 0;
2627	int good_sectors = RESYNC_SECTORS;
2628	int min_bad = 0; /* number of sectors that are bad in all devices */
2629	int idx = sector_to_idx(sector_nr);
2630	int page_idx = 0;
2631
2632	if (!mempool_initialized(&conf->r1buf_pool))
2633		if (init_resync(conf))
2634			return 0;
2635
2636	max_sector = mddev->dev_sectors;
2637	if (sector_nr >= max_sector) {
2638		/* If we aborted, we need to abort the
2639		 * sync on the 'current' bitmap chunk (there will
2640		 * only be one in raid1 resync).
2641		 * We can find the current address in mddev->curr_resync
2642		 */
2643		if (mddev->curr_resync < max_sector) /* aborted */
2644			md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2645					   &sync_blocks, 1);
2646		else /* completed sync */
2647			conf->fullsync = 0;
2648
2649		md_bitmap_close_sync(mddev->bitmap);
2650		close_sync(conf);
2651
2652		if (mddev_is_clustered(mddev)) {
2653			conf->cluster_sync_low = 0;
2654			conf->cluster_sync_high = 0;
2655		}
2656		return 0;
2657	}
2658
2659	if (mddev->bitmap == NULL &&
2660	    mddev->recovery_cp == MaxSector &&
2661	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2662	    conf->fullsync == 0) {
2663		*skipped = 1;
2664		return max_sector - sector_nr;
2665	}
2666	/* before building a request, check if we can skip these blocks.
2667	 * This call to md_bitmap_start_sync doesn't actually record anything
2668	 */
2669	if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2670	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2671		/* We can skip this block, and probably several more */
2672		*skipped = 1;
2673		return sync_blocks;
2674	}
2675
2676	/*
2677	 * If there is non-resync activity waiting for a turn, then let it
2678	 * through before starting on this new sync request.
2679	 */
2680	if (atomic_read(&conf->nr_waiting[idx]))
2681		schedule_timeout_uninterruptible(1);
2682
2683	/* we are incrementing sector_nr below. To be safe, we check against
2684	 * sector_nr + two times RESYNC_SECTORS
2685	 */
2686
2687	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2688		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2689
2690
2691	if (raise_barrier(conf, sector_nr))
2692		return 0;
2693
2694	r1_bio = raid1_alloc_init_r1buf(conf);
2695
2696	rcu_read_lock();
2697	/*
2698	 * If we get a correctable read error during resync or recovery,
2699	 * we might want to read from a different device.  So we
2700	 * flag all drives that could conceivably be read from for READ,
2701	 * and any others (which will be non-In_sync devices) for WRITE.
2702	 * If a read fails, we try reading from something else for which READ
2703	 * is OK.
2704	 */
2705
2706	r1_bio->mddev = mddev;
2707	r1_bio->sector = sector_nr;
2708	r1_bio->state = 0;
2709	set_bit(R1BIO_IsSync, &r1_bio->state);
2710	/* make sure good_sectors won't go across barrier unit boundary */
2711	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2712
2713	for (i = 0; i < conf->raid_disks * 2; i++) {
2714		struct md_rdev *rdev;
2715		bio = r1_bio->bios[i];
2716
2717		rdev = rcu_dereference(conf->mirrors[i].rdev);
2718		if (rdev == NULL ||
2719		    test_bit(Faulty, &rdev->flags)) {
2720			if (i < conf->raid_disks)
2721				still_degraded = 1;
2722		} else if (!test_bit(In_sync, &rdev->flags)) {
2723			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2724			bio->bi_end_io = end_sync_write;
2725			write_targets++;
2726		} else {
2727			/* may need to read from here */
2728			sector_t first_bad = MaxSector;
2729			int bad_sectors;
2730
2731			if (is_badblock(rdev, sector_nr, good_sectors,
2732					&first_bad, &bad_sectors)) {
2733				if (first_bad > sector_nr)
2734					good_sectors = first_bad - sector_nr;
2735				else {
2736					bad_sectors -= (sector_nr - first_bad);
2737					if (min_bad == 0 ||
2738					    min_bad > bad_sectors)
2739						min_bad = bad_sectors;
2740				}
2741			}
2742			if (sector_nr < first_bad) {
2743				if (test_bit(WriteMostly, &rdev->flags)) {
2744					if (wonly < 0)
2745						wonly = i;
2746				} else {
2747					if (disk < 0)
2748						disk = i;
2749				}
2750				bio_set_op_attrs(bio, REQ_OP_READ, 0);
2751				bio->bi_end_io = end_sync_read;
2752				read_targets++;
2753			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2754				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2755				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2756				/*
2757				 * The device is suitable for reading (InSync),
2758				 * but has bad block(s) here. Let's try to correct them,
2759				 * if we are doing resync or repair. Otherwise, leave
2760				 * this device alone for this sync request.
2761				 */
2762				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2763				bio->bi_end_io = end_sync_write;
2764				write_targets++;
2765			}
2766		}
2767		if (rdev && bio->bi_end_io) {
2768			atomic_inc(&rdev->nr_pending);
2769			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2770			bio_set_dev(bio, rdev->bdev);
2771			if (test_bit(FailFast, &rdev->flags))
2772				bio->bi_opf |= MD_FAILFAST;
2773		}
2774	}
2775	rcu_read_unlock();
2776	if (disk < 0)
2777		disk = wonly;
2778	r1_bio->read_disk = disk;
2779
2780	if (read_targets == 0 && min_bad > 0) {
2781		/* These sectors are bad on all InSync devices, so we
2782		 * need to mark them bad on all write targets
2783		 */
2784		int ok = 1;
2785		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2786			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2787				struct md_rdev *rdev = conf->mirrors[i].rdev;
2788				ok = rdev_set_badblocks(rdev, sector_nr,
2789							min_bad, 0
2790					) && ok;
2791			}
2792		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2793		*skipped = 1;
2794		put_buf(r1_bio);
2795
2796		if (!ok) {
2797			/* Cannot record the badblocks, so we need to
2798			 * abort the resync.
2799			 * If there are multiple read targets, could just
2800			 * fail the really bad ones ???
2801			 */
2802			conf->recovery_disabled = mddev->recovery_disabled;
2803			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2804			return 0;
2805		} else
2806			return min_bad;
2807
2808	}
2809	if (min_bad > 0 && min_bad < good_sectors) {
2810		/* only resync enough to reach the next bad->good
2811		 * transition */
2812		good_sectors = min_bad;
2813	}
2814
2815	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2816		/* extra read targets are also write targets */
2817		write_targets += read_targets-1;
2818
2819	if (write_targets == 0 || read_targets == 0) {
2820		/* There is nowhere to write, so all non-sync
2821		 * drives must be failed - so we are finished
2822		 */
2823		sector_t rv;
2824		if (min_bad > 0)
2825			max_sector = sector_nr + min_bad;
2826		rv = max_sector - sector_nr;
2827		*skipped = 1;
2828		put_buf(r1_bio);
2829		return rv;
2830	}
2831
2832	if (max_sector > mddev->resync_max)
2833		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2834	if (max_sector > sector_nr + good_sectors)
2835		max_sector = sector_nr + good_sectors;
2836	nr_sectors = 0;
2837	sync_blocks = 0;
2838	do {
2839		struct page *page;
2840		int len = PAGE_SIZE;
2841		if (sector_nr + (len>>9) > max_sector)
2842			len = (max_sector - sector_nr) << 9;
2843		if (len == 0)
2844			break;
2845		if (sync_blocks == 0) {
2846			if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2847						  &sync_blocks, still_degraded) &&
2848			    !conf->fullsync &&
2849			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2850				break;
2851			if ((len >> 9) > sync_blocks)
2852				len = sync_blocks<<9;
2853		}
2854
2855		for (i = 0 ; i < conf->raid_disks * 2; i++) {
2856			struct resync_pages *rp;
2857
2858			bio = r1_bio->bios[i];
2859			rp = get_resync_pages(bio);
2860			if (bio->bi_end_io) {
2861				page = resync_fetch_page(rp, page_idx);
2862
2863				/*
2864				 * won't fail because the vec table is big
2865				 * enough to hold all these pages
2866				 */
2867				bio_add_page(bio, page, len, 0);
2868			}
2869		}
2870		nr_sectors += len>>9;
2871		sector_nr += len>>9;
2872		sync_blocks -= (len>>9);
2873	} while (++page_idx < RESYNC_PAGES);
2874
2875	r1_bio->sectors = nr_sectors;
2876
2877	if (mddev_is_clustered(mddev) &&
2878			conf->cluster_sync_high < sector_nr + nr_sectors) {
2879		conf->cluster_sync_low = mddev->curr_resync_completed;
2880		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2881		/* Send resync message */
2882		md_cluster_ops->resync_info_update(mddev,
2883				conf->cluster_sync_low,
2884				conf->cluster_sync_high);
2885	}
2886
2887	/* For a user-requested sync, we read all readable devices and do a
2888	 * compare
2889	 */
2890	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2891		atomic_set(&r1_bio->remaining, read_targets);
2892		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2893			bio = r1_bio->bios[i];
2894			if (bio->bi_end_io == end_sync_read) {
2895				read_targets--;
2896				md_sync_acct_bio(bio, nr_sectors);
2897				if (read_targets == 1)
2898					bio->bi_opf &= ~MD_FAILFAST;
2899				submit_bio_noacct(bio);
2900			}
2901		}
2902	} else {
2903		atomic_set(&r1_bio->remaining, 1);
2904		bio = r1_bio->bios[r1_bio->read_disk];
2905		md_sync_acct_bio(bio, nr_sectors);
2906		if (read_targets == 1)
2907			bio->bi_opf &= ~MD_FAILFAST;
2908		submit_bio_noacct(bio);
2909	}
2910	return nr_sectors;
2911}
2912
2913static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2914{
2915	if (sectors)
2916		return sectors;
2917
2918	return mddev->dev_sectors;
2919}
2920
2921static struct r1conf *setup_conf(struct mddev *mddev)
2922{
2923	struct r1conf *conf;
2924	int i;
2925	struct raid1_info *disk;
2926	struct md_rdev *rdev;
2927	int err = -ENOMEM;
2928
2929	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2930	if (!conf)
2931		goto abort;
2932
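    	/*
    	 * The next four arrays are per-bucket barrier counters: sectors
    	 * hash into BARRIER_BUCKETS_NR buckets via sector_to_idx(), so
    	 * the barrier bookkeeping does not contend on a single counter.
    	 */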
2933	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2934				   sizeof(atomic_t), GFP_KERNEL);
2935	if (!conf->nr_pending)
2936		goto abort;
2937
2938	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2939				   sizeof(atomic_t), GFP_KERNEL);
2940	if (!conf->nr_waiting)
2941		goto abort;
2942
2943	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2944				  sizeof(atomic_t), GFP_KERNEL);
2945	if (!conf->nr_queued)
2946		goto abort;
2947
2948	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2949				sizeof(atomic_t), GFP_KERNEL);
2950	if (!conf->barrier)
2951		goto abort;
2952
2953	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2954					    mddev->raid_disks, 2),
2955				GFP_KERNEL);
2956	if (!conf->mirrors)
2957		goto abort;
2958
2959	conf->tmppage = alloc_page(GFP_KERNEL);
2960	if (!conf->tmppage)
2961		goto abort;
2962
2963	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2964	if (!conf->poolinfo)
2965		goto abort;
2966	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2967	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2968			   rbio_pool_free, conf->poolinfo);
2969	if (err)
2970		goto abort;
2971
2972	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2973	if (err)
2974		goto abort;
2975
2976	conf->poolinfo->mddev = mddev;
2977
2978	err = -EINVAL;
2979	spin_lock_init(&conf->device_lock);
2980	rdev_for_each(rdev, mddev) {
2981		int disk_idx = rdev->raid_disk;
2982		if (disk_idx >= mddev->raid_disks
2983		    || disk_idx < 0)
2984			continue;
2985		if (test_bit(Replacement, &rdev->flags))
2986			disk = conf->mirrors + mddev->raid_disks + disk_idx;
2987		else
2988			disk = conf->mirrors + disk_idx;
2989
2990		if (disk->rdev)
2991			goto abort;
2992		disk->rdev = rdev;
2993		disk->head_position = 0;
2994		disk->seq_start = MaxSector;
2995	}
2996	conf->raid_disks = mddev->raid_disks;
2997	conf->mddev = mddev;
2998	INIT_LIST_HEAD(&conf->retry_list);
2999	INIT_LIST_HEAD(&conf->bio_end_io_list);
3000
3001	spin_lock_init(&conf->resync_lock);
3002	init_waitqueue_head(&conf->wait_barrier);
3003
3004	bio_list_init(&conf->pending_bio_list);
3005	conf->pending_count = 0;
3006	conf->recovery_disabled = mddev->recovery_disabled - 1;
3007
3008	err = -EIO;
3009	for (i = 0; i < conf->raid_disks * 2; i++) {
3010
3011		disk = conf->mirrors + i;
3012
3013		if (i < conf->raid_disks &&
3014		    disk[conf->raid_disks].rdev) {
3015			/* This slot has a replacement. */
3016			if (!disk->rdev) {
3017				/* No original, just make the replacement
3018				 * a recovering spare
3019				 */
3020				disk->rdev =
3021					disk[conf->raid_disks].rdev;
3022				disk[conf->raid_disks].rdev = NULL;
3023			} else if (!test_bit(In_sync, &disk->rdev->flags))
3024				/* Original is not in_sync - bad */
3025				goto abort;
3026		}
3027
3028		if (!disk->rdev ||
3029		    !test_bit(In_sync, &disk->rdev->flags)) {
3030			disk->head_position = 0;
3031			if (disk->rdev &&
3032			    (disk->rdev->saved_raid_disk < 0))
3033				conf->fullsync = 1;
3034		}
3035	}
3036
3037	err = -ENOMEM;
3038	conf->thread = md_register_thread(raid1d, mddev, "raid1");
3039	if (!conf->thread)
3040		goto abort;
3041
3042	return conf;
3043
3044 abort:
3045	if (conf) {
3046		mempool_exit(&conf->r1bio_pool);
3047		kfree(conf->mirrors);
3048		safe_put_page(conf->tmppage);
3049		kfree(conf->poolinfo);
3050		kfree(conf->nr_pending);
3051		kfree(conf->nr_waiting);
3052		kfree(conf->nr_queued);
3053		kfree(conf->barrier);
3054		bioset_exit(&conf->bio_split);
3055		kfree(conf);
3056	}
3057	return ERR_PTR(err);
3058}
3059
3060static void raid1_free(struct mddev *mddev, void *priv);
3061static int raid1_run(struct mddev *mddev)
3062{
3063	struct r1conf *conf;
3064	int i;
3065	struct md_rdev *rdev;
3066	int ret;
3067	bool discard_supported = false;
3068
3069	if (mddev->level != 1) {
3070		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3071			mdname(mddev), mddev->level);
3072		return -EIO;
3073	}
3074	if (mddev->reshape_position != MaxSector) {
3075		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3076			mdname(mddev));
3077		return -EIO;
3078	}
3079	if (mddev_init_writes_pending(mddev) < 0)
3080		return -ENOMEM;
3081	/*
3082	 * copy the already verified devices into our private RAID1
3083	 * bookkeeping area. [whatever we allocate in run(),
3084	 * should be freed in raid1_free()]
3085	 */
3086	if (mddev->private == NULL)
3087		conf = setup_conf(mddev);
3088	else
3089		conf = mddev->private;
3090
3091	if (IS_ERR(conf))
3092		return PTR_ERR(conf);
3093
3094	if (mddev->queue) {
3095		blk_queue_max_write_same_sectors(mddev->queue, 0);
3096		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3097	}
3098
3099	rdev_for_each(rdev, mddev) {
3100		if (!mddev->gendisk)
3101			continue;
3102		disk_stack_limits(mddev->gendisk, rdev->bdev,
3103				  rdev->data_offset << 9);
3104		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3105			discard_supported = true;
3106	}
3107
3108	mddev->degraded = 0;
3109	for (i = 0; i < conf->raid_disks; i++)
3110		if (conf->mirrors[i].rdev == NULL ||
3111		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3112		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3113			mddev->degraded++;
3114	/*
3115	 * RAID1 needs at least one active disk
3116	 */
3117	if (conf->raid_disks - mddev->degraded < 1) {
3118		ret = -EINVAL;
3119		goto abort;
3120	}
3121
3122	if (conf->raid_disks - mddev->degraded == 1)
3123		mddev->recovery_cp = MaxSector;
3124
3125	if (mddev->recovery_cp != MaxSector)
3126		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3127			mdname(mddev));
3128	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3129		mdname(mddev), mddev->raid_disks - mddev->degraded,
3130		mddev->raid_disks);
3131
3132	/*
3133	 * Ok, everything is just fine now
3134	 */
3135	mddev->thread = conf->thread;
3136	conf->thread = NULL;
3137	mddev->private = conf;
3138	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3139
3140	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3141
3142	if (mddev->queue) {
3143		if (discard_supported)
3144			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3145						mddev->queue);
3146		else
3147			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3148						  mddev->queue);
3149	}
3150
3151	ret = md_integrity_register(mddev);
3152	if (ret) {
3153		md_unregister_thread(&mddev->thread);
3154		goto abort;
3155	}
3156	return 0;
3157
3158abort:
3159	raid1_free(mddev, conf);
3160	return ret;
3161}
3162
3163static void raid1_free(struct mddev *mddev, void *priv)
3164{
3165	struct r1conf *conf = priv;
3166
3167	mempool_exit(&conf->r1bio_pool);
3168	kfree(conf->mirrors);
3169	safe_put_page(conf->tmppage);
3170	kfree(conf->poolinfo);
3171	kfree(conf->nr_pending);
3172	kfree(conf->nr_waiting);
3173	kfree(conf->nr_queued);
3174	kfree(conf->barrier);
3175	bioset_exit(&conf->bio_split);
3176	kfree(conf);
3177}
3178
3179static int raid1_resize(struct mddev *mddev, sector_t sectors)
3180{
3181	/* no resync is happening, and there is enough space
3182	 * on all devices, so we can resize.
3183	 * We need to make sure resync covers any new space.
3184	 * If the array is shrinking we should possibly wait until
3185	 * any io in the removed space completes, but it hardly seems
3186	 * worth it.
3187	 */
3188	sector_t newsize = raid1_size(mddev, sectors, 0);
3189	if (mddev->external_size &&
3190	    mddev->array_sectors > newsize)
3191		return -EINVAL;
3192	if (mddev->bitmap) {
3193		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3194		if (ret)
3195			return ret;
3196	}
3197	md_set_array_sectors(mddev, newsize);
3198	if (sectors > mddev->dev_sectors &&
3199	    mddev->recovery_cp > mddev->dev_sectors) {
3200		mddev->recovery_cp = mddev->dev_sectors;
3201		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3202	}
3203	mddev->dev_sectors = sectors;
3204	mddev->resync_max_sectors = sectors;
3205	return 0;
3206}
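    /*
     * Typically reached through md's resize path, e.g. (device name
     * hypothetical): mdadm --grow /dev/mdX --size=max
     */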
3207
3208static int raid1_reshape(struct mddev *mddev)
3209{
3210	/* We need to:
3211	 * 1/ resize the r1bio_pool
3212	 * 2/ resize conf->mirrors
3213	 *
3214	 * We allocate a new r1bio_pool if we can.
3215	 * Then raise a device barrier and wait until all IO stops.
3216	 * Then resize conf->mirrors and swap in the new r1bio pool.
3217	 *
3218	 * At the same time, we "pack" the devices so that all the missing
3219	 * devices have the higher raid_disk numbers.
3220	 */
3221	mempool_t newpool, oldpool;
3222	struct pool_info *newpoolinfo;
3223	struct raid1_info *newmirrors;
3224	struct r1conf *conf = mddev->private;
3225	int cnt, raid_disks;
3226	unsigned long flags;
3227	int d, d2;
3228	int ret;
3229
3230	memset(&newpool, 0, sizeof(newpool));
3231	memset(&oldpool, 0, sizeof(oldpool));
3232
3233	/* Cannot change chunk_size, layout, or level */
3234	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3235	    mddev->layout != mddev->new_layout ||
3236	    mddev->level != mddev->new_level) {
3237		mddev->new_chunk_sectors = mddev->chunk_sectors;
3238		mddev->new_layout = mddev->layout;
3239		mddev->new_level = mddev->level;
3240		return -EINVAL;
3241	}
3242
3243	if (!mddev_is_clustered(mddev))
3244		md_allow_write(mddev);
3245
3246	raid_disks = mddev->raid_disks + mddev->delta_disks;
3247
3248	if (raid_disks < conf->raid_disks) {
3249		cnt = 0;
3250		for (d = 0; d < conf->raid_disks; d++)
3251			if (conf->mirrors[d].rdev)
3252				cnt++;
3253		if (cnt > raid_disks)
3254			return -EBUSY;
3255	}
3256
3257	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3258	if (!newpoolinfo)
3259		return -ENOMEM;
3260	newpoolinfo->mddev = mddev;
3261	newpoolinfo->raid_disks = raid_disks * 2;
3262
3263	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3264			   rbio_pool_free, newpoolinfo);
3265	if (ret) {
3266		kfree(newpoolinfo);
3267		return ret;
3268	}
3269	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3270					 raid_disks, 2),
3271			     GFP_KERNEL);
3272	if (!newmirrors) {
3273		kfree(newpoolinfo);
3274		mempool_exit(&newpool);
3275		return -ENOMEM;
3276	}
3277
3278	freeze_array(conf, 0);
3279
3280	/* ok, everything is stopped */
3281	oldpool = conf->r1bio_pool;
3282	conf->r1bio_pool = newpool;
3283
3284	for (d = d2 = 0; d < conf->raid_disks; d++) {
3285		struct md_rdev *rdev = conf->mirrors[d].rdev;
3286		if (rdev && rdev->raid_disk != d2) {
3287			sysfs_unlink_rdev(mddev, rdev);
3288			rdev->raid_disk = d2;
3289			sysfs_unlink_rdev(mddev, rdev);
3290			if (sysfs_link_rdev(mddev, rdev))
3291				pr_warn("md/raid1:%s: cannot register rd%d\n",
3292					mdname(mddev), rdev->raid_disk);
3293		}
3294		if (rdev)
3295			newmirrors[d2++].rdev = rdev;
3296	}
3297	kfree(conf->mirrors);
3298	conf->mirrors = newmirrors;
3299	kfree(conf->poolinfo);
3300	conf->poolinfo = newpoolinfo;
3301
3302	spin_lock_irqsave(&conf->device_lock, flags);
3303	mddev->degraded += (raid_disks - conf->raid_disks);
3304	spin_unlock_irqrestore(&conf->device_lock, flags);
3305	conf->raid_disks = mddev->raid_disks = raid_disks;
3306	mddev->delta_disks = 0;
3307
3308	unfreeze_array(conf);
3309
3310	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3311	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3312	md_wakeup_thread(mddev->thread);
3313
3314	mempool_exit(&oldpool);
3315	return 0;
3316}
3317
3318static void raid1_quiesce(struct mddev *mddev, int quiesce)
3319{
3320	struct r1conf *conf = mddev->private;
3321
3322	if (quiesce)
3323		freeze_array(conf, 0);
3324	else
3325		unfreeze_array(conf);
3326}
3327
3328static void *raid1_takeover(struct mddev *mddev)
3329{
3330	/* raid1 can take over:
3331	 *  raid5 with 2 devices, any layout or chunk size
3332	 */
3333	if (mddev->level == 5 && mddev->raid_disks == 2) {
3334		struct r1conf *conf;
3335		mddev->new_level = 1;
3336		mddev->new_layout = 0;
3337		mddev->new_chunk_sectors = 0;
3338		conf = setup_conf(mddev);
3339		if (!IS_ERR(conf)) {
3340			/* Array must appear to be quiesced */
3341			conf->array_frozen = 1;
3342			mddev_clear_unsupported_flags(mddev,
3343				UNSUPPORTED_MDDEV_FLAGS);
3344		}
3345		return conf;
3346	}
3347	return ERR_PTR(-EINVAL);
3348}
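    /*
     * The takeover is normally driven from user space, e.g. (device name
     * hypothetical): mdadm --grow /dev/mdX --level=1 on a 2-device raid5.
     */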
3349
3350static struct md_personality raid1_personality =
3351{
3352	.name		= "raid1",
3353	.level		= 1,
3354	.owner		= THIS_MODULE,
3355	.make_request	= raid1_make_request,
3356	.run		= raid1_run,
3357	.free		= raid1_free,
3358	.status		= raid1_status,
3359	.error_handler	= raid1_error,
3360	.hot_add_disk	= raid1_add_disk,
3361	.hot_remove_disk= raid1_remove_disk,
3362	.spare_active	= raid1_spare_active,
3363	.sync_request	= raid1_sync_request,
3364	.resize		= raid1_resize,
3365	.size		= raid1_size,
3366	.check_reshape	= raid1_reshape,
3367	.quiesce	= raid1_quiesce,
3368	.takeover	= raid1_takeover,
3369};
3370
3371static int __init raid_init(void)
3372{
3373	return register_md_personality(&raid1_personality);
3374}
3375
3376static void raid_exit(void)
3377{
3378	unregister_md_personality(&raid1_personality);
3379}
3380
3381module_init(raid_init);
3382module_exit(raid_exit);
3383MODULE_LICENSE("GPL");
3384MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3385MODULE_ALIAS("md-personality-3"); /* RAID1 */
3386MODULE_ALIAS("md-raid1");
3387MODULE_ALIAS("md-level-1");
3388
3389module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
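    /*
     * As a module_param with mode S_IRUGO|S_IWUSR, max_queued_requests
     * should also be tunable at runtime via
     * /sys/module/raid1/parameters/max_queued_requests.
     */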
v4.10.11
 
   1/*
   2 * raid1.c : Multiple Devices driver for Linux
   3 *
   4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
   5 *
   6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   7 *
   8 * RAID-1 management functions.
   9 *
  10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  11 *
  12 * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk>
  13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  14 *
  15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
  16 * bitmapped intelligence in resync:
  17 *
  18 *      - bitmap marked during normal i/o
  19 *      - bitmap used to skip nondirty blocks during sync
  20 *
  21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
  22 * - persistent bitmap code
  23 *
  24 * This program is free software; you can redistribute it and/or modify
  25 * it under the terms of the GNU General Public License as published by
  26 * the Free Software Foundation; either version 2, or (at your option)
  27 * any later version.
  28 *
  29 * You should have received a copy of the GNU General Public License
  30 * (for example /usr/src/linux/COPYING); if not, write to the Free
  31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  32 */
  33
  34#include <linux/slab.h>
  35#include <linux/delay.h>
  36#include <linux/blkdev.h>
  37#include <linux/module.h>
  38#include <linux/seq_file.h>
  39#include <linux/ratelimit.h>
 
 
  40#include <trace/events/block.h>
 
  41#include "md.h"
  42#include "raid1.h"
  43#include "bitmap.h"
  44
  45#define UNSUPPORTED_MDDEV_FLAGS		\
  46	((1L << MD_HAS_JOURNAL) |	\
  47	 (1L << MD_JOURNAL_CLEAN))
 
 
 
 
 
 
 
 
  48
  49/*
  50 * Number of guaranteed r1bios in case of extreme VM load:
  51 */
  52#define	NR_RAID1_BIOS 256
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  53
  54/* when we get a read error on a read-only array, we redirect to another
  55 * device without failing the first device, or trying to over-write to
  56 * correct the read error.  To keep track of bad blocks on a per-bio
  57 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
  58 */
  59#define IO_BLOCKED ((struct bio *)1)
  60/* When we successfully write to a known bad-block, we need to remove the
  61 * bad-block marking which must be done from process context.  So we record
  62 * the success by setting devs[n].bio to IO_MADE_GOOD
  63 */
  64#define IO_MADE_GOOD ((struct bio *)2)
  65
  66#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
 
 
 
 
 
  67
  68/* When there are this many requests queue to be written by
  69 * the raid1 thread, we become 'congested' to provide back-pressure
  70 * for writeback.
  71 */
  72static int max_queued_requests = 1024;
 
  73
  74static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
  75			  sector_t bi_sector);
  76static void lower_barrier(struct r1conf *conf);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  77
  78#define raid1_log(md, fmt, args...)				\
  79	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
 
 
 
 
 
 
  80
  81static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
  82{
  83	struct pool_info *pi = data;
  84	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
  85
  86	/* allocate a r1bio with room for raid_disks entries in the bios array */
  87	return kzalloc(size, gfp_flags);
  88}
  89
  90static void r1bio_pool_free(void *r1_bio, void *data)
  91{
  92	kfree(r1_bio);
  93}
  94
  95#define RESYNC_BLOCK_SIZE (64*1024)
  96#define RESYNC_DEPTH 32
  97#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
  98#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  99#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
 100#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
 101#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
 102#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 103#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
 104
 105static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 106{
 107	struct pool_info *pi = data;
 108	struct r1bio *r1_bio;
 109	struct bio *bio;
 110	int need_pages;
 111	int i, j;
 
 112
 113	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
 114	if (!r1_bio)
 115		return NULL;
 116
 
 
 
 
 
 117	/*
 118	 * Allocate bios : 1 for reading, n-1 for writing
 119	 */
 120	for (j = pi->raid_disks ; j-- ; ) {
 121		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 122		if (!bio)
 123			goto out_free_bio;
 124		r1_bio->bios[j] = bio;
 125	}
 126	/*
 127	 * Allocate RESYNC_PAGES data pages and attach them to
 128	 * the first bio.
 129	 * If this is a user-requested check/repair, allocate
 130	 * RESYNC_PAGES for each bio.
 131	 */
 132	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
 133		need_pages = pi->raid_disks;
 134	else
 135		need_pages = 1;
 136	for (j = 0; j < need_pages; j++) {
 
 
 137		bio = r1_bio->bios[j];
 138		bio->bi_vcnt = RESYNC_PAGES;
 139
 140		if (bio_alloc_pages(bio, gfp_flags))
 141			goto out_free_pages;
 142	}
  143	/* If not user-requested, copy the page pointers to all bios */
 144	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
 145		for (i=0; i<RESYNC_PAGES ; i++)
 146			for (j=1; j<pi->raid_disks; j++)
 147				r1_bio->bios[j]->bi_io_vec[i].bv_page =
 148					r1_bio->bios[0]->bi_io_vec[i].bv_page;
 149	}
 150
 151	r1_bio->master_bio = NULL;
 152
 153	return r1_bio;
 154
 155out_free_pages:
 156	while (--j >= 0)
 157		bio_free_pages(r1_bio->bios[j]);
 158
 159out_free_bio:
 160	while (++j < pi->raid_disks)
 161		bio_put(r1_bio->bios[j]);
 162	r1bio_pool_free(r1_bio, data);
 163	return NULL;
 164}
 165
 166static void r1buf_pool_free(void *__r1_bio, void *data)
 167{
 168	struct pool_info *pi = data;
  169	int i, j;
 170	struct r1bio *r1bio = __r1_bio;
 171
 172	for (i = 0; i < RESYNC_PAGES; i++)
 173		for (j = pi->raid_disks; j-- ;) {
 174			if (j == 0 ||
 175			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
 176			    r1bio->bios[0]->bi_io_vec[i].bv_page)
 177				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
 178		}
 179	for (i=0 ; i < pi->raid_disks; i++)
 180		bio_put(r1bio->bios[i]);
 181
 182	r1bio_pool_free(r1bio, data);
 183}
 184
 185static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
 186{
 187	int i;
 188
 189	for (i = 0; i < conf->raid_disks * 2; i++) {
 190		struct bio **bio = r1_bio->bios + i;
 191		if (!BIO_SPECIAL(*bio))
 192			bio_put(*bio);
 193		*bio = NULL;
 194	}
 195}
 196
 197static void free_r1bio(struct r1bio *r1_bio)
 198{
 199	struct r1conf *conf = r1_bio->mddev->private;
 200
 201	put_all_bios(conf, r1_bio);
 202	mempool_free(r1_bio, conf->r1bio_pool);
 203}
 204
 205static void put_buf(struct r1bio *r1_bio)
 206{
 207	struct r1conf *conf = r1_bio->mddev->private;
 208	int i;
 209
 210	for (i = 0; i < conf->raid_disks * 2; i++) {
 211		struct bio *bio = r1_bio->bios[i];
 212		if (bio->bi_end_io)
 213			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
 214	}
 215
 216	mempool_free(r1_bio, conf->r1buf_pool);
 217
 218	lower_barrier(conf);
 219}
 220
 221static void reschedule_retry(struct r1bio *r1_bio)
 222{
 223	unsigned long flags;
 224	struct mddev *mddev = r1_bio->mddev;
 225	struct r1conf *conf = mddev->private;
 226
 227	spin_lock_irqsave(&conf->device_lock, flags);
 228	list_add(&r1_bio->retry_list, &conf->retry_list);
  229	conf->nr_queued++;
 230	spin_unlock_irqrestore(&conf->device_lock, flags);
 231
 232	wake_up(&conf->wait_barrier);
 233	md_wakeup_thread(mddev->thread);
 234}
 235
 236/*
 237 * raid_end_bio_io() is called when we have finished servicing a mirrored
 238 * operation and are ready to return a success/failure code to the buffer
 239 * cache layer.
 240 */
 241static void call_bio_endio(struct r1bio *r1_bio)
 242{
 243	struct bio *bio = r1_bio->master_bio;
 244	int done;
 245	struct r1conf *conf = r1_bio->mddev->private;
 246	sector_t start_next_window = r1_bio->start_next_window;
 247	sector_t bi_sector = bio->bi_iter.bi_sector;
 248
 249	if (bio->bi_phys_segments) {
 250		unsigned long flags;
 251		spin_lock_irqsave(&conf->device_lock, flags);
 252		bio->bi_phys_segments--;
 253		done = (bio->bi_phys_segments == 0);
 254		spin_unlock_irqrestore(&conf->device_lock, flags);
 255		/*
 256		 * make_request() might be waiting for
 257		 * bi_phys_segments to decrease
 258		 */
 259		wake_up(&conf->wait_barrier);
 260	} else
 261		done = 1;
 262
 263	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
 264		bio->bi_error = -EIO;
 265
 266	if (done) {
 267		bio_endio(bio);
 268		/*
 269		 * Wake up any possible resync thread that waits for the device
 270		 * to go idle.
 271		 */
 272		allow_barrier(conf, start_next_window, bi_sector);
 273	}
 274}
 275
 276static void raid_end_bio_io(struct r1bio *r1_bio)
 277{
 278	struct bio *bio = r1_bio->master_bio;
 279
 280	/* if nobody has done the final endio yet, do it now */
 281	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 282		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
 283			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
 284			 (unsigned long long) bio->bi_iter.bi_sector,
 285			 (unsigned long long) bio_end_sector(bio) - 1);
 286
 287		call_bio_endio(r1_bio);
 288	}
 289	free_r1bio(r1_bio);
 290}
 291
 292/*
 293 * Update disk head position estimator based on IRQ completion info.
 294 */
 295static inline void update_head_pos(int disk, struct r1bio *r1_bio)
 296{
 297	struct r1conf *conf = r1_bio->mddev->private;
 298
 299	conf->mirrors[disk].head_position =
 300		r1_bio->sector + (r1_bio->sectors);
 301}
 302
 303/*
  304 * Find the disk number which triggered the given bio
 305 */
 306static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 307{
 308	int mirror;
 309	struct r1conf *conf = r1_bio->mddev->private;
 310	int raid_disks = conf->raid_disks;
 311
 312	for (mirror = 0; mirror < raid_disks * 2; mirror++)
 313		if (r1_bio->bios[mirror] == bio)
 314			break;
 315
 316	BUG_ON(mirror == raid_disks * 2);
 317	update_head_pos(mirror, r1_bio);
 318
 319	return mirror;
 320}
 321
 322static void raid1_end_read_request(struct bio *bio)
 323{
 324	int uptodate = !bio->bi_error;
 325	struct r1bio *r1_bio = bio->bi_private;
 326	struct r1conf *conf = r1_bio->mddev->private;
 327	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
 328
 329	/*
 330	 * this branch is our 'one mirror IO has finished' event handler:
 331	 */
 332	update_head_pos(r1_bio->read_disk, r1_bio);
 333
 334	if (uptodate)
 335		set_bit(R1BIO_Uptodate, &r1_bio->state);
 336	else if (test_bit(FailFast, &rdev->flags) &&
 337		 test_bit(R1BIO_FailFast, &r1_bio->state))
 338		/* This was a fail-fast read so we definitely
 339		 * want to retry */
 340		;
 341	else {
 342		/* If all other devices have failed, we want to return
 343		 * the error upwards rather than fail the last device.
 344		 * Here we redefine "uptodate" to mean "Don't want to retry"
 345		 */
 346		unsigned long flags;
 347		spin_lock_irqsave(&conf->device_lock, flags);
 348		if (r1_bio->mddev->degraded == conf->raid_disks ||
 349		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
 350		     test_bit(In_sync, &rdev->flags)))
 351			uptodate = 1;
 352		spin_unlock_irqrestore(&conf->device_lock, flags);
 353	}
 354
 355	if (uptodate) {
 356		raid_end_bio_io(r1_bio);
 357		rdev_dec_pending(rdev, conf->mddev);
 358	} else {
 359		/*
 360		 * oops, read error:
 361		 */
 362		char b[BDEVNAME_SIZE];
 363		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
 364				   mdname(conf->mddev),
 365				   bdevname(rdev->bdev, b),
 366				   (unsigned long long)r1_bio->sector);
 367		set_bit(R1BIO_ReadError, &r1_bio->state);
 368		reschedule_retry(r1_bio);
 369		/* don't drop the reference on read_disk yet */
 370	}
 371}
 372
 373static void close_write(struct r1bio *r1_bio)
 374{
 375	/* it really is the end of this request */
 376	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 377		/* free extra copy of the data pages */
 378		int i = r1_bio->behind_page_count;
 379		while (i--)
 380			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
 381		kfree(r1_bio->behind_bvecs);
 382		r1_bio->behind_bvecs = NULL;
 383	}
 384	/* clear the bitmap if all writes complete successfully */
 385	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
 386			r1_bio->sectors,
 387			!test_bit(R1BIO_Degraded, &r1_bio->state),
 388			test_bit(R1BIO_BehindIO, &r1_bio->state));
 389	md_write_end(r1_bio->mddev);
 390}
 391
 392static void r1_bio_write_done(struct r1bio *r1_bio)
 393{
 394	if (!atomic_dec_and_test(&r1_bio->remaining))
 395		return;
 396
 397	if (test_bit(R1BIO_WriteError, &r1_bio->state))
 398		reschedule_retry(r1_bio);
 399	else {
 400		close_write(r1_bio);
 401		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
 402			reschedule_retry(r1_bio);
 403		else
 404			raid_end_bio_io(r1_bio);
 405	}
 406}
 407
 408static void raid1_end_write_request(struct bio *bio)
 409{
 410	struct r1bio *r1_bio = bio->bi_private;
 411	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 412	struct r1conf *conf = r1_bio->mddev->private;
 413	struct bio *to_put = NULL;
 414	int mirror = find_bio_disk(r1_bio, bio);
 415	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
 416	bool discard_error;
 417
 418	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
 419
 420	/*
 421	 * 'one mirror IO has finished' event handler:
 422	 */
 423	if (bio->bi_error && !discard_error) {
 424		set_bit(WriteErrorSeen,	&rdev->flags);
 425		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 426			set_bit(MD_RECOVERY_NEEDED, &
 427				conf->mddev->recovery);
 428
 429		if (test_bit(FailFast, &rdev->flags) &&
 430		    (bio->bi_opf & MD_FAILFAST) &&
 431		    /* We never try FailFast to WriteMostly devices */
 432		    !test_bit(WriteMostly, &rdev->flags)) {
 433			md_error(r1_bio->mddev, rdev);
 434			if (!test_bit(Faulty, &rdev->flags))
  435				/* This is the only remaining device,
  436				 * so we need to retry the write
  437				 * without FailFast
 438				 */
 439				set_bit(R1BIO_WriteError, &r1_bio->state);
 440			else {
 441				/* Finished with this branch */
 442				r1_bio->bios[mirror] = NULL;
 443				to_put = bio;
 444			}
 445		} else
 446			set_bit(R1BIO_WriteError, &r1_bio->state);
 447	} else {
 448		/*
 449		 * Set R1BIO_Uptodate in our master bio, so that we
  450		 * will return a good error code to the higher
 451		 * levels even if IO on some other mirrored buffer
 452		 * fails.
 453		 *
 454		 * The 'master' represents the composite IO operation
 455		 * to user-side. So if something waits for IO, then it
 456		 * will wait for the 'master' bio.
 457		 */
 458		sector_t first_bad;
 459		int bad_sectors;
 460
 461		r1_bio->bios[mirror] = NULL;
 462		to_put = bio;
 463		/*
 464		 * Do not set R1BIO_Uptodate if the current device is
 465		 * rebuilding or Faulty. This is because we cannot use
 466		 * such device for properly reading the data back (we could
  467		 * potentially use it, if the current write would have fallen
  468		 * before rdev->recovery_offset, but for simplicity we don't
  469		 * check this here).
 470		 */
 471		if (test_bit(In_sync, &rdev->flags) &&
 472		    !test_bit(Faulty, &rdev->flags))
 473			set_bit(R1BIO_Uptodate, &r1_bio->state);
 474
 475		/* Maybe we can clear some bad blocks. */
 476		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
 477				&first_bad, &bad_sectors) && !discard_error) {
 478			r1_bio->bios[mirror] = IO_MADE_GOOD;
 479			set_bit(R1BIO_MadeGood, &r1_bio->state);
 480		}
 481	}
 482
 483	if (behind) {
 484		if (test_bit(WriteMostly, &rdev->flags))
 485			atomic_dec(&r1_bio->behind_remaining);
 486
 487		/*
 488		 * In behind mode, we ACK the master bio once the I/O
 489		 * has safely reached all non-writemostly
 490		 * disks. Setting the Returned bit ensures that this
 491		 * gets done only once -- we don't ever want to return
 492		 * -EIO here, instead we'll wait
 493		 */
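		/*
		 * For example, with three mirrors of which one is
		 * write-mostly: once both fast writes have completed,
		 * behind_remaining only counts the write-mostly device,
		 * the test below succeeds, and the master bio is
		 * acknowledged while the write-behind IO is still in
		 * flight.
		 */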
 494		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
 495		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
 496			/* Maybe we can return now */
 497			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 498				struct bio *mbio = r1_bio->master_bio;
 499				pr_debug("raid1: behind end write sectors"
 500					 " %llu-%llu\n",
 501					 (unsigned long long) mbio->bi_iter.bi_sector,
 502					 (unsigned long long) bio_end_sector(mbio) - 1);
 503				call_bio_endio(r1_bio);
 504			}
 505		}
 506	}
 507	if (r1_bio->bios[mirror] == NULL)
 508		rdev_dec_pending(rdev, conf->mddev);
 509
 510	/*
 511	 * Let's see if all mirrored write operations have finished
 512	 * already.
 513	 */
 514	r1_bio_write_done(r1_bio);
 515
 516	if (to_put)
 517		bio_put(to_put);
 518}
 519
 520/*
 521 * This routine returns the disk from which the requested read should
 522 * be done. There is a per-array 'next expected sequential IO' sector
 523 * number - if this matches on the next IO then we use the last disk.
  524 * There is also a per-disk 'last known head position' sector that is
 525 * maintained from IRQ contexts, both the normal and the resync IO
 526 * completion handlers update this position correctly. If there is no
 527 * perfect sequential match then we pick the disk whose head is closest.
 528 *
 529 * If there are 2 mirrors in the same 2 devices, performance degrades
 530 * because position is mirror, not device based.
 531 *
 532 * The rdev for the device selected will have nr_pending incremented.
 533 */
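/*
 * For example, with two healthy rotational mirrors a long sequential
 * stream keeps matching next_seq_sect and stays on one disk, while random
 * reads go to whichever head is closer; if any member is non-rotational,
 * the disk with the fewest pending requests is preferred instead.
 */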
 534static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 535{
 536	const sector_t this_sector = r1_bio->sector;
 537	int sectors;
 538	int best_good_sectors;
 539	int best_disk, best_dist_disk, best_pending_disk;
 540	int has_nonrot_disk;
 541	int disk;
 542	sector_t best_dist;
 543	unsigned int min_pending;
 544	struct md_rdev *rdev;
 545	int choose_first;
 546	int choose_next_idle;
 547
 548	rcu_read_lock();
 549	/*
 550	 * Check if we can balance. We can balance on the whole
 551	 * device if no resync is going on, or below the resync window.
 552	 * We take the first readable disk when above the resync window.
 553	 */
 554 retry:
 555	sectors = r1_bio->sectors;
 556	best_disk = -1;
 557	best_dist_disk = -1;
 558	best_dist = MaxSector;
 559	best_pending_disk = -1;
 560	min_pending = UINT_MAX;
 561	best_good_sectors = 0;
 562	has_nonrot_disk = 0;
 563	choose_next_idle = 0;
 564	clear_bit(R1BIO_FailFast, &r1_bio->state);
 565
 566	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 567	    (mddev_is_clustered(conf->mddev) &&
 568	    md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 569		    this_sector + sectors)))
 570		choose_first = 1;
 571	else
 572		choose_first = 0;
 573
 574	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
 575		sector_t dist;
 576		sector_t first_bad;
 577		int bad_sectors;
 578		unsigned int pending;
 579		bool nonrot;
 580
 581		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 582		if (r1_bio->bios[disk] == IO_BLOCKED
 583		    || rdev == NULL
 584		    || test_bit(Faulty, &rdev->flags))
 585			continue;
 586		if (!test_bit(In_sync, &rdev->flags) &&
 587		    rdev->recovery_offset < this_sector + sectors)
 588			continue;
 589		if (test_bit(WriteMostly, &rdev->flags)) {
 590			/* Don't balance among write-mostly, just
 591			 * use the first as a last resort */
 592			if (best_dist_disk < 0) {
 593				if (is_badblock(rdev, this_sector, sectors,
 594						&first_bad, &bad_sectors)) {
 595					if (first_bad <= this_sector)
 596						/* Cannot use this */
 597						continue;
 598					best_good_sectors = first_bad - this_sector;
 599				} else
 600					best_good_sectors = sectors;
 601				best_dist_disk = disk;
 602				best_pending_disk = disk;
 603			}
 604			continue;
 605		}
 606		/* This is a reasonable device to use.  It might
 607		 * even be best.
 608		 */
 609		if (is_badblock(rdev, this_sector, sectors,
 610				&first_bad, &bad_sectors)) {
 611			if (best_dist < MaxSector)
 612				/* already have a better device */
 613				continue;
 614			if (first_bad <= this_sector) {
 615				/* cannot read here. If this is the 'primary'
 616				 * device, then we must not read beyond
 617				 * bad_sectors from another device..
 618				 */
 619				bad_sectors -= (this_sector - first_bad);
 620				if (choose_first && sectors > bad_sectors)
 621					sectors = bad_sectors;
 622				if (best_good_sectors > sectors)
 623					best_good_sectors = sectors;
 624
 625			} else {
 626				sector_t good_sectors = first_bad - this_sector;
 627				if (good_sectors > best_good_sectors) {
 628					best_good_sectors = good_sectors;
 629					best_disk = disk;
 630				}
 631				if (choose_first)
 632					break;
 633			}
 634			continue;
 635		} else
 636			best_good_sectors = sectors;
 637
 638		if (best_disk >= 0)
 639			/* At least two disks to choose from so failfast is OK */
 640			set_bit(R1BIO_FailFast, &r1_bio->state);
 641
 642		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 643		has_nonrot_disk |= nonrot;
 644		pending = atomic_read(&rdev->nr_pending);
 645		dist = abs(this_sector - conf->mirrors[disk].head_position);
 646		if (choose_first) {
 647			best_disk = disk;
 648			break;
 649		}
 650		/* Don't change to another disk for sequential reads */
 651		if (conf->mirrors[disk].next_seq_sect == this_sector
 652		    || dist == 0) {
 653			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
 654			struct raid1_info *mirror = &conf->mirrors[disk];
 655
 656			best_disk = disk;
 657			/*
  658			 * If the buffered sequential IO size exceeds the
  659			 * optimal iosize, check whether there is an idle disk
  660			 * and, if so, choose it. read_balance could already
  661			 * have chosen an idle disk before noticing that this
  662			 * is sequential IO on the current disk. That doesn't
  663			 * matter: this disk will go idle and be used again
  664			 * once the first disk's IO size exceeds the optimal
  665			 * iosize. This way the first disk gets at least the
  666			 * optimal iosize per IO. The second disk's iosize may
  667			 * be small, but that is no big deal since when the
  668			 * second disk starts IO the first is likely still busy.
 669			 */
 670			if (nonrot && opt_iosize > 0 &&
 671			    mirror->seq_start != MaxSector &&
 672			    mirror->next_seq_sect > opt_iosize &&
 673			    mirror->next_seq_sect - opt_iosize >=
 674			    mirror->seq_start) {
 675				choose_next_idle = 1;
 676				continue;
 677			}
 678			break;
 679		}
 680
 681		if (choose_next_idle)
 682			continue;
 683
 684		if (min_pending > pending) {
 685			min_pending = pending;
 686			best_pending_disk = disk;
 687		}
 688
 689		if (dist < best_dist) {
 690			best_dist = dist;
 691			best_dist_disk = disk;
 692		}
 693	}
 694
 695	/*
 696	 * If all disks are rotational, choose the closest disk. If any disk is
  697 * non-rotational, choose the disk with the fewest pending requests even
  698 * if that disk is rotational, which may or may not be optimal for arrays
  699 * with mixed rotational/non-rotational disks depending on the workload.
 700	 */
 701	if (best_disk == -1) {
 702		if (has_nonrot_disk || min_pending == 0)
 703			best_disk = best_pending_disk;
 704		else
 705			best_disk = best_dist_disk;
 706	}
 707
 708	if (best_disk >= 0) {
 709		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
 710		if (!rdev)
 711			goto retry;
 712		atomic_inc(&rdev->nr_pending);
 713		sectors = best_good_sectors;
 714
 715		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
 716			conf->mirrors[best_disk].seq_start = this_sector;
 717
 718		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
 719	}
 720	rcu_read_unlock();
 721	*max_sectors = sectors;
 722
 723	return best_disk;
 724}
 725
 726static int raid1_congested(struct mddev *mddev, int bits)
 727{
 728	struct r1conf *conf = mddev->private;
 729	int i, ret = 0;
 730
 731	if ((bits & (1 << WB_async_congested)) &&
 732	    conf->pending_count >= max_queued_requests)
 733		return 1;
 734
 735	rcu_read_lock();
 736	for (i = 0; i < conf->raid_disks * 2; i++) {
 737		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 738		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 739			struct request_queue *q = bdev_get_queue(rdev->bdev);
 740
 741			BUG_ON(!q);
 742
 743			/* Note the '|| 1' - when read_balance prefers
 744			 * non-congested targets, it can be removed
 745			 */
 746			if ((bits & (1 << WB_async_congested)) || 1)
 747				ret |= bdi_congested(&q->backing_dev_info, bits);
 748			else
 749				ret &= bdi_congested(&q->backing_dev_info, bits);
 750		}
 751	}
 752	rcu_read_unlock();
 753	return ret;
 754}
 755
 756static void flush_pending_writes(struct r1conf *conf)
 757{
 758	/* Any writes that have been queued but are awaiting
 759	 * bitmap updates get flushed here.
 760	 */
 761	spin_lock_irq(&conf->device_lock);
 762
 763	if (conf->pending_bio_list.head) {
 764		struct bio *bio;
 765		bio = bio_list_get(&conf->pending_bio_list);
 766		conf->pending_count = 0;
 767		spin_unlock_irq(&conf->device_lock);
 768		/* flush any pending bitmap writes to
 769		 * disk before proceeding w/ I/O */
 770		bitmap_unplug(conf->mddev->bitmap);
 771		wake_up(&conf->wait_barrier);
 772
 773		while (bio) { /* submit pending writes */
 774			struct bio *next = bio->bi_next;
 775			struct md_rdev *rdev = (void*)bio->bi_bdev;
 776			bio->bi_next = NULL;
 777			bio->bi_bdev = rdev->bdev;
 778			if (test_bit(Faulty, &rdev->flags)) {
 779				bio->bi_error = -EIO;
 780				bio_endio(bio);
 781			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 782					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 783				/* Just ignore it */
 784				bio_endio(bio);
 785			else
 786				generic_make_request(bio);
 787			bio = next;
 788		}
 789	} else
 790		spin_unlock_irq(&conf->device_lock);
 791}
 792
 793/* Barriers....
 794 * Sometimes we need to suspend IO while we do something else,
 795 * either some resync/recovery, or reconfigure the array.
 796 * To do this we raise a 'barrier'.
 797 * The 'barrier' is a counter that can be raised multiple times
 798 * to count how many activities are happening which preclude
 799 * normal IO.
 800 * We can only raise the barrier if there is no pending IO.
 801 * i.e. if nr_pending == 0.
 802 * We choose only to raise the barrier if no-one is waiting for the
 803 * barrier to go down.  This means that as soon as an IO request
 804 * is ready, no other operations which require a barrier will start
 805 * until the IO request has had a chance.
 806 *
 807 * So: regular IO calls 'wait_barrier'.  When that returns there
  808 *    is no background IO happening.  It must arrange to call
  809 *    allow_barrier when it has finished its IO.
  810 * background IO calls must call raise_barrier.  Once that returns
  811 *    there is no normal IO happening.  It must arrange to call
 812 *    lower_barrier when the particular background IO completes.
 813 */
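/*
 * In short: regular IO brackets itself with wait_barrier()/allow_barrier()
 * and is counted in nr_pending, while resync/recovery brackets each window
 * with raise_barrier()/lower_barrier() and is counted in barrier.
 */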
 814static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 815{
 816	spin_lock_irq(&conf->resync_lock);
 817
 818	/* Wait until no block IO is waiting */
 819	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
 820			    conf->resync_lock);
 821
 822	/* block any new IO from starting */
 823	conf->barrier++;
 824	conf->next_resync = sector_nr;
 825
 826	/* For these conditions we must wait:
 827	 * A: while the array is in frozen state
  828 * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
  829 *    the maximum count allowed.
  830 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning the
  831 *    next resync will reach into the window which normal bios are
  832 *    handling.
 833	 * D: while there are any active requests in the current window.
 834	 */
 835	wait_event_lock_irq(conf->wait_barrier,
 836			    !conf->array_frozen &&
 837			    conf->barrier < RESYNC_DEPTH &&
 838			    conf->current_window_requests == 0 &&
 839			    (conf->start_next_window >=
 840			     conf->next_resync + RESYNC_SECTORS),
 841			    conf->resync_lock);
 842
 843	conf->nr_pending++;
 844	spin_unlock_irq(&conf->resync_lock);
 845}
 846
 847static void lower_barrier(struct r1conf *conf)
 848{
 849	unsigned long flags;
 850	BUG_ON(conf->barrier <= 0);
 851	spin_lock_irqsave(&conf->resync_lock, flags);
 852	conf->barrier--;
 853	conf->nr_pending--;
 854	spin_unlock_irqrestore(&conf->resync_lock, flags);
 855	wake_up(&conf->wait_barrier);
 856}
 857
 858static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
 859{
 860	bool wait = false;
 861
 862	if (conf->array_frozen || !bio)
 863		wait = true;
 864	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
 865		if ((conf->mddev->curr_resync_completed
 866		     >= bio_end_sector(bio)) ||
 867		    (conf->start_next_window + NEXT_NORMALIO_DISTANCE
 868		     <= bio->bi_iter.bi_sector))
 869			wait = false;
 870		else
 871			wait = true;
 872	}
 873
 874	return wait;
 875}
 876
 877static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 878{
 879	sector_t sector = 0;
 880
 881	spin_lock_irq(&conf->resync_lock);
 882	if (need_to_wait_for_sync(conf, bio)) {
 883		conf->nr_waiting++;
 884		/* Wait for the barrier to drop.
 885		 * However if there are already pending
 886		 * requests (preventing the barrier from
 887		 * rising completely), and the
 888		 * per-process bio queue isn't empty,
 889		 * then don't wait, as we need to empty
 890		 * that queue to allow conf->start_next_window
 891		 * to increase.
 892		 */
 893		raid1_log(conf->mddev, "wait barrier");
 894		wait_event_lock_irq(conf->wait_barrier,
 895				    !conf->array_frozen &&
 896				    (!conf->barrier ||
 897				     ((conf->start_next_window <
 898				       conf->next_resync + RESYNC_SECTORS) &&
 899				      current->bio_list &&
 900				      !bio_list_empty(current->bio_list))),
 901				    conf->resync_lock);
 902		conf->nr_waiting--;
 903	}
 904
 905	if (bio && bio_data_dir(bio) == WRITE) {
 906		if (bio->bi_iter.bi_sector >= conf->next_resync) {
 907			if (conf->start_next_window == MaxSector)
 908				conf->start_next_window =
 909					conf->next_resync +
 910					NEXT_NORMALIO_DISTANCE;
 911
 912			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
 913			    <= bio->bi_iter.bi_sector)
 914				conf->next_window_requests++;
 915			else
 916				conf->current_window_requests++;
 917			sector = conf->start_next_window;
 918		}
 919	}
 920
 921	conf->nr_pending++;
 922	spin_unlock_irq(&conf->resync_lock);
 923	return sector;
 924}
 925
 926static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
 927			  sector_t bi_sector)
 928{
 929	unsigned long flags;
 930
 931	spin_lock_irqsave(&conf->resync_lock, flags);
 932	conf->nr_pending--;
 933	if (start_next_window) {
 934		if (start_next_window == conf->start_next_window) {
 935			if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
 936			    <= bi_sector)
 937				conf->next_window_requests--;
 938			else
 939				conf->current_window_requests--;
 940		} else
 941			conf->current_window_requests--;
 942
 943		if (!conf->current_window_requests) {
 944			if (conf->next_window_requests) {
 945				conf->current_window_requests =
 946					conf->next_window_requests;
 947				conf->next_window_requests = 0;
 948				conf->start_next_window +=
 949					NEXT_NORMALIO_DISTANCE;
 950			} else
 951				conf->start_next_window = MaxSector;
 952		}
 953	}
 954	spin_unlock_irqrestore(&conf->resync_lock, flags);
 955	wake_up(&conf->wait_barrier);
 956}
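/*
 * The window counters above implement a hand-over: writes at or beyond
 * start_next_window + NEXT_NORMALIO_DISTANCE are counted as belonging to
 * the next window, and once the current window drains those counts are
 * promoted and start_next_window advances, which lets the wait in
 * raise_barrier() make progress.
 */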
 957
 958static void freeze_array(struct r1conf *conf, int extra)
 959{
 960	/* stop syncio and normal IO and wait for everything to
  961	 * go quiet.
  962	 * We wait until nr_pending matches nr_queued+extra.
 963	 * This is called in the context of one normal IO request
 964	 * that has failed. Thus any sync request that might be pending
 965	 * will be blocked by nr_pending, and we need to wait for
 966	 * pending IO requests to complete or be queued for re-try.
 967	 * Thus the number queued (nr_queued) plus this request (extra)
 968	 * must match the number of pending IOs (nr_pending) before
 969	 * we continue.
 970	 */
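	/*
	 * For example, with three normal IOs in flight and one of them the
	 * failed request being handled (extra == 1): nr_pending is 3, and
	 * the wait below releases once the other two have each completed or
	 * been queued for retry, making nr_pending == nr_queued + 1 hold.
	 */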
 971	spin_lock_irq(&conf->resync_lock);
 972	conf->array_frozen = 1;
 973	raid1_log(conf->mddev, "wait freeze");
 974	wait_event_lock_irq_cmd(conf->wait_barrier,
 975				conf->nr_pending == conf->nr_queued+extra,
 976				conf->resync_lock,
 977				flush_pending_writes(conf));
 978	spin_unlock_irq(&conf->resync_lock);
 979}
 980static void unfreeze_array(struct r1conf *conf)
 981{
 982	/* reverse the effect of the freeze */
 983	spin_lock_irq(&conf->resync_lock);
 984	conf->array_frozen = 0;
 985	wake_up(&conf->wait_barrier);
 986	spin_unlock_irq(&conf->resync_lock);
 987}
 988
 989/* duplicate the data pages for behind I/O
 990 */
 991static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
 992{
 993	int i;
 994	struct bio_vec *bvec;
 995	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
 996					GFP_NOIO);
 997	if (unlikely(!bvecs))
 998		return;
 999
1000	bio_for_each_segment_all(bvec, bio, i) {
1001		bvecs[i] = *bvec;
1002		bvecs[i].bv_page = alloc_page(GFP_NOIO);
1003		if (unlikely(!bvecs[i].bv_page))
1004			goto do_sync_io;
1005		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
1006		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
1007		kunmap(bvecs[i].bv_page);
1008		kunmap(bvec->bv_page);
1009	}
1010	r1_bio->behind_bvecs = bvecs;
1011	r1_bio->behind_page_count = bio->bi_vcnt;
1012	set_bit(R1BIO_BehindIO, &r1_bio->state);
1013	return;
1014
1015do_sync_io:
1016	for (i = 0; i < bio->bi_vcnt; i++)
1017		if (bvecs[i].bv_page)
1018			put_page(bvecs[i].bv_page);
1019	kfree(bvecs);
1020	pr_debug("%dB behind alloc failed, doing sync I/O\n",
1021		 bio->bi_iter.bi_size);
1022}
1023
1024struct raid1_plug_cb {
1025	struct blk_plug_cb	cb;
1026	struct bio_list		pending;
1027	int			pending_cnt;
1028};
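/*
 * Per-task plug context: while the submitting task holds a blk_plug, its
 * raid1 writes are batched on 'pending' and then submitted in one go by
 * raid1_unplug(), either inline or via the raid1d thread.
 */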
1029
1030static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1031{
1032	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1033						  cb);
1034	struct mddev *mddev = plug->cb.data;
1035	struct r1conf *conf = mddev->private;
1036	struct bio *bio;
1037
1038	if (from_schedule || current->bio_list) {
1039		spin_lock_irq(&conf->device_lock);
1040		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1041		conf->pending_count += plug->pending_cnt;
1042		spin_unlock_irq(&conf->device_lock);
1043		wake_up(&conf->wait_barrier);
1044		md_wakeup_thread(mddev->thread);
1045		kfree(plug);
1046		return;
1047	}
1048
1049	/* we aren't scheduling, so we can do the write-out directly. */
1050	bio = bio_list_get(&plug->pending);
1051	bitmap_unplug(mddev->bitmap);
1052	wake_up(&conf->wait_barrier);
1053
1054	while (bio) { /* submit pending writes */
1055		struct bio *next = bio->bi_next;
1056		struct md_rdev *rdev = (void*)bio->bi_bdev;
1057		bio->bi_next = NULL;
1058		bio->bi_bdev = rdev->bdev;
1059		if (test_bit(Faulty, &rdev->flags)) {
1060			bio->bi_error = -EIO;
1061			bio_endio(bio);
1062		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1063				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1064			/* Just ignore it */
1065			bio_endio(bio);
1066		else
1067			generic_make_request(bio);
1068		bio = next;
1069	}
1070	kfree(plug);
1071}
1072
1073static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1074				 struct r1bio *r1_bio)
1075{
1076	struct r1conf *conf = mddev->private;
1077	struct raid1_info *mirror;
1078	struct bio *read_bio;
1079	struct bitmap *bitmap = mddev->bitmap;
1080	const int op = bio_op(bio);
1081	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1082	int sectors_handled;
1083	int max_sectors;
1084	int rdisk;
1085
1086	wait_barrier(conf, bio);
1087
1088read_again:
1089	rdisk = read_balance(conf, r1_bio, &max_sectors);
1090
1091	if (rdisk < 0) {
1092		/* couldn't find anywhere to read from */
1093		raid_end_bio_io(r1_bio);
1094		return;
1095	}
1096	mirror = conf->mirrors + rdisk;
1097
1098	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1099	    bitmap) {
1100		/*
1101		 * Reading from a write-mostly device must take care not to
1102		 * over-take any writes that are 'behind'
1103		 */
1104		raid1_log(mddev, "wait behind writes");
1105		wait_event(bitmap->behind_wait,
1106			   atomic_read(&bitmap->behind_writes) == 0);
1107	}
1108	r1_bio->read_disk = rdisk;
1109	r1_bio->start_next_window = 0;
1110
1111	read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1112	bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1113		 max_sectors);
1114
1115	r1_bio->bios[rdisk] = read_bio;
1116
1117	read_bio->bi_iter.bi_sector = r1_bio->sector +
1118		mirror->rdev->data_offset;
1119	read_bio->bi_bdev = mirror->rdev->bdev;
1120	read_bio->bi_end_io = raid1_end_read_request;
1121	bio_set_op_attrs(read_bio, op, do_sync);
1122	if (test_bit(FailFast, &mirror->rdev->flags) &&
1123	    test_bit(R1BIO_FailFast, &r1_bio->state))
1124	        read_bio->bi_opf |= MD_FAILFAST;
1125	read_bio->bi_private = r1_bio;
1126
1127	if (mddev->gendisk)
1128	        trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1129	                              read_bio, disk_devt(mddev->gendisk),
1130	                              r1_bio->sector);
1131
1132	if (max_sectors < r1_bio->sectors) {
1133		/*
1134		 * could not read all from this device, so we will need another
1135		 * r1_bio.
1136		 */
1137		sectors_handled = (r1_bio->sector + max_sectors
1138				   - bio->bi_iter.bi_sector);
1139		r1_bio->sectors = max_sectors;
1140		spin_lock_irq(&conf->device_lock);
1141		if (bio->bi_phys_segments == 0)
1142			bio->bi_phys_segments = 2;
1143		else
1144			bio->bi_phys_segments++;
1145		spin_unlock_irq(&conf->device_lock);
1146
1147		/*
1148		 * Cannot call generic_make_request directly as that will be
1149		 * queued in __make_request and subsequent mempool_alloc might
1150		 * block waiting for it.  So hand bio over to raid1d.
1151		 */
1152		reschedule_retry(r1_bio);
1153
1154		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1155
1156		r1_bio->master_bio = bio;
1157		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1158		r1_bio->state = 0;
1159		r1_bio->mddev = mddev;
1160		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1161		goto read_again;
1162	} else
1163		generic_make_request(read_bio);
1164}
1165
1166static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1167				struct r1bio *r1_bio)
1168{
1169	struct r1conf *conf = mddev->private;
1170	int i, disks;
1171	struct bitmap *bitmap = mddev->bitmap;
1172	unsigned long flags;
1173	const int op = bio_op(bio);
1174	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1175	const unsigned long do_flush_fua = (bio->bi_opf &
1176						(REQ_PREFLUSH | REQ_FUA));
1177	struct md_rdev *blocked_rdev;
1178	struct blk_plug_cb *cb;
1179	struct raid1_plug_cb *plug = NULL;
1180	int first_clone;
1181	int sectors_handled;
1182	int max_sectors;
1183	sector_t start_next_window;
1184
1185	/*
1186	 * Register the new request and wait if the reconstruction
 1187	 * thread has put up a barrier for new requests.
1188	 * Continue immediately if no resync is active currently.
1189	 */
1190
1191	md_write_start(mddev, bio); /* wait on superblock update early */
1192
1193	if ((bio_end_sector(bio) > mddev->suspend_lo &&
1194	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1195	    (mddev_is_clustered(mddev) &&
1196	     md_cluster_ops->area_resyncing(mddev, WRITE,
1197		     bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
1198
1199		/*
1200		 * As the suspend_* range is controlled by userspace, we want
1201		 * an interruptible wait.
1202		 */
1203		DEFINE_WAIT(w);
1204		for (;;) {
1205			flush_signals(current);
1206			prepare_to_wait(&conf->wait_barrier,
1207					&w, TASK_INTERRUPTIBLE);
1208			if (bio_end_sector(bio) <= mddev->suspend_lo ||
1209			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1210			    (mddev_is_clustered(mddev) &&
1211			     !md_cluster_ops->area_resyncing(mddev, WRITE,
1212				     bio->bi_iter.bi_sector,
1213				     bio_end_sector(bio))))
1214				break;
1215			schedule();
1216		}
1217		finish_wait(&conf->wait_barrier, &w);
1218	}
1219	start_next_window = wait_barrier(conf, bio);
1220
1221	if (conf->pending_count >= max_queued_requests) {
1222		md_wakeup_thread(mddev->thread);
1223		raid1_log(mddev, "wait queued");
1224		wait_event(conf->wait_barrier,
1225			   conf->pending_count < max_queued_requests);
1226	}
1227	/* first select target devices under rcu_lock and
1228	 * inc refcount on their rdev.  Record them by setting
1229	 * bios[x] to bio
1230	 * If there are known/acknowledged bad blocks on any device on
1231	 * which we have seen a write error, we want to avoid writing those
1232	 * blocks.
1233	 * This potentially requires several writes to write around
 1234	 * the bad blocks.  Each set of writes gets its own r1bio
1235	 * with a set of bios attached.
1236	 */
1237
1238	disks = conf->raid_disks * 2;
1239 retry_write:
1240	r1_bio->start_next_window = start_next_window;
1241	blocked_rdev = NULL;
1242	rcu_read_lock();
1243	max_sectors = r1_bio->sectors;
1244	for (i = 0;  i < disks; i++) {
1245		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1246		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1247			atomic_inc(&rdev->nr_pending);
1248			blocked_rdev = rdev;
1249			break;
1250		}
1251		r1_bio->bios[i] = NULL;
1252		if (!rdev || test_bit(Faulty, &rdev->flags)) {
1253			if (i < conf->raid_disks)
1254				set_bit(R1BIO_Degraded, &r1_bio->state);
1255			continue;
1256		}
1257
1258		atomic_inc(&rdev->nr_pending);
1259		if (test_bit(WriteErrorSeen, &rdev->flags)) {
1260			sector_t first_bad;
1261			int bad_sectors;
1262			int is_bad;
1263
1264			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1265					     &first_bad, &bad_sectors);
1266			if (is_bad < 0) {
1267				/* mustn't write here until the bad block is
1268				 * acknowledged*/
1269				set_bit(BlockedBadBlocks, &rdev->flags);
1270				blocked_rdev = rdev;
1271				break;
1272			}
1273			if (is_bad && first_bad <= r1_bio->sector) {
1274				/* Cannot write here at all */
1275				bad_sectors -= (r1_bio->sector - first_bad);
1276				if (bad_sectors < max_sectors)
1277					/* mustn't write more than bad_sectors
1278					 * to other devices yet
1279					 */
1280					max_sectors = bad_sectors;
1281				rdev_dec_pending(rdev, mddev);
1282				/* We don't set R1BIO_Degraded as that
1283				 * only applies if the disk is
1284				 * missing, so it might be re-added,
1285				 * and we want to know to recover this
1286				 * chunk.
1287				 * In this case the device is here,
1288				 * and the fact that this chunk is not
1289				 * in-sync is recorded in the bad
1290				 * block log
1291				 */
1292				continue;
1293			}
1294			if (is_bad) {
1295				int good_sectors = first_bad - r1_bio->sector;
1296				if (good_sectors < max_sectors)
1297					max_sectors = good_sectors;
1298			}
1299		}
1300		r1_bio->bios[i] = bio;
1301	}
1302	rcu_read_unlock();
1303
1304	if (unlikely(blocked_rdev)) {
1305		/* Wait for this device to become unblocked */
1306		int j;
1307		sector_t old = start_next_window;
1308
1309		for (j = 0; j < i; j++)
1310			if (r1_bio->bios[j])
1311				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1312		r1_bio->state = 0;
1313		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
1314		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1315		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1316		start_next_window = wait_barrier(conf, bio);
1317		/*
1318		 * We must make sure the multi r1bios of bio have
1319		 * the same value of bi_phys_segments
1320		 */
1321		if (bio->bi_phys_segments && old &&
1322		    old != start_next_window)
1323			/* Wait for the former r1bio(s) to complete */
1324			wait_event(conf->wait_barrier,
1325				   bio->bi_phys_segments == 1);
1326		goto retry_write;
1327	}
1328
1329	if (max_sectors < r1_bio->sectors) {
1330		/* We are splitting this write into multiple parts, so
1331		 * we need to prepare for allocating another r1_bio.
1332		 */
1333		r1_bio->sectors = max_sectors;
1334		spin_lock_irq(&conf->device_lock);
1335		if (bio->bi_phys_segments == 0)
1336			bio->bi_phys_segments = 2;
1337		else
1338			bio->bi_phys_segments++;
1339		spin_unlock_irq(&conf->device_lock);
1340	}
1341	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
1342
1343	atomic_set(&r1_bio->remaining, 1);
1344	atomic_set(&r1_bio->behind_remaining, 0);
1345
1346	first_clone = 1;
1347	for (i = 0; i < disks; i++) {
1348		struct bio *mbio;
1349		if (!r1_bio->bios[i])
1350			continue;
1351
1352		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1353		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
1354			 max_sectors);
1355
1356		if (first_clone) {
 1357			/* Do behind I/O?
 1358			 * Not if there are too many, or we cannot
 1359			 * allocate memory, or a reader on a WriteMostly
 1360			 * device is waiting for behind writes to flush */
1361			if (bitmap &&
1362			    (atomic_read(&bitmap->behind_writes)
1363			     < mddev->bitmap_info.max_write_behind) &&
1364			    !waitqueue_active(&bitmap->behind_wait))
1365				alloc_behind_pages(mbio, r1_bio);
1366
1367			bitmap_startwrite(bitmap, r1_bio->sector,
1368					  r1_bio->sectors,
1369					  test_bit(R1BIO_BehindIO,
1370						   &r1_bio->state));
1371			first_clone = 0;
1372		}
1373		if (r1_bio->behind_bvecs) {
1374			struct bio_vec *bvec;
1375			int j;
1376
1377			/*
1378			 * We trimmed the bio, so _all is legit
1379			 */
1380			bio_for_each_segment_all(bvec, mbio, j)
1381				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1382			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1383				atomic_inc(&r1_bio->behind_remaining);
1384		}
1385
1386		r1_bio->bios[i] = mbio;
1387
1388		mbio->bi_iter.bi_sector	= (r1_bio->sector +
1389				   conf->mirrors[i].rdev->data_offset);
1390		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1391		mbio->bi_end_io	= raid1_end_write_request;
1392		bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
1393		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1394		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1395		    conf->raid_disks - mddev->degraded > 1)
1396			mbio->bi_opf |= MD_FAILFAST;
1397		mbio->bi_private = r1_bio;
1398
1399		atomic_inc(&r1_bio->remaining);
1400
1401		if (mddev->gendisk)
1402			trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
1403					      mbio, disk_devt(mddev->gendisk),
1404					      r1_bio->sector);
1405		/* flush_pending_writes() needs access to the rdev so...*/
1406		mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
1407
1408		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1409		if (cb)
1410			plug = container_of(cb, struct raid1_plug_cb, cb);
1411		else
1412			plug = NULL;
1413		spin_lock_irqsave(&conf->device_lock, flags);
1414		if (plug) {
1415			bio_list_add(&plug->pending, mbio);
1416			plug->pending_cnt++;
1417		} else {
1418			bio_list_add(&conf->pending_bio_list, mbio);
1419			conf->pending_count++;
1420		}
1421		spin_unlock_irqrestore(&conf->device_lock, flags);
1422		if (!plug)
1423			md_wakeup_thread(mddev->thread);
1424	}
1425	/* Mustn't call r1_bio_write_done before this next test,
1426	 * as it could result in the bio being freed.
1427	 */
1428	if (sectors_handled < bio_sectors(bio)) {
1429		r1_bio_write_done(r1_bio);
1430		/* We need another r1_bio.  It has already been counted
1431		 * in bio->bi_phys_segments
1432		 */
1433		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1434		r1_bio->master_bio = bio;
1435		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1436		r1_bio->state = 0;
1437		r1_bio->mddev = mddev;
1438		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1439		goto retry_write;
1440	}
1441
1442	r1_bio_write_done(r1_bio);
1443
1444	/* In case raid1d snuck in to freeze_array */
1445	wake_up(&conf->wait_barrier);
1446}
1447
1448static void raid1_make_request(struct mddev *mddev, struct bio *bio)
1449{
1450	struct r1conf *conf = mddev->private;
1451	struct r1bio *r1_bio;
1452
1453	/*
1454	 * make_request() can abort the operation when read-ahead is being
1455	 * used and no empty request is available.
1456	 *
1457	 */
1458	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1459
1460	r1_bio->master_bio = bio;
1461	r1_bio->sectors = bio_sectors(bio);
1462	r1_bio->state = 0;
1463	r1_bio->mddev = mddev;
1464	r1_bio->sector = bio->bi_iter.bi_sector;
1465
1466	/*
1467	 * We might need to issue multiple reads to different devices if there
1468	 * are bad blocks around, so we keep track of the number of reads in
1469	 * bio->bi_phys_segments.  If this is 0, there is only one r1_bio and
1470	 * no locking will be needed when requests complete.  If it is
1471	 * non-zero, then it is the number of not-completed requests.
1472	 */
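	/*
	 * As an illustration: a 10-sector read whose first 6 sectors are
	 * the most the chosen disk can serve is handled as two r1_bios,
	 * so bi_phys_segments becomes 2 and each completion decrements it
	 * in call_bio_endio().
	 */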
1473	bio->bi_phys_segments = 0;
1474	bio_clear_flag(bio, BIO_SEG_VALID);
1475
1476	if (bio_data_dir(bio) == READ)
1477		raid1_read_request(mddev, bio, r1_bio);
1478	else
1479		raid1_write_request(mddev, bio, r1_bio);
1480}
1481
1482static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1483{
1484	struct r1conf *conf = mddev->private;
1485	int i;
1486
1487	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1488		   conf->raid_disks - mddev->degraded);
1489	rcu_read_lock();
1490	for (i = 0; i < conf->raid_disks; i++) {
1491		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1492		seq_printf(seq, "%s",
1493			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1494	}
1495	rcu_read_unlock();
1496	seq_printf(seq, "]");
1497}
1498
1499static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1500{
1501	char b[BDEVNAME_SIZE];
1502	struct r1conf *conf = mddev->private;
1503	unsigned long flags;
1504
1505	/*
1506	 * If it is not operational, then we have already marked it as dead
 1507	 * else if it is the last working disk, ignore the error, let the
1508	 * next level up know.
1509	 * else mark the drive as failed
1510	 */
1511	spin_lock_irqsave(&conf->device_lock, flags);
1512	if (test_bit(In_sync, &rdev->flags)
1513	    && (conf->raid_disks - mddev->degraded) == 1) {
1514		/*
1515		 * Don't fail the drive, act as though we were just a
1516		 * normal single drive.
1517		 * However don't try a recovery from this drive as
1518		 * it is very likely to fail.
1519		 */
1520		conf->recovery_disabled = mddev->recovery_disabled;
1521		spin_unlock_irqrestore(&conf->device_lock, flags);
1522		return;
1523	}
1524	set_bit(Blocked, &rdev->flags);
1525	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1526		mddev->degraded++;
1527		set_bit(Faulty, &rdev->flags);
1528	} else
1529		set_bit(Faulty, &rdev->flags);
1530	spin_unlock_irqrestore(&conf->device_lock, flags);
1531	/*
1532	 * if recovery is running, make sure it aborts.
1533	 */
1534	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1535	set_mask_bits(&mddev->sb_flags, 0,
1536		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1537	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1538		"md/raid1:%s: Operation continuing on %d devices.\n",
1539		mdname(mddev), bdevname(rdev->bdev, b),
1540		mdname(mddev), conf->raid_disks - mddev->degraded);
1541}
1542
1543static void print_conf(struct r1conf *conf)
1544{
1545	int i;
1546
1547	pr_debug("RAID1 conf printout:\n");
1548	if (!conf) {
1549		pr_debug("(!conf)\n");
1550		return;
1551	}
1552	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1553		 conf->raid_disks);
1554
1555	rcu_read_lock();
1556	for (i = 0; i < conf->raid_disks; i++) {
1557		char b[BDEVNAME_SIZE];
1558		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1559		if (rdev)
1560			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1561				 i, !test_bit(In_sync, &rdev->flags),
1562				 !test_bit(Faulty, &rdev->flags),
1563				 bdevname(rdev->bdev,b));
1564	}
1565	rcu_read_unlock();
1566}
1567
1568static void close_sync(struct r1conf *conf)
1569{
1570	wait_barrier(conf, NULL);
1571	allow_barrier(conf, 0, 0);
1572
1573	mempool_destroy(conf->r1buf_pool);
1574	conf->r1buf_pool = NULL;
1575
1576	spin_lock_irq(&conf->resync_lock);
1577	conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
1578	conf->start_next_window = MaxSector;
1579	conf->current_window_requests +=
1580		conf->next_window_requests;
1581	conf->next_window_requests = 0;
1582	spin_unlock_irq(&conf->resync_lock);
1583}
1584
1585static int raid1_spare_active(struct mddev *mddev)
1586{
1587	int i;
1588	struct r1conf *conf = mddev->private;
1589	int count = 0;
1590	unsigned long flags;
1591
1592	/*
1593	 * Find all failed disks within the RAID1 configuration
1594	 * and mark them readable.
1595	 * Called under mddev lock, so rcu protection not needed.
1596	 * device_lock used to avoid races with raid1_end_read_request
1597	 * which expects 'In_sync' flags and ->degraded to be consistent.
1598	 */
1599	spin_lock_irqsave(&conf->device_lock, flags);
1600	for (i = 0; i < conf->raid_disks; i++) {
1601		struct md_rdev *rdev = conf->mirrors[i].rdev;
1602		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1603		if (repl
1604		    && !test_bit(Candidate, &repl->flags)
1605		    && repl->recovery_offset == MaxSector
1606		    && !test_bit(Faulty, &repl->flags)
1607		    && !test_and_set_bit(In_sync, &repl->flags)) {
1608			/* replacement has just become active */
1609			if (!rdev ||
1610			    !test_and_clear_bit(In_sync, &rdev->flags))
1611				count++;
1612			if (rdev) {
1613				/* Replaced device not technically
1614				 * faulty, but we need to be sure
1615				 * it gets removed and never re-added
1616				 */
1617				set_bit(Faulty, &rdev->flags);
1618				sysfs_notify_dirent_safe(
1619					rdev->sysfs_state);
1620			}
1621		}
1622		if (rdev
1623		    && rdev->recovery_offset == MaxSector
1624		    && !test_bit(Faulty, &rdev->flags)
1625		    && !test_and_set_bit(In_sync, &rdev->flags)) {
1626			count++;
1627			sysfs_notify_dirent_safe(rdev->sysfs_state);
1628		}
1629	}
1630	mddev->degraded -= count;
1631	spin_unlock_irqrestore(&conf->device_lock, flags);
1632
1633	print_conf(conf);
1634	return count;
1635}
1636
1637static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1638{
1639	struct r1conf *conf = mddev->private;
1640	int err = -EEXIST;
1641	int mirror = 0;
1642	struct raid1_info *p;
1643	int first = 0;
1644	int last = conf->raid_disks - 1;
1645
1646	if (mddev->recovery_disabled == conf->recovery_disabled)
1647		return -EBUSY;
1648
1649	if (md_integrity_add_rdev(rdev, mddev))
1650		return -ENXIO;
1651
1652	if (rdev->raid_disk >= 0)
1653		first = last = rdev->raid_disk;
1654
1655	/*
1656	 * find the disk ... but prefer rdev->saved_raid_disk
1657	 * if possible.
1658	 */
1659	if (rdev->saved_raid_disk >= 0 &&
1660	    rdev->saved_raid_disk >= first &&
1661	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1662		first = last = rdev->saved_raid_disk;
1663
1664	for (mirror = first; mirror <= last; mirror++) {
1665		p = conf->mirrors+mirror;
1666		if (!p->rdev) {
1667
1668			if (mddev->gendisk)
1669				disk_stack_limits(mddev->gendisk, rdev->bdev,
1670						  rdev->data_offset << 9);
1671
1672			p->head_position = 0;
1673			rdev->raid_disk = mirror;
1674			err = 0;
1675			/* As all devices are equivalent, we don't need a full recovery
 1676			 * if this disk was recently a member of the array
1677			 */
1678			if (rdev->saved_raid_disk < 0)
1679				conf->fullsync = 1;
1680			rcu_assign_pointer(p->rdev, rdev);
1681			break;
1682		}
1683		if (test_bit(WantReplacement, &p->rdev->flags) &&
1684		    p[conf->raid_disks].rdev == NULL) {
1685			/* Add this device as a replacement */
1686			clear_bit(In_sync, &rdev->flags);
1687			set_bit(Replacement, &rdev->flags);
1688			rdev->raid_disk = mirror;
1689			err = 0;
1690			conf->fullsync = 1;
1691			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1692			break;
1693		}
1694	}
1695	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1696		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1697	print_conf(conf);
1698	return err;
1699}
1700
1701static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1702{
1703	struct r1conf *conf = mddev->private;
1704	int err = 0;
1705	int number = rdev->raid_disk;
1706	struct raid1_info *p = conf->mirrors + number;
1707
1708	if (rdev != p->rdev)
1709		p = conf->mirrors + conf->raid_disks + number;
1710
1711	print_conf(conf);
1712	if (rdev == p->rdev) {
1713		if (test_bit(In_sync, &rdev->flags) ||
1714		    atomic_read(&rdev->nr_pending)) {
1715			err = -EBUSY;
1716			goto abort;
1717		}
1718		/* Only remove non-faulty devices if recovery
1719		 * is not possible.
1720		 */
1721		if (!test_bit(Faulty, &rdev->flags) &&
1722		    mddev->recovery_disabled != conf->recovery_disabled &&
1723		    mddev->degraded < conf->raid_disks) {
1724			err = -EBUSY;
1725			goto abort;
1726		}
1727		p->rdev = NULL;
1728		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1729			synchronize_rcu();
1730			if (atomic_read(&rdev->nr_pending)) {
1731				/* lost the race, try later */
1732				err = -EBUSY;
1733				p->rdev = rdev;
1734				goto abort;
1735			}
1736		}
1737		if (conf->mirrors[conf->raid_disks + number].rdev) {
1738			/* We just removed a device that is being replaced.
1739			 * Move down the replacement.  We drain all IO before
1740			 * doing this to avoid confusion.
1741			 */
1742			struct md_rdev *repl =
1743				conf->mirrors[conf->raid_disks + number].rdev;
1744			freeze_array(conf, 0);
1745			clear_bit(Replacement, &repl->flags);
1746			p->rdev = repl;
1747			conf->mirrors[conf->raid_disks + number].rdev = NULL;
1748			unfreeze_array(conf);
1749			clear_bit(WantReplacement, &rdev->flags);
1750		} else
1751			clear_bit(WantReplacement, &rdev->flags);
1752		err = md_integrity_register(mddev);
1753	}
1754abort:
1755
1756	print_conf(conf);
1757	return err;
1758}
1759
1760static void end_sync_read(struct bio *bio)
1761{
1762	struct r1bio *r1_bio = bio->bi_private;
1763
1764	update_head_pos(r1_bio->read_disk, r1_bio);
1765
1766	/*
1767	 * we have read a block, now it needs to be re-written,
1768	 * or re-read if the read failed.
1769	 * We don't do much here, just schedule handling by raid1d
1770	 */
1771	if (!bio->bi_error)
1772		set_bit(R1BIO_Uptodate, &r1_bio->state);
1773
1774	if (atomic_dec_and_test(&r1_bio->remaining))
1775		reschedule_retry(r1_bio);
1776}
1777
1778static void end_sync_write(struct bio *bio)
1779{
1780	int uptodate = !bio->bi_error;
1781	struct r1bio *r1_bio = bio->bi_private;
1782	struct mddev *mddev = r1_bio->mddev;
1783	struct r1conf *conf = mddev->private;
1784	sector_t first_bad;
1785	int bad_sectors;
1786	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1787
1788	if (!uptodate) {
1789		sector_t sync_blocks = 0;
1790		sector_t s = r1_bio->sector;
1791		long sectors_to_go = r1_bio->sectors;
 1792		/* make sure these bits don't get cleared. */
1793		do {
1794			bitmap_end_sync(mddev->bitmap, s,
1795					&sync_blocks, 1);
1796			s += sync_blocks;
1797			sectors_to_go -= sync_blocks;
1798		} while (sectors_to_go > 0);
1799		set_bit(WriteErrorSeen, &rdev->flags);
1800		if (!test_and_set_bit(WantReplacement, &rdev->flags))
1801			set_bit(MD_RECOVERY_NEEDED, &
1802				mddev->recovery);
1803		set_bit(R1BIO_WriteError, &r1_bio->state);
1804	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1805			       &first_bad, &bad_sectors) &&
1806		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1807				r1_bio->sector,
1808				r1_bio->sectors,
1809				&first_bad, &bad_sectors)
1810		)
1811		set_bit(R1BIO_MadeGood, &r1_bio->state);
1812
1813	if (atomic_dec_and_test(&r1_bio->remaining)) {
1814		int s = r1_bio->sectors;
1815		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1816		    test_bit(R1BIO_WriteError, &r1_bio->state))
1817			reschedule_retry(r1_bio);
1818		else {
1819			put_buf(r1_bio);
1820			md_done_sync(mddev, s, uptodate);
1821		}
1822	}
1823}
1824
1825static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1826			    int sectors, struct page *page, int rw)
1827{
1828	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1829		/* success */
1830		return 1;
1831	if (rw == WRITE) {
1832		set_bit(WriteErrorSeen, &rdev->flags);
1833		if (!test_and_set_bit(WantReplacement,
1834				      &rdev->flags))
1835			set_bit(MD_RECOVERY_NEEDED, &
1836				rdev->mddev->recovery);
1837	}
1838	/* need to record an error - either for the block or the device */
1839	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1840		md_error(rdev->mddev, rdev);
1841	return 0;
1842}
1843
1844static int fix_sync_read_error(struct r1bio *r1_bio)
1845{
1846	/* Try some synchronous reads of other devices to get
1847	 * good data, much like with normal read errors.  Only
1848	 * read into the pages we already have so we don't
1849	 * need to re-issue the read request.
1850	 * We don't need to freeze the array, because being in an
1851	 * active sync request, there is no normal IO, and
1852	 * no overlapping syncs.
1853	 * We don't need to check is_badblock() again as we
1854	 * made sure that anything with a bad block in range
1855	 * will have bi_end_io clear.
1856	 */
1857	struct mddev *mddev = r1_bio->mddev;
1858	struct r1conf *conf = mddev->private;
1859	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1860	sector_t sect = r1_bio->sector;
1861	int sectors = r1_bio->sectors;
1862	int idx = 0;
1863	struct md_rdev *rdev;
1864
1865	rdev = conf->mirrors[r1_bio->read_disk].rdev;
1866	if (test_bit(FailFast, &rdev->flags)) {
1867		/* Don't try recovering from here - just fail it
1868		 * ... unless it is the last working device of course */
1869		md_error(mddev, rdev);
1870		if (test_bit(Faulty, &rdev->flags))
1871			/* Don't try to read from here, but make sure
1872			 * put_buf does its thing
1873			 */
1874			bio->bi_end_io = end_sync_write;
1875	}
1876
1877	while (sectors) {
1878		int s = sectors;
1879		int d = r1_bio->read_disk;
1880		int success = 0;
1881		int start;
1882
1883		if (s > (PAGE_SIZE >> 9))
1884			s = PAGE_SIZE >> 9;
1885		do {
1886			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1887				/* No rcu protection needed here; devices
1888				 * can only be removed when no resync is
1889				 * active, and resync is currently active.
1890				 */
1891				rdev = conf->mirrors[d].rdev;
1892				if (sync_page_io(rdev, sect, s<<9,
1893						 bio->bi_io_vec[idx].bv_page,
1894						 REQ_OP_READ, 0, false)) {
1895					success = 1;
1896					break;
1897				}
1898			}
1899			d++;
1900			if (d == conf->raid_disks * 2)
1901				d = 0;
1902		} while (!success && d != r1_bio->read_disk);
1903
1904		if (!success) {
1905			char b[BDEVNAME_SIZE];
1906			int abort = 0;
1907			/* Cannot read from anywhere, this block is lost.
1908			 * Record a bad block on each device.  If that doesn't
1909			 * work just disable and interrupt the recovery.
1910			 * Don't fail devices as that won't really help.
1911			 */
1912			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1913					    mdname(mddev),
1914					    bdevname(bio->bi_bdev, b),
1915					    (unsigned long long)r1_bio->sector);
1916			for (d = 0; d < conf->raid_disks * 2; d++) {
1917				rdev = conf->mirrors[d].rdev;
1918				if (!rdev || test_bit(Faulty, &rdev->flags))
1919					continue;
1920				if (!rdev_set_badblocks(rdev, sect, s, 0))
1921					abort = 1;
1922			}
1923			if (abort) {
1924				conf->recovery_disabled =
1925					mddev->recovery_disabled;
1926				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1927				md_done_sync(mddev, r1_bio->sectors, 0);
1928				put_buf(r1_bio);
1929				return 0;
1930			}
1931			/* Try next page */
1932			sectors -= s;
1933			sect += s;
1934			idx++;
1935			continue;
1936		}
1937
1938		start = d;
1939		/* write it back and re-read */
1940		while (d != r1_bio->read_disk) {
1941			if (d == 0)
1942				d = conf->raid_disks * 2;
1943			d--;
1944			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1945				continue;
1946			rdev = conf->mirrors[d].rdev;
1947			if (r1_sync_page_io(rdev, sect, s,
1948					    bio->bi_io_vec[idx].bv_page,
1949					    WRITE) == 0) {
1950				r1_bio->bios[d]->bi_end_io = NULL;
1951				rdev_dec_pending(rdev, mddev);
1952			}
1953		}
1954		d = start;
1955		while (d != r1_bio->read_disk) {
1956			if (d == 0)
1957				d = conf->raid_disks * 2;
1958			d--;
1959			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1960				continue;
1961			rdev = conf->mirrors[d].rdev;
1962			if (r1_sync_page_io(rdev, sect, s,
1963					    bio->bi_io_vec[idx].bv_page,
1964					    READ) != 0)
1965				atomic_add(s, &rdev->corrected_errors);
1966		}
1967		sectors -= s;
1968		sect += s;
1969		idx++;
1970	}
1971	set_bit(R1BIO_Uptodate, &r1_bio->state);
1972	bio->bi_error = 0;
1973	return 1;
1974}
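/*
 * A stand-alone sketch of the wrap-around mirror scan used above (and
 * again in fix_read_error() later in this file); "start_disk" and
 * try_device() are hypothetical stand-ins for r1_bio->read_disk and
 * the actual I/O attempt:
 */
#if 0
static void demo_scan(struct r1conf *conf, int start_disk)
{
	int d = start_disk;
	int success = 0;

	do {
		success = try_device(d);
		d++;
		if (d == conf->raid_disks * 2)
			d = 0;			/* wrap past the last slot */
	} while (!success && d != start_disk);
}
#endif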
1975
1976static void process_checks(struct r1bio *r1_bio)
1977{
1978	/* We have read all readable devices.  If we haven't
1979	 * got the block, then there is no hope left.
1980	 * If we have, then we want to do a comparison
1981	 * and skip the write if everything is the same.
1982	 * If any blocks failed to read, then we need to
1983	 * attempt an over-write
1984	 * attempt an overwrite.
1985	struct mddev *mddev = r1_bio->mddev;
1986	struct r1conf *conf = mddev->private;
1987	int primary;
1988	int i;
1989	int vcnt;
1990
1991	/* Fix variable parts of all bios */
1992	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
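	/*
	 * The line above rounds the request up to whole pages: with
	 * 4 KiB pages, PAGE_SIZE / 512 == 8 and PAGE_SHIFT - 9 == 3,
	 * so e.g. 17 sectors become (17 + 8 - 1) >> 3 == 3 pages.
	 */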
1993	for (i = 0; i < conf->raid_disks * 2; i++) {
1994		int j;
1995		int size;
1996		int error;
1997		struct bio *b = r1_bio->bios[i];
1998		if (b->bi_end_io != end_sync_read)
1999			continue;
2000		/* fixup the bio for reuse, but preserve errno */
2001		error = b->bi_error;
2002		bio_reset(b);
2003		b->bi_error = error;
2004		b->bi_vcnt = vcnt;
2005		b->bi_iter.bi_size = r1_bio->sectors << 9;
2006		b->bi_iter.bi_sector = r1_bio->sector +
2007			conf->mirrors[i].rdev->data_offset;
2008		b->bi_bdev = conf->mirrors[i].rdev->bdev;
2009		b->bi_end_io = end_sync_read;
2010		b->bi_private = r1_bio;
2011
2012		size = b->bi_iter.bi_size;
2013		for (j = 0; j < vcnt ; j++) {
2014			struct bio_vec *bi;
2015			bi = &b->bi_io_vec[j];
2016			bi->bv_offset = 0;
2017			if (size > PAGE_SIZE)
2018				bi->bv_len = PAGE_SIZE;
2019			else
2020				bi->bv_len = size;
2021			size -= PAGE_SIZE;
2022		}
2023	}
2024	for (primary = 0; primary < conf->raid_disks * 2; primary++)
2025		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2026		    !r1_bio->bios[primary]->bi_error) {
2027			r1_bio->bios[primary]->bi_end_io = NULL;
2028			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2029			break;
2030		}
2031	r1_bio->read_disk = primary;
2032	for (i = 0; i < conf->raid_disks * 2; i++) {
2033		int j;
2034		struct bio *pbio = r1_bio->bios[primary];
2035		struct bio *sbio = r1_bio->bios[i];
2036		int error = sbio->bi_error;
2037
2038		if (sbio->bi_end_io != end_sync_read)
2039			continue;
2040		/* Now we can 'fixup' the error value */
2041		sbio->bi_error = 0;
2042
2043		if (!error) {
2044			for (j = vcnt; j-- ; ) {
2045				struct page *p, *s;
2046				p = pbio->bi_io_vec[j].bv_page;
2047				s = sbio->bi_io_vec[j].bv_page;
2048				if (memcmp(page_address(p),
2049					   page_address(s),
2050					   sbio->bi_io_vec[j].bv_len))
2051					break;
2052			}
2053		} else
2054			j = 0;
2055		if (j >= 0)
2056			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2057		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2058			      && !error)) {
2059			/* No need to write to this device. */
2060			sbio->bi_end_io = NULL;
2061			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2062			continue;
2063		}
2064
2065		bio_copy_data(sbio, pbio);
2066	}
2067}
2068
2069static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2070{
2071	struct r1conf *conf = mddev->private;
2072	int i;
2073	int disks = conf->raid_disks * 2;
2074	struct bio *bio, *wbio;
2075
2076	bio = r1_bio->bios[r1_bio->read_disk];
2077
2078	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2079		/* ouch - failed to read all of that. */
2080		if (!fix_sync_read_error(r1_bio))
2081			return;
2082
2083	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2084		process_checks(r1_bio);
2085
2086	/*
2087	 * schedule writes
2088	 */
2089	atomic_set(&r1_bio->remaining, 1);
2090	for (i = 0; i < disks ; i++) {
2091		wbio = r1_bio->bios[i];
2092		if (wbio->bi_end_io == NULL ||
2093		    (wbio->bi_end_io == end_sync_read &&
2094		     (i == r1_bio->read_disk ||
2095		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2096			continue;
2097
2098		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2099		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2100			wbio->bi_opf |= MD_FAILFAST;
2101
2102		wbio->bi_end_io = end_sync_write;
2103		atomic_inc(&r1_bio->remaining);
2104		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2105
2106		generic_make_request(wbio);
2107	}
2108
2109	if (atomic_dec_and_test(&r1_bio->remaining)) {
2110		/* if we're here, all write(s) have completed, so clean up */
2111		int s = r1_bio->sectors;
2112		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2113		    test_bit(R1BIO_WriteError, &r1_bio->state))
2114			reschedule_retry(r1_bio);
2115		else {
2116			put_buf(r1_bio);
2117			md_done_sync(mddev, s, 1);
2118		}
2119	}
2120}
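/*
 * The "remaining" accounting in sync_request_write() is the usual bias
 * trick: the counter starts at 1, each submitted write takes a
 * reference, and the initial bias is dropped at the end, so the
 * completion path runs exactly once however the writes interleave.
 * A minimal sketch of the pattern ("demo_fanout" and finish() are
 * hypothetical stand-ins, finish() playing the role of
 * put_buf()/md_done_sync()):
 */
#if 0
static void demo_fanout(struct r1bio *r1_bio, int disks)
{
	int i;

	atomic_set(&r1_bio->remaining, 1);		/* bias reference */
	for (i = 0; i < disks; i++) {
		atomic_inc(&r1_bio->remaining);
		generic_make_request(r1_bio->bios[i]);
	}
	if (atomic_dec_and_test(&r1_bio->remaining))	/* drop the bias */
		finish(r1_bio);
}
#endif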
2121
2122/*
2123 * This is a kernel thread which:
2124 *
2125 *	1.	Retries failed read operations on working mirrors.
2126 *	2.	Updates the raid superblock when problems are encountered.
2127 *	3.	Performs writes following reads for array synchronising.
2128 */
2129
2130static void fix_read_error(struct r1conf *conf, int read_disk,
2131			   sector_t sect, int sectors)
2132{
2133	struct mddev *mddev = conf->mddev;
2134	while (sectors) {
2135		int s = sectors;
2136		int d = read_disk;
2137		int success = 0;
2138		int start;
2139		struct md_rdev *rdev;
2140
2141		if (s > (PAGE_SIZE >> 9))
2142			s = PAGE_SIZE >> 9;
2143
2144		do {
2145			sector_t first_bad;
2146			int bad_sectors;
2147
2148			rcu_read_lock();
2149			rdev = rcu_dereference(conf->mirrors[d].rdev);
2150			if (rdev &&
2151			    (test_bit(In_sync, &rdev->flags) ||
2152			     (!test_bit(Faulty, &rdev->flags) &&
2153			      rdev->recovery_offset >= sect + s)) &&
2154			    is_badblock(rdev, sect, s,
2155					&first_bad, &bad_sectors) == 0) {
2156				atomic_inc(&rdev->nr_pending);
2157				rcu_read_unlock();
2158				if (sync_page_io(rdev, sect, s<<9,
2159					 conf->tmppage, REQ_OP_READ, 0, false))
2160					success = 1;
2161				rdev_dec_pending(rdev, mddev);
2162				if (success)
2163					break;
2164			} else
2165				rcu_read_unlock();
2166			d++;
2167			if (d == conf->raid_disks * 2)
2168				d = 0;
2169		} while (!success && d != read_disk);
2170
2171		if (!success) {
2172			/* Cannot read from anywhere - mark it bad */
2173			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2174			if (!rdev_set_badblocks(rdev, sect, s, 0))
2175				md_error(mddev, rdev);
2176			break;
2177		}
2178		/* write it back and re-read */
2179		start = d;
2180		while (d != read_disk) {
2181			if (d == 0)
2182				d = conf->raid_disks * 2;
2183			d--;
2184			rcu_read_lock();
2185			rdev = rcu_dereference(conf->mirrors[d].rdev);
2186			if (rdev &&
2187			    !test_bit(Faulty, &rdev->flags)) {
2188				atomic_inc(&rdev->nr_pending);
2189				rcu_read_unlock();
2190				r1_sync_page_io(rdev, sect, s,
2191						conf->tmppage, WRITE);
2192				rdev_dec_pending(rdev, mddev);
2193			} else
2194				rcu_read_unlock();
2195		}
2196		d = start;
2197		while (d != read_disk) {
2198			char b[BDEVNAME_SIZE];
2199			if (d == 0)
2200				d = conf->raid_disks * 2;
2201			d--;
2202			rcu_read_lock();
2203			rdev = rcu_dereference(conf->mirrors[d].rdev);
2204			if (rdev &&
2205			    !test_bit(Faulty, &rdev->flags)) {
2206				atomic_inc(&rdev->nr_pending);
2207				rcu_read_unlock();
2208				if (r1_sync_page_io(rdev, sect, s,
2209						    conf->tmppage, READ)) {
2210					atomic_add(s, &rdev->corrected_errors);
2211					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2212						mdname(mddev), s,
2213						(unsigned long long)(sect +
2214								     rdev->data_offset),
2215						bdevname(rdev->bdev, b));
2216				}
2217				rdev_dec_pending(rdev, mddev);
2218			} else
2219				rcu_read_unlock();
2220		}
2221		sectors -= s;
2222		sect += s;
2223	}
2224}
2225
2226static int narrow_write_error(struct r1bio *r1_bio, int i)
2227{
2228	struct mddev *mddev = r1_bio->mddev;
2229	struct r1conf *conf = mddev->private;
2230	struct md_rdev *rdev = conf->mirrors[i].rdev;
2231
2232	/* bio has the data to be written to device 'i' where
2233	 * we just recently had a write error.
2234	 * We repeatedly clone the bio and trim down to one block,
2235	 * then try the write.  Where the write fails we record
2236	 * a bad block.
2237	 * It is conceivable that the bio doesn't exactly align with
2238	 * blocks.  We must handle this somehow.
2239	 *
2240	 * We currently own a reference on the rdev.
2241	 */
2242
2243	int block_sectors;
2244	sector_t sector;
2245	int sectors;
2246	int sect_to_write = r1_bio->sectors;
2247	int ok = 1;
2248
2249	if (rdev->badblocks.shift < 0)
2250		return 0;
2251
2252	block_sectors = roundup(1 << rdev->badblocks.shift,
2253				bdev_logical_block_size(rdev->bdev) >> 9);
2254	sector = r1_bio->sector;
2255	sectors = ((sector + block_sectors)
2256		   & ~(sector_t)(block_sectors - 1))
2257		- sector;
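	/*
	 * Worked example of the alignment above, assuming
	 * block_sectors == 8 and r1_bio->sector == 1234: the first pass
	 * covers ((1234 + 8) & ~7) - 1234 == 6 sectors, reaching the
	 * next 8-sector boundary at 1240; every later pass then covers
	 * a whole block_sectors-sized block.
	 */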
2258
2259	while (sect_to_write) {
2260		struct bio *wbio;
2261		if (sectors > sect_to_write)
2262			sectors = sect_to_write;
2263		/* Write at 'sector' for 'sectors'*/
2264
2265		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2266			unsigned vcnt = r1_bio->behind_page_count;
2267			struct bio_vec *vec = r1_bio->behind_bvecs;
2268
2269			while (!vec->bv_page) {
2270				vec++;
2271				vcnt--;
2272			}
2273
2274			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
2275			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
2276
2277			wbio->bi_vcnt = vcnt;
2278		} else {
2279			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2280		}
2281
2282		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2283		wbio->bi_iter.bi_sector = r1_bio->sector;
2284		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2285
2286		bio_trim(wbio, sector - r1_bio->sector, sectors);
2287		wbio->bi_iter.bi_sector += rdev->data_offset;
2288		wbio->bi_bdev = rdev->bdev;
2289
2290		if (submit_bio_wait(wbio) < 0)
2291			/* failure! */
2292			ok = rdev_set_badblocks(rdev, sector,
2293						sectors, 0)
2294				&& ok;
2295
2296		bio_put(wbio);
2297		sect_to_write -= sectors;
2298		sector += sectors;
2299		sectors = block_sectors;
2300	}
2301	return ok;
2302}
2303
2304static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2305{
2306	int m;
2307	int s = r1_bio->sectors;
2308	for (m = 0; m < conf->raid_disks * 2 ; m++) {
2309		struct md_rdev *rdev = conf->mirrors[m].rdev;
2310		struct bio *bio = r1_bio->bios[m];
2311		if (bio->bi_end_io == NULL)
2312			continue;
2313		if (!bio->bi_error &&
2314		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2315			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2316		}
2317		if (bio->bi_error &&
2318		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
2319			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2320				md_error(conf->mddev, rdev);
2321		}
2322	}
2323	put_buf(r1_bio);
2324	md_done_sync(conf->mddev, s, 1);
2325}
2326
2327static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2328{
2329	int m;
2330	bool fail = false;
2331	for (m = 0; m < conf->raid_disks * 2 ; m++)
2332		if (r1_bio->bios[m] == IO_MADE_GOOD) {
2333			struct md_rdev *rdev = conf->mirrors[m].rdev;
2334			rdev_clear_badblocks(rdev,
2335					     r1_bio->sector,
2336					     r1_bio->sectors, 0);
2337			rdev_dec_pending(rdev, conf->mddev);
2338		} else if (r1_bio->bios[m] != NULL) {
2339			/* This drive got a write error.  We need to
2340			 * narrow down and record precise write
2341			 * errors.
2342			 */
2343			fail = true;
2344			if (!narrow_write_error(r1_bio, m)) {
2345				md_error(conf->mddev,
2346					 conf->mirrors[m].rdev);
2347				/* an I/O failed, we can't clear the bitmap */
2348				set_bit(R1BIO_Degraded, &r1_bio->state);
2349			}
2350			rdev_dec_pending(conf->mirrors[m].rdev,
2351					 conf->mddev);
2352		}
2353	if (fail) {
2354		spin_lock_irq(&conf->device_lock);
2355		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2356		conf->nr_queued++;
2357		spin_unlock_irq(&conf->device_lock);
2358		md_wakeup_thread(conf->mddev->thread);
2359	} else {
2360		if (test_bit(R1BIO_WriteError, &r1_bio->state))
2361			close_write(r1_bio);
2362		raid_end_bio_io(r1_bio);
2363	}
2364}
2365
2366static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2367{
2368	int disk;
2369	int max_sectors;
2370	struct mddev *mddev = conf->mddev;
2371	struct bio *bio;
2372	char b[BDEVNAME_SIZE];
2373	struct md_rdev *rdev;
2374	dev_t bio_dev;
2375	sector_t bio_sector;
2376
2377	clear_bit(R1BIO_ReadError, &r1_bio->state);
2378	/* We got a read error.  Maybe the drive is bad, or maybe just
2379	 * the block, and we can fix it.
2380	 * We freeze all other IO, and try reading the block from
2381	 * other devices.  When we find one, we re-write it
2382	 * and check whether that fixes the read error.
2383	 * This is all done synchronously while the array is
2384	 * frozen.
2385	 */
2386
2387	bio = r1_bio->bios[r1_bio->read_disk];
2388	bdevname(bio->bi_bdev, b);
2389	bio_dev = bio->bi_bdev->bd_dev;
2390	bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
2391	bio_put(bio);
2392	r1_bio->bios[r1_bio->read_disk] = NULL;
2393
2394	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2395	if (mddev->ro == 0
2396	    && !test_bit(FailFast, &rdev->flags)) {
2397		freeze_array(conf, 1);
2398		fix_read_error(conf, r1_bio->read_disk,
2399			       r1_bio->sector, r1_bio->sectors);
2400		unfreeze_array(conf);
2401	} else {
2402		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2403	}
2404
2405	rdev_dec_pending(rdev, conf->mddev);
2406
2407read_more:
2408	disk = read_balance(conf, r1_bio, &max_sectors);
2409	if (disk == -1) {
2410		pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2411				    mdname(mddev), b, (unsigned long long)r1_bio->sector);
2412		raid_end_bio_io(r1_bio);
2413	} else {
2414		const unsigned long do_sync
2415			= r1_bio->master_bio->bi_opf & REQ_SYNC;
2416		r1_bio->read_disk = disk;
2417		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2418		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2419			 max_sectors);
2420		r1_bio->bios[r1_bio->read_disk] = bio;
2421		rdev = conf->mirrors[disk].rdev;
2422		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
2423				    mdname(mddev),
2424				    (unsigned long long)r1_bio->sector,
2425				    bdevname(rdev->bdev, b));
2426		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2427		bio->bi_bdev = rdev->bdev;
2428		bio->bi_end_io = raid1_end_read_request;
2429		bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
2430		if (test_bit(FailFast, &rdev->flags) &&
2431		    test_bit(R1BIO_FailFast, &r1_bio->state))
2432			bio->bi_opf |= MD_FAILFAST;
2433		bio->bi_private = r1_bio;
2434		if (max_sectors < r1_bio->sectors) {
2435			/* Drat - have to split this up more */
2436			struct bio *mbio = r1_bio->master_bio;
2437			int sectors_handled = (r1_bio->sector + max_sectors
2438					       - mbio->bi_iter.bi_sector);
2439			r1_bio->sectors = max_sectors;
2440			spin_lock_irq(&conf->device_lock);
2441			if (mbio->bi_phys_segments == 0)
2442				mbio->bi_phys_segments = 2;
2443			else
2444				mbio->bi_phys_segments++;
2445			spin_unlock_irq(&conf->device_lock);
2446			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
2447					      bio, bio_dev, bio_sector);
2448			generic_make_request(bio);
2449			bio = NULL;
2450
2451			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2452
2453			r1_bio->master_bio = mbio;
2454			r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
2455			r1_bio->state = 0;
2456			set_bit(R1BIO_ReadError, &r1_bio->state);
2457			r1_bio->mddev = mddev;
2458			r1_bio->sector = mbio->bi_iter.bi_sector +
2459				sectors_handled;
2460
2461			goto read_more;
2462		} else {
2463			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
2464					      bio, bio_dev, bio_sector);
2465			generic_make_request(bio);
2466		}
2467	}
2468}
2469
2470static void raid1d(struct md_thread *thread)
2471{
2472	struct mddev *mddev = thread->mddev;
2473	struct r1bio *r1_bio;
2474	unsigned long flags;
2475	struct r1conf *conf = mddev->private;
2476	struct list_head *head = &conf->retry_list;
2477	struct blk_plug plug;
2478
2479	md_check_recovery(mddev);
2480
2481	if (!list_empty_careful(&conf->bio_end_io_list) &&
2482	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2483		LIST_HEAD(tmp);
2484		spin_lock_irqsave(&conf->device_lock, flags);
2485		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2486			while (!list_empty(&conf->bio_end_io_list)) {
2487				list_move(conf->bio_end_io_list.prev, &tmp);
2488				conf->nr_queued--;
2489			}
2490		}
2491		spin_unlock_irqrestore(&conf->device_lock, flags);
2492		while (!list_empty(&tmp)) {
2493			r1_bio = list_first_entry(&tmp, struct r1bio,
2494						  retry_list);
2495			list_del(&r1_bio->retry_list);
2496			if (mddev->degraded)
2497				set_bit(R1BIO_Degraded, &r1_bio->state);
2498			if (test_bit(R1BIO_WriteError, &r1_bio->state))
2499				close_write(r1_bio);
2500			raid_end_bio_io(r1_bio);
2501		}
2502	}
2503
2504	blk_start_plug(&plug);
2505	for (;;) {
2506
2507		flush_pending_writes(conf);
2508
2509		spin_lock_irqsave(&conf->device_lock, flags);
2510		if (list_empty(head)) {
2511			spin_unlock_irqrestore(&conf->device_lock, flags);
2512			break;
2513		}
2514		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2515		list_del(head->prev);
2516		conf->nr_queued--;
2517		spin_unlock_irqrestore(&conf->device_lock, flags);
2518
2519		mddev = r1_bio->mddev;
2520		conf = mddev->private;
2521		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2522			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2523			    test_bit(R1BIO_WriteError, &r1_bio->state))
2524				handle_sync_write_finished(conf, r1_bio);
2525			else
2526				sync_request_write(mddev, r1_bio);
2527		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2528			   test_bit(R1BIO_WriteError, &r1_bio->state))
2529			handle_write_finished(conf, r1_bio);
2530		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2531			handle_read_error(conf, r1_bio);
2532		else
2533			/* just a partial read to be scheduled from a separate
2534			 * context
2535			 */
2536			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2537
2538		cond_resched();
2539		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2540			md_check_recovery(mddev);
2541	}
2542	blk_finish_plug(&plug);
2543}
2544
2545static int init_resync(struct r1conf *conf)
2546{
2547	int buffs;
2548
2549	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2550	BUG_ON(conf->r1buf_pool);
2551	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2552					  conf->poolinfo);
2553	if (!conf->r1buf_pool)
2554		return -ENOMEM;
2555	conf->next_resync = 0;
2556	return 0;
2557}
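/*
 * Sizing note: the pool holds RESYNC_WINDOW / RESYNC_BLOCK_SIZE r1bio
 * buffers, i.e. one resync window's worth of RESYNC_BLOCK_SIZE-sized
 * blocks, so a resync can keep the whole window in flight without
 * having to allocate under memory pressure.
 */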
2558
2559/*
2560 * perform a "sync" on one "block"
2561 *
2562 * We need to make sure that no normal I/O request - particularly write
2563 * requests - conflict with active sync requests.
2564 *
2565 * This is achieved by tracking pending requests and a 'barrier' concept
2566 * that can be installed to exclude normal IO requests.
2567 */
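/*
 * In outline: the resync path below brackets each window with
 * raise_barrier()/lower_barrier(), while the normal I/O path brackets
 * each request with wait_barrier()/allow_barrier(); a request that
 * arrives while the barrier is up sleeps on conf->wait_barrier until
 * the resync pass over that region has finished.
 */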
2568
2569static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2570				   int *skipped)
2571{
2572	struct r1conf *conf = mddev->private;
2573	struct r1bio *r1_bio;
2574	struct bio *bio;
2575	sector_t max_sector, nr_sectors;
2576	int disk = -1;
2577	int i;
2578	int wonly = -1;
2579	int write_targets = 0, read_targets = 0;
2580	sector_t sync_blocks;
2581	int still_degraded = 0;
2582	int good_sectors = RESYNC_SECTORS;
2583	int min_bad = 0; /* number of sectors that are bad in all devices */
2584
2585	if (!conf->r1buf_pool)
2586		if (init_resync(conf))
2587			return 0;
2588
2589	max_sector = mddev->dev_sectors;
2590	if (sector_nr >= max_sector) {
2591		/* If we aborted, we need to abort the
2592		 * sync on the 'current' bitmap chunk (there will
2593		 * only be one in a raid1 resync).
2594		 * We can find the current address in mddev->curr_resync.
2595		 */
2596		if (mddev->curr_resync < max_sector) /* aborted */
2597			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2598						&sync_blocks, 1);
2599		else /* completed sync */
2600			conf->fullsync = 0;
2601
2602		bitmap_close_sync(mddev->bitmap);
2603		close_sync(conf);
2604
2605		if (mddev_is_clustered(mddev)) {
2606			conf->cluster_sync_low = 0;
2607			conf->cluster_sync_high = 0;
2608		}
2609		return 0;
2610	}
2611
2612	if (mddev->bitmap == NULL &&
2613	    mddev->recovery_cp == MaxSector &&
2614	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2615	    conf->fullsync == 0) {
2616		*skipped = 1;
2617		return max_sector - sector_nr;
2618	}
2619	/* Before building a request, check if we can skip these blocks.
2620	 * This call to bitmap_start_sync doesn't actually record anything.
2621	 */
2622	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2623	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2624		/* We can skip this block, and probably several more */
2625		*skipped = 1;
2626		return sync_blocks;
2627	}
2628
2629	/*
2630	 * If there is non-resync activity waiting for a turn, then let it
2631	 * through before starting on this new sync request.
2632	 */
2633	if (conf->nr_waiting)
2634		schedule_timeout_uninterruptible(1);
2635
2636	/* We are incrementing sector_nr below.  To be safe, we check against
2637	 * sector_nr + two times RESYNC_SECTORS.
2638	 */
2639
2640	bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2641		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2642	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2643
2644	raise_barrier(conf, sector_nr);
2645
2646	rcu_read_lock();
2647	/*
2648	 * If we get a correctable read error during resync or recovery,
2649	 * we might want to read from a different device.  So we
2650	 * flag all drives that could conceivably be read from for READ,
2651	 * and any others (which will be non-In_sync devices) for WRITE.
2652	 * If a read fails, we try reading from something else for which READ
2653	 * is OK.
2654	 */
2655
2656	r1_bio->mddev = mddev;
2657	r1_bio->sector = sector_nr;
2658	r1_bio->state = 0;
2659	set_bit(R1BIO_IsSync, &r1_bio->state);
2660
2661	for (i = 0; i < conf->raid_disks * 2; i++) {
2662		struct md_rdev *rdev;
2663		bio = r1_bio->bios[i];
2664		bio_reset(bio);
2665
2666		rdev = rcu_dereference(conf->mirrors[i].rdev);
2667		if (rdev == NULL ||
2668		    test_bit(Faulty, &rdev->flags)) {
2669			if (i < conf->raid_disks)
2670				still_degraded = 1;
2671		} else if (!test_bit(In_sync, &rdev->flags)) {
2672			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2673			bio->bi_end_io = end_sync_write;
2674			write_targets++;
2675		} else {
2676			/* may need to read from here */
2677			sector_t first_bad = MaxSector;
2678			int bad_sectors;
2679
2680			if (is_badblock(rdev, sector_nr, good_sectors,
2681					&first_bad, &bad_sectors)) {
2682				if (first_bad > sector_nr)
2683					good_sectors = first_bad - sector_nr;
2684				else {
2685					bad_sectors -= (sector_nr - first_bad);
2686					if (min_bad == 0 ||
2687					    min_bad > bad_sectors)
2688						min_bad = bad_sectors;
2689				}
2690			}
2691			if (sector_nr < first_bad) {
2692				if (test_bit(WriteMostly, &rdev->flags)) {
2693					if (wonly < 0)
2694						wonly = i;
2695				} else {
2696					if (disk < 0)
2697						disk = i;
2698				}
2699				bio_set_op_attrs(bio, REQ_OP_READ, 0);
2700				bio->bi_end_io = end_sync_read;
2701				read_targets++;
2702			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2703				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2704				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2705				/*
2706				 * The device is suitable for reading (InSync),
2707				 * but has bad block(s) here. Let's try to correct them,
2708				 * if we are doing resync or repair. Otherwise, leave
2709				 * this device alone for this sync request.
2710				 */
2711				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2712				bio->bi_end_io = end_sync_write;
2713				write_targets++;
2714			}
2715		}
2716		if (bio->bi_end_io) {
2717			atomic_inc(&rdev->nr_pending);
2718			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2719			bio->bi_bdev = rdev->bdev;
2720			bio->bi_private = r1_bio;
2721			if (test_bit(FailFast, &rdev->flags))
2722				bio->bi_opf |= MD_FAILFAST;
2723		}
2724	}
2725	rcu_read_unlock();
2726	if (disk < 0)
2727		disk = wonly;
2728	r1_bio->read_disk = disk;
2729
2730	if (read_targets == 0 && min_bad > 0) {
2731		/* These sectors are bad on all InSync devices, so we
2732		 * need to mark them bad on all write targets
2733		 */
2734		int ok = 1;
2735		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2736			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2737				struct md_rdev *rdev = conf->mirrors[i].rdev;
2738				ok = rdev_set_badblocks(rdev, sector_nr,
2739							min_bad, 0
2740					) && ok;
2741			}
2742		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2743		*skipped = 1;
2744		put_buf(r1_bio);
2745
2746		if (!ok) {
2747			/* Cannot record the badblocks, so need to
2748			 * abort the resync.
2749			 * If there are multiple read targets, could just
2750			 * fail the really bad ones ???
2751			 */
2752			conf->recovery_disabled = mddev->recovery_disabled;
2753			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2754			return 0;
2755		} else
2756			return min_bad;
2757
2758	}
2759	if (min_bad > 0 && min_bad < good_sectors) {
2760		/* only resync enough to reach the next bad->good
2761		 * transition */
2762		good_sectors = min_bad;
2763	}
2764
2765	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2766		/* extra read targets are also write targets */
2767		write_targets += read_targets-1;
2768
2769	if (write_targets == 0 || read_targets == 0) {
2770		/* There is nowhere to write, so all non-sync
2771		 * drives must have failed - so we are finished
2772		 */
2773		sector_t rv;
2774		if (min_bad > 0)
2775			max_sector = sector_nr + min_bad;
2776		rv = max_sector - sector_nr;
2777		*skipped = 1;
2778		put_buf(r1_bio);
2779		return rv;
2780	}
2781
2782	if (max_sector > mddev->resync_max)
2783		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2784	if (max_sector > sector_nr + good_sectors)
2785		max_sector = sector_nr + good_sectors;
2786	nr_sectors = 0;
2787	sync_blocks = 0;
2788	do {
2789		struct page *page;
2790		int len = PAGE_SIZE;
2791		if (sector_nr + (len>>9) > max_sector)
2792			len = (max_sector - sector_nr) << 9;
2793		if (len == 0)
2794			break;
2795		if (sync_blocks == 0) {
2796			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2797					       &sync_blocks, still_degraded) &&
2798			    !conf->fullsync &&
2799			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2800				break;
2801			if ((len >> 9) > sync_blocks)
2802				len = sync_blocks<<9;
2803		}
2804
2805		for (i = 0 ; i < conf->raid_disks * 2; i++) {
2806			bio = r1_bio->bios[i];
2807			if (bio->bi_end_io) {
2808				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2809				if (bio_add_page(bio, page, len, 0) == 0) {
2810					/* stop here */
2811					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2812					while (i > 0) {
2813						i--;
2814						bio = r1_bio->bios[i];
2815						if (bio->bi_end_io == NULL)
2816							continue;
2817						/* remove last page from this bio */
2818						bio->bi_vcnt--;
2819						bio->bi_iter.bi_size -= len;
2820						bio_clear_flag(bio, BIO_SEG_VALID);
2821					}
2822					goto bio_full;
2823				}
2824			}
2825		}
2826		nr_sectors += len>>9;
2827		sector_nr += len>>9;
2828		sync_blocks -= (len>>9);
2829	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2830 bio_full:
2831	r1_bio->sectors = nr_sectors;
2832
2833	if (mddev_is_clustered(mddev) &&
2834			conf->cluster_sync_high < sector_nr + nr_sectors) {
2835		conf->cluster_sync_low = mddev->curr_resync_completed;
2836		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2837		/* Send resync message */
2838		md_cluster_ops->resync_info_update(mddev,
2839				conf->cluster_sync_low,
2840				conf->cluster_sync_high);
2841	}
2842
2843	/* For a user-requested sync, we read all readable devices and do a
2844	 * compare
2845	 */
2846	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2847		atomic_set(&r1_bio->remaining, read_targets);
2848		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2849			bio = r1_bio->bios[i];
2850			if (bio->bi_end_io == end_sync_read) {
2851				read_targets--;
2852				md_sync_acct(bio->bi_bdev, nr_sectors);
2853				if (read_targets == 1)
2854					bio->bi_opf &= ~MD_FAILFAST;
2855				generic_make_request(bio);
2856			}
2857		}
2858	} else {
2859		atomic_set(&r1_bio->remaining, 1);
2860		bio = r1_bio->bios[r1_bio->read_disk];
2861		md_sync_acct(bio->bi_bdev, nr_sectors);
2862		if (read_targets == 1)
2863			bio->bi_opf &= ~MD_FAILFAST;
2864		generic_make_request(bio);
2865
2866	}
2867	return nr_sectors;
2868}
2869
2870static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2871{
2872	if (sectors)
2873		return sectors;
2874
2875	return mddev->dev_sectors;
2876}
2877
2878static struct r1conf *setup_conf(struct mddev *mddev)
2879{
2880	struct r1conf *conf;
2881	int i;
2882	struct raid1_info *disk;
2883	struct md_rdev *rdev;
2884	int err = -ENOMEM;
2885
2886	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2887	if (!conf)
2888		goto abort;
2889
2890	conf->mirrors = kzalloc(sizeof(struct raid1_info)
2891				* mddev->raid_disks * 2,
2892				 GFP_KERNEL);
2893	if (!conf->mirrors)
2894		goto abort;
2895
2896	conf->tmppage = alloc_page(GFP_KERNEL);
2897	if (!conf->tmppage)
2898		goto abort;
2899
2900	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2901	if (!conf->poolinfo)
2902		goto abort;
2903	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
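	/*
	 * Both conf->mirrors and this pool are sized for raid_disks * 2:
	 * slots 0 .. raid_disks-1 are the active devices, and slots
	 * raid_disks .. 2*raid_disks-1 hold their replacements (see the
	 * Replacement handling below).
	 */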
2904	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2905					  r1bio_pool_free,
2906					  conf->poolinfo);
2907	if (!conf->r1bio_pool)
2908		goto abort;
2909
2910	conf->poolinfo->mddev = mddev;
2911
2912	err = -EINVAL;
2913	spin_lock_init(&conf->device_lock);
2914	rdev_for_each(rdev, mddev) {
2915		struct request_queue *q;
2916		int disk_idx = rdev->raid_disk;
2917		if (disk_idx >= mddev->raid_disks
2918		    || disk_idx < 0)
2919			continue;
2920		if (test_bit(Replacement, &rdev->flags))
2921			disk = conf->mirrors + mddev->raid_disks + disk_idx;
2922		else
2923			disk = conf->mirrors + disk_idx;
2924
2925		if (disk->rdev)
2926			goto abort;
2927		disk->rdev = rdev;
2928		q = bdev_get_queue(rdev->bdev);
2929
2930		disk->head_position = 0;
2931		disk->seq_start = MaxSector;
2932	}
2933	conf->raid_disks = mddev->raid_disks;
2934	conf->mddev = mddev;
2935	INIT_LIST_HEAD(&conf->retry_list);
2936	INIT_LIST_HEAD(&conf->bio_end_io_list);
2937
2938	spin_lock_init(&conf->resync_lock);
2939	init_waitqueue_head(&conf->wait_barrier);
2940
2941	bio_list_init(&conf->pending_bio_list);
2942	conf->pending_count = 0;
2943	conf->recovery_disabled = mddev->recovery_disabled - 1;
2944
2945	conf->start_next_window = MaxSector;
2946	conf->current_window_requests = conf->next_window_requests = 0;
2947
2948	err = -EIO;
2949	for (i = 0; i < conf->raid_disks * 2; i++) {
2950
2951		disk = conf->mirrors + i;
2952
2953		if (i < conf->raid_disks &&
2954		    disk[conf->raid_disks].rdev) {
2955			/* This slot has a replacement. */
2956			if (!disk->rdev) {
2957				/* No original, just make the replacement
2958				 * a recovering spare
2959				 */
2960				disk->rdev =
2961					disk[conf->raid_disks].rdev;
2962				disk[conf->raid_disks].rdev = NULL;
2963			} else if (!test_bit(In_sync, &disk->rdev->flags))
2964				/* Original is not in_sync - bad */
2965				goto abort;
2966		}
2967
2968		if (!disk->rdev ||
2969		    !test_bit(In_sync, &disk->rdev->flags)) {
2970			disk->head_position = 0;
2971			if (disk->rdev &&
2972			    (disk->rdev->saved_raid_disk < 0))
2973				conf->fullsync = 1;
2974		}
2975	}
2976
2977	err = -ENOMEM;
2978	conf->thread = md_register_thread(raid1d, mddev, "raid1");
2979	if (!conf->thread)
2980		goto abort;
2981
2982	return conf;
2983
2984 abort:
2985	if (conf) {
2986		mempool_destroy(conf->r1bio_pool);
2987		kfree(conf->mirrors);
2988		safe_put_page(conf->tmppage);
2989		kfree(conf->poolinfo);
2990		kfree(conf);
2991	}
2992	return ERR_PTR(err);
2993}
2994
2995static void raid1_free(struct mddev *mddev, void *priv);
2996static int raid1_run(struct mddev *mddev)
2997{
2998	struct r1conf *conf;
2999	int i;
3000	struct md_rdev *rdev;
3001	int ret;
3002	bool discard_supported = false;
3003
3004	if (mddev->level != 1) {
3005		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3006			mdname(mddev), mddev->level);
3007		return -EIO;
3008	}
3009	if (mddev->reshape_position != MaxSector) {
3010		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3011			mdname(mddev));
3012		return -EIO;
3013	}
3014	/*
3015	 * copy the already verified devices into our private RAID1
3016	 * bookkeeping area. [Whatever we allocate in run()
3017	 * should be freed in raid1_free().]
3018	 */
3019	if (mddev->private == NULL)
3020		conf = setup_conf(mddev);
3021	else
3022		conf = mddev->private;
3023
3024	if (IS_ERR(conf))
3025		return PTR_ERR(conf);
3026
3027	if (mddev->queue)
3028		blk_queue_max_write_same_sectors(mddev->queue, 0);
3029
3030	rdev_for_each(rdev, mddev) {
3031		if (!mddev->gendisk)
3032			continue;
3033		disk_stack_limits(mddev->gendisk, rdev->bdev,
3034				  rdev->data_offset << 9);
3035		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3036			discard_supported = true;
3037	}
3038
3039	mddev->degraded = 0;
3040	for (i=0; i < conf->raid_disks; i++)
3041		if (conf->mirrors[i].rdev == NULL ||
3042		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3043		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3044			mddev->degraded++;
3045
3046	if (conf->raid_disks - mddev->degraded == 1)
3047		mddev->recovery_cp = MaxSector;
3048
3049	if (mddev->recovery_cp != MaxSector)
3050		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3051			mdname(mddev));
3052	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3053		mdname(mddev), mddev->raid_disks - mddev->degraded,
3054		mddev->raid_disks);
3055
3056	/*
3057	 * Ok, everything is just fine now
3058	 */
3059	mddev->thread = conf->thread;
3060	conf->thread = NULL;
3061	mddev->private = conf;
3062	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3063
3064	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3065
3066	if (mddev->queue) {
3067		if (discard_supported)
3068			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3069						mddev->queue);
3070		else
3071			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3072						  mddev->queue);
3073	}
3074
3075	ret =  md_integrity_register(mddev);
3076	if (ret) {
3077		md_unregister_thread(&mddev->thread);
3078		raid1_free(mddev, conf);
3079	}
3080	return ret;
3081}
3082
3083static void raid1_free(struct mddev *mddev, void *priv)
3084{
3085	struct r1conf *conf = priv;
3086
3087	mempool_destroy(conf->r1bio_pool);
3088	kfree(conf->mirrors);
3089	safe_put_page(conf->tmppage);
3090	kfree(conf->poolinfo);
3091	kfree(conf);
3092}
3093
3094static int raid1_resize(struct mddev *mddev, sector_t sectors)
3095{
3096	/* no resync is happening, and there is enough space
3097	 * on all devices, so we can resize.
3098	 * We need to make sure resync covers any new space.
3099	 * If the array is shrinking we should possibly wait until
3100	 * any io in the removed space completes, but it hardly seems
3101	 * worth it.
3102	 */
3103	sector_t newsize = raid1_size(mddev, sectors, 0);
3104	if (mddev->external_size &&
3105	    mddev->array_sectors > newsize)
3106		return -EINVAL;
3107	if (mddev->bitmap) {
3108		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
3109		if (ret)
3110			return ret;
3111	}
3112	md_set_array_sectors(mddev, newsize);
3113	set_capacity(mddev->gendisk, mddev->array_sectors);
3114	revalidate_disk(mddev->gendisk);
3115	if (sectors > mddev->dev_sectors &&
3116	    mddev->recovery_cp > mddev->dev_sectors) {
3117		mddev->recovery_cp = mddev->dev_sectors;
3118		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3119	}
3120	mddev->dev_sectors = sectors;
3121	mddev->resync_max_sectors = sectors;
3122	return 0;
3123}
3124
3125static int raid1_reshape(struct mddev *mddev)
3126{
3127	/* We need to:
3128	 * 1/ resize the r1bio_pool
3129	 * 2/ resize conf->mirrors
3130	 *
3131	 * We allocate a new r1bio_pool if we can.
3132	 * Then raise a device barrier and wait until all IO stops.
3133	 * Then resize conf->mirrors and swap in the new r1bio pool.
3134	 *
3135	 * At the same time, we "pack" the devices so that all the missing
3136	 * devices have the higher raid_disk numbers.
3137	 */
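	/*
	 * A worked example of the packing step: if raid_disks shrinks
	 * from 4 to 2 and only slots 0 and 2 still hold devices, the
	 * d/d2 loop below moves the device in slot 2 down to slot 1,
	 * leaving the survivors in slots 0..1.
	 */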
3138	mempool_t *newpool, *oldpool;
3139	struct pool_info *newpoolinfo;
3140	struct raid1_info *newmirrors;
3141	struct r1conf *conf = mddev->private;
3142	int cnt, raid_disks;
3143	unsigned long flags;
3144	int d, d2, err;
3145
3146	/* Cannot change chunk_size, layout, or level */
3147	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3148	    mddev->layout != mddev->new_layout ||
3149	    mddev->level != mddev->new_level) {
3150		mddev->new_chunk_sectors = mddev->chunk_sectors;
3151		mddev->new_layout = mddev->layout;
3152		mddev->new_level = mddev->level;
3153		return -EINVAL;
3154	}
3155
3156	if (!mddev_is_clustered(mddev)) {
3157		err = md_allow_write(mddev);
3158		if (err)
3159			return err;
3160	}
3161
3162	raid_disks = mddev->raid_disks + mddev->delta_disks;
3163
3164	if (raid_disks < conf->raid_disks) {
3165		cnt = 0;
3166		for (d = 0; d < conf->raid_disks; d++)
3167			if (conf->mirrors[d].rdev)
3168				cnt++;
3169		if (cnt > raid_disks)
3170			return -EBUSY;
3171	}
3172
3173	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3174	if (!newpoolinfo)
3175		return -ENOMEM;
3176	newpoolinfo->mddev = mddev;
3177	newpoolinfo->raid_disks = raid_disks * 2;
3178
3179	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
3180				 r1bio_pool_free, newpoolinfo);
3181	if (!newpool) {
3182		kfree(newpoolinfo);
3183		return -ENOMEM;
3184	}
3185	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
3186			     GFP_KERNEL);
3187	if (!newmirrors) {
3188		kfree(newpoolinfo);
3189		mempool_destroy(newpool);
3190		return -ENOMEM;
3191	}
3192
3193	freeze_array(conf, 0);
3194
3195	/* ok, everything is stopped */
3196	oldpool = conf->r1bio_pool;
3197	conf->r1bio_pool = newpool;
3198
3199	for (d = d2 = 0; d < conf->raid_disks; d++) {
3200		struct md_rdev *rdev = conf->mirrors[d].rdev;
3201		if (rdev && rdev->raid_disk != d2) {
3202			sysfs_unlink_rdev(mddev, rdev);
3203			rdev->raid_disk = d2;
3204			sysfs_unlink_rdev(mddev, rdev);
3205			if (sysfs_link_rdev(mddev, rdev))
3206				pr_warn("md/raid1:%s: cannot register rd%d\n",
3207					mdname(mddev), rdev->raid_disk);
3208		}
3209		if (rdev)
3210			newmirrors[d2++].rdev = rdev;
3211	}
3212	kfree(conf->mirrors);
3213	conf->mirrors = newmirrors;
3214	kfree(conf->poolinfo);
3215	conf->poolinfo = newpoolinfo;
3216
3217	spin_lock_irqsave(&conf->device_lock, flags);
3218	mddev->degraded += (raid_disks - conf->raid_disks);
3219	spin_unlock_irqrestore(&conf->device_lock, flags);
3220	conf->raid_disks = mddev->raid_disks = raid_disks;
3221	mddev->delta_disks = 0;
3222
3223	unfreeze_array(conf);
3224
3225	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3226	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3227	md_wakeup_thread(mddev->thread);
3228
3229	mempool_destroy(oldpool);
3230	return 0;
3231}
3232
3233static void raid1_quiesce(struct mddev *mddev, int state)
3234{
3235	struct r1conf *conf = mddev->private;
3236
3237	switch (state) {
3238	case 2: /* wake for suspend */
3239		wake_up(&conf->wait_barrier);
3240		break;
3241	case 1:
3242		freeze_array(conf, 0);
3243		break;
3244	case 0:
3245		unfreeze_array(conf);
3246		break;
3247	}
3248}
3249
3250static void *raid1_takeover(struct mddev *mddev)
3251{
3252	/* raid1 can take over:
3253	 *  raid5 with 2 devices, any layout or chunk size
3254	 */
3255	if (mddev->level == 5 && mddev->raid_disks == 2) {
3256		struct r1conf *conf;
3257		mddev->new_level = 1;
3258		mddev->new_layout = 0;
3259		mddev->new_chunk_sectors = 0;
3260		conf = setup_conf(mddev);
3261		if (!IS_ERR(conf)) {
3262			/* Array must appear to be quiesced */
3263			conf->array_frozen = 1;
3264			mddev_clear_unsupported_flags(mddev,
3265				UNSUPPORTED_MDDEV_FLAGS);
3266		}
3267		return conf;
3268	}
3269	return ERR_PTR(-EINVAL);
3270}
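/*
 * Takeover is typically reached via a level change such as
 * "mdadm --grow /dev/mdX --level=1" on a two-device RAID5, which is
 * safe because a 2-drive RAID5 stores each block's parity as a plain
 * copy of the block, i.e. it is already laid out as a mirror.
 */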
3271
3272static struct md_personality raid1_personality =
3273{
3274	.name		= "raid1",
3275	.level		= 1,
3276	.owner		= THIS_MODULE,
3277	.make_request	= raid1_make_request,
3278	.run		= raid1_run,
3279	.free		= raid1_free,
3280	.status		= raid1_status,
3281	.error_handler	= raid1_error,
3282	.hot_add_disk	= raid1_add_disk,
3283	.hot_remove_disk= raid1_remove_disk,
3284	.spare_active	= raid1_spare_active,
3285	.sync_request	= raid1_sync_request,
3286	.resize		= raid1_resize,
3287	.size		= raid1_size,
3288	.check_reshape	= raid1_reshape,
3289	.quiesce	= raid1_quiesce,
3290	.takeover	= raid1_takeover,
3291	.congested	= raid1_congested,
3292};
3293
3294static int __init raid_init(void)
3295{
3296	return register_md_personality(&raid1_personality);
3297}
3298
3299static void raid_exit(void)
3300{
3301	unregister_md_personality(&raid1_personality);
3302}
3303
3304module_init(raid_init);
3305module_exit(raid_exit);
3306MODULE_LICENSE("GPL");
3307MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3308MODULE_ALIAS("md-personality-3"); /* RAID1 */
3309MODULE_ALIAS("md-raid1");
3310MODULE_ALIAS("md-level-1");
3311
3312module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);