   1/*
   2 * raid1.c : Multiple Devices driver for Linux
   3 *
   4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
   5 *
   6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   7 *
   8 * RAID-1 management functions.
   9 *
  10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  11 *
  12 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
  13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  14 *
  15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
  16 * bitmapped intelligence in resync:
  17 *
  18 *      - bitmap marked during normal i/o
  19 *      - bitmap used to skip nondirty blocks during sync
  20 *
  21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
  22 * - persistent bitmap code
  23 *
  24 * This program is free software; you can redistribute it and/or modify
  25 * it under the terms of the GNU General Public License as published by
  26 * the Free Software Foundation; either version 2, or (at your option)
  27 * any later version.
  28 *
  29 * You should have received a copy of the GNU General Public License
  30 * (for example /usr/src/linux/COPYING); if not, write to the Free
  31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  32 */
  33
  34#include <linux/slab.h>
  35#include <linux/delay.h>
  36#include <linux/blkdev.h>
  37#include <linux/seq_file.h>
  38#include <linux/ratelimit.h>
  39#include "md.h"
  40#include "raid1.h"
  41#include "bitmap.h"
  42
  43#define DEBUG 0
  44#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0)
  45
  46/*
  47 * Number of guaranteed r1bios in case of extreme VM load:
  48 */
  49#define	NR_RAID1_BIOS 256
  50
  51
  52static void allow_barrier(conf_t *conf);
  53static void lower_barrier(conf_t *conf);
  54
  55static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
  56{
  57	struct pool_info *pi = data;
  58	int size = offsetof(r1bio_t, bios[pi->raid_disks]);
  59
  60	/* allocate a r1bio with room for raid_disks entries in the bios array */
  61	return kzalloc(size, gfp_flags);
  62}
  63
  64static void r1bio_pool_free(void *r1_bio, void *data)
  65{
  66	kfree(r1_bio);
  67}
  68
  69#define RESYNC_BLOCK_SIZE (64*1024)
  70//#define RESYNC_BLOCK_SIZE PAGE_SIZE
  71#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
  72#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  73#define RESYNC_WINDOW (2048*1024)
 
 
  74
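    /*
     * Allocate a resync/recovery buffer: an r1bio plus one bio per disk.
     * Data pages are attached to the first bio only and shared by the
     * others, except for a user-requested check/repair where every bio
     * gets its own pages so the copies can be compared.
     */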
  75static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
  76{
  77	struct pool_info *pi = data;
  78	struct page *page;
  79	r1bio_t *r1_bio;
  80	struct bio *bio;
  81	int i, j;
 
 
  82
  83	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
  84	if (!r1_bio)
  85		return NULL;
  86
 
 
 
 
 
  87	/*
  88	 * Allocate bios : 1 for reading, n-1 for writing
  89	 */
  90	for (j = pi->raid_disks ; j-- ; ) {
  91		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
  92		if (!bio)
  93			goto out_free_bio;
  94		r1_bio->bios[j] = bio;
  95	}
  96	/*
  97	 * Allocate RESYNC_PAGES data pages and attach them to
  98	 * the first bio.
  99	 * If this is a user-requested check/repair, allocate
 100	 * RESYNC_PAGES for each bio.
 101	 */
 102	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
 103		j = pi->raid_disks;
 104	else
 105		j = 1;
 106	while(j--) {
 
 
 107		bio = r1_bio->bios[j];
 108		for (i = 0; i < RESYNC_PAGES; i++) {
 109			page = alloc_page(gfp_flags);
 110			if (unlikely(!page))
 111				goto out_free_pages;
 112
 113			bio->bi_io_vec[i].bv_page = page;
 114			bio->bi_vcnt = i+1;
 
 
 
 
 115		}
 116	}
 117	/* If not user-requested, copy the page pointers to all bios */
 118	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
 119		for (i=0; i<RESYNC_PAGES ; i++)
 120			for (j=1; j<pi->raid_disks; j++)
 121				r1_bio->bios[j]->bi_io_vec[i].bv_page =
 122					r1_bio->bios[0]->bi_io_vec[i].bv_page;
 123	}
 124
 125	r1_bio->master_bio = NULL;
 126
 127	return r1_bio;
 128
 129out_free_pages:
 130	for (j=0 ; j < pi->raid_disks; j++)
 131		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
 132			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
 133	j = -1;
 134out_free_bio:
 135	while ( ++j < pi->raid_disks )
 136		bio_put(r1_bio->bios[j]);
 137	r1bio_pool_free(r1_bio, data);
 
 
 
 138	return NULL;
 139}
 140
 141static void r1buf_pool_free(void *__r1_bio, void *data)
 142{
 143	struct pool_info *pi = data;
 144	int i,j;
 145	r1bio_t *r1bio = __r1_bio;
 
 146
 147	for (i = 0; i < RESYNC_PAGES; i++)
 148		for (j = pi->raid_disks; j-- ;) {
 149			if (j == 0 ||
 150			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
 151			    r1bio->bios[0]->bi_io_vec[i].bv_page)
 152				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
 153		}
 154	for (i=0 ; i < pi->raid_disks; i++)
 155		bio_put(r1bio->bios[i]);
 
 156
 157	r1bio_pool_free(r1bio, data);
 
 
 
 158}
 159
 160static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
 161{
 162	int i;
 163
 164	for (i = 0; i < conf->raid_disks; i++) {
 165		struct bio **bio = r1_bio->bios + i;
 166		if (!BIO_SPECIAL(*bio))
 167			bio_put(*bio);
 168		*bio = NULL;
 169	}
 170}
 171
 172static void free_r1bio(r1bio_t *r1_bio)
 173{
 174	conf_t *conf = r1_bio->mddev->private;
 175
 176	put_all_bios(conf, r1_bio);
 177	mempool_free(r1_bio, conf->r1bio_pool);
 178}
 179
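    /*
     * Release a resync/recovery r1bio: drop the rdev references taken for
     * each bio that was actually issued, return the buffer to the
     * r1buf pool and lower the resync barrier it was counted under.
     */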
 180static void put_buf(r1bio_t *r1_bio)
 181{
 182	conf_t *conf = r1_bio->mddev->private;
 
 183	int i;
 184
 185	for (i=0; i<conf->raid_disks; i++) {
 186		struct bio *bio = r1_bio->bios[i];
 187		if (bio->bi_end_io)
 188			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
 189	}
 190
 191	mempool_free(r1_bio, conf->r1buf_pool);
 192
 193	lower_barrier(conf);
 194}
 195
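    /*
     * Queue an r1bio that needs attention (read error, write error, ...)
     * on conf->retry_list and wake up raid1d to handle it in process
     * context.
     */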
 196static void reschedule_retry(r1bio_t *r1_bio)
 197{
 198	unsigned long flags;
 199	mddev_t *mddev = r1_bio->mddev;
 200	conf_t *conf = mddev->private;
 
 201
 
 202	spin_lock_irqsave(&conf->device_lock, flags);
 203	list_add(&r1_bio->retry_list, &conf->retry_list);
 204	conf->nr_queued ++;
 205	spin_unlock_irqrestore(&conf->device_lock, flags);
 206
 207	wake_up(&conf->wait_barrier);
 208	md_wakeup_thread(mddev->thread);
 209}
 210
 211/*
 212 * raid_end_bio_io() is called when we have finished servicing a mirrored
 213 * operation and are ready to return a success/failure code to the buffer
 214 * cache layer.
 215 */
 216static void call_bio_endio(r1bio_t *r1_bio)
 217{
 218	struct bio *bio = r1_bio->master_bio;
 219	int done;
 220	conf_t *conf = r1_bio->mddev->private;
 221
 222	if (bio->bi_phys_segments) {
 223		unsigned long flags;
 224		spin_lock_irqsave(&conf->device_lock, flags);
 225		bio->bi_phys_segments--;
 226		done = (bio->bi_phys_segments == 0);
 227		spin_unlock_irqrestore(&conf->device_lock, flags);
 228	} else
 229		done = 1;
 230
 231	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
 232		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 233	if (done) {
 234		bio_endio(bio, 0);
 235		/*
 236		 * Wake up any possible resync thread that waits for the device
 237		 * to go idle.
 238		 */
 239		allow_barrier(conf);
 240	}
 241}
 242
 243static void raid_end_bio_io(r1bio_t *r1_bio)
 244{
 245	struct bio *bio = r1_bio->master_bio;
 
 246
 247	/* if nobody has done the final endio yet, do it now */
 248	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 249		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
 250			(bio_data_dir(bio) == WRITE) ? "write" : "read",
 251			(unsigned long long) bio->bi_sector,
 252			(unsigned long long) bio->bi_sector +
 253				(bio->bi_size >> 9) - 1);
 254
 255		call_bio_endio(r1_bio);
 256	}
 257	free_r1bio(r1_bio);
 258}
 259
 260/*
 261 * Update disk head position estimator based on IRQ completion info.
 262 */
 263static inline void update_head_pos(int disk, r1bio_t *r1_bio)
 264{
 265	conf_t *conf = r1_bio->mddev->private;
 266
 267	conf->mirrors[disk].head_position =
 268		r1_bio->sector + (r1_bio->sectors);
 269}
 270
 271static void raid1_end_read_request(struct bio *bio, int error)
 
 
 
 272{
 273	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 274	r1bio_t *r1_bio = bio->bi_private;
 275	int mirror;
 276	conf_t *conf = r1_bio->mddev->private;
 277
 278	mirror = r1_bio->read_disk;
 279	/*
 280	 * this branch is our 'one mirror IO has finished' event handler:
 281	 */
 282	update_head_pos(mirror, r1_bio);
 283
 284	if (uptodate)
 285		set_bit(R1BIO_Uptodate, &r1_bio->state);
 
 
 
 
 
 286	else {
 287		/* If all other devices have failed, we want to return
 288		 * the error upwards rather than fail the last device.
 289		 * Here we redefine "uptodate" to mean "Don't want to retry"
 290		 */
 291		unsigned long flags;
 292		spin_lock_irqsave(&conf->device_lock, flags);
 293		if (r1_bio->mddev->degraded == conf->raid_disks ||
 294		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
 295		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
 296			uptodate = 1;
 297		spin_unlock_irqrestore(&conf->device_lock, flags);
 298	}
 299
 300	if (uptodate)
 301		raid_end_bio_io(r1_bio);
 302	else {
 
 303		/*
 304		 * oops, read error:
 305		 */
 306		char b[BDEVNAME_SIZE];
 307		printk_ratelimited(
 308			KERN_ERR "md/raid1:%s: %s: "
 309			"rescheduling sector %llu\n",
 310			mdname(conf->mddev),
 311			bdevname(conf->mirrors[mirror].rdev->bdev,
 312				 b),
 313			(unsigned long long)r1_bio->sector);
 314		set_bit(R1BIO_ReadError, &r1_bio->state);
 315		reschedule_retry(r1_bio);
 
 316	}
 317
 318	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
 319}
 320
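    /*
     * All writes for this r1bio are done: free any write-behind page
     * copies, clear the corresponding bits in the write-intent bitmap and
     * tell md that the write has ended.
     */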
 321static void close_write(r1bio_t *r1_bio)
 322{
 323	/* it really is the end of this request */
 324	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 325		/* free extra copy of the data pages */
 326		int i = r1_bio->behind_page_count;
 327		while (i--)
 328			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
 329		kfree(r1_bio->behind_bvecs);
 330		r1_bio->behind_bvecs = NULL;
 331	}
 332	/* clear the bitmap if all writes complete successfully */
 333	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
 334			r1_bio->sectors,
 335			!test_bit(R1BIO_Degraded, &r1_bio->state),
 336			test_bit(R1BIO_BehindIO, &r1_bio->state));
 337	md_write_end(r1_bio->mddev);
 338}
 339
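    /*
     * Called as each mirrored write completes.  When the last one
     * finishes, either pass the r1bio to raid1d (write error seen, or bad
     * blocks to clear) or finish it and signal the original bio.
     */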
 340static void r1_bio_write_done(r1bio_t *r1_bio)
 341{
 342	if (!atomic_dec_and_test(&r1_bio->remaining))
 343		return;
 344
 345	if (test_bit(R1BIO_WriteError, &r1_bio->state))
 346		reschedule_retry(r1_bio);
 347	else {
 348		close_write(r1_bio);
 349		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
 350			reschedule_retry(r1_bio);
 351		else
 352			raid_end_bio_io(r1_bio);
 353	}
 354}
 355
 356static void raid1_end_write_request(struct bio *bio, int error)
 357{
 358	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 359	r1bio_t *r1_bio = bio->bi_private;
 360	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 361	conf_t *conf = r1_bio->mddev->private;
 362	struct bio *to_put = NULL;
 
 
 
 
 
 363
 364
 365	for (mirror = 0; mirror < conf->raid_disks; mirror++)
 366		if (r1_bio->bios[mirror] == bio)
 367			break;
 368
 369	/*
 370	 * 'one mirror IO has finished' event handler:
 371	 */
 372	if (!uptodate) {
 373		set_bit(WriteErrorSeen,
 374			&conf->mirrors[mirror].rdev->flags);
 375		set_bit(R1BIO_WriteError, &r1_bio->state);
 376	} else {
 377		/*
 378		 * Set R1BIO_Uptodate in our master bio, so that we
 379		 * will return a good error code to the higher
 380		 * levels even if IO on some other mirrored buffer
 381		 * fails.
 382		 *
 383		 * The 'master' represents the composite IO operation
 384		 * to user-side. So if something waits for IO, then it
 385		 * will wait for the 'master' bio.
 386		 */
 387		sector_t first_bad;
 388		int bad_sectors;
 389
 390		r1_bio->bios[mirror] = NULL;
 391		to_put = bio;
 392		set_bit(R1BIO_Uptodate, &r1_bio->state);
 393
 394		/* Maybe we can clear some bad blocks. */
 395		if (is_badblock(conf->mirrors[mirror].rdev,
 396				r1_bio->sector, r1_bio->sectors,
 397				&first_bad, &bad_sectors)) {
 398			r1_bio->bios[mirror] = IO_MADE_GOOD;
 399			set_bit(R1BIO_MadeGood, &r1_bio->state);
 400		}
 401	}
 402
 403	update_head_pos(mirror, r1_bio);
 404
 405	if (behind) {
 406		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
 
 
 407			atomic_dec(&r1_bio->behind_remaining);
 408
 409		/*
 410		 * In behind mode, we ACK the master bio once the I/O
 411		 * has safely reached all non-writemostly
 412		 * disks. Setting the Returned bit ensures that this
 413		 * gets done only once -- we don't ever want to return
 414		 * -EIO here, instead we'll wait
 415		 */
 416		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
 417		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
 418			/* Maybe we can return now */
 419			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 420				struct bio *mbio = r1_bio->master_bio;
 421				PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
 422				       (unsigned long long) mbio->bi_sector,
 423				       (unsigned long long) mbio->bi_sector +
 424				       (mbio->bi_size >> 9) - 1);
 425				call_bio_endio(r1_bio);
 426			}
 427		}
 428	}
 
 429	if (r1_bio->bios[mirror] == NULL)
 430		rdev_dec_pending(conf->mirrors[mirror].rdev,
 431				 conf->mddev);
 432
 433	/*
 434	 * Let's see if all mirrored write operations have finished
 435	 * already.
 436	 */
 437	r1_bio_write_done(r1_bio);
 438
 439	if (to_put)
 440		bio_put(to_put);
 441}
 442
 443
 444/*
 445 * This routine returns the disk from which the requested read should
 446 * be done. There is a per-array 'next expected sequential IO' sector
 447 * number - if this matches on the next IO then we use the last disk.
 448 * There is also a per-disk 'last known head position' sector that is
 449 * maintained from IRQ contexts, both the normal and the resync IO
 450 * completion handlers update this position correctly. If there is no
 451 * perfect sequential match then we pick the disk whose head is closest.
 452 *
 453 * If there are 2 mirrors in the same 2 devices, performance degrades
 454 * because position is mirror, not device based.
 455 *
 456 * The rdev for the device selected will have nr_pending incremented.
 457 */
 458static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
 459{
 460	const sector_t this_sector = r1_bio->sector;
 461	int sectors;
 462	int best_good_sectors;
 463	int start_disk;
 464	int best_disk;
 465	int i;
 466	sector_t best_dist;
 467	mdk_rdev_t *rdev;
 
 468	int choose_first;
 
 469
 470	rcu_read_lock();
 471	/*
 472	 * Check if we can balance. We can balance on the whole
 473	 * device if no resync is going on, or below the resync window.
 474	 * We take the first readable disk when above the resync window.
 475	 */
 476 retry:
 477	sectors = r1_bio->sectors;
 478	best_disk = -1;
 
 479	best_dist = MaxSector;
 
 
 480	best_good_sectors = 0;
 481
 482	if (conf->mddev->recovery_cp < MaxSector &&
 483	    (this_sector + sectors >= conf->next_resync)) {
 
 
 
 
 
 484		choose_first = 1;
 485		start_disk = 0;
 486	} else {
 487		choose_first = 0;
 488		start_disk = conf->last_used;
 489	}
 490
 491	for (i = 0 ; i < conf->raid_disks ; i++) {
 492		sector_t dist;
 493		sector_t first_bad;
 494		int bad_sectors;
 495
 496		int disk = start_disk + i;
 497		if (disk >= conf->raid_disks)
 498			disk -= conf->raid_disks;
 499
 500		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 501		if (r1_bio->bios[disk] == IO_BLOCKED
 502		    || rdev == NULL
 503		    || test_bit(Faulty, &rdev->flags))
 504			continue;
 505		if (!test_bit(In_sync, &rdev->flags) &&
 506		    rdev->recovery_offset < this_sector + sectors)
 507			continue;
 508		if (test_bit(WriteMostly, &rdev->flags)) {
 509			/* Don't balance among write-mostly, just
 510			 * use the first as a last resort */
 511			if (best_disk < 0)
 512				best_disk = disk;
 513			continue;
 514		}
 515		/* This is a reasonable device to use.  It might
 516		 * even be best.
 517		 */
 518		if (is_badblock(rdev, this_sector, sectors,
 519				&first_bad, &bad_sectors)) {
 520			if (best_dist < MaxSector)
 521				/* already have a better device */
 522				continue;
 523			if (first_bad <= this_sector) {
 524				/* cannot read here. If this is the 'primary'
 525				 * device, then we must not read beyond
 526				 * bad_sectors from another device..
 527				 */
 528				bad_sectors -= (this_sector - first_bad);
 529				if (choose_first && sectors > bad_sectors)
 530					sectors = bad_sectors;
 531				if (best_good_sectors > sectors)
 532					best_good_sectors = sectors;
 533
 534			} else {
 535				sector_t good_sectors = first_bad - this_sector;
 536				if (good_sectors > best_good_sectors) {
 537					best_good_sectors = good_sectors;
 538					best_disk = disk;
 539				}
 540				if (choose_first)
 541					break;
 542			}
 543			continue;
 544		} else
 
 
 545			best_good_sectors = sectors;
 
 546
 547		dist = abs(this_sector - conf->mirrors[disk].head_position);
 548		if (choose_first
 549		    /* Don't change to another disk for sequential reads */
 550		    || conf->next_seq_sect == this_sector
 551		    || dist == 0
 552		    /* If device is idle, use it */
 553		    || atomic_read(&rdev->nr_pending) == 0) {
 
 
 
 
 554			best_disk = disk;
 555			break;
 556		}
 557		if (dist < best_dist) {
 558			best_dist = dist;
 559			best_disk = disk;
 560		}
 561	}
 562
 563	if (best_disk >= 0) {
 564		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
 565		if (!rdev)
 566			goto retry;
 567		atomic_inc(&rdev->nr_pending);
 568		if (test_bit(Faulty, &rdev->flags)) {
 569			/* cannot risk returning a device that failed
 570			 * before we inc'ed nr_pending
 571			 */
 572			rdev_dec_pending(rdev, conf->mddev);
 573			goto retry;
 574		}
 575		sectors = best_good_sectors;
 576		conf->next_seq_sect = this_sector + sectors;
 577		conf->last_used = best_disk;
 
 
 
 578	}
 579	rcu_read_unlock();
 580	*max_sectors = sectors;
 581
 582	return best_disk;
 583}
 584
 585int md_raid1_congested(mddev_t *mddev, int bits)
 586{
 587	conf_t *conf = mddev->private;
 588	int i, ret = 0;
 589
 590	rcu_read_lock();
 591	for (i = 0; i < mddev->raid_disks; i++) {
 592		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 593		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 594			struct request_queue *q = bdev_get_queue(rdev->bdev);
 595
 596			BUG_ON(!q);
 597
 598			/* Note the '|| 1' - when read_balance prefers
 599			 * non-congested targets, it can be removed
 600			 */
 601			if ((bits & (1<<BDI_async_congested)) || 1)
 602				ret |= bdi_congested(&q->backing_dev_info, bits);
 603			else
 604				ret &= bdi_congested(&q->backing_dev_info, bits);
 605		}
 606	}
 607	rcu_read_unlock();
 608	return ret;
 609}
 610EXPORT_SYMBOL_GPL(md_raid1_congested);
 611
 612static int raid1_congested(void *data, int bits)
 613{
 614	mddev_t *mddev = data;
 615
 616	return mddev_congested(mddev, bits) ||
 617		md_raid1_congested(mddev, bits);
 618}
 619
 620static void flush_pending_writes(conf_t *conf)
 621{
 622	/* Any writes that have been queued but are awaiting
 623	 * bitmap updates get flushed here.
 624	 */
 625	spin_lock_irq(&conf->device_lock);
 626
 627	if (conf->pending_bio_list.head) {
 
 628		struct bio *bio;
 
 629		bio = bio_list_get(&conf->pending_bio_list);
 
 630		spin_unlock_irq(&conf->device_lock);
 631		/* flush any pending bitmap writes to
 632		 * disk before proceeding w/ I/O */
 633		bitmap_unplug(conf->mddev->bitmap);
 634
 635		while (bio) { /* submit pending writes */
 636			struct bio *next = bio->bi_next;
 637			bio->bi_next = NULL;
 638			generic_make_request(bio);
 639			bio = next;
 640		}
 
 
 
 
 641	} else
 642		spin_unlock_irq(&conf->device_lock);
 643}
 644
 645/* Barriers....
 646 * Sometimes we need to suspend IO while we do something else,
 647 * either some resync/recovery, or reconfigure the array.
 648 * To do this we raise a 'barrier'.
 649 * The 'barrier' is a counter that can be raised multiple times
 650 * to count how many activities are happening which preclude
 651 * normal IO.
 652 * We can only raise the barrier if there is no pending IO.
 653 * i.e. if nr_pending == 0.
 654 * We choose only to raise the barrier if no-one is waiting for the
 655 * barrier to go down.  This means that as soon as an IO request
 656 * is ready, no other operations which require a barrier will start
 657 * until the IO request has had a chance.
 658 *
 659 * So: regular IO calls 'wait_barrier'.  When that returns there
 660 *    is no background IO happening.  It must arrange to call
 661 *    allow_barrier when it has finished its IO.
 662 * background IO calls must call raise_barrier.  Once that returns
 663 *    there is no normal IO happening.  It must arrange to call
 664 *    lower_barrier when the particular background IO completes.
 
 
 
 665 */
 666#define RESYNC_DEPTH 32
 667
 668static void raise_barrier(conf_t *conf)
 669{
 
 
 670	spin_lock_irq(&conf->resync_lock);
 671
 672	/* Wait until no block IO is waiting */
 673	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
 674			    conf->resync_lock, );
 
 675
 676	/* block any new IO from starting */
 677	conf->barrier++;
 678
 679	/* Now wait for all pending IO to complete */
 680	wait_event_lock_irq(conf->wait_barrier,
 681			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
 682			    conf->resync_lock, );
 683
 
 684	spin_unlock_irq(&conf->resync_lock);
 
 
 685}
 686
 687static void lower_barrier(conf_t *conf)
 688{
 689	unsigned long flags;
 690	BUG_ON(conf->barrier <= 0);
 691	spin_lock_irqsave(&conf->resync_lock, flags);
 692	conf->barrier--;
 693	spin_unlock_irqrestore(&conf->resync_lock, flags);
 
 694	wake_up(&conf->wait_barrier);
 695}
 696
 697static void wait_barrier(conf_t *conf)
 698{
 699	spin_lock_irq(&conf->resync_lock);
 700	if (conf->barrier) {
 701		conf->nr_waiting++;
 702		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 703				    conf->resync_lock,
 704				    );
 705		conf->nr_waiting--;
 706	}
 707	conf->nr_pending++;
 708	spin_unlock_irq(&conf->resync_lock);
 709}
 710
 711static void allow_barrier(conf_t *conf)
 712{
 713	unsigned long flags;
 714	spin_lock_irqsave(&conf->resync_lock, flags);
 715	conf->nr_pending--;
 716	spin_unlock_irqrestore(&conf->resync_lock, flags);
 717	wake_up(&conf->wait_barrier);
 718}
 719
 720static void freeze_array(conf_t *conf)
 721{
 722	/* stop syncio and normal IO and wait for everything to
 723	 * go quiet.
 724	 * We increment barrier and nr_waiting, and then
 725	 * wait until nr_pending matches nr_queued+1
 726	 * This is called in the context of one normal IO request
 727	 * that has failed. Thus any sync request that might be pending
 728	 * will be blocked by nr_pending, and we need to wait for
 729	 * pending IO requests to complete or be queued for re-try.
 730	 * Thus the number queued (nr_queued) plus this request (1)
 731	 * must match the number of pending IOs (nr_pending) before
 732	 * we continue.
 733	 */
 734	spin_lock_irq(&conf->resync_lock);
 735	conf->barrier++;
 736	conf->nr_waiting++;
 737	wait_event_lock_irq(conf->wait_barrier,
 738			    conf->nr_pending == conf->nr_queued+1,
 739			    conf->resync_lock,
 740			    flush_pending_writes(conf));
 
 741	spin_unlock_irq(&conf->resync_lock);
 742}
 743static void unfreeze_array(conf_t *conf)
 744{
 745	/* reverse the effect of the freeze */
 746	spin_lock_irq(&conf->resync_lock);
 747	conf->barrier--;
 748	conf->nr_waiting--;
 749	wake_up(&conf->wait_barrier);
 750	spin_unlock_irq(&conf->resync_lock);
 
 751}
 752
 753
 754/* duplicate the data pages for behind I/O 
 755 */
 756static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
 757{
 758	int i;
 759	struct bio_vec *bvec;
 760	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
 761					GFP_NOIO);
 762	if (unlikely(!bvecs))
 
 
 763		return;
 764
 765	bio_for_each_segment(bvec, bio, i) {
 766		bvecs[i] = *bvec;
 767		bvecs[i].bv_page = alloc_page(GFP_NOIO);
 768		if (unlikely(!bvecs[i].bv_page))
 769			goto do_sync_io;
 770		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
 771		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
 772		kunmap(bvecs[i].bv_page);
 773		kunmap(bvec->bv_page);
 774	}
 775	r1_bio->behind_bvecs = bvecs;
 776	r1_bio->behind_page_count = bio->bi_vcnt;
 
 
 777	set_bit(R1BIO_BehindIO, &r1_bio->state);
 
 778	return;
 779
 780do_sync_io:
 781	for (i = 0; i < bio->bi_vcnt; i++)
 782		if (bvecs[i].bv_page)
 783			put_page(bvecs[i].bv_page);
 784	kfree(bvecs);
 785	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
 786}
 787
 788static int make_request(mddev_t *mddev, struct bio * bio)
 789{
 790	conf_t *conf = mddev->private;
 791	mirror_info_t *mirror;
 792	r1bio_t *r1_bio;
 793	struct bio *read_bio;
 794	int i, disks;
 795	struct bitmap *bitmap;
 796	unsigned long flags;
 797	const int rw = bio_data_dir(bio);
 798	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 799	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 800	mdk_rdev_t *blocked_rdev;
 801	int plugged;
 802	int first_clone;
 803	int sectors_handled;
 804	int max_sectors;
 
 
 
 805
 806	/*
 807	 * Register the new request and wait if the reconstruction
 808	 * thread has put up a bar for new requests.
 809	 * Continue immediately if no resync is active currently.
 810	 */
 811
 812	md_write_start(mddev, bio); /* wait on superblock update early */
 813
 814	if (bio_data_dir(bio) == WRITE &&
 815	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
 816	    bio->bi_sector < mddev->suspend_hi) {
 817		/* As the suspend_* range is controlled by
 818		 * userspace, we want an interruptible
 819		 * wait.
 820		 */
 821		DEFINE_WAIT(w);
 822		for (;;) {
 823			flush_signals(current);
 824			prepare_to_wait(&conf->wait_barrier,
 825					&w, TASK_INTERRUPTIBLE);
 826			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
 827			    bio->bi_sector >= mddev->suspend_hi)
 828				break;
 829			schedule();
 830		}
 831		finish_wait(&conf->wait_barrier, &w);
 832	}
 833
 834	wait_barrier(conf);
 
 
 
 
 835
 836	bitmap = mddev->bitmap;
 
 
 
 
 837
 838	/*
 839	 * make_request() can abort the operation when READA is being
 840	 * used and no empty request is available.
 841	 *
 842	 */
 843	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 844
 845	r1_bio->master_bio = bio;
 846	r1_bio->sectors = bio->bi_size >> 9;
 847	r1_bio->state = 0;
 848	r1_bio->mddev = mddev;
 849	r1_bio->sector = bio->bi_sector;
 850
 851	/* We might need to issue multiple reads to different
 852	 * devices if there are bad blocks around, so we keep
 853	 * track of the number of reads in bio->bi_phys_segments.
 854	 * If this is 0, there is only one r1_bio and no locking
 855	 * will be needed when requests complete.  If it is
 856	 * non-zero, then it is the number of not-completed requests.
 857	 */
 858	bio->bi_phys_segments = 0;
 859	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 860
 861	if (rw == READ) {
 
 862		/*
 863		 * read balancing logic:
 
 864		 */
 865		int rdisk;
 
 
 
 866
 867read_again:
 868		rdisk = read_balance(conf, r1_bio, &max_sectors);
 869
 870		if (rdisk < 0) {
 871			/* couldn't find anywhere to read from */
 872			raid_end_bio_io(r1_bio);
 873			return 0;
 874		}
 875		mirror = conf->mirrors + rdisk;
 876
 877		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
 878		    bitmap) {
 879			/* Reading from a write-mostly device must
 880			 * take care not to over-take any writes
 881			 * that are 'behind'
 882			 */
 883			wait_event(bitmap->behind_wait,
 884				   atomic_read(&bitmap->behind_writes) == 0);
 885		}
 886		r1_bio->read_disk = rdisk;
 887
 888		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 889		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
 890			    max_sectors);
 891
 892		r1_bio->bios[rdisk] = read_bio;
 893
 894		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 895		read_bio->bi_bdev = mirror->rdev->bdev;
 896		read_bio->bi_end_io = raid1_end_read_request;
 897		read_bio->bi_rw = READ | do_sync;
 898		read_bio->bi_private = r1_bio;
 899
 900		if (max_sectors < r1_bio->sectors) {
 901			/* could not read all from this device, so we will
 902			 * need another r1_bio.
 903			 */
 904
 905			sectors_handled = (r1_bio->sector + max_sectors
 906					   - bio->bi_sector);
 907			r1_bio->sectors = max_sectors;
 908			spin_lock_irq(&conf->device_lock);
 909			if (bio->bi_phys_segments == 0)
 910				bio->bi_phys_segments = 2;
 911			else
 912				bio->bi_phys_segments++;
 913			spin_unlock_irq(&conf->device_lock);
 914			/* Cannot call generic_make_request directly
 915			 * as that will be queued in __make_request
 916			 * and subsequent mempool_alloc might block waiting
 917			 * for it.  So hand bio over to raid1d.
 918			 */
 919			reschedule_retry(r1_bio);
 920
 921			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 922
 923			r1_bio->master_bio = bio;
 924			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
 925			r1_bio->state = 0;
 926			r1_bio->mddev = mddev;
 927			r1_bio->sector = bio->bi_sector + sectors_handled;
 928			goto read_again;
 929		} else
 930			generic_make_request(read_bio);
 931		return 0;
 932	}
 933
 934	/*
 935	 * WRITE:
 
 
 936	 */
 937	/* first select target devices under rcu_lock and
 938	 * inc refcount on their rdev.  Record them by setting
 939	 * bios[x] to bio
 940	 * If there are known/acknowledged bad blocks on any device on
 941	 * which we have seen a write error, we want to avoid writing those
 942	 * blocks.
 943	 * This potentially requires several writes to write around
 944	 * the bad blocks.  Each set of writes gets its own r1bio
 945	 * with a set of bios attached.
 946	 */
 947	plugged = mddev_check_plugged(mddev);
 948
 949	disks = conf->raid_disks;
 950 retry_write:
 951	blocked_rdev = NULL;
 952	rcu_read_lock();
 953	max_sectors = r1_bio->sectors;
 954	for (i = 0;  i < disks; i++) {
 955		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 956		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
 957			atomic_inc(&rdev->nr_pending);
 958			blocked_rdev = rdev;
 959			break;
 960		}
 961		r1_bio->bios[i] = NULL;
 962		if (!rdev || test_bit(Faulty, &rdev->flags)) {
 963			set_bit(R1BIO_Degraded, &r1_bio->state);
 
 964			continue;
 965		}
 966
 967		atomic_inc(&rdev->nr_pending);
 968		if (test_bit(WriteErrorSeen, &rdev->flags)) {
 969			sector_t first_bad;
 970			int bad_sectors;
 971			int is_bad;
 972
 973			is_bad = is_badblock(rdev, r1_bio->sector,
 974					     max_sectors,
 975					     &first_bad, &bad_sectors);
 976			if (is_bad < 0) {
 977				/* mustn't write here until the bad block is
 978				 * acknowledged */
 979				set_bit(BlockedBadBlocks, &rdev->flags);
 980				blocked_rdev = rdev;
 981				break;
 982			}
 983			if (is_bad && first_bad <= r1_bio->sector) {
 984				/* Cannot write here at all */
 985				bad_sectors -= (r1_bio->sector - first_bad);
 986				if (bad_sectors < max_sectors)
 987					/* mustn't write more than bad_sectors
 988					 * to other devices yet
 989					 */
 990					max_sectors = bad_sectors;
 991				rdev_dec_pending(rdev, mddev);
 992				/* We don't set R1BIO_Degraded as that
 993				 * only applies if the disk is
 994				 * missing, so it might be re-added,
 995				 * and we want to know to recover this
 996				 * chunk.
 997				 * In this case the device is here,
 998				 * and the fact that this chunk is not
 999				 * in-sync is recorded in the bad
1000				 * block log
1001				 */
1002				continue;
1003			}
1004			if (is_bad) {
1005				int good_sectors = first_bad - r1_bio->sector;
1006				if (good_sectors < max_sectors)
1007					max_sectors = good_sectors;
1008			}
1009		}
1010		r1_bio->bios[i] = bio;
1011	}
1012	rcu_read_unlock();
1013
1014	if (unlikely(blocked_rdev)) {
1015		/* Wait for this device to become unblocked */
1016		int j;
1017
1018		for (j = 0; j < i; j++)
1019			if (r1_bio->bios[j])
1020				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1021		r1_bio->state = 0;
1022		allow_barrier(conf);
 
1023		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1024		wait_barrier(conf);
1025		goto retry_write;
1026	}
1027
1028	if (max_sectors < r1_bio->sectors) {
1029		/* We are splitting this write into multiple parts, so
1030		 * we need to prepare for allocating another r1_bio.
1031		 */
1032		r1_bio->sectors = max_sectors;
1033		spin_lock_irq(&conf->device_lock);
1034		if (bio->bi_phys_segments == 0)
1035			bio->bi_phys_segments = 2;
1036		else
1037			bio->bi_phys_segments++;
1038		spin_unlock_irq(&conf->device_lock);
1039	}
1040	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
1041
 
 
1042	atomic_set(&r1_bio->remaining, 1);
1043	atomic_set(&r1_bio->behind_remaining, 0);
1044
1045	first_clone = 1;
 
1046	for (i = 0; i < disks; i++) {
1047		struct bio *mbio;
 
1048		if (!r1_bio->bios[i])
1049			continue;
1050
1051		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1052		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
1053
1054		if (first_clone) {
1055			/* do behind I/O ?
1056			 * Not if there are too many, or cannot
1057			 * allocate memory, or a reader on WriteMostly
1058			 * is waiting for behind writes to flush */
1059			if (bitmap &&
1060			    (atomic_read(&bitmap->behind_writes)
1061			     < mddev->bitmap_info.max_write_behind) &&
1062			    !waitqueue_active(&bitmap->behind_wait))
1063				alloc_behind_pages(mbio, r1_bio);
 
1064
1065			bitmap_startwrite(bitmap, r1_bio->sector,
1066					  r1_bio->sectors,
1067					  test_bit(R1BIO_BehindIO,
1068						   &r1_bio->state));
1069			first_clone = 0;
1070		}
1071		if (r1_bio->behind_bvecs) {
1072			struct bio_vec *bvec;
1073			int j;
1074
1075			/* Yes, I really want the '__' version so that
1076			 * we clear any unused pointer in the io_vec, rather
1077			 * than leave them unchanged.  This is important
1078			 * because when we come to free the pages, we won't
1079			 * know the original bi_idx, so we just free
1080			 * them all
1081			 */
1082			__bio_for_each_segment(bvec, mbio, j, 0)
1083				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
1084			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
1085				atomic_inc(&r1_bio->behind_remaining);
1086		}
 
1087
1088		r1_bio->bios[i] = mbio;
1089
1090		mbio->bi_sector	= (r1_bio->sector +
1091				   conf->mirrors[i].rdev->data_offset);
1092		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1093		mbio->bi_end_io	= raid1_end_write_request;
1094		mbio->bi_rw = WRITE | do_flush_fua | do_sync;
 
 
 
 
1095		mbio->bi_private = r1_bio;
1096
1097		atomic_inc(&r1_bio->remaining);
1098		spin_lock_irqsave(&conf->device_lock, flags);
1099		bio_list_add(&conf->pending_bio_list, mbio);
1100		spin_unlock_irqrestore(&conf->device_lock, flags);
1101	}
1102	/* Mustn't call r1_bio_write_done before this next test,
1103	 * as it could result in the bio being freed.
1104	 */
1105	if (sectors_handled < (bio->bi_size >> 9)) {
1106		r1_bio_write_done(r1_bio);
1107		/* We need another r1_bio.  It has already been counted
1108		 * in bio->bi_phys_segments
1109		 */
1110		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1111		r1_bio->master_bio = bio;
1112		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1113		r1_bio->state = 0;
1114		r1_bio->mddev = mddev;
1115		r1_bio->sector = bio->bi_sector + sectors_handled;
1116		goto retry_write;
 
 
 
1117	}
1118
1119	r1_bio_write_done(r1_bio);
1120
1121	/* In case raid1d snuck in to freeze_array */
1122	wake_up(&conf->wait_barrier);
 
1123
1124	if (do_sync || !bitmap || !plugged)
1125		md_wakeup_thread(mddev->thread);
 
1126
1127	return 0;
1128}
1129
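    /*
     * Report array status for /proc/mdstat: number of working devices and
     * a [U_U...] map of which mirrors are in sync.
     */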
1130static void status(struct seq_file *seq, mddev_t *mddev)
1131{
1132	conf_t *conf = mddev->private;
1133	int i;
1134
1135	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1136		   conf->raid_disks - mddev->degraded);
1137	rcu_read_lock();
1138	for (i = 0; i < conf->raid_disks; i++) {
1139		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
1140		seq_printf(seq, "%s",
1141			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1142	}
1143	rcu_read_unlock();
1144	seq_printf(seq, "]");
1145}
1146
1147
1148static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1149{
1150	char b[BDEVNAME_SIZE];
1151	conf_t *conf = mddev->private;
 
1152
1153	/*
1154	 * If it is not operational, then we have already marked it as dead
1155	 * else if it is the last working disks, ignore the error, let the
1156	 * next level up know.
1157	 * else mark the drive as failed
1158	 */
1159	if (test_bit(In_sync, &rdev->flags)
 
1160	    && (conf->raid_disks - mddev->degraded) == 1) {
1161		/*
1162		 * Don't fail the drive, act as though we were just a
1163		 * normal single drive.
1164		 * However don't try a recovery from this drive as
1165		 * it is very likely to fail.
1166		 */
1167		conf->recovery_disabled = mddev->recovery_disabled;
 
1168		return;
1169	}
1170	set_bit(Blocked, &rdev->flags);
1171	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1172		unsigned long flags;
1173		spin_lock_irqsave(&conf->device_lock, flags);
1174		mddev->degraded++;
1175		set_bit(Faulty, &rdev->flags);
1176		spin_unlock_irqrestore(&conf->device_lock, flags);
1177		/*
1178		 * if recovery is running, make sure it aborts.
1179		 */
1180		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1181	} else
1182		set_bit(Faulty, &rdev->flags);
1183	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1184	printk(KERN_ALERT
1185	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
1186	       "md/raid1:%s: Operation continuing on %d devices.\n",
1187	       mdname(mddev), bdevname(rdev->bdev, b),
1188	       mdname(mddev), conf->raid_disks - mddev->degraded);
1189}
1190
1191static void print_conf(conf_t *conf)
1192{
1193	int i;
1194
1195	printk(KERN_DEBUG "RAID1 conf printout:\n");
1196	if (!conf) {
1197		printk(KERN_DEBUG "(!conf)\n");
1198		return;
1199	}
1200	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1201		conf->raid_disks);
1202
1203	rcu_read_lock();
1204	for (i = 0; i < conf->raid_disks; i++) {
1205		char b[BDEVNAME_SIZE];
1206		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
1207		if (rdev)
1208			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1209			       i, !test_bit(In_sync, &rdev->flags),
1210			       !test_bit(Faulty, &rdev->flags),
1211			       bdevname(rdev->bdev,b));
1212	}
1213	rcu_read_unlock();
1214}
1215
1216static void close_sync(conf_t *conf)
1217{
1218	wait_barrier(conf);
1219	allow_barrier(conf);
 
 
 
 
1220
1221	mempool_destroy(conf->r1buf_pool);
1222	conf->r1buf_pool = NULL;
1223}
1224
1225static int raid1_spare_active(mddev_t *mddev)
1226{
1227	int i;
1228	conf_t *conf = mddev->private;
1229	int count = 0;
1230	unsigned long flags;
1231
1232	/*
1233	 * Find all failed disks within the RAID1 configuration 
1234	 * and mark them readable.
1235	 * Called under mddev lock, so rcu protection not needed.
 
 
1236	 */
 
1237	for (i = 0; i < conf->raid_disks; i++) {
1238		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
1239		if (rdev
 
1240		    && !test_bit(Faulty, &rdev->flags)
1241		    && !test_and_set_bit(In_sync, &rdev->flags)) {
1242			count++;
1243			sysfs_notify_dirent_safe(rdev->sysfs_state);
1244		}
1245	}
1246	spin_lock_irqsave(&conf->device_lock, flags);
1247	mddev->degraded -= count;
1248	spin_unlock_irqrestore(&conf->device_lock, flags);
1249
1250	print_conf(conf);
1251	return count;
1252}
1253
1254
1255static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1256{
1257	conf_t *conf = mddev->private;
1258	int err = -EEXIST;
1259	int mirror = 0;
1260	mirror_info_t *p;
1261	int first = 0;
1262	int last = mddev->raid_disks - 1;
1263
1264	if (mddev->recovery_disabled == conf->recovery_disabled)
1265		return -EBUSY;
1266
 
 
 
1267	if (rdev->raid_disk >= 0)
1268		first = last = rdev->raid_disk;
1269
1270	for (mirror = first; mirror <= last; mirror++)
1271		if ( !(p=conf->mirrors+mirror)->rdev) {
1272
1273			disk_stack_limits(mddev->gendisk, rdev->bdev,
1274					  rdev->data_offset << 9);
1275			/* as we don't honour merge_bvec_fn, we must
1276			 * never risk violating it, so limit
1277			 * ->max_segments to one lying with a single
1278			 * page, as a one page request is never in
1279			 * violation.
1280			 */
1281			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1282				blk_queue_max_segments(mddev->queue, 1);
1283				blk_queue_segment_boundary(mddev->queue,
1284							   PAGE_CACHE_SIZE - 1);
1285			}
1286
1287			p->head_position = 0;
1288			rdev->raid_disk = mirror;
1289			err = 0;
1290			/* As all devices are equivalent, we don't need a full recovery
1291			 * if this device was recently part of the array
1292			 */
1293			if (rdev->saved_raid_disk < 0)
1294				conf->fullsync = 1;
1295			rcu_assign_pointer(p->rdev, rdev);
1296			break;
1297		}
1298	md_integrity_add_rdev(rdev, mddev);
1299	print_conf(conf);
1300	return err;
1301}
1302
1303static int raid1_remove_disk(mddev_t *mddev, int number)
1304{
1305	conf_t *conf = mddev->private;
1306	int err = 0;
1307	mdk_rdev_t *rdev;
1308	mirror_info_t *p = conf->mirrors+ number;
 
 
 
1309
1310	print_conf(conf);
1311	rdev = p->rdev;
1312	if (rdev) {
1313		if (test_bit(In_sync, &rdev->flags) ||
1314		    atomic_read(&rdev->nr_pending)) {
1315			err = -EBUSY;
1316			goto abort;
1317		}
1318		/* Only remove non-faulty devices if recovery
1319		 * is not possible.
1320		 */
1321		if (!test_bit(Faulty, &rdev->flags) &&
1322		    mddev->recovery_disabled != conf->recovery_disabled &&
1323		    mddev->degraded < conf->raid_disks) {
1324			err = -EBUSY;
1325			goto abort;
1326		}
1327		p->rdev = NULL;
1328		synchronize_rcu();
1329		if (atomic_read(&rdev->nr_pending)) {
1330			/* lost the race, try later */
1331			err = -EBUSY;
1332			p->rdev = rdev;
1333			goto abort;
 
 
1334		}
1335		err = md_integrity_register(mddev);
1336	}
1337abort:
1338
1339	print_conf(conf);
1340	return err;
1341}
1342
1343
1344static void end_sync_read(struct bio *bio, int error)
1345{
1346	r1bio_t *r1_bio = bio->bi_private;
1347	int i;
 
1348
1349	for (i=r1_bio->mddev->raid_disks; i--; )
1350		if (r1_bio->bios[i] == bio)
1351			break;
1352	BUG_ON(i < 0);
1353	update_head_pos(i, r1_bio);
1354	/*
1355	 * we have read a block, now it needs to be re-written,
1356	 * or re-read if the read failed.
1357	 * We don't do much here, just schedule handling by raid1d
1358	 */
1359	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1360		set_bit(R1BIO_Uptodate, &r1_bio->state);
1361
1362	if (atomic_dec_and_test(&r1_bio->remaining))
1363		reschedule_retry(r1_bio);
1364}
1365
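    /*
     * Completion handler for writes issued during resync/recovery.  On
     * failure, keep the relevant bitmap bits set and mark the r1bio so
     * raid1d can record bad blocks; on success, note if a previously bad
     * range on this device has now been made good.
     */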
1366static void end_sync_write(struct bio *bio, int error)
1367{
1368	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1369	r1bio_t *r1_bio = bio->bi_private;
1370	mddev_t *mddev = r1_bio->mddev;
1371	conf_t *conf = mddev->private;
1372	int i;
1373	int mirror=0;
1374	sector_t first_bad;
1375	int bad_sectors;
 
1376
1377	for (i = 0; i < conf->raid_disks; i++)
1378		if (r1_bio->bios[i] == bio) {
1379			mirror = i;
1380			break;
1381		}
1382	if (!uptodate) {
1383		sector_t sync_blocks = 0;
1384		sector_t s = r1_bio->sector;
1385		long sectors_to_go = r1_bio->sectors;
1386		/* make sure these bits don't get cleared. */
1387		do {
1388			bitmap_end_sync(mddev->bitmap, s,
1389					&sync_blocks, 1);
1390			s += sync_blocks;
1391			sectors_to_go -= sync_blocks;
1392		} while (sectors_to_go > 0);
1393		set_bit(WriteErrorSeen,
1394			&conf->mirrors[mirror].rdev->flags);
1395		set_bit(R1BIO_WriteError, &r1_bio->state);
1396	} else if (is_badblock(conf->mirrors[mirror].rdev,
1397			       r1_bio->sector,
1398			       r1_bio->sectors,
1399			       &first_bad, &bad_sectors) &&
1400		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1401				r1_bio->sector,
1402				r1_bio->sectors,
1403				&first_bad, &bad_sectors)
1404		)
1405		set_bit(R1BIO_MadeGood, &r1_bio->state);
1406
1407	update_head_pos(mirror, r1_bio);
1408
1409	if (atomic_dec_and_test(&r1_bio->remaining)) {
1410		int s = r1_bio->sectors;
1411		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1412		    test_bit(R1BIO_WriteError, &r1_bio->state))
1413			reschedule_retry(r1_bio);
1414		else {
1415			put_buf(r1_bio);
1416			md_done_sync(mddev, s, uptodate);
1417		}
1418	}
1419}
1420
1421static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
1422			    int sectors, struct page *page, int rw)
1423{
1424	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1425		/* success */
1426		return 1;
1427	if (rw == WRITE)
1428		set_bit(WriteErrorSeen, &rdev->flags);
 
 
 
 
 
1429	/* need to record an error - either for the block or the device */
1430	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1431		md_error(rdev->mddev, rdev);
1432	return 0;
1433}
1434
1435static int fix_sync_read_error(r1bio_t *r1_bio)
1436{
1437	/* Try some synchronous reads of other devices to get
1438	 * good data, much like with normal read errors.  Only
1439	 * read into the pages we already have so we don't
1440	 * need to re-issue the read request.
1441	 * We don't need to freeze the array, because being in an
1442	 * active sync request, there is no normal IO, and
1443	 * no overlapping syncs.
1444	 * We don't need to check is_badblock() again as we
1445	 * made sure that anything with a bad block in range
1446	 * will have bi_end_io clear.
1447	 */
1448	mddev_t *mddev = r1_bio->mddev;
1449	conf_t *conf = mddev->private;
1450	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
 
1451	sector_t sect = r1_bio->sector;
1452	int sectors = r1_bio->sectors;
1453	int idx = 0;
1454
1455	while(sectors) {
1456		int s = sectors;
1457		int d = r1_bio->read_disk;
1458		int success = 0;
1459		mdk_rdev_t *rdev;
1460		int start;
1461
1462		if (s > (PAGE_SIZE>>9))
1463			s = PAGE_SIZE >> 9;
1464		do {
1465			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1466				/* No rcu protection needed here; devices
1467				 * can only be removed when no resync is
1468				 * active, and resync is currently active
1469				 */
1470				rdev = conf->mirrors[d].rdev;
1471				if (sync_page_io(rdev, sect, s<<9,
1472						 bio->bi_io_vec[idx].bv_page,
1473						 READ, false)) {
1474					success = 1;
1475					break;
1476				}
1477			}
1478			d++;
1479			if (d == conf->raid_disks)
1480				d = 0;
1481		} while (!success && d != r1_bio->read_disk);
1482
1483		if (!success) {
1484			char b[BDEVNAME_SIZE];
1485			int abort = 0;
1486			/* Cannot read from anywhere, this block is lost.
1487			 * Record a bad block on each device.  If that doesn't
1488			 * work just disable and interrupt the recovery.
1489			 * Don't fail devices as that won't really help.
1490			 */
1491			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
1492			       " for block %llu\n",
1493			       mdname(mddev),
1494			       bdevname(bio->bi_bdev, b),
1495			       (unsigned long long)r1_bio->sector);
1496			for (d = 0; d < conf->raid_disks; d++) {
1497				rdev = conf->mirrors[d].rdev;
1498				if (!rdev || test_bit(Faulty, &rdev->flags))
1499					continue;
1500				if (!rdev_set_badblocks(rdev, sect, s, 0))
1501					abort = 1;
1502			}
1503			if (abort) {
1504				mddev->recovery_disabled = 1;
 
1505				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1506				md_done_sync(mddev, r1_bio->sectors, 0);
1507				put_buf(r1_bio);
1508				return 0;
1509			}
1510			/* Try next page */
1511			sectors -= s;
1512			sect += s;
1513			idx++;
1514			continue;
1515		}
1516
1517		start = d;
1518		/* write it back and re-read */
1519		while (d != r1_bio->read_disk) {
1520			if (d == 0)
1521				d = conf->raid_disks;
1522			d--;
1523			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1524				continue;
1525			rdev = conf->mirrors[d].rdev;
1526			if (r1_sync_page_io(rdev, sect, s,
1527					    bio->bi_io_vec[idx].bv_page,
1528					    WRITE) == 0) {
1529				r1_bio->bios[d]->bi_end_io = NULL;
1530				rdev_dec_pending(rdev, mddev);
1531			}
1532		}
1533		d = start;
1534		while (d != r1_bio->read_disk) {
1535			if (d == 0)
1536				d = conf->raid_disks;
1537			d--;
1538			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1539				continue;
1540			rdev = conf->mirrors[d].rdev;
1541			if (r1_sync_page_io(rdev, sect, s,
1542					    bio->bi_io_vec[idx].bv_page,
1543					    READ) != 0)
1544				atomic_add(s, &rdev->corrected_errors);
1545		}
1546		sectors -= s;
1547		sect += s;
1548		idx ++;
1549	}
1550	set_bit(R1BIO_Uptodate, &r1_bio->state);
1551	set_bit(BIO_UPTODATE, &bio->bi_flags);
1552	return 1;
1553}
1554
1555static int process_checks(r1bio_t *r1_bio)
1556{
1557	/* We have read all readable devices.  If we haven't
1558	 * got the block, then there is no hope left.
1559	 * If we have, then we want to do a comparison
1560	 * and skip the write if everything is the same.
1561	 * If any blocks failed to read, then we need to
1562	 * attempt an over-write
1563	 */
1564	mddev_t *mddev = r1_bio->mddev;
1565	conf_t *conf = mddev->private;
1566	int primary;
1567	int i;
 
1568
1569	for (primary = 0; primary < conf->raid_disks; primary++)
1570		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1571		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1572			r1_bio->bios[primary]->bi_end_io = NULL;
1573			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1574			break;
1575		}
1576	r1_bio->read_disk = primary;
1577	for (i = 0; i < conf->raid_disks; i++) {
1578		int j;
1579		int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
1580		struct bio *pbio = r1_bio->bios[primary];
1581		struct bio *sbio = r1_bio->bios[i];
1582		int size;
 
 
 
 
 
1583
1584		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
1585			continue;
 
 
 
 
 
1586
1587		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
1588			for (j = vcnt; j-- ; ) {
1589				struct page *p, *s;
1590				p = pbio->bi_io_vec[j].bv_page;
1591				s = sbio->bi_io_vec[j].bv_page;
1592				if (memcmp(page_address(p),
1593					   page_address(s),
1594					   PAGE_SIZE))
1595					break;
1596			}
1597		} else
1598			j = 0;
1599		if (j >= 0)
1600			mddev->resync_mismatches += r1_bio->sectors;
1601		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
1602			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
1603			/* No need to write to this device. */
1604			sbio->bi_end_io = NULL;
1605			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1606			continue;
1607		}
1608		/* fixup the bio for reuse */
1609		sbio->bi_vcnt = vcnt;
1610		sbio->bi_size = r1_bio->sectors << 9;
1611		sbio->bi_idx = 0;
1612		sbio->bi_phys_segments = 0;
1613		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1614		sbio->bi_flags |= 1 << BIO_UPTODATE;
1615		sbio->bi_next = NULL;
1616		sbio->bi_sector = r1_bio->sector +
1617			conf->mirrors[i].rdev->data_offset;
1618		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1619		size = sbio->bi_size;
1620		for (j = 0; j < vcnt ; j++) {
1621			struct bio_vec *bi;
1622			bi = &sbio->bi_io_vec[j];
1623			bi->bv_offset = 0;
1624			if (size > PAGE_SIZE)
1625				bi->bv_len = PAGE_SIZE;
1626			else
1627				bi->bv_len = size;
1628			size -= PAGE_SIZE;
1629			memcpy(page_address(bi->bv_page),
1630			       page_address(pbio->bi_io_vec[j].bv_page),
1631			       PAGE_SIZE);
1632		}
1633	}
1634	return 0;
1635}
1636
1637static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1638{
1639	conf_t *conf = mddev->private;
1640	int i;
1641	int disks = conf->raid_disks;
1642	struct bio *bio, *wbio;
1643
1644	bio = r1_bio->bios[r1_bio->read_disk];
1645
1646	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
1647		/* ouch - failed to read all of that. */
1648		if (!fix_sync_read_error(r1_bio))
1649			return;
1650
1651	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1652		if (process_checks(r1_bio) < 0)
1653			return;
1654	/*
1655	 * schedule writes
1656	 */
1657	atomic_set(&r1_bio->remaining, 1);
1658	for (i = 0; i < disks ; i++) {
1659		wbio = r1_bio->bios[i];
1660		if (wbio->bi_end_io == NULL ||
1661		    (wbio->bi_end_io == end_sync_read &&
1662		     (i == r1_bio->read_disk ||
1663		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1664			continue;
1665
1666		wbio->bi_rw = WRITE;
1667		wbio->bi_end_io = end_sync_write;
1668		atomic_inc(&r1_bio->remaining);
1669		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1670
1671		generic_make_request(wbio);
1672	}
1673
1674	if (atomic_dec_and_test(&r1_bio->remaining)) {
1675		/* if we're here, all write(s) have completed, so clean up */
1676		md_done_sync(mddev, r1_bio->sectors, 1);
1677		put_buf(r1_bio);
1678	}
1679}
1680
1681/*
1682 * This is a kernel thread which:
1683 *
1684 *	1.	Retries failed read operations on working mirrors.
1685 *	2.	Updates the raid superblock when problems are encountered.
1686 *	3.	Performs writes following reads for array synchronising.
1687 */
1688
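    /*
     * Try to repair a read error: for each page-sized chunk, read the data
     * from some other in-sync mirror, write it back over the failing
     * region on the other mirrors, and re-read to confirm the correction.
     */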
1689static void fix_read_error(conf_t *conf, int read_disk,
1690			   sector_t sect, int sectors)
1691{
1692	mddev_t *mddev = conf->mddev;
1693	while(sectors) {
1694		int s = sectors;
1695		int d = read_disk;
1696		int success = 0;
1697		int start;
1698		mdk_rdev_t *rdev;
1699
1700		if (s > (PAGE_SIZE>>9))
1701			s = PAGE_SIZE >> 9;
1702
1703		do {
1704			/* Note: no rcu protection needed here
1705			 * as this is synchronous in the raid1d thread
1706			 * which is the thread that might remove
1707			 * a device.  If raid1d ever becomes multi-threaded....
1708			 */
1709			sector_t first_bad;
1710			int bad_sectors;
1711
1712			rdev = conf->mirrors[d].rdev;
 
1713			if (rdev &&
1714			    test_bit(In_sync, &rdev->flags) &&
 
 
1715			    is_badblock(rdev, sect, s,
1716					&first_bad, &bad_sectors) == 0 &&
1717			    sync_page_io(rdev, sect, s<<9,
1718					 conf->tmppage, READ, false))
1719				success = 1;
1720			else {
1721				d++;
1722				if (d == conf->raid_disks)
1723					d = 0;
1724			}
 
 
 
 
 
1725		} while (!success && d != read_disk);
1726
1727		if (!success) {
1728			/* Cannot read from anywhere - mark it bad */
1729			mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev;
1730			if (!rdev_set_badblocks(rdev, sect, s, 0))
1731				md_error(mddev, rdev);
1732			break;
1733		}
1734		/* write it back and re-read */
1735		start = d;
1736		while (d != read_disk) {
1737			if (d==0)
1738				d = conf->raid_disks;
1739			d--;
1740			rdev = conf->mirrors[d].rdev;
 
1741			if (rdev &&
1742			    test_bit(In_sync, &rdev->flags))
 
 
1743				r1_sync_page_io(rdev, sect, s,
1744						conf->tmppage, WRITE);
 
 
 
1745		}
1746		d = start;
1747		while (d != read_disk) {
1748			char b[BDEVNAME_SIZE];
1749			if (d==0)
1750				d = conf->raid_disks;
1751			d--;
1752			rdev = conf->mirrors[d].rdev;
 
1753			if (rdev &&
1754			    test_bit(In_sync, &rdev->flags)) {
 
 
1755				if (r1_sync_page_io(rdev, sect, s,
1756						    conf->tmppage, READ)) {
1757					atomic_add(s, &rdev->corrected_errors);
1758					printk(KERN_INFO
1759					       "md/raid1:%s: read error corrected "
1760					       "(%d sectors at %llu on %s)\n",
1761					       mdname(mddev), s,
1762					       (unsigned long long)(sect +
1763					           rdev->data_offset),
1764					       bdevname(rdev->bdev, b));
1765				}
1766			}
 
 
1767		}
1768		sectors -= s;
1769		sect += s;
1770	}
1771}
1772
1773static void bi_complete(struct bio *bio, int error)
1774{
1775	complete((struct completion *)bio->bi_private);
1776}
1777
1778static int submit_bio_wait(int rw, struct bio *bio)
1779{
1780	struct completion event;
1781	rw |= REQ_SYNC;
1782
1783	init_completion(&event);
1784	bio->bi_private = &event;
1785	bio->bi_end_io = bi_complete;
1786	submit_bio(rw, bio);
1787	wait_for_completion(&event);
1788
1789	return test_bit(BIO_UPTODATE, &bio->bi_flags);
1790}
1791
1792static int narrow_write_error(r1bio_t *r1_bio, int i)
1793{
1794	mddev_t *mddev = r1_bio->mddev;
1795	conf_t *conf = mddev->private;
1796	mdk_rdev_t *rdev = conf->mirrors[i].rdev;
1797	int vcnt, idx;
1798	struct bio_vec *vec;
1799
1800	/* bio has the data to be written to device 'i' where
1801	 * we just recently had a write error.
1802	 * We repeatedly clone the bio and trim down to one block,
1803	 * then try the write.  Where the write fails we record
1804	 * a bad block.
1805	 * It is conceivable that the bio doesn't exactly align with
1806	 * blocks.  We must handle this somehow.
1807	 *
1808	 * We currently own a reference on the rdev.
1809	 */
1810
1811	int block_sectors;
1812	sector_t sector;
1813	int sectors;
1814	int sect_to_write = r1_bio->sectors;
1815	int ok = 1;
1816
1817	if (rdev->badblocks.shift < 0)
1818		return 0;
1819
1820	block_sectors = 1 << rdev->badblocks.shift;
 
1821	sector = r1_bio->sector;
1822	sectors = ((sector + block_sectors)
1823		   & ~(sector_t)(block_sectors - 1))
1824		- sector;
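	/*
	 * Example: with block_sectors == 8 (a badblock shift of 3) and
	 * r1_bio->sector == 21, sectors = ((21 + 8) & ~7) - 21 = 3, so
	 * the first write covers only the tail of the partially-aligned
	 * block; later iterations use whole blocks of block_sectors.
	 */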
1825
1826	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
1827		vcnt = r1_bio->behind_page_count;
1828		vec = r1_bio->behind_bvecs;
1829		idx = 0;
1830		while (vec[idx].bv_page == NULL)
1831			idx++;
1832	} else {
1833		vcnt = r1_bio->master_bio->bi_vcnt;
1834		vec = r1_bio->master_bio->bi_io_vec;
1835		idx = r1_bio->master_bio->bi_idx;
1836	}
1837	while (sect_to_write) {
1838		struct bio *wbio;
1839		if (sectors > sect_to_write)
1840			sectors = sect_to_write;
1841		/* Write at 'sector' for 'sectors'*/
1842
1843		wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
1844		memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
1845		wbio->bi_sector = r1_bio->sector;
1846		wbio->bi_rw = WRITE;
1847		wbio->bi_vcnt = vcnt;
1848		wbio->bi_size = r1_bio->sectors << 9;
1849		wbio->bi_idx = idx;
1850
1851		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
1852		wbio->bi_sector += rdev->data_offset;
1853		wbio->bi_bdev = rdev->bdev;
1854		if (submit_bio_wait(WRITE, wbio) == 0)
 
 
 
 
 
 
1855			/* failure! */
1856			ok = rdev_set_badblocks(rdev, sector,
1857						sectors, 0)
1858				&& ok;
1859
1860		bio_put(wbio);
1861		sect_to_write -= sectors;
1862		sector += sectors;
1863		sectors = block_sectors;
1864	}
1865	return ok;
1866}
1867
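/*
 * All resync/recovery writes for this r1bio have completed: clear bad
 * blocks that were successfully over-written, record new bad blocks (or
 * fail the device) where a write failed, then account the finished sync.
 */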
1868static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
1869{
1870	int m;
1871	int s = r1_bio->sectors;
1872	for (m = 0; m < conf->raid_disks ; m++) {
1873		mdk_rdev_t *rdev = conf->mirrors[m].rdev;
1874		struct bio *bio = r1_bio->bios[m];
1875		if (bio->bi_end_io == NULL)
1876			continue;
1877		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
1878		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
1879			rdev_clear_badblocks(rdev, r1_bio->sector, s);
1880		}
1881		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
1882		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
1883			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
1884				md_error(conf->mddev, rdev);
1885		}
1886	}
1887	put_buf(r1_bio);
1888	md_done_sync(conf->mddev, s, 1);
1889}
1890
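/*
 * A normal write needed bad-block handling: branches flagged
 * IO_MADE_GOOD get their bad blocks cleared, branches that hit a write
 * error are narrowed down with narrow_write_error() and recorded
 * (failing the device if that is not possible), then the master bio is
 * completed.
 */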
1891static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
1892{
1893	int m;
1894	for (m = 0; m < conf->raid_disks ; m++)
 
 
1895		if (r1_bio->bios[m] == IO_MADE_GOOD) {
1896			mdk_rdev_t *rdev = conf->mirrors[m].rdev;
1897			rdev_clear_badblocks(rdev,
1898					     r1_bio->sector,
1899					     r1_bio->sectors);
1900			rdev_dec_pending(rdev, conf->mddev);
1901		} else if (r1_bio->bios[m] != NULL) {
1902			/* This drive got a write error.  We need to
1903			 * narrow down and record precise write
1904			 * errors.
1905			 */
 
1906			if (!narrow_write_error(r1_bio, m)) {
1907				md_error(conf->mddev,
1908					 conf->mirrors[m].rdev);
1909				/* an I/O failed, we can't clear the bitmap */
1910				set_bit(R1BIO_Degraded, &r1_bio->state);
1911			}
1912			rdev_dec_pending(conf->mirrors[m].rdev,
1913					 conf->mddev);
1914		}
1915	if (test_bit(R1BIO_WriteError, &r1_bio->state))
1916		close_write(r1_bio);
1917	raid_end_bio_io(r1_bio);
1918}
1919
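/*
 * A normal read failed: with the array frozen, try to repair the bad
 * sectors in place (or fail the device if the array is read-only), then
 * pick another mirror with read_balance() and resubmit the read,
 * splitting it if only part of the range is readable there.
 */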
1920static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
1921{
1922	int disk;
1923	int max_sectors;
1924	mddev_t *mddev = conf->mddev;
1925	struct bio *bio;
1926	char b[BDEVNAME_SIZE];
1927	mdk_rdev_t *rdev;
1928
1929	clear_bit(R1BIO_ReadError, &r1_bio->state);
1930	/* we got a read error. Maybe the drive is bad.  Maybe just
1931	 * the block and we can fix it.
1932	 * We freeze all other IO, and try reading the block from
1933	 * other devices.  When we find one, we re-write
1934	 * and check whether that fixes the read error.
1935	 * This is all done synchronously while the array is
1936	 * frozen
1937	 */
1938	if (mddev->ro == 0) {
1939		freeze_array(conf);
1940		fix_read_error(conf, r1_bio->read_disk,
1941			       r1_bio->sector, r1_bio->sectors);
1942		unfreeze_array(conf);
1943	} else
1944		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1945
1946	bio = r1_bio->bios[r1_bio->read_disk];
1947	bdevname(bio->bi_bdev, b);
1948read_more:
1949	disk = read_balance(conf, r1_bio, &max_sectors);
1950	if (disk == -1) {
1951		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
1952		       " read error for block %llu\n",
1953		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
1954		raid_end_bio_io(r1_bio);
1955	} else {
1956		const unsigned long do_sync
1957			= r1_bio->master_bio->bi_rw & REQ_SYNC;
1958		if (bio) {
1959			r1_bio->bios[r1_bio->read_disk] =
1960				mddev->ro ? IO_BLOCKED : NULL;
1961			bio_put(bio);
1962		}
1963		r1_bio->read_disk = disk;
1964		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
1965		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
1966		r1_bio->bios[r1_bio->read_disk] = bio;
1967		rdev = conf->mirrors[disk].rdev;
1968		printk_ratelimited(KERN_ERR
1969				   "md/raid1:%s: redirecting sector %llu"
1970				   " to other mirror: %s\n",
1971				   mdname(mddev),
1972				   (unsigned long long)r1_bio->sector,
1973				   bdevname(rdev->bdev, b));
1974		bio->bi_sector = r1_bio->sector + rdev->data_offset;
1975		bio->bi_bdev = rdev->bdev;
1976		bio->bi_end_io = raid1_end_read_request;
1977		bio->bi_rw = READ | do_sync;
1978		bio->bi_private = r1_bio;
1979		if (max_sectors < r1_bio->sectors) {
1980			/* Drat - have to split this up more */
1981			struct bio *mbio = r1_bio->master_bio;
1982			int sectors_handled = (r1_bio->sector + max_sectors
1983					       - mbio->bi_sector);
1984			r1_bio->sectors = max_sectors;
1985			spin_lock_irq(&conf->device_lock);
1986			if (mbio->bi_phys_segments == 0)
1987				mbio->bi_phys_segments = 2;
1988			else
1989				mbio->bi_phys_segments++;
1990			spin_unlock_irq(&conf->device_lock);
1991			generic_make_request(bio);
1992			bio = NULL;
1993
1994			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1995
1996			r1_bio->master_bio = mbio;
1997			r1_bio->sectors = (mbio->bi_size >> 9)
1998					  - sectors_handled;
1999			r1_bio->state = 0;
2000			set_bit(R1BIO_ReadError, &r1_bio->state);
2001			r1_bio->mddev = mddev;
2002			r1_bio->sector = mbio->bi_sector + sectors_handled;
2003
2004			goto read_more;
2005		} else
2006			generic_make_request(bio);
2007	}
2008}
2009
2010static void raid1d(mddev_t *mddev)
2011{
2012	r1bio_t *r1_bio;
 
2013	unsigned long flags;
2014	conf_t *conf = mddev->private;
2015	struct list_head *head = &conf->retry_list;
2016	struct blk_plug plug;
 
2017
2018	md_check_recovery(mddev);
2019
2020	blk_start_plug(&plug);
2021	for (;;) {
2022
2023		if (atomic_read(&mddev->plug_cnt) == 0)
2024			flush_pending_writes(conf);
2025
2026		spin_lock_irqsave(&conf->device_lock, flags);
2027		if (list_empty(head)) {
2028			spin_unlock_irqrestore(&conf->device_lock, flags);
2029			break;
2030		}
2031		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
2032		list_del(head->prev);
2033		conf->nr_queued--;
 
2034		spin_unlock_irqrestore(&conf->device_lock, flags);
2035
2036		mddev = r1_bio->mddev;
2037		conf = mddev->private;
2038		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2039			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2040			    test_bit(R1BIO_WriteError, &r1_bio->state))
2041				handle_sync_write_finished(conf, r1_bio);
2042			else
2043				sync_request_write(mddev, r1_bio);
2044		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2045			   test_bit(R1BIO_WriteError, &r1_bio->state))
2046			handle_write_finished(conf, r1_bio);
2047		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2048			handle_read_error(conf, r1_bio);
2049		else
2050			/* just a partial read to be scheduled from separate
2051			 * context
2052			 */
2053			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
2054
2055		cond_resched();
2056		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2057			md_check_recovery(mddev);
2058	}
2059	blk_finish_plug(&plug);
2060}
2061
2062
2063static int init_resync(conf_t *conf)
2064{
2065	int buffs;
2066
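	/* 2MB resync window / 64KB resync blocks = 32 preallocated buffers */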
2067	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2068	BUG_ON(conf->r1buf_pool);
2069	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
2070					  conf->poolinfo);
2071	if (!conf->r1buf_pool)
2072		return -ENOMEM;
2073	conf->next_resync = 0;
2074	return 0;
2075}
2076
2077/*
2078 * perform a "sync" on one "block"
2079 *
2080 * We need to make sure that no normal I/O request - particularly write
2081 * requests - conflict with active sync requests.
2082 *
2083 * This is achieved by tracking pending requests and a 'barrier' concept
2084 * that can be installed to exclude normal IO requests.
2085 */
2086
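/*
 * Returns the number of sectors for which resync I/O was actually
 * scheduled or, when *skipped is set, the number of sectors that can be
 * treated as already in sync without issuing any I/O.
 */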
2087static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 
2088{
2089	conf_t *conf = mddev->private;
2090	r1bio_t *r1_bio;
2091	struct bio *bio;
2092	sector_t max_sector, nr_sectors;
2093	int disk = -1;
2094	int i;
2095	int wonly = -1;
2096	int write_targets = 0, read_targets = 0;
2097	sector_t sync_blocks;
2098	int still_degraded = 0;
2099	int good_sectors = RESYNC_SECTORS;
2100	int min_bad = 0; /* number of sectors that are bad in all devices */
 
 
2101
2102	if (!conf->r1buf_pool)
2103		if (init_resync(conf))
2104			return 0;
2105
2106	max_sector = mddev->dev_sectors;
2107	if (sector_nr >= max_sector) {
2108		/* If we aborted, we need to abort the
2109		 * sync on the 'current' bitmap chunk (there will
2110		 * only be one in raid1 resync).
2111		 * We can find the current address in mddev->curr_resync
2112		 */
2113		if (mddev->curr_resync < max_sector) /* aborted */
2114			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2115						&sync_blocks, 1);
2116		else /* completed sync */
2117			conf->fullsync = 0;
2118
2119		bitmap_close_sync(mddev->bitmap);
2120		close_sync(conf);
 
 
 
 
 
2121		return 0;
2122	}
2123
2124	if (mddev->bitmap == NULL &&
2125	    mddev->recovery_cp == MaxSector &&
2126	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2127	    conf->fullsync == 0) {
2128		*skipped = 1;
2129		return max_sector - sector_nr;
2130	}
2131	/* before building a request, check if we can skip these blocks.
2132	 * This call to bitmap_start_sync doesn't actually record anything
2133	 */
2134	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2135	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2136		/* We can skip this block, and probably several more */
2137		*skipped = 1;
2138		return sync_blocks;
2139	}
 
2140	/*
2141	 * If there is non-resync activity waiting for a turn,
2142	 * and resync is going fast enough,
2143	 * then let it through before starting on this new sync request.
2144	 */
2145	if (!go_faster && conf->nr_waiting)
2146		msleep_interruptible(1000);
2147
2148	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2149	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
2150	raise_barrier(conf);
 
 
2151
2152	conf->next_resync = sector_nr;
 
 
 
 
2153
2154	rcu_read_lock();
2155	/*
2156	 * If we get a correctable read error during resync or recovery,
2157	 * we might want to read from a different device.  So we
2158	 * flag all drives that could conceivably be read from for READ,
2159	 * and any others (which will be non-In_sync devices) for WRITE.
2160	 * If a read fails, we try reading from something else for which READ
2161	 * is OK.
2162	 */
2163
2164	r1_bio->mddev = mddev;
2165	r1_bio->sector = sector_nr;
2166	r1_bio->state = 0;
2167	set_bit(R1BIO_IsSync, &r1_bio->state);
 
 
2168
2169	for (i=0; i < conf->raid_disks; i++) {
2170		mdk_rdev_t *rdev;
2171		bio = r1_bio->bios[i];
2172
2173		/* take from bio_init */
2174		bio->bi_next = NULL;
2175		bio->bi_flags &= ~(BIO_POOL_MASK-1);
2176		bio->bi_flags |= 1 << BIO_UPTODATE;
2177		bio->bi_comp_cpu = -1;
2178		bio->bi_rw = READ;
2179		bio->bi_vcnt = 0;
2180		bio->bi_idx = 0;
2181		bio->bi_phys_segments = 0;
2182		bio->bi_size = 0;
2183		bio->bi_end_io = NULL;
2184		bio->bi_private = NULL;
2185
2186		rdev = rcu_dereference(conf->mirrors[i].rdev);
2187		if (rdev == NULL ||
2188		    test_bit(Faulty, &rdev->flags)) {
2189			still_degraded = 1;
 
2190		} else if (!test_bit(In_sync, &rdev->flags)) {
2191			bio->bi_rw = WRITE;
2192			bio->bi_end_io = end_sync_write;
2193			write_targets ++;
2194		} else {
2195			/* may need to read from here */
2196			sector_t first_bad = MaxSector;
2197			int bad_sectors;
2198
2199			if (is_badblock(rdev, sector_nr, good_sectors,
2200					&first_bad, &bad_sectors)) {
2201				if (first_bad > sector_nr)
2202					good_sectors = first_bad - sector_nr;
2203				else {
2204					bad_sectors -= (sector_nr - first_bad);
2205					if (min_bad == 0 ||
2206					    min_bad > bad_sectors)
2207						min_bad = bad_sectors;
2208				}
2209			}
2210			if (sector_nr < first_bad) {
2211				if (test_bit(WriteMostly, &rdev->flags)) {
2212					if (wonly < 0)
2213						wonly = i;
2214				} else {
2215					if (disk < 0)
2216						disk = i;
2217				}
2218				bio->bi_rw = READ;
2219				bio->bi_end_io = end_sync_read;
2220				read_targets++;
2221			}
2222		}
2223		if (bio->bi_end_io) {
2224			atomic_inc(&rdev->nr_pending);
2225			bio->bi_sector = sector_nr + rdev->data_offset;
2226			bio->bi_bdev = rdev->bdev;
2227			bio->bi_private = r1_bio;
 
2228		}
2229	}
2230	rcu_read_unlock();
2231	if (disk < 0)
2232		disk = wonly;
2233	r1_bio->read_disk = disk;
2234
2235	if (read_targets == 0 && min_bad > 0) {
2236		/* These sectors are bad on all InSync devices, so we
2237		 * need to mark them bad on all write targets
2238		 */
2239		int ok = 1;
2240		for (i = 0 ; i < conf->raid_disks ; i++)
2241			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2242				mdk_rdev_t *rdev =
2243					rcu_dereference(conf->mirrors[i].rdev);
2244				ok = rdev_set_badblocks(rdev, sector_nr,
2245							min_bad, 0
2246					) && ok;
2247			}
2248		set_bit(MD_CHANGE_DEVS, &mddev->flags);
2249		*skipped = 1;
2250		put_buf(r1_bio);
2251
2252		if (!ok) {
2253			/* Cannot record the badblocks, so need to
2254			 * abort the resync.
2255			 * If there are multiple read targets, could just
2256			 * fail the really bad ones ???
2257			 */
2258			conf->recovery_disabled = mddev->recovery_disabled;
2259			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2260			return 0;
2261		} else
2262			return min_bad;
2263
2264	}
2265	if (min_bad > 0 && min_bad < good_sectors) {
2266		/* only resync enough to reach the next bad->good
2267		 * transition */
2268		good_sectors = min_bad;
2269	}
2270
2271	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2272		/* extra read targets are also write targets */
2273		write_targets += read_targets-1;
2274
2275	if (write_targets == 0 || read_targets == 0) {
2276		/* There is nowhere to write, so all non-sync
2277		 * drives must be failed - so we are finished
2278		 */
2279		sector_t rv = max_sector - sector_nr;
 
 
 
2280		*skipped = 1;
2281		put_buf(r1_bio);
2282		return rv;
2283	}
2284
2285	if (max_sector > mddev->resync_max)
2286		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2287	if (max_sector > sector_nr + good_sectors)
2288		max_sector = sector_nr + good_sectors;
2289	nr_sectors = 0;
2290	sync_blocks = 0;
2291	do {
2292		struct page *page;
2293		int len = PAGE_SIZE;
2294		if (sector_nr + (len>>9) > max_sector)
2295			len = (max_sector - sector_nr) << 9;
2296		if (len == 0)
2297			break;
2298		if (sync_blocks == 0) {
2299			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2300					       &sync_blocks, still_degraded) &&
2301			    !conf->fullsync &&
2302			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2303				break;
2304			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
2305			if ((len >> 9) > sync_blocks)
2306				len = sync_blocks<<9;
2307		}
2308
2309		for (i=0 ; i < conf->raid_disks; i++) {
 
 
2310			bio = r1_bio->bios[i];
 
2311			if (bio->bi_end_io) {
2312				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2313				if (bio_add_page(bio, page, len, 0) == 0) {
2314					/* stop here */
2315					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2316					while (i > 0) {
2317						i--;
2318						bio = r1_bio->bios[i];
2319						if (bio->bi_end_io==NULL)
2320							continue;
2321						/* remove last page from this bio */
2322						bio->bi_vcnt--;
2323						bio->bi_size -= len;
2324						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
2325					}
2326					goto bio_full;
2327				}
2328			}
2329		}
2330		nr_sectors += len>>9;
2331		sector_nr += len>>9;
2332		sync_blocks -= (len>>9);
2333	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
2334 bio_full:
2335	r1_bio->sectors = nr_sectors;
2336
2337	/* For a user-requested sync, we read all readable devices and do a
2338	 * compare
2339	 */
2340	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2341		atomic_set(&r1_bio->remaining, read_targets);
2342		for (i=0; i<conf->raid_disks; i++) {
2343			bio = r1_bio->bios[i];
2344			if (bio->bi_end_io == end_sync_read) {
2345				md_sync_acct(bio->bi_bdev, nr_sectors);
2346				generic_make_request(bio);
 
 
 
2347			}
2348		}
2349	} else {
2350		atomic_set(&r1_bio->remaining, 1);
2351		bio = r1_bio->bios[r1_bio->read_disk];
2352		md_sync_acct(bio->bi_bdev, nr_sectors);
2353		generic_make_request(bio);
2354
 
2355	}
2356	return nr_sectors;
2357}
2358
2359static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2360{
2361	if (sectors)
2362		return sectors;
2363
2364	return mddev->dev_sectors;
2365}
2366
2367static conf_t *setup_conf(mddev_t *mddev)
2368{
2369	conf_t *conf;
2370	int i;
2371	mirror_info_t *disk;
2372	mdk_rdev_t *rdev;
2373	int err = -ENOMEM;
2374
2375	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
2376	if (!conf)
2377		goto abort;
2378
2379	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2380				 GFP_KERNEL);
2381	if (!conf->mirrors)
2382		goto abort;
2383
2384	conf->tmppage = alloc_page(GFP_KERNEL);
2385	if (!conf->tmppage)
2386		goto abort;
2387
2388	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2389	if (!conf->poolinfo)
2390		goto abort;
2391	conf->poolinfo->raid_disks = mddev->raid_disks;
2392	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2393					  r1bio_pool_free,
2394					  conf->poolinfo);
2395	if (!conf->r1bio_pool)
 
 
 
2396		goto abort;
2397
2398	conf->poolinfo->mddev = mddev;
2399
 
2400	spin_lock_init(&conf->device_lock);
2401	list_for_each_entry(rdev, &mddev->disks, same_set) {
2402		int disk_idx = rdev->raid_disk;
2403		if (disk_idx >= mddev->raid_disks
2404		    || disk_idx < 0)
2405			continue;
2406		disk = conf->mirrors + disk_idx;
 
 
 
2407
 
 
2408		disk->rdev = rdev;
2409
2410		disk->head_position = 0;
 
2411	}
2412	conf->raid_disks = mddev->raid_disks;
2413	conf->mddev = mddev;
2414	INIT_LIST_HEAD(&conf->retry_list);
 
2415
2416	spin_lock_init(&conf->resync_lock);
2417	init_waitqueue_head(&conf->wait_barrier);
2418
2419	bio_list_init(&conf->pending_bio_list);
 
 
2420
2421	conf->last_used = -1;
2422	for (i = 0; i < conf->raid_disks; i++) {
2423
2424		disk = conf->mirrors + i;
2425
2426		if (!disk->rdev ||
2427		    !test_bit(In_sync, &disk->rdev->flags)) {
2428			disk->head_position = 0;
2429			if (disk->rdev)
 
2430				conf->fullsync = 1;
2431		} else if (conf->last_used < 0)
2432			/*
2433			 * The first working device is used as a
2434			 * starting point for read balancing.
2435			 */
2436			conf->last_used = i;
2437	}
2438
2439	err = -EIO;
2440	if (conf->last_used < 0) {
2441		printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
2442		       mdname(mddev));
2443		goto abort;
2444	}
2445	err = -ENOMEM;
2446	conf->thread = md_register_thread(raid1d, mddev, NULL);
2447	if (!conf->thread) {
2448		printk(KERN_ERR
2449		       "md/raid1:%s: couldn't allocate thread\n",
2450		       mdname(mddev));
2451		goto abort;
2452	}
2453
2454	return conf;
2455
2456 abort:
2457	if (conf) {
2458		if (conf->r1bio_pool)
2459			mempool_destroy(conf->r1bio_pool);
2460		kfree(conf->mirrors);
2461		safe_put_page(conf->tmppage);
2462		kfree(conf->poolinfo);
 
 
 
 
 
2463		kfree(conf);
2464	}
2465	return ERR_PTR(err);
2466}
2467
2468static int run(mddev_t *mddev)
 
2469{
2470	conf_t *conf;
2471	int i;
2472	mdk_rdev_t *rdev;
 
 
2473
2474	if (mddev->level != 1) {
2475		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
2476		       mdname(mddev), mddev->level);
2477		return -EIO;
2478	}
2479	if (mddev->reshape_position != MaxSector) {
2480		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
2481		       mdname(mddev));
2482		return -EIO;
2483	}
 
 
2484	/*
2485	 * copy the already verified devices into our private RAID1
2486	 * bookkeeping area. [whatever we allocate in run(),
2487	 * should be freed in stop()]
2488	 */
2489	if (mddev->private == NULL)
2490		conf = setup_conf(mddev);
2491	else
2492		conf = mddev->private;
2493
2494	if (IS_ERR(conf))
2495		return PTR_ERR(conf);
2496
2497	list_for_each_entry(rdev, &mddev->disks, same_set) {
 
 
 
 
 
2498		if (!mddev->gendisk)
2499			continue;
2500		disk_stack_limits(mddev->gendisk, rdev->bdev,
2501				  rdev->data_offset << 9);
2502		/* as we don't honour merge_bvec_fn, we must never risk
2503		 * violating it, so limit ->max_segments to 1 lying within
2504		 * a single page, as a one page request is never in violation.
2505		 */
2506		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2507			blk_queue_max_segments(mddev->queue, 1);
2508			blk_queue_segment_boundary(mddev->queue,
2509						   PAGE_CACHE_SIZE - 1);
2510		}
2511	}
2512
2513	mddev->degraded = 0;
2514	for (i=0; i < conf->raid_disks; i++)
2515		if (conf->mirrors[i].rdev == NULL ||
2516		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2517		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2518			mddev->degraded++;
2519
2520	if (conf->raid_disks - mddev->degraded == 1)
2521		mddev->recovery_cp = MaxSector;
2522
2523	if (mddev->recovery_cp != MaxSector)
2524		printk(KERN_NOTICE "md/raid1:%s: not clean"
2525		       " -- starting background reconstruction\n",
2526		       mdname(mddev));
2527	printk(KERN_INFO 
2528		"md/raid1:%s: active with %d out of %d mirrors\n",
2529		mdname(mddev), mddev->raid_disks - mddev->degraded, 
2530		mddev->raid_disks);
2531
2532	/*
2533	 * Ok, everything is just fine now
2534	 */
2535	mddev->thread = conf->thread;
2536	conf->thread = NULL;
2537	mddev->private = conf;
 
2538
2539	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2540
2541	if (mddev->queue) {
2542		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2543		mddev->queue->backing_dev_info.congested_data = mddev;
 
 
 
 
2544	}
2545	return md_integrity_register(mddev);
2546}
2547
2548static int stop(mddev_t *mddev)
2549{
2550	conf_t *conf = mddev->private;
2551	struct bitmap *bitmap = mddev->bitmap;
2552
2553	/* wait for behind writes to complete */
2554	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
2555		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
2556		       mdname(mddev));
2557		/* need to kick something here to make sure I/O goes? */
2558		wait_event(bitmap->behind_wait,
2559			   atomic_read(&bitmap->behind_writes) == 0);
2560	}
 
 
 
 
 
 
2561
2562	raise_barrier(conf);
2563	lower_barrier(conf);
 
2564
2565	md_unregister_thread(&mddev->thread);
2566	if (conf->r1bio_pool)
2567		mempool_destroy(conf->r1bio_pool);
2568	kfree(conf->mirrors);
 
2569	kfree(conf->poolinfo);
 
 
 
 
 
2570	kfree(conf);
2571	mddev->private = NULL;
2572	return 0;
2573}
2574
2575static int raid1_resize(mddev_t *mddev, sector_t sectors)
2576{
2577	/* no resync is happening, and there is enough space
2578	 * on all devices, so we can resize.
2579	 * We need to make sure resync covers any new space.
2580	 * If the array is shrinking we should possibly wait until
2581	 * any io in the removed space completes, but it hardly seems
2582	 * worth it.
2583	 */
2584	md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
2585	if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
 
2586		return -EINVAL;
2587	set_capacity(mddev->gendisk, mddev->array_sectors);
2588	revalidate_disk(mddev->gendisk);
 
 
 
 
2589	if (sectors > mddev->dev_sectors &&
2590	    mddev->recovery_cp > mddev->dev_sectors) {
2591		mddev->recovery_cp = mddev->dev_sectors;
2592		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2593	}
2594	mddev->dev_sectors = sectors;
2595	mddev->resync_max_sectors = sectors;
2596	return 0;
2597}
2598
2599static int raid1_reshape(mddev_t *mddev)
2600{
2601	/* We need to:
2602	 * 1/ resize the r1bio_pool
2603	 * 2/ resize conf->mirrors
2604	 *
2605	 * We allocate a new r1bio_pool if we can.
2606	 * Then raise a device barrier and wait until all IO stops.
2607	 * Then resize conf->mirrors and swap in the new r1bio pool.
2608	 *
2609	 * At the same time, we "pack" the devices so that all the missing
2610	 * devices have the higher raid_disk numbers.
2611	 */
2612	mempool_t *newpool, *oldpool;
2613	struct pool_info *newpoolinfo;
2614	mirror_info_t *newmirrors;
2615	conf_t *conf = mddev->private;
2616	int cnt, raid_disks;
2617	unsigned long flags;
2618	int d, d2, err;
 
 
 
 
2619
2620	/* Cannot change chunk_size, layout, or level */
2621	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
2622	    mddev->layout != mddev->new_layout ||
2623	    mddev->level != mddev->new_level) {
2624		mddev->new_chunk_sectors = mddev->chunk_sectors;
2625		mddev->new_layout = mddev->layout;
2626		mddev->new_level = mddev->level;
2627		return -EINVAL;
2628	}
2629
2630	err = md_allow_write(mddev);
2631	if (err)
2632		return err;
2633
2634	raid_disks = mddev->raid_disks + mddev->delta_disks;
2635
2636	if (raid_disks < conf->raid_disks) {
2637		cnt=0;
2638		for (d= 0; d < conf->raid_disks; d++)
2639			if (conf->mirrors[d].rdev)
2640				cnt++;
2641		if (cnt > raid_disks)
2642			return -EBUSY;
2643	}
2644
2645	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
2646	if (!newpoolinfo)
2647		return -ENOMEM;
2648	newpoolinfo->mddev = mddev;
2649	newpoolinfo->raid_disks = raid_disks;
2650
2651	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
2652				 r1bio_pool_free, newpoolinfo);
2653	if (!newpool) {
2654		kfree(newpoolinfo);
2655		return -ENOMEM;
2656	}
2657	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
 
 
2658	if (!newmirrors) {
2659		kfree(newpoolinfo);
2660		mempool_destroy(newpool);
2661		return -ENOMEM;
2662	}
2663
2664	raise_barrier(conf);
2665
2666	/* ok, everything is stopped */
2667	oldpool = conf->r1bio_pool;
2668	conf->r1bio_pool = newpool;
2669
2670	for (d = d2 = 0; d < conf->raid_disks; d++) {
2671		mdk_rdev_t *rdev = conf->mirrors[d].rdev;
2672		if (rdev && rdev->raid_disk != d2) {
2673			sysfs_unlink_rdev(mddev, rdev);
2674			rdev->raid_disk = d2;
2675			sysfs_unlink_rdev(mddev, rdev);
2676			if (sysfs_link_rdev(mddev, rdev))
2677				printk(KERN_WARNING
2678				       "md/raid1:%s: cannot register rd%d\n",
2679				       mdname(mddev), rdev->raid_disk);
2680		}
2681		if (rdev)
2682			newmirrors[d2++].rdev = rdev;
2683	}
2684	kfree(conf->mirrors);
2685	conf->mirrors = newmirrors;
2686	kfree(conf->poolinfo);
2687	conf->poolinfo = newpoolinfo;
2688
2689	spin_lock_irqsave(&conf->device_lock, flags);
2690	mddev->degraded += (raid_disks - conf->raid_disks);
2691	spin_unlock_irqrestore(&conf->device_lock, flags);
2692	conf->raid_disks = mddev->raid_disks = raid_disks;
2693	mddev->delta_disks = 0;
2694
2695	conf->last_used = 0; /* just make sure it is in-range */
2696	lower_barrier(conf);
2697
 
2698	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2699	md_wakeup_thread(mddev->thread);
2700
2701	mempool_destroy(oldpool);
2702	return 0;
2703}
2704
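/*
 * state 1 raises the resync barrier to quiesce all regular I/O, state 0
 * lowers it again, and state 2 only wakes up barrier waiters so that a
 * suspend already in progress can make forward progress.
 */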
2705static void raid1_quiesce(mddev_t *mddev, int state)
2706{
2707	conf_t *conf = mddev->private;
2708
2709	switch(state) {
2710	case 2: /* wake for suspend */
2711		wake_up(&conf->wait_barrier);
2712		break;
2713	case 1:
2714		raise_barrier(conf);
2715		break;
2716	case 0:
2717		lower_barrier(conf);
2718		break;
2719	}
2720}
2721
2722static void *raid1_takeover(mddev_t *mddev)
2723{
2724	/* raid1 can take over:
2725	 *  raid5 with 2 devices, any layout or chunk size
2726	 */
2727	if (mddev->level == 5 && mddev->raid_disks == 2) {
2728		conf_t *conf;
2729		mddev->new_level = 1;
2730		mddev->new_layout = 0;
2731		mddev->new_chunk_sectors = 0;
2732		conf = setup_conf(mddev);
2733		if (!IS_ERR(conf))
2734			conf->barrier = 1;
 
 
 
 
2735		return conf;
2736	}
2737	return ERR_PTR(-EINVAL);
2738}
2739
2740static struct mdk_personality raid1_personality =
2741{
2742	.name		= "raid1",
2743	.level		= 1,
2744	.owner		= THIS_MODULE,
2745	.make_request	= make_request,
2746	.run		= run,
2747	.stop		= stop,
2748	.status		= status,
2749	.error_handler	= error,
2750	.hot_add_disk	= raid1_add_disk,
2751	.hot_remove_disk= raid1_remove_disk,
2752	.spare_active	= raid1_spare_active,
2753	.sync_request	= sync_request,
2754	.resize		= raid1_resize,
2755	.size		= raid1_size,
2756	.check_reshape	= raid1_reshape,
2757	.quiesce	= raid1_quiesce,
2758	.takeover	= raid1_takeover,
2759};
2760
2761static int __init raid_init(void)
2762{
2763	return register_md_personality(&raid1_personality);
2764}
2765
2766static void raid_exit(void)
2767{
2768	unregister_md_personality(&raid1_personality);
2769}
2770
2771module_init(raid_init);
2772module_exit(raid_exit);
2773MODULE_LICENSE("GPL");
2774MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
2775MODULE_ALIAS("md-personality-3"); /* RAID1 */
2776MODULE_ALIAS("md-raid1");
2777MODULE_ALIAS("md-level-1");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * raid1.c : Multiple Devices driver for Linux
   4 *
   5 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
   6 *
   7 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   8 *
   9 * RAID-1 management functions.
  10 *
  11 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  12 *
  13 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
  14 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  15 *
  16 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
  17 * bitmapped intelligence in resync:
  18 *
  19 *      - bitmap marked during normal i/o
  20 *      - bitmap used to skip nondirty blocks during sync
  21 *
  22 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
  23 * - persistent bitmap code
  24 */
  25
  26#include <linux/slab.h>
  27#include <linux/delay.h>
  28#include <linux/blkdev.h>
  29#include <linux/module.h>
  30#include <linux/seq_file.h>
  31#include <linux/ratelimit.h>
  32#include <linux/interval_tree_generic.h>
  33
  34#include <trace/events/block.h>
  35
  36#include "md.h"
  37#include "raid1.h"
  38#include "md-bitmap.h"
  39
  40#define UNSUPPORTED_MDDEV_FLAGS		\
  41	((1L << MD_HAS_JOURNAL) |	\
  42	 (1L << MD_JOURNAL_CLEAN) |	\
  43	 (1L << MD_HAS_PPL) |		\
  44	 (1L << MD_HAS_MULTIPLE_PPLS))
  45
  46static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
  47static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
  48
  49#define raid1_log(md, fmt, args...)				\
  50	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
  51
  52#include "raid1-10.c"
  53
  54#define START(node) ((node)->start)
  55#define LAST(node) ((node)->last)
  56INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
  57		     START, LAST, static inline, raid1_rb);
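/*
 * The INTERVAL_TREE_DEFINE() above generates the raid1_rb_insert/remove/
 * iter_first/iter_next helpers over struct serial_info, keyed on the
 * [start, last] sector range, used below to detect overlapping writes
 * when write serialization is enabled for an rdev.
 */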
  58
  59static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
  60				struct serial_info *si, int idx)
  61{
  62	unsigned long flags;
  63	int ret = 0;
  64	sector_t lo = r1_bio->sector;
  65	sector_t hi = lo + r1_bio->sectors;
  66	struct serial_in_rdev *serial = &rdev->serial[idx];
  67
  68	spin_lock_irqsave(&serial->serial_lock, flags);
  69	/* collision happened */
  70	if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
  71		ret = -EBUSY;
  72	else {
  73		si->start = lo;
  74		si->last = hi;
  75		raid1_rb_insert(si, &serial->serial_rb);
  76	}
  77	spin_unlock_irqrestore(&serial->serial_lock, flags);
  78
  79	return ret;
  80}
  81
  82static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
  83{
  84	struct mddev *mddev = rdev->mddev;
  85	struct serial_info *si;
  86	int idx = sector_to_idx(r1_bio->sector);
  87	struct serial_in_rdev *serial = &rdev->serial[idx];
  88
  89	if (WARN_ON(!mddev->serial_info_pool))
  90		return;
  91	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
  92	wait_event(serial->serial_io_wait,
  93		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
  94}
  95
  96static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
  97{
  98	struct serial_info *si;
  99	unsigned long flags;
 100	int found = 0;
 101	struct mddev *mddev = rdev->mddev;
 102	int idx = sector_to_idx(lo);
 103	struct serial_in_rdev *serial = &rdev->serial[idx];
 104
 105	spin_lock_irqsave(&serial->serial_lock, flags);
 106	for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
 107	     si; si = raid1_rb_iter_next(si, lo, hi)) {
 108		if (si->start == lo && si->last == hi) {
 109			raid1_rb_remove(si, &serial->serial_rb);
 110			mempool_free(si, mddev->serial_info_pool);
 111			found = 1;
 112			break;
 113		}
 114	}
 115	if (!found)
 116		WARN(1, "The write IO is not recorded for serialization\n");
 117	spin_unlock_irqrestore(&serial->serial_lock, flags);
 118	wake_up(&serial->serial_io_wait);
 119}
 120
 121/*
 122 * for resync bio, r1bio pointer can be retrieved from the per-bio
 123 * 'struct resync_pages'.
 124 */
 125static inline struct r1bio *get_resync_r1bio(struct bio *bio)
 126{
 127	return get_resync_pages(bio)->raid_bio;
 128}
 129
 130static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 131{
 132	struct pool_info *pi = data;
 133	int size = offsetof(struct r1bio, bios[pi->raid_disks]);
 134
 135	/* allocate a r1bio with room for raid_disks entries in the bios array */
 136	return kzalloc(size, gfp_flags);
 137}
 138
 139#define RESYNC_DEPTH 32
 
 
 
 
 
 
 140#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 141#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
 142#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
 143#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
 144#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 145
 146static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 147{
 148	struct pool_info *pi = data;
 149	struct r1bio *r1_bio;
 
 150	struct bio *bio;
 151	int need_pages;
 152	int j;
 153	struct resync_pages *rps;
 154
 155	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
 156	if (!r1_bio)
 157		return NULL;
 158
 159	rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
 160			    gfp_flags);
 161	if (!rps)
 162		goto out_free_r1bio;
 163
 164	/*
 165	 * Allocate bios : 1 for reading, n-1 for writing
 166	 */
 167	for (j = pi->raid_disks ; j-- ; ) {
 168		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 169		if (!bio)
 170			goto out_free_bio;
 171		r1_bio->bios[j] = bio;
 172	}
 173	/*
 174	 * Allocate RESYNC_PAGES data pages and attach them to
 175	 * the first bio.
 176	 * If this is a user-requested check/repair, allocate
 177	 * RESYNC_PAGES for each bio.
 178	 */
 179	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
 180		need_pages = pi->raid_disks;
 181	else
 182		need_pages = 1;
 183	for (j = 0; j < pi->raid_disks; j++) {
 184		struct resync_pages *rp = &rps[j];
 185
 186		bio = r1_bio->bios[j];
 
 
 
 
 187
 188		if (j < need_pages) {
 189			if (resync_alloc_pages(rp, gfp_flags))
 190				goto out_free_pages;
 191		} else {
 192			memcpy(rp, &rps[0], sizeof(*rp));
 193			resync_get_all_pages(rp);
 194		}
 195
 196		rp->raid_bio = r1_bio;
 197		bio->bi_private = rp;
 
 
 
 
 198	}
 199
 200	r1_bio->master_bio = NULL;
 201
 202	return r1_bio;
 203
 204out_free_pages:
 205	while (--j >= 0)
 206		resync_free_pages(&rps[j]);
 207
 
 208out_free_bio:
 209	while (++j < pi->raid_disks)
 210		bio_put(r1_bio->bios[j]);
 211	kfree(rps);
 212
 213out_free_r1bio:
 214	rbio_pool_free(r1_bio, data);
 215	return NULL;
 216}
 217
 218static void r1buf_pool_free(void *__r1_bio, void *data)
 219{
 220	struct pool_info *pi = data;
 221	int i;
 222	struct r1bio *r1bio = __r1_bio;
 223	struct resync_pages *rp = NULL;
 224
 225	for (i = pi->raid_disks; i--; ) {
 226		rp = get_resync_pages(r1bio->bios[i]);
 227		resync_free_pages(rp);
 
 
 
 
 
 228		bio_put(r1bio->bios[i]);
 229	}
 230
 231	/* resync pages array stored in the 1st bio's .bi_private */
 232	kfree(rp);
 233
 234	rbio_pool_free(r1bio, data);
 235}
 236
 237static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
 238{
 239	int i;
 240
 241	for (i = 0; i < conf->raid_disks * 2; i++) {
 242		struct bio **bio = r1_bio->bios + i;
 243		if (!BIO_SPECIAL(*bio))
 244			bio_put(*bio);
 245		*bio = NULL;
 246	}
 247}
 248
 249static void free_r1bio(struct r1bio *r1_bio)
 250{
 251	struct r1conf *conf = r1_bio->mddev->private;
 252
 253	put_all_bios(conf, r1_bio);
 254	mempool_free(r1_bio, &conf->r1bio_pool);
 255}
 256
 257static void put_buf(struct r1bio *r1_bio)
 258{
 259	struct r1conf *conf = r1_bio->mddev->private;
 260	sector_t sect = r1_bio->sector;
 261	int i;
 262
 263	for (i = 0; i < conf->raid_disks * 2; i++) {
 264		struct bio *bio = r1_bio->bios[i];
 265		if (bio->bi_end_io)
 266			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
 267	}
 268
 269	mempool_free(r1_bio, &conf->r1buf_pool);
 270
 271	lower_barrier(conf, sect);
 272}
 273
 274static void reschedule_retry(struct r1bio *r1_bio)
 275{
 276	unsigned long flags;
 277	struct mddev *mddev = r1_bio->mddev;
 278	struct r1conf *conf = mddev->private;
 279	int idx;
 280
 281	idx = sector_to_idx(r1_bio->sector);
 282	spin_lock_irqsave(&conf->device_lock, flags);
 283	list_add(&r1_bio->retry_list, &conf->retry_list);
 284	atomic_inc(&conf->nr_queued[idx]);
 285	spin_unlock_irqrestore(&conf->device_lock, flags);
 286
 287	wake_up(&conf->wait_barrier);
 288	md_wakeup_thread(mddev->thread);
 289}
 290
 291/*
 292 * raid_end_bio_io() is called when we have finished servicing a mirrored
 293 * operation and are ready to return a success/failure code to the buffer
 294 * cache layer.
 295 */
 296static void call_bio_endio(struct r1bio *r1_bio)
 297{
 298	struct bio *bio = r1_bio->master_bio;
 299
 300	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
 301		bio->bi_status = BLK_STS_IOERR;
 302
 303	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
 304		bio_end_io_acct(bio, r1_bio->start_time);
 305	bio_endio(bio);
 
 
 
 
 306}
 307
 308static void raid_end_bio_io(struct r1bio *r1_bio)
 309{
 310	struct bio *bio = r1_bio->master_bio;
 311	struct r1conf *conf = r1_bio->mddev->private;
 312
 313	/* if nobody has done the final endio yet, do it now */
 314	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 315		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
 316			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
 317			 (unsigned long long) bio->bi_iter.bi_sector,
 318			 (unsigned long long) bio_end_sector(bio) - 1);
 
 319
 320		call_bio_endio(r1_bio);
 321	}
 322	/*
 323	 * Wake up any possible resync thread that waits for the device
 324	 * to go idle.  All I/Os, even write-behind writes, are done.
 325	 */
 326	allow_barrier(conf, r1_bio->sector);
 327
 328	free_r1bio(r1_bio);
 329}
 330
 331/*
 332 * Update disk head position estimator based on IRQ completion info.
 333 */
 334static inline void update_head_pos(int disk, struct r1bio *r1_bio)
 335{
 336	struct r1conf *conf = r1_bio->mddev->private;
 337
 338	conf->mirrors[disk].head_position =
 339		r1_bio->sector + (r1_bio->sectors);
 340}
 341
 342/*
 343 * Find the disk number which triggered given bio
 344 */
 345static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 346{
 
 
 347	int mirror;
 348	struct r1conf *conf = r1_bio->mddev->private;
 349	int raid_disks = conf->raid_disks;
 350
 351	for (mirror = 0; mirror < raid_disks * 2; mirror++)
 352		if (r1_bio->bios[mirror] == bio)
 353			break;
 354
 355	BUG_ON(mirror == raid_disks * 2);
 356	update_head_pos(mirror, r1_bio);
 357
 358	return mirror;
 359}
 360
 361static void raid1_end_read_request(struct bio *bio)
 362{
 363	int uptodate = !bio->bi_status;
 364	struct r1bio *r1_bio = bio->bi_private;
 365	struct r1conf *conf = r1_bio->mddev->private;
 366	struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
 367
 
 368	/*
 369	 * this branch is our 'one mirror IO has finished' event handler:
 370	 */
 371	update_head_pos(r1_bio->read_disk, r1_bio);
 372
 373	if (uptodate)
 374		set_bit(R1BIO_Uptodate, &r1_bio->state);
 375	else if (test_bit(FailFast, &rdev->flags) &&
 376		 test_bit(R1BIO_FailFast, &r1_bio->state))
 377		/* This was a fail-fast read so we definitely
 378		 * want to retry */
 379		;
 380	else {
 381		/* If all other devices have failed, we want to return
 382		 * the error upwards rather than fail the last device.
 383		 * Here we redefine "uptodate" to mean "Don't want to retry"
 384		 */
 385		unsigned long flags;
 386		spin_lock_irqsave(&conf->device_lock, flags);
 387		if (r1_bio->mddev->degraded == conf->raid_disks ||
 388		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
 389		     test_bit(In_sync, &rdev->flags)))
 390			uptodate = 1;
 391		spin_unlock_irqrestore(&conf->device_lock, flags);
 392	}
 393
 394	if (uptodate) {
 395		raid_end_bio_io(r1_bio);
 396		rdev_dec_pending(rdev, conf->mddev);
 397	} else {
 398		/*
 399		 * oops, read error:
 400		 */
 401		char b[BDEVNAME_SIZE];
 402		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
 403				   mdname(conf->mddev),
 404				   bdevname(rdev->bdev, b),
 405				   (unsigned long long)r1_bio->sector);
 
 
 
 406		set_bit(R1BIO_ReadError, &r1_bio->state);
 407		reschedule_retry(r1_bio);
 408		/* don't drop the reference on read_disk yet */
 409	}
 
 
 410}
 411
 412static void close_write(struct r1bio *r1_bio)
 413{
 414	/* it really is the end of this request */
 415	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 416		bio_free_pages(r1_bio->behind_master_bio);
 417		bio_put(r1_bio->behind_master_bio);
 418		r1_bio->behind_master_bio = NULL;
 
 
 
 419	}
 420	/* clear the bitmap if all writes complete successfully */
 421	md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
 422			   r1_bio->sectors,
 423			   !test_bit(R1BIO_Degraded, &r1_bio->state),
 424			   test_bit(R1BIO_BehindIO, &r1_bio->state));
 425	md_write_end(r1_bio->mddev);
 426}
 427
 428static void r1_bio_write_done(struct r1bio *r1_bio)
 429{
 430	if (!atomic_dec_and_test(&r1_bio->remaining))
 431		return;
 432
 433	if (test_bit(R1BIO_WriteError, &r1_bio->state))
 434		reschedule_retry(r1_bio);
 435	else {
 436		close_write(r1_bio);
 437		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
 438			reschedule_retry(r1_bio);
 439		else
 440			raid_end_bio_io(r1_bio);
 441	}
 442}
 443
 444static void raid1_end_write_request(struct bio *bio)
 445{
 446	struct r1bio *r1_bio = bio->bi_private;
 447	int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 448	struct r1conf *conf = r1_bio->mddev->private;
 
 449	struct bio *to_put = NULL;
 450	int mirror = find_bio_disk(r1_bio, bio);
 451	struct md_rdev *rdev = conf->mirrors[mirror].rdev;
 452	bool discard_error;
 453	sector_t lo = r1_bio->sector;
 454	sector_t hi = r1_bio->sector + r1_bio->sectors;
 455
 456	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
 
 
 457
 458	/*
 459	 * 'one mirror IO has finished' event handler:
 460	 */
 461	if (bio->bi_status && !discard_error) {
 462		set_bit(WriteErrorSeen,	&rdev->flags);
 463		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 464			set_bit(MD_RECOVERY_NEEDED, &
 465				conf->mddev->recovery);
 466
 467		if (test_bit(FailFast, &rdev->flags) &&
 468		    (bio->bi_opf & MD_FAILFAST) &&
 469		    /* We never try FailFast to WriteMostly devices */
 470		    !test_bit(WriteMostly, &rdev->flags)) {
 471			md_error(r1_bio->mddev, rdev);
 472		}
 473
 474		/*
 475		 * When the device is faulty, it is not necessary to
 476		 * handle write error.
 477		 */
 478		if (!test_bit(Faulty, &rdev->flags))
 479			set_bit(R1BIO_WriteError, &r1_bio->state);
 480		else {
 481			/* Fail the request */
 482			set_bit(R1BIO_Degraded, &r1_bio->state);
 483			/* Finished with this branch */
 484			r1_bio->bios[mirror] = NULL;
 485			to_put = bio;
 486		}
 487	} else {
 488		/*
 489		 * Set R1BIO_Uptodate in our master bio, so that we
 490		 * will return a good error code to the higher
 491		 * levels even if IO on some other mirrored buffer
 492		 * fails.
 493		 *
 494		 * The 'master' represents the composite IO operation
 495		 * to user-side. So if something waits for IO, then it
 496		 * will wait for the 'master' bio.
 497		 */
 498		sector_t first_bad;
 499		int bad_sectors;
 500
 501		r1_bio->bios[mirror] = NULL;
 502		to_put = bio;
 503		/*
 504		 * Do not set R1BIO_Uptodate if the current device is
 505		 * rebuilding or Faulty. This is because we cannot use
 506		 * such a device for properly reading the data back (we could
 507		 * potentially use it, if the current write fell before
 508		 * rdev->recovery_offset, but for simplicity we don't
 509		 * check this here).
 510		 */
 511		if (test_bit(In_sync, &rdev->flags) &&
 512		    !test_bit(Faulty, &rdev->flags))
 513			set_bit(R1BIO_Uptodate, &r1_bio->state);
 514
 515		/* Maybe we can clear some bad blocks. */
 516		if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
 517				&first_bad, &bad_sectors) && !discard_error) {
 
 518			r1_bio->bios[mirror] = IO_MADE_GOOD;
 519			set_bit(R1BIO_MadeGood, &r1_bio->state);
 520		}
 521	}
 522
 
 
 523	if (behind) {
 524		if (test_bit(CollisionCheck, &rdev->flags))
 525			remove_serial(rdev, lo, hi);
 526		if (test_bit(WriteMostly, &rdev->flags))
 527			atomic_dec(&r1_bio->behind_remaining);
 528
 529		/*
 530		 * In behind mode, we ACK the master bio once the I/O
 531		 * has safely reached all non-writemostly
 532		 * disks. Setting the Returned bit ensures that this
 533		 * gets done only once -- we don't ever want to return
 534		 * -EIO here, instead we'll wait
 535		 */
 536		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
 537		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
 538			/* Maybe we can return now */
 539			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 540				struct bio *mbio = r1_bio->master_bio;
 541				pr_debug("raid1: behind end write sectors"
 542					 " %llu-%llu\n",
 543					 (unsigned long long) mbio->bi_iter.bi_sector,
 544					 (unsigned long long) bio_end_sector(mbio) - 1);
 545				call_bio_endio(r1_bio);
 546			}
 547		}
 548	} else if (rdev->mddev->serialize_policy)
 549		remove_serial(rdev, lo, hi);
 550	if (r1_bio->bios[mirror] == NULL)
 551		rdev_dec_pending(rdev, conf->mddev);
 
 552
 553	/*
 554	 * Let's see if all mirrored write operations have finished
 555	 * already.
 556	 */
 557	r1_bio_write_done(r1_bio);
 558
 559	if (to_put)
 560		bio_put(to_put);
 561}
 562
 563static sector_t align_to_barrier_unit_end(sector_t start_sector,
 564					  sector_t sectors)
 565{
 566	sector_t len;
 567
 568	WARN_ON(sectors == 0);
 569	/*
 570	 * len is the number of sectors from start_sector to end of the
 571	 * barrier unit which start_sector belongs to.
 572	 */
 573	len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
 574	      start_sector;
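	/*
	 * Example: with a barrier unit of 1 << 17 sectors, start_sector
	 * 131070 and sectors 10 give len = 131072 - 131070 = 2, so the
	 * caller only issues the part up to the barrier-unit boundary.
	 */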
 575
 576	if (len > sectors)
 577		len = sectors;
 578
 579	return len;
 580}
 581
 582/*
 583 * This routine returns the disk from which the requested read should
 584 * be done. There is a per-array 'next expected sequential IO' sector
 585 * number - if this matches on the next IO then we use the last disk.
  586 * There is also a per-disk 'last known head position' sector that is
 587 * maintained from IRQ contexts, both the normal and the resync IO
 588 * completion handlers update this position correctly. If there is no
 589 * perfect sequential match then we pick the disk whose head is closest.
 590 *
 591 * If there are 2 mirrors in the same 2 devices, performance degrades
 592 * because position is mirror, not device based.
 593 *
 594 * The rdev for the device selected will have nr_pending incremented.
 595 */
 596static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
 597{
 598	const sector_t this_sector = r1_bio->sector;
 599	int sectors;
 600	int best_good_sectors;
 601	int best_disk, best_dist_disk, best_pending_disk;
 602	int has_nonrot_disk;
 603	int disk;
 604	sector_t best_dist;
 605	unsigned int min_pending;
 606	struct md_rdev *rdev;
 607	int choose_first;
 608	int choose_next_idle;
 609
 610	rcu_read_lock();
 611	/*
 612	 * Check if we can balance. We can balance on the whole
 613	 * device if no resync is going on, or below the resync window.
 614	 * We take the first readable disk when above the resync window.
 615	 */
 616 retry:
 617	sectors = r1_bio->sectors;
 618	best_disk = -1;
 619	best_dist_disk = -1;
 620	best_dist = MaxSector;
 621	best_pending_disk = -1;
 622	min_pending = UINT_MAX;
 623	best_good_sectors = 0;
 624	has_nonrot_disk = 0;
 625	choose_next_idle = 0;
 626	clear_bit(R1BIO_FailFast, &r1_bio->state);
 627
 628	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 629	    (mddev_is_clustered(conf->mddev) &&
 630	    md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 631		    this_sector + sectors)))
 632		choose_first = 1;
 633	else
 
 634		choose_first = 0;
 
 
 635
 636	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
 637		sector_t dist;
 638		sector_t first_bad;
 639		int bad_sectors;
 640		unsigned int pending;
 641		bool nonrot;
 
 
 642
 643		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 644		if (r1_bio->bios[disk] == IO_BLOCKED
 645		    || rdev == NULL
 646		    || test_bit(Faulty, &rdev->flags))
 647			continue;
 648		if (!test_bit(In_sync, &rdev->flags) &&
 649		    rdev->recovery_offset < this_sector + sectors)
 650			continue;
 651		if (test_bit(WriteMostly, &rdev->flags)) {
 652			/* Don't balance among write-mostly, just
 653			 * use the first as a last resort */
 654			if (best_dist_disk < 0) {
 655				if (is_badblock(rdev, this_sector, sectors,
 656						&first_bad, &bad_sectors)) {
 657					if (first_bad <= this_sector)
 658						/* Cannot use this */
 659						continue;
 660					best_good_sectors = first_bad - this_sector;
 661				} else
 662					best_good_sectors = sectors;
 663				best_dist_disk = disk;
 664				best_pending_disk = disk;
 665			}
 666			continue;
 667		}
 668		/* This is a reasonable device to use.  It might
 669		 * even be best.
 670		 */
 671		if (is_badblock(rdev, this_sector, sectors,
 672				&first_bad, &bad_sectors)) {
 673			if (best_dist < MaxSector)
 674				/* already have a better device */
 675				continue;
 676			if (first_bad <= this_sector) {
 677				/* cannot read here. If this is the 'primary'
 678				 * device, then we must not read beyond
 679				 * bad_sectors from another device..
 680				 */
 681				bad_sectors -= (this_sector - first_bad);
 682				if (choose_first && sectors > bad_sectors)
 683					sectors = bad_sectors;
 684				if (best_good_sectors > sectors)
 685					best_good_sectors = sectors;
 686
 687			} else {
 688				sector_t good_sectors = first_bad - this_sector;
 689				if (good_sectors > best_good_sectors) {
 690					best_good_sectors = good_sectors;
 691					best_disk = disk;
 692				}
 693				if (choose_first)
 694					break;
 695			}
 696			continue;
 697		} else {
 698			if ((sectors > best_good_sectors) && (best_disk >= 0))
 699				best_disk = -1;
 700			best_good_sectors = sectors;
 701		}
 702
 703		if (best_disk >= 0)
 704			/* At least two disks to choose from so failfast is OK */
 705			set_bit(R1BIO_FailFast, &r1_bio->state);
 706
 707		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 708		has_nonrot_disk |= nonrot;
 709		pending = atomic_read(&rdev->nr_pending);
 710		dist = abs(this_sector - conf->mirrors[disk].head_position);
 711		if (choose_first) {
 712			best_disk = disk;
 713			break;
 714		}
 715		/* Don't change to another disk for sequential reads */
 716		if (conf->mirrors[disk].next_seq_sect == this_sector
 717		    || dist == 0) {
 718			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
 719			struct raid1_info *mirror = &conf->mirrors[disk];
 720
 721			best_disk = disk;
 722			/*
 723			 * If buffered sequential IO size exceeds optimal
 724			 * iosize, check whether there is an idle disk. If so,
 725			 * choose the idle disk. read_balance could already have
 726			 * chosen an idle disk before noticing this is sequential
 727			 * IO on this disk. That doesn't matter: this disk will
 728			 * stay idle and be used next time, once the first disk's
 729			 * IO size has exceeded the optimal iosize. That way the
 730			 * first disk's iosize is at least the optimal iosize;
 731			 * the second disk's iosize might be small, but that is
 732			 * not a big deal since when the second disk
 733			 * starts IO, the first disk is likely still busy.
 734			 */
 735			if (nonrot && opt_iosize > 0 &&
 736			    mirror->seq_start != MaxSector &&
 737			    mirror->next_seq_sect > opt_iosize &&
 738			    mirror->next_seq_sect - opt_iosize >=
 739			    mirror->seq_start) {
 740				choose_next_idle = 1;
 741				continue;
 742			}
 743			break;
 744		}
 745
 746		if (choose_next_idle)
 747			continue;
 748
 749		if (min_pending > pending) {
 750			min_pending = pending;
 751			best_pending_disk = disk;
 752		}
 753
 754		if (dist < best_dist) {
 755			best_dist = dist;
 756			best_dist_disk = disk;
 757		}
 758	}
 759
 760	/*
 761	 * If all disks are rotational, choose the closest disk. If any disk is
 762	 * non-rotational, choose the disk with the fewest pending requests, even
 763	 * if that disk is rotational, which may or may not be optimal for arrays
 764	 * with mixed rotational/non-rotational disks depending on workload.
 765	 */
 766	if (best_disk == -1) {
 767		if (has_nonrot_disk || min_pending == 0)
 768			best_disk = best_pending_disk;
 769		else
 770			best_disk = best_dist_disk;
 771	}
 772
 773	if (best_disk >= 0) {
 774		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
 775		if (!rdev)
 776			goto retry;
 777		atomic_inc(&rdev->nr_pending);
 778		sectors = best_good_sectors;
 779
 780		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
 781			conf->mirrors[best_disk].seq_start = this_sector;
 782
 783		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
 784	}
 785	rcu_read_unlock();
 786	*max_sectors = sectors;
 787
 788	return best_disk;
 789}
 790
 791static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 792{
 793	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
 794	md_bitmap_unplug(conf->mddev->bitmap);
 795	wake_up(&conf->wait_barrier);
 796
 797	while (bio) { /* submit pending writes */
 798		struct bio *next = bio->bi_next;
 799		struct md_rdev *rdev = (void *)bio->bi_bdev;
 800		bio->bi_next = NULL;
 801		bio_set_dev(bio, rdev->bdev);
 802		if (test_bit(Faulty, &rdev->flags)) {
 803			bio_io_error(bio);
 804		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 805				    !blk_queue_discard(bio->bi_bdev->bd_disk->queue)))
 806			/* Just ignore it */
 807			bio_endio(bio);
 808		else
 809			submit_bio_noacct(bio);
 810		bio = next;
 811		cond_resched();
 812	}
 
 
 813}
 
 814
 815static void flush_pending_writes(struct r1conf *conf)
 816{
 817	/* Any writes that have been queued but are awaiting
 818	 * bitmap updates get flushed here.
 819	 */
 820	spin_lock_irq(&conf->device_lock);
 821
 822	if (conf->pending_bio_list.head) {
 823		struct blk_plug plug;
 824		struct bio *bio;
 825
 826		bio = bio_list_get(&conf->pending_bio_list);
 827		conf->pending_count = 0;
 828		spin_unlock_irq(&conf->device_lock);
 829
 830		/*
 831		 * As this is called in a wait_event() loop (see freeze_array),
 832		 * current->state might be TASK_UNINTERRUPTIBLE which will
 833		 * cause a warning when we prepare to wait again.  As it is
 834		 * rare that this path is taken, it is perfectly safe to force
 835		 * us to go around the wait_event() loop again, so the warning
 836		 * is a false-positive.  Silence the warning by resetting
 837		 * thread state
 838		 */
 839		__set_current_state(TASK_RUNNING);
 840		blk_start_plug(&plug);
 841		flush_bio_list(conf, bio);
 842		blk_finish_plug(&plug);
 843	} else
 844		spin_unlock_irq(&conf->device_lock);
 845}
 846
 847/* Barriers....
 848 * Sometimes we need to suspend IO while we do something else,
 849 * either some resync/recovery, or a reconfiguration of the array.
 850 * To do this we raise a 'barrier'.
 851 * The 'barrier' is a counter that can be raised multiple times
 852 * to count how many activities are happening which preclude
 853 * normal IO.
 854 * We can only raise the barrier if there is no pending IO.
 855 * i.e. if nr_pending == 0.
 856 * We choose only to raise the barrier if no-one is waiting for the
 857 * barrier to go down.  This means that as soon as an IO request
 858 * is ready, no other operations which require a barrier will start
 859 * until the IO request has had a chance.
 860 *
 861 * So: regular IO calls 'wait_barrier'.  When that returns there
 862 *    is no background IO happening.  It must arrange to call
 863 *    allow_barrier when it has finished its IO.
 864 * background IO calls must call raise_barrier.  Once that returns
 865 *    there is no normal IO happening.  It must arrange to call
 866 *    lower_barrier when the particular background IO completes.
 867 *
 868 * If resync/recovery is interrupted, this returns -EINTR;
 869 * otherwise, it returns 0.
 870 */
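/*
 * Minimal usage sketch (illustrative only, not a new code path in the
 * driver): the two sides pair up as
 *
 *	regular IO path			resync/recovery path
 *	--------------			--------------------
 *	wait_barrier(conf, sector);	raise_barrier(conf, sector);
 *	...submit and finish IO...	...resync one window...
 *	allow_barrier(conf, sector);	lower_barrier(conf, sector);
 *
 * Each pair acts on the barrier bucket selected by sector_to_idx(sector),
 * so regular IO and resync only exclude each other within the same bucket.
 */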
 871static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
 
 
 872{
 873	int idx = sector_to_idx(sector_nr);
 874
 875	spin_lock_irq(&conf->resync_lock);
 876
 877	/* Wait until no block IO is waiting */
 878	wait_event_lock_irq(conf->wait_barrier,
 879			    !atomic_read(&conf->nr_waiting[idx]),
 880			    conf->resync_lock);
 881
 882	/* block any new IO from starting */
 883	atomic_inc(&conf->barrier[idx]);
 884	/*
 885	 * In raise_barrier() we firstly increase conf->barrier[idx] then
 886	 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
 887	 * increase conf->nr_pending[idx] then check conf->barrier[idx].
 888	 * A memory barrier is needed here to make sure conf->nr_pending[idx] won't
 889	 * be fetched before conf->barrier[idx] is increased. Otherwise
 890	 * there will be a race between raise_barrier() and _wait_barrier().
 891	 */
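	/*
	 * Ordering sketch (illustrative, mirrors the comment above): with the
	 * barriers in place the two sides cannot both miss each other's update:
	 *
	 *	raise_barrier()			_wait_barrier()
	 *	inc barrier[idx]		inc nr_pending[idx]
	 *	smp_mb__after_atomic()		smp_mb__after_atomic()
	 *	read nr_pending[idx]		read barrier[idx]
	 *
	 * At least one side observes the other's increment, so either the
	 * resync waits for the in-flight IO or the IO waits for the barrier.
	 */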
 892	smp_mb__after_atomic();
 893
 894	/* For these conditions we must wait:
 895	 * A: while the array is in the frozen state
 896	 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
 897	 *    exists in the corresponding I/O barrier bucket.
 898	 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have reached
 899	 *    the maximum resync count allowed in the current I/O barrier bucket.
 900	 */
 901	wait_event_lock_irq(conf->wait_barrier,
 902			    (!conf->array_frozen &&
 903			     !atomic_read(&conf->nr_pending[idx]) &&
 904			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
 905				test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
 906			    conf->resync_lock);
 907
 908	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
 909		atomic_dec(&conf->barrier[idx]);
 910		spin_unlock_irq(&conf->resync_lock);
 911		wake_up(&conf->wait_barrier);
 912		return -EINTR;
 913	}
 914
 915	atomic_inc(&conf->nr_sync_pending);
 916	spin_unlock_irq(&conf->resync_lock);
 917
 918	return 0;
 919}
 920
 921static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
 922{
 923	int idx = sector_to_idx(sector_nr);
 924
 925	BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
 926
 927	atomic_dec(&conf->barrier[idx]);
 928	atomic_dec(&conf->nr_sync_pending);
 929	wake_up(&conf->wait_barrier);
 930}
 931
 932static void _wait_barrier(struct r1conf *conf, int idx)
 933{
 934	/*
 935	 * We need to increase conf->nr_pending[idx] very early here,
 936	 * then raise_barrier() can be blocked when it waits for
 937	 * conf->nr_pending[idx] to be 0. Then we can avoid holding
 938	 * conf->resync_lock when there is no barrier raised in the same
 939	 * barrier unit bucket. Also, if the array is frozen, I/O
 940	 * should be blocked until the array is unfrozen.
 941	 */
 942	atomic_inc(&conf->nr_pending[idx]);
 943	/*
 944	 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
 945	 * check conf->barrier[idx]. In raise_barrier() we firstly increase
 946	 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
 947	 * barrier is necessary here to make sure conf->barrier[idx] won't be
 948	 * fetched before conf->nr_pending[idx] is increased. Otherwise there
 949	 * will be a race between _wait_barrier() and raise_barrier().
 950	 */
 951	smp_mb__after_atomic();
 952
 953	/*
 954	 * Don't worry about checking two atomic_t variables at the same time
 955	 * here. If, while we check conf->barrier[idx], the array is
 956	 * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
 957	 * 0, it is safe to return and let the I/O continue. Because the
 958	 * array is frozen, all I/O returned here will eventually complete
 959	 * or be queued, so no race will happen. See the code comment in
 960	 * freeze_array().
 961	 */
 962	if (!READ_ONCE(conf->array_frozen) &&
 963	    !atomic_read(&conf->barrier[idx]))
 964		return;
 965
 966	/*
 967	 * After taking conf->resync_lock, conf->nr_pending[idx]
 968	 * should be decreased before waiting for the barrier to drop.
 969	 * Otherwise, we may encounter a race condition because
 970	 * raise_barrier() might be waiting for conf->nr_pending[idx]
 971	 * to be 0 at the same time.
 972	 */
 973	spin_lock_irq(&conf->resync_lock);
 974	atomic_inc(&conf->nr_waiting[idx]);
 975	atomic_dec(&conf->nr_pending[idx]);
 976	/*
 977	 * In case freeze_array() is waiting for
 978	 * get_unqueued_pending() == extra
 979	 */
 980	wake_up(&conf->wait_barrier);
 981	/* Wait for the barrier in same barrier unit bucket to drop. */
 982	wait_event_lock_irq(conf->wait_barrier,
 983			    !conf->array_frozen &&
 984			     !atomic_read(&conf->barrier[idx]),
 985			    conf->resync_lock);
 986	atomic_inc(&conf->nr_pending[idx]);
 987	atomic_dec(&conf->nr_waiting[idx]);
 988	spin_unlock_irq(&conf->resync_lock);
 989}
 990
 991static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
 992{
 993	int idx = sector_to_idx(sector_nr);
 994
 995	/*
 996	 * Very similar to _wait_barrier(). The difference is, for read
 997	 * I/O we don't need to wait for sync I/O, but if the whole array
 998	 * is frozen, the read I/O still has to wait until the array is
 999	 * unfrozen. Since there is no ordering requirement on
1000	 * conf->barrier[idx] here, a memory barrier is unnecessary as well.
1001	 */
1002	atomic_inc(&conf->nr_pending[idx]);
1003
1004	if (!READ_ONCE(conf->array_frozen))
1005		return;
1006
1007	spin_lock_irq(&conf->resync_lock);
1008	atomic_inc(&conf->nr_waiting[idx]);
1009	atomic_dec(&conf->nr_pending[idx]);
1010	/*
1011	 * In case freeze_array() is waiting for
1012	 * get_unqueued_pending() == extra
1013	 */
1014	wake_up(&conf->wait_barrier);
1015	/* Wait for array to be unfrozen */
1016	wait_event_lock_irq(conf->wait_barrier,
1017			    !conf->array_frozen,
1018			    conf->resync_lock);
1019	atomic_inc(&conf->nr_pending[idx]);
1020	atomic_dec(&conf->nr_waiting[idx]);
1021	spin_unlock_irq(&conf->resync_lock);
1022}
1023
1024static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1025{
1026	int idx = sector_to_idx(sector_nr);
1027
1028	_wait_barrier(conf, idx);
1029}
1030
1031static void _allow_barrier(struct r1conf *conf, int idx)
1032{
1033	atomic_dec(&conf->nr_pending[idx]);
1034	wake_up(&conf->wait_barrier);
1035}
1036
1037static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1038{
1039	int idx = sector_to_idx(sector_nr);
1040
1041	_allow_barrier(conf, idx);
1042}
1043
1044/* conf->resync_lock should be held */
1045static int get_unqueued_pending(struct r1conf *conf)
1046{
1047	int idx, ret;
1048
1049	ret = atomic_read(&conf->nr_sync_pending);
1050	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1051		ret += atomic_read(&conf->nr_pending[idx]) -
1052			atomic_read(&conf->nr_queued[idx]);
1053
1054	return ret;
1055}
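
/*
 * Accounting sketch (illustrative numbers): with nr_sync_pending = 1,
 * nr_pending[] summing to 5 and nr_queued[] summing to 3 across the
 * buckets, get_unqueued_pending() returns 1 + (5 - 3) = 3, i.e. three
 * requests are still in flight and not yet parked on a retry or
 * bio_end_io list.
 */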
1056
1057static void freeze_array(struct r1conf *conf, int extra)
1058{
 1059	/* Stop sync I/O and normal I/O and wait for everything to
 1060	 * go quiet.
 1061	 * This is called in two situations:
 1062	 * 1) management command handlers (reshape, remove disk, quiesce).
 1063	 * 2) one normal I/O request failed.
 1064	 *
 1065	 * After array_frozen is set to 1, new sync IO will be blocked at
 1066	 * raise_barrier(), and new normal I/O will be blocked at _wait_barrier()
 1067	 * or wait_read_barrier(). In-flight I/Os will either complete or be
 1068	 * queued. When everything goes quiet, only queued I/Os are left.
 1069	 *
 1070	 * Every in-flight I/O contributes to conf->nr_pending[idx], where idx is
 1071	 * the barrier bucket index which this I/O request hits. When all sync and
 1072	 * normal I/O are queued, the sum of all conf->nr_pending[] will match the
 1073	 * sum of all conf->nr_queued[]. But normal I/O failure is an exception:
 1074	 * in handle_read_error(), we may call freeze_array() before trying to
 1075	 * fix the read error. In this case, the failed read I/O is not queued,
 1076	 * so get_unqueued_pending() == 1.
 1077	 *
 1078	 * Therefore before this function returns, we need to wait until
 1079	 * get_unqueued_pending(conf) equals extra. For the
 1080	 * normal I/O context, extra is 1; in all other situations extra is 0.
 1081	 */
1082	spin_lock_irq(&conf->resync_lock);
1083	conf->array_frozen = 1;
1084	raid1_log(conf->mddev, "wait freeze");
1085	wait_event_lock_irq_cmd(
1086		conf->wait_barrier,
1087		get_unqueued_pending(conf) == extra,
1088		conf->resync_lock,
1089		flush_pending_writes(conf));
1090	spin_unlock_irq(&conf->resync_lock);
1091}
1092static void unfreeze_array(struct r1conf *conf)
1093{
1094	/* reverse the effect of the freeze */
1095	spin_lock_irq(&conf->resync_lock);
1096	conf->array_frozen = 0;
 
 
1097	spin_unlock_irq(&conf->resync_lock);
1098	wake_up(&conf->wait_barrier);
1099}
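
/*
 * Typical usage sketch (illustrative only): management paths bracket their
 * critical section with a freeze/unfreeze pair, e.g.
 *
 *	freeze_array(conf, 0);
 *	...swap or remove an rdev...
 *	unfreeze_array(conf);
 *
 * while handle_read_error() uses freeze_array(conf, 1), because the failed
 * read it is handling still counts as pending but is not queued.
 */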
1100
1101static void alloc_behind_master_bio(struct r1bio *r1_bio,
1102					   struct bio *bio)
 
 
1103{
1104	int size = bio->bi_iter.bi_size;
1105	unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1106	int i = 0;
1107	struct bio *behind_bio = NULL;
1108
1109	behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set);
1110	if (!behind_bio)
1111		return;
1112
1113	/* discard op, we don't support writezero/writesame yet */
1114	if (!bio_has_data(bio)) {
1115		behind_bio->bi_iter.bi_size = size;
1116		goto skip_copy;
1117	}
1118
1119	behind_bio->bi_write_hint = bio->bi_write_hint;
1120
1121	while (i < vcnt && size) {
1122		struct page *page;
1123		int len = min_t(int, PAGE_SIZE, size);
1124
1125		page = alloc_page(GFP_NOIO);
1126		if (unlikely(!page))
1127			goto free_pages;
1128
1129		bio_add_page(behind_bio, page, len, 0);
1130
1131		size -= len;
1132		i++;
1133	}
1134
1135	bio_copy_data(behind_bio, bio);
1136skip_copy:
1137	r1_bio->behind_master_bio = behind_bio;
1138	set_bit(R1BIO_BehindIO, &r1_bio->state);
1139
1140	return;
1141
1142free_pages:
1143	pr_debug("%dB behind alloc failed, doing sync I/O\n",
1144		 bio->bi_iter.bi_size);
1145	bio_free_pages(behind_bio);
1146	bio_put(behind_bio);
1147}
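
/*
 * Sizing sketch (illustrative numbers, assuming 4 KiB pages): for a 12 KiB
 * write, vcnt = (12K + 4K - 1) >> PAGE_SHIFT = 3, so three pages are
 * allocated above and the payload is copied into them before the behind
 * write is issued.
 */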
1148
1149struct raid1_plug_cb {
1150	struct blk_plug_cb	cb;
1151	struct bio_list		pending;
1152	int			pending_cnt;
1153};
1154
1155static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1156{
1157	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1158						  cb);
1159	struct mddev *mddev = plug->cb.data;
1160	struct r1conf *conf = mddev->private;
1161	struct bio *bio;
1162
1163	if (from_schedule || current->bio_list) {
1164		spin_lock_irq(&conf->device_lock);
1165		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1166		conf->pending_count += plug->pending_cnt;
1167		spin_unlock_irq(&conf->device_lock);
1168		wake_up(&conf->wait_barrier);
1169		md_wakeup_thread(mddev->thread);
1170		kfree(plug);
1171		return;
1172	}
1173
1174	/* we aren't scheduling, so we can do the write-out directly. */
1175	bio = bio_list_get(&plug->pending);
1176	flush_bio_list(conf, bio);
1177	kfree(plug);
1178}
1179
1180static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1181{
1182	r1_bio->master_bio = bio;
1183	r1_bio->sectors = bio_sectors(bio);
1184	r1_bio->state = 0;
1185	r1_bio->mddev = mddev;
1186	r1_bio->sector = bio->bi_iter.bi_sector;
1187}
1188
1189static inline struct r1bio *
1190alloc_r1bio(struct mddev *mddev, struct bio *bio)
1191{
1192	struct r1conf *conf = mddev->private;
1193	struct r1bio *r1_bio;
1194
1195	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1196	/* Ensure no bio records IO_BLOCKED */
1197	memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1198	init_r1bio(r1_bio, mddev, bio);
1199	return r1_bio;
1200}
1201
1202static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1203			       int max_read_sectors, struct r1bio *r1_bio)
1204{
1205	struct r1conf *conf = mddev->private;
1206	struct raid1_info *mirror;
1207	struct bio *read_bio;
1208	struct bitmap *bitmap = mddev->bitmap;
1209	const int op = bio_op(bio);
1210	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1211	int max_sectors;
1212	int rdisk;
1213	bool r1bio_existed = !!r1_bio;
1214	char b[BDEVNAME_SIZE];
1215
1216	/*
1217	 * If r1_bio is set, we are blocking the raid1d thread
1218	 * so there is a tiny risk of deadlock.  So ask for
1219	 * emergency memory if needed.
1220	 */
1221	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1222
1223	if (r1bio_existed) {
1224		/* Need to get the block device name carefully */
1225		struct md_rdev *rdev;
1226		rcu_read_lock();
1227		rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1228		if (rdev)
1229			bdevname(rdev->bdev, b);
1230		else
1231			strcpy(b, "???");
1232		rcu_read_unlock();
1233	}
1234
1235	/*
 1236	 * Still need a barrier for READ in case the whole
 1237	 * array is frozen.
1238	 */
1239	wait_read_barrier(conf, bio->bi_iter.bi_sector);
1240
1241	if (!r1_bio)
1242		r1_bio = alloc_r1bio(mddev, bio);
1243	else
1244		init_r1bio(r1_bio, mddev, bio);
1245	r1_bio->sectors = max_read_sectors;
1246
1247	/*
1248	 * make_request() can abort the operation when read-ahead is being
1249	 * used and no empty request is available.
 
1250	 */
1251	rdisk = read_balance(conf, r1_bio, &max_sectors);
1252
1253	if (rdisk < 0) {
1254		/* couldn't find anywhere to read from */
1255		if (r1bio_existed) {
1256			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1257					    mdname(mddev),
1258					    b,
1259					    (unsigned long long)r1_bio->sector);
1260		}
1261		raid_end_bio_io(r1_bio);
1262		return;
1263	}
1264	mirror = conf->mirrors + rdisk;
1265
1266	if (r1bio_existed)
1267		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1268				    mdname(mddev),
1269				    (unsigned long long)r1_bio->sector,
1270				    bdevname(mirror->rdev->bdev, b));
 
 
 
 
1271
1272	if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1273	    bitmap) {
1274		/*
1275		 * Reading from a write-mostly device must take care not to
 1276		 * overtake any writes that are 'behind'
1277		 */
1278		raid1_log(mddev, "wait behind writes");
1279		wait_event(bitmap->behind_wait,
1280			   atomic_read(&bitmap->behind_writes) == 0);
1281	}
1282
1283	if (max_sectors < bio_sectors(bio)) {
1284		struct bio *split = bio_split(bio, max_sectors,
1285					      gfp, &conf->bio_split);
1286		bio_chain(split, bio);
1287		submit_bio_noacct(bio);
1288		bio = split;
1289		r1_bio->master_bio = bio;
1290		r1_bio->sectors = max_sectors;
1291	}
1292
1293	r1_bio->read_disk = rdisk;
 
 
 
 
 
1294
1295	if (!r1bio_existed && blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1296		r1_bio->start_time = bio_start_io_acct(bio);
1297
1298	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1299
1300	r1_bio->bios[rdisk] = read_bio;
1301
1302	read_bio->bi_iter.bi_sector = r1_bio->sector +
1303		mirror->rdev->data_offset;
1304	bio_set_dev(read_bio, mirror->rdev->bdev);
1305	read_bio->bi_end_io = raid1_end_read_request;
1306	bio_set_op_attrs(read_bio, op, do_sync);
1307	if (test_bit(FailFast, &mirror->rdev->flags) &&
1308	    test_bit(R1BIO_FailFast, &r1_bio->state))
1309	        read_bio->bi_opf |= MD_FAILFAST;
1310	read_bio->bi_private = r1_bio;
1311
1312	if (mddev->gendisk)
1313	        trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
1314				      r1_bio->sector);
1315
1316	submit_bio_noacct(read_bio);
1317}
1318
1319static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1320				int max_write_sectors)
1321{
1322	struct r1conf *conf = mddev->private;
1323	struct r1bio *r1_bio;
1324	int i, disks;
1325	struct bitmap *bitmap = mddev->bitmap;
1326	unsigned long flags;
1327	struct md_rdev *blocked_rdev;
1328	struct blk_plug_cb *cb;
1329	struct raid1_plug_cb *plug = NULL;
1330	int first_clone;
1331	int max_sectors;
1332	bool write_behind = false;
1333
1334	if (mddev_is_clustered(mddev) &&
1335	     md_cluster_ops->area_resyncing(mddev, WRITE,
1336		     bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1337
1338		DEFINE_WAIT(w);
1339		for (;;) {
1340			prepare_to_wait(&conf->wait_barrier,
1341					&w, TASK_IDLE);
1342			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1343							bio->bi_iter.bi_sector,
1344							bio_end_sector(bio)))
1345				break;
1346			schedule();
1347		}
1348		finish_wait(&conf->wait_barrier, &w);
1349	}
1350
1351	/*
 1352	 * Register the new request and wait if the reconstruction
 1353	 * thread has put up a barrier for new requests.
 1354	 * Continue immediately if no resync is currently active.
1355	 */
1356	wait_barrier(conf, bio->bi_iter.bi_sector);
1357
1358	r1_bio = alloc_r1bio(mddev, bio);
1359	r1_bio->sectors = max_write_sectors;
1360
1361	if (conf->pending_count >= max_queued_requests) {
1362		md_wakeup_thread(mddev->thread);
1363		raid1_log(mddev, "wait queued");
1364		wait_event(conf->wait_barrier,
1365			   conf->pending_count < max_queued_requests);
1366	}
1367	/* first select target devices under rcu_lock and
1368	 * inc refcount on their rdev.  Record them by setting
1369	 * bios[x] to bio
1370	 * If there are known/acknowledged bad blocks on any device on
1371	 * which we have seen a write error, we want to avoid writing those
1372	 * blocks.
1373	 * This potentially requires several writes to write around
 1374	 * the bad blocks.  Each set of writes gets its own r1bio
1375	 * with a set of bios attached.
1376	 */
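
	/*
	 * Clipping sketch (illustrative numbers): if this r1_bio covers
	 * sectors 100..163 and one rdev has an acknowledged bad block
	 * starting at sector 132, max_sectors is clipped to 32 in the loop
	 * below, this r1bio only writes sectors 100..131, and the remainder
	 * is split off and resubmitted as a separate bio/r1bio.
	 */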
 
1377
1378	disks = conf->raid_disks * 2;
1379 retry_write:
1380	blocked_rdev = NULL;
1381	rcu_read_lock();
1382	max_sectors = r1_bio->sectors;
1383	for (i = 0;  i < disks; i++) {
1384		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1385
1386		/*
 1387		 * The write-behind io is only attempted on drives marked as
 1388		 * write-mostly, which means we could allocate a write-behind
 1389		 * bio later.
1390		 */
1391		if (rdev && test_bit(WriteMostly, &rdev->flags))
1392			write_behind = true;
1393
1394		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1395			atomic_inc(&rdev->nr_pending);
1396			blocked_rdev = rdev;
1397			break;
1398		}
1399		r1_bio->bios[i] = NULL;
1400		if (!rdev || test_bit(Faulty, &rdev->flags)) {
1401			if (i < conf->raid_disks)
1402				set_bit(R1BIO_Degraded, &r1_bio->state);
1403			continue;
1404		}
1405
1406		atomic_inc(&rdev->nr_pending);
1407		if (test_bit(WriteErrorSeen, &rdev->flags)) {
1408			sector_t first_bad;
1409			int bad_sectors;
1410			int is_bad;
1411
1412			is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
 
1413					     &first_bad, &bad_sectors);
1414			if (is_bad < 0) {
1415				/* mustn't write here until the bad block is
 1416				 * acknowledged */
1417				set_bit(BlockedBadBlocks, &rdev->flags);
1418				blocked_rdev = rdev;
1419				break;
1420			}
1421			if (is_bad && first_bad <= r1_bio->sector) {
1422				/* Cannot write here at all */
1423				bad_sectors -= (r1_bio->sector - first_bad);
1424				if (bad_sectors < max_sectors)
1425					/* mustn't write more than bad_sectors
1426					 * to other devices yet
1427					 */
1428					max_sectors = bad_sectors;
1429				rdev_dec_pending(rdev, mddev);
 1430				/* We don't set R1BIO_Degraded as that
 1431				 * only applies if the disk is
 1432				 * missing, so it might be re-added,
 1433				 * and we want to know we have to recover this
 1434				 * chunk.
 1435				 * In this case the device is here,
 1436				 * and the fact that this chunk is not
 1437				 * in-sync is recorded in the bad
 1438				 * block log.
 1439				 */
1440				continue;
1441			}
1442			if (is_bad) {
1443				int good_sectors = first_bad - r1_bio->sector;
1444				if (good_sectors < max_sectors)
1445					max_sectors = good_sectors;
1446			}
1447		}
1448		r1_bio->bios[i] = bio;
1449	}
1450	rcu_read_unlock();
1451
1452	if (unlikely(blocked_rdev)) {
1453		/* Wait for this device to become unblocked */
1454		int j;
1455
1456		for (j = 0; j < i; j++)
1457			if (r1_bio->bios[j])
1458				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1459		r1_bio->state = 0;
1460		allow_barrier(conf, bio->bi_iter.bi_sector);
1461		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1462		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1463		wait_barrier(conf, bio->bi_iter.bi_sector);
1464		goto retry_write;
1465	}
1466
1467	/*
1468	 * When using a bitmap, we may call alloc_behind_master_bio below.
1469	 * alloc_behind_master_bio allocates a copy of the data payload a page
1470	 * at a time and thus needs a new bio that can fit the whole payload
 1471	 * of this bio in page-sized chunks.
1472	 */
1473	if (write_behind && bitmap)
1474		max_sectors = min_t(int, max_sectors,
1475				    BIO_MAX_VECS * (PAGE_SIZE >> 9));
1476	if (max_sectors < bio_sectors(bio)) {
1477		struct bio *split = bio_split(bio, max_sectors,
1478					      GFP_NOIO, &conf->bio_split);
1479		bio_chain(split, bio);
1480		submit_bio_noacct(bio);
1481		bio = split;
1482		r1_bio->master_bio = bio;
1483		r1_bio->sectors = max_sectors;
1484	}
 
1485
1486	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1487		r1_bio->start_time = bio_start_io_acct(bio);
1488	atomic_set(&r1_bio->remaining, 1);
1489	atomic_set(&r1_bio->behind_remaining, 0);
1490
1491	first_clone = 1;
1492
1493	for (i = 0; i < disks; i++) {
1494		struct bio *mbio = NULL;
1495		struct md_rdev *rdev = conf->mirrors[i].rdev;
1496		if (!r1_bio->bios[i])
1497			continue;
1498
 
 
 
1499		if (first_clone) {
1500			/* do behind I/O ?
 1501			 * Not if there are too many, or we cannot
1502			 * allocate memory, or a reader on WriteMostly
1503			 * is waiting for behind writes to flush */
1504			if (bitmap &&
1505			    (atomic_read(&bitmap->behind_writes)
1506			     < mddev->bitmap_info.max_write_behind) &&
1507			    !waitqueue_active(&bitmap->behind_wait)) {
1508				alloc_behind_master_bio(r1_bio, bio);
1509			}
1510
1511			md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1512					     test_bit(R1BIO_BehindIO, &r1_bio->state));
 
 
1513			first_clone = 0;
1514		}
1515
1516		if (r1_bio->behind_master_bio)
1517			mbio = bio_clone_fast(r1_bio->behind_master_bio,
1518					      GFP_NOIO, &mddev->bio_set);
1519		else
1520			mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1521
1522		if (r1_bio->behind_master_bio) {
1523			if (test_bit(CollisionCheck, &rdev->flags))
1524				wait_for_serialization(rdev, r1_bio);
1525			if (test_bit(WriteMostly, &rdev->flags))
 
 
 
1526				atomic_inc(&r1_bio->behind_remaining);
1527		} else if (mddev->serialize_policy)
1528			wait_for_serialization(rdev, r1_bio);
1529
1530		r1_bio->bios[i] = mbio;
1531
1532		mbio->bi_iter.bi_sector	= (r1_bio->sector +
1533				   conf->mirrors[i].rdev->data_offset);
1534		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1535		mbio->bi_end_io	= raid1_end_write_request;
1536		mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1537		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1538		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1539		    conf->raid_disks - mddev->degraded > 1)
1540			mbio->bi_opf |= MD_FAILFAST;
1541		mbio->bi_private = r1_bio;
1542
1543		atomic_inc(&r1_bio->remaining);
1544
1545		if (mddev->gendisk)
1546			trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
1547					      r1_bio->sector);
 1548		/* flush_pending_writes() needs access to the rdev so... */
1549		mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
1550
1551		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1552		if (cb)
1553			plug = container_of(cb, struct raid1_plug_cb, cb);
1554		else
1555			plug = NULL;
1556		if (plug) {
1557			bio_list_add(&plug->pending, mbio);
1558			plug->pending_cnt++;
1559		} else {
1560			spin_lock_irqsave(&conf->device_lock, flags);
1561			bio_list_add(&conf->pending_bio_list, mbio);
1562			conf->pending_count++;
1563			spin_unlock_irqrestore(&conf->device_lock, flags);
1564			md_wakeup_thread(mddev->thread);
1565		}
1566	}
1567
1568	r1_bio_write_done(r1_bio);
1569
 1570	/* In case raid1d snuck into freeze_array */
1571	wake_up(&conf->wait_barrier);
1572}
1573
1574static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1575{
1576	sector_t sectors;
1577
1578	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1579	    && md_flush_request(mddev, bio))
1580		return true;
1581
1582	/*
1583	 * There is a limit to the maximum size, but
1584	 * the read/write handler might find a lower limit
1585	 * due to bad blocks.  To avoid multiple splits,
1586	 * we pass the maximum number of sectors down
1587	 * and let the lower level perform the split.
1588	 */
1589	sectors = align_to_barrier_unit_end(
1590		bio->bi_iter.bi_sector, bio_sectors(bio));
1591
1592	if (bio_data_dir(bio) == READ)
1593		raid1_read_request(mddev, bio, sectors, NULL);
1594	else {
 1595		if (!md_write_start(mddev, bio))
1596			return false;
1597		raid1_write_request(mddev, bio, sectors);
1598	}
1599	return true;
1600}
1601
1602static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1603{
1604	struct r1conf *conf = mddev->private;
1605	int i;
1606
1607	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1608		   conf->raid_disks - mddev->degraded);
1609	rcu_read_lock();
1610	for (i = 0; i < conf->raid_disks; i++) {
1611		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1612		seq_printf(seq, "%s",
1613			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1614	}
1615	rcu_read_unlock();
1616	seq_printf(seq, "]");
1617}
1618
1619static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 
1620{
1621	char b[BDEVNAME_SIZE];
1622	struct r1conf *conf = mddev->private;
1623	unsigned long flags;
1624
 1625	/*
 1626	 * If it is not operational, then we have already marked it as dead;
 1627	 * else if it is the last working disk and "fail_last_dev == false",
 1628	 * ignore the error and let the next level up know;
 1629	 * else mark the drive as failed.
 1630	 */
1631	spin_lock_irqsave(&conf->device_lock, flags);
1632	if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1633	    && (conf->raid_disks - mddev->degraded) == 1) {
1634		/*
1635		 * Don't fail the drive, act as though we were just a
1636		 * normal single drive.
1637		 * However don't try a recovery from this drive as
1638		 * it is very likely to fail.
1639		 */
1640		conf->recovery_disabled = mddev->recovery_disabled;
1641		spin_unlock_irqrestore(&conf->device_lock, flags);
1642		return;
1643	}
1644	set_bit(Blocked, &rdev->flags);
1645	if (test_and_clear_bit(In_sync, &rdev->flags))
 
 
1646		mddev->degraded++;
1647	set_bit(Faulty, &rdev->flags);
1648	spin_unlock_irqrestore(&conf->device_lock, flags);
1649	/*
1650	 * if recovery is running, make sure it aborts.
1651	 */
1652	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1653	set_mask_bits(&mddev->sb_flags, 0,
1654		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1655	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1656		"md/raid1:%s: Operation continuing on %d devices.\n",
1657		mdname(mddev), bdevname(rdev->bdev, b),
1658		mdname(mddev), conf->raid_disks - mddev->degraded);
 
 
1659}
1660
1661static void print_conf(struct r1conf *conf)
1662{
1663	int i;
1664
1665	pr_debug("RAID1 conf printout:\n");
1666	if (!conf) {
1667		pr_debug("(!conf)\n");
1668		return;
1669	}
1670	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1671		 conf->raid_disks);
1672
1673	rcu_read_lock();
1674	for (i = 0; i < conf->raid_disks; i++) {
1675		char b[BDEVNAME_SIZE];
1676		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1677		if (rdev)
1678			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1679				 i, !test_bit(In_sync, &rdev->flags),
1680				 !test_bit(Faulty, &rdev->flags),
1681				 bdevname(rdev->bdev,b));
1682	}
1683	rcu_read_unlock();
1684}
1685
1686static void close_sync(struct r1conf *conf)
1687{
1688	int idx;
1689
1690	for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1691		_wait_barrier(conf, idx);
1692		_allow_barrier(conf, idx);
1693	}
1694
1695	mempool_exit(&conf->r1buf_pool);
 
1696}
1697
1698static int raid1_spare_active(struct mddev *mddev)
1699{
1700	int i;
1701	struct r1conf *conf = mddev->private;
1702	int count = 0;
1703	unsigned long flags;
1704
1705	/*
1706	 * Find all failed disks within the RAID1 configuration
1707	 * and mark them readable.
1708	 * Called under mddev lock, so rcu protection not needed.
1709	 * device_lock used to avoid races with raid1_end_read_request
1710	 * which expects 'In_sync' flags and ->degraded to be consistent.
1711	 */
1712	spin_lock_irqsave(&conf->device_lock, flags);
1713	for (i = 0; i < conf->raid_disks; i++) {
1714		struct md_rdev *rdev = conf->mirrors[i].rdev;
1715		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1716		if (repl
1717		    && !test_bit(Candidate, &repl->flags)
1718		    && repl->recovery_offset == MaxSector
1719		    && !test_bit(Faulty, &repl->flags)
1720		    && !test_and_set_bit(In_sync, &repl->flags)) {
1721			/* replacement has just become active */
1722			if (!rdev ||
1723			    !test_and_clear_bit(In_sync, &rdev->flags))
1724				count++;
1725			if (rdev) {
1726				/* Replaced device not technically
1727				 * faulty, but we need to be sure
1728				 * it gets removed and never re-added
1729				 */
1730				set_bit(Faulty, &rdev->flags);
1731				sysfs_notify_dirent_safe(
1732					rdev->sysfs_state);
1733			}
1734		}
1735		if (rdev
1736		    && rdev->recovery_offset == MaxSector
1737		    && !test_bit(Faulty, &rdev->flags)
1738		    && !test_and_set_bit(In_sync, &rdev->flags)) {
1739			count++;
1740			sysfs_notify_dirent_safe(rdev->sysfs_state);
1741		}
1742	}
 
1743	mddev->degraded -= count;
1744	spin_unlock_irqrestore(&conf->device_lock, flags);
1745
1746	print_conf(conf);
1747	return count;
1748}
1749
1750static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 
1751{
1752	struct r1conf *conf = mddev->private;
1753	int err = -EEXIST;
1754	int mirror = 0;
1755	struct raid1_info *p;
1756	int first = 0;
1757	int last = conf->raid_disks - 1;
1758
1759	if (mddev->recovery_disabled == conf->recovery_disabled)
1760		return -EBUSY;
1761
1762	if (md_integrity_add_rdev(rdev, mddev))
1763		return -ENXIO;
1764
1765	if (rdev->raid_disk >= 0)
1766		first = last = rdev->raid_disk;
1767
1768	/*
1769	 * find the disk ... but prefer rdev->saved_raid_disk
1770	 * if possible.
1771	 */
1772	if (rdev->saved_raid_disk >= 0 &&
1773	    rdev->saved_raid_disk >= first &&
1774	    rdev->saved_raid_disk < conf->raid_disks &&
1775	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1776		first = last = rdev->saved_raid_disk;
1777
1778	for (mirror = first; mirror <= last; mirror++) {
1779		p = conf->mirrors + mirror;
1780		if (!p->rdev) {
1781			if (mddev->gendisk)
1782				disk_stack_limits(mddev->gendisk, rdev->bdev,
1783						  rdev->data_offset << 9);
1784
1785			p->head_position = 0;
1786			rdev->raid_disk = mirror;
1787			err = 0;
 1788			/* As all devices are equivalent, we don't need a full recovery
 1789			 * if this disk was recently a member of the array.
 1790			 */
1791			if (rdev->saved_raid_disk < 0)
1792				conf->fullsync = 1;
1793			rcu_assign_pointer(p->rdev, rdev);
1794			break;
1795		}
1796		if (test_bit(WantReplacement, &p->rdev->flags) &&
1797		    p[conf->raid_disks].rdev == NULL) {
1798			/* Add this device as a replacement */
1799			clear_bit(In_sync, &rdev->flags);
1800			set_bit(Replacement, &rdev->flags);
1801			rdev->raid_disk = mirror;
1802			err = 0;
1803			conf->fullsync = 1;
1804			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1805			break;
1806		}
1807	}
1808	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1809		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1810	print_conf(conf);
1811	return err;
1812}
1813
1814static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1815{
1816	struct r1conf *conf = mddev->private;
1817	int err = 0;
1818	int number = rdev->raid_disk;
1819	struct raid1_info *p = conf->mirrors + number;
1820
1821	if (rdev != p->rdev)
1822		p = conf->mirrors + conf->raid_disks + number;
1823
1824	print_conf(conf);
1825	if (rdev == p->rdev) {
 
1826		if (test_bit(In_sync, &rdev->flags) ||
1827		    atomic_read(&rdev->nr_pending)) {
1828			err = -EBUSY;
1829			goto abort;
1830		}
1831		/* Only remove non-faulty devices if recovery
1832		 * is not possible.
1833		 */
1834		if (!test_bit(Faulty, &rdev->flags) &&
1835		    mddev->recovery_disabled != conf->recovery_disabled &&
1836		    mddev->degraded < conf->raid_disks) {
1837			err = -EBUSY;
1838			goto abort;
1839		}
1840		p->rdev = NULL;
1841		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1842			synchronize_rcu();
1843			if (atomic_read(&rdev->nr_pending)) {
1844				/* lost the race, try later */
1845				err = -EBUSY;
1846				p->rdev = rdev;
1847				goto abort;
1848			}
1849		}
1850		if (conf->mirrors[conf->raid_disks + number].rdev) {
1851			/* We just removed a device that is being replaced.
1852			 * Move down the replacement.  We drain all IO before
1853			 * doing this to avoid confusion.
1854			 */
1855			struct md_rdev *repl =
1856				conf->mirrors[conf->raid_disks + number].rdev;
1857			freeze_array(conf, 0);
1858			if (atomic_read(&repl->nr_pending)) {
 1859				/* It means that some queued IO on the retry_list
 1860				 * still holds repl. Thus, we cannot set the
 1861				 * replacement to NULL here, to avoid an rdev NULL
 1862				 * pointer dereference in sync_request_write and
 1863				 * handle_write_finished.
 1864				 */
1865				err = -EBUSY;
1866				unfreeze_array(conf);
1867				goto abort;
1868			}
1869			clear_bit(Replacement, &repl->flags);
1870			p->rdev = repl;
1871			conf->mirrors[conf->raid_disks + number].rdev = NULL;
1872			unfreeze_array(conf);
1873		}
1874
1875		clear_bit(WantReplacement, &rdev->flags);
1876		err = md_integrity_register(mddev);
1877	}
1878abort:
1879
1880	print_conf(conf);
1881	return err;
1882}
1883
1884static void end_sync_read(struct bio *bio)
 
1885{
1886	struct r1bio *r1_bio = get_resync_r1bio(bio);
1887
1888	update_head_pos(r1_bio->read_disk, r1_bio);
1889
 
 
 
 
 
1890	/*
1891	 * we have read a block, now it needs to be re-written,
1892	 * or re-read if the read failed.
1893	 * We don't do much here, just schedule handling by raid1d
1894	 */
1895	if (!bio->bi_status)
1896		set_bit(R1BIO_Uptodate, &r1_bio->state);
1897
1898	if (atomic_dec_and_test(&r1_bio->remaining))
1899		reschedule_retry(r1_bio);
1900}
1901
1902static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1903{
1904	sector_t sync_blocks = 0;
1905	sector_t s = r1_bio->sector;
1906	long sectors_to_go = r1_bio->sectors;
1907
1908	/* make sure these bits don't get cleared. */
1909	do {
1910		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1911		s += sync_blocks;
1912		sectors_to_go -= sync_blocks;
1913	} while (sectors_to_go > 0);
1914}
1915
1916static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1917{
1918	if (atomic_dec_and_test(&r1_bio->remaining)) {
1919		struct mddev *mddev = r1_bio->mddev;
1920		int s = r1_bio->sectors;
1921
1922		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1923		    test_bit(R1BIO_WriteError, &r1_bio->state))
1924			reschedule_retry(r1_bio);
1925		else {
1926			put_buf(r1_bio);
1927			md_done_sync(mddev, s, uptodate);
1928		}
1929	}
1930}
1931
1932static void end_sync_write(struct bio *bio)
1933{
1934	int uptodate = !bio->bi_status;
1935	struct r1bio *r1_bio = get_resync_r1bio(bio);
1936	struct mddev *mddev = r1_bio->mddev;
1937	struct r1conf *conf = mddev->private;
1938	sector_t first_bad;
1939	int bad_sectors;
1940	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1941
 
 
 
 
 
1942	if (!uptodate) {
1943		abort_sync_write(mddev, r1_bio);
1944		set_bit(WriteErrorSeen, &rdev->flags);
1945		if (!test_and_set_bit(WantReplacement, &rdev->flags))
1946			set_bit(MD_RECOVERY_NEEDED, &
1947				mddev->recovery);
1948		set_bit(R1BIO_WriteError, &r1_bio->state);
1949	} else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
 
 
1950			       &first_bad, &bad_sectors) &&
1951		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1952				r1_bio->sector,
1953				r1_bio->sectors,
1954				&first_bad, &bad_sectors)
1955		)
1956		set_bit(R1BIO_MadeGood, &r1_bio->state);
1957
1958	put_sync_write_buf(r1_bio, uptodate);
1959}
1960
1961static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1962			    int sectors, struct page *page, int rw)
1963{
1964	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1965		/* success */
1966		return 1;
1967	if (rw == WRITE) {
1968		set_bit(WriteErrorSeen, &rdev->flags);
1969		if (!test_and_set_bit(WantReplacement,
1970				      &rdev->flags))
1971			set_bit(MD_RECOVERY_NEEDED, &
1972				rdev->mddev->recovery);
1973	}
1974	/* need to record an error - either for the block or the device */
1975	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1976		md_error(rdev->mddev, rdev);
1977	return 0;
1978}
1979
1980static int fix_sync_read_error(struct r1bio *r1_bio)
1981{
1982	/* Try some synchronous reads of other devices to get
1983	 * good data, much like with normal read errors.  Only
1984	 * read into the pages we already have so we don't
1985	 * need to re-issue the read request.
1986	 * We don't need to freeze the array, because being in an
1987	 * active sync request, there is no normal IO, and
1988	 * no overlapping syncs.
1989	 * We don't need to check is_badblock() again as we
1990	 * made sure that anything with a bad block in range
1991	 * will have bi_end_io clear.
1992	 */
1993	struct mddev *mddev = r1_bio->mddev;
1994	struct r1conf *conf = mddev->private;
1995	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1996	struct page **pages = get_resync_pages(bio)->pages;
1997	sector_t sect = r1_bio->sector;
1998	int sectors = r1_bio->sectors;
1999	int idx = 0;
2000	struct md_rdev *rdev;
2001
2002	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2003	if (test_bit(FailFast, &rdev->flags)) {
2004		/* Don't try recovering from here - just fail it
2005		 * ... unless it is the last working device of course */
2006		md_error(mddev, rdev);
2007		if (test_bit(Faulty, &rdev->flags))
2008			/* Don't try to read from here, but make sure
 2009			 * put_buf does its thing
2010			 */
2011			bio->bi_end_io = end_sync_write;
2012	}
2013
2014	while(sectors) {
2015		int s = sectors;
2016		int d = r1_bio->read_disk;
2017		int success = 0;
 
2018		int start;
2019
2020		if (s > (PAGE_SIZE>>9))
2021			s = PAGE_SIZE >> 9;
2022		do {
2023			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
 2024				/* No rcu protection needed here; devices
2025				 * can only be removed when no resync is
2026				 * active, and resync is currently active
2027				 */
2028				rdev = conf->mirrors[d].rdev;
2029				if (sync_page_io(rdev, sect, s<<9,
2030						 pages[idx],
2031						 REQ_OP_READ, 0, false)) {
2032					success = 1;
2033					break;
2034				}
2035			}
2036			d++;
2037			if (d == conf->raid_disks * 2)
2038				d = 0;
2039		} while (!success && d != r1_bio->read_disk);
2040
2041		if (!success) {
2042			char b[BDEVNAME_SIZE];
2043			int abort = 0;
2044			/* Cannot read from anywhere, this block is lost.
2045			 * Record a bad block on each device.  If that doesn't
2046			 * work just disable and interrupt the recovery.
2047			 * Don't fail devices as that won't really help.
2048			 */
2049			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2050					    mdname(mddev), bio_devname(bio, b),
2051					    (unsigned long long)r1_bio->sector);
2052			for (d = 0; d < conf->raid_disks * 2; d++) {
 
 
2053				rdev = conf->mirrors[d].rdev;
2054				if (!rdev || test_bit(Faulty, &rdev->flags))
2055					continue;
2056				if (!rdev_set_badblocks(rdev, sect, s, 0))
2057					abort = 1;
2058			}
2059			if (abort) {
2060				conf->recovery_disabled =
2061					mddev->recovery_disabled;
2062				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2063				md_done_sync(mddev, r1_bio->sectors, 0);
2064				put_buf(r1_bio);
2065				return 0;
2066			}
2067			/* Try next page */
2068			sectors -= s;
2069			sect += s;
2070			idx++;
2071			continue;
2072		}
2073
2074		start = d;
2075		/* write it back and re-read */
2076		while (d != r1_bio->read_disk) {
2077			if (d == 0)
2078				d = conf->raid_disks * 2;
2079			d--;
2080			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2081				continue;
2082			rdev = conf->mirrors[d].rdev;
2083			if (r1_sync_page_io(rdev, sect, s,
2084					    pages[idx],
2085					    WRITE) == 0) {
2086				r1_bio->bios[d]->bi_end_io = NULL;
2087				rdev_dec_pending(rdev, mddev);
2088			}
2089		}
2090		d = start;
2091		while (d != r1_bio->read_disk) {
2092			if (d == 0)
2093				d = conf->raid_disks * 2;
2094			d--;
2095			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2096				continue;
2097			rdev = conf->mirrors[d].rdev;
2098			if (r1_sync_page_io(rdev, sect, s,
2099					    pages[idx],
2100					    READ) != 0)
2101				atomic_add(s, &rdev->corrected_errors);
2102		}
2103		sectors -= s;
2104		sect += s;
 2105		idx++;
2106	}
2107	set_bit(R1BIO_Uptodate, &r1_bio->state);
2108	bio->bi_status = 0;
2109	return 1;
2110}
2111
2112static void process_checks(struct r1bio *r1_bio)
2113{
2114	/* We have read all readable devices.  If we haven't
2115	 * got the block, then there is no hope left.
2116	 * If we have, then we want to do a comparison
2117	 * and skip the write if everything is the same.
2118	 * If any blocks failed to read, then we need to
2119	 * attempt an over-write
2120	 */
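
	/*
	 * Comparison sketch (illustrative numbers, assuming 4 KiB pages): for
	 * a 64 KiB resync window r1_bio->sectors = 128, so vcnt = 16 pages are
	 * compared below. If every page of a secondary bio matches the primary
	 * it is not rewritten; otherwise resync_mismatches is bumped by 128
	 * and, unless this is a check-only pass, the primary data is copied
	 * over and written back.
	 */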
2121	struct mddev *mddev = r1_bio->mddev;
2122	struct r1conf *conf = mddev->private;
2123	int primary;
2124	int i;
2125	int vcnt;
2126
2127	/* Fix variable parts of all bios */
2128	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2129	for (i = 0; i < conf->raid_disks * 2; i++) {
2130		blk_status_t status;
2131		struct bio *b = r1_bio->bios[i];
2132		struct resync_pages *rp = get_resync_pages(b);
2133		if (b->bi_end_io != end_sync_read)
2134			continue;
2135		/* fixup the bio for reuse, but preserve errno */
2136		status = b->bi_status;
2137		bio_reset(b);
2138		b->bi_status = status;
2139		b->bi_iter.bi_sector = r1_bio->sector +
2140			conf->mirrors[i].rdev->data_offset;
2141		bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2142		b->bi_end_io = end_sync_read;
2143		rp->raid_bio = r1_bio;
2144		b->bi_private = rp;
2145
2146		/* initialize bvec table again */
2147		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2148	}
2149	for (primary = 0; primary < conf->raid_disks * 2; primary++)
2150		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2151		    !r1_bio->bios[primary]->bi_status) {
2152			r1_bio->bios[primary]->bi_end_io = NULL;
2153			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2154			break;
2155		}
2156	r1_bio->read_disk = primary;
2157	for (i = 0; i < conf->raid_disks * 2; i++) {
2158		int j = 0;
 
2159		struct bio *pbio = r1_bio->bios[primary];
2160		struct bio *sbio = r1_bio->bios[i];
2161		blk_status_t status = sbio->bi_status;
2162		struct page **ppages = get_resync_pages(pbio)->pages;
2163		struct page **spages = get_resync_pages(sbio)->pages;
2164		struct bio_vec *bi;
2165		int page_len[RESYNC_PAGES] = { 0 };
2166		struct bvec_iter_all iter_all;
2167
2168		if (sbio->bi_end_io != end_sync_read)
2169			continue;
2170		/* Now we can 'fixup' the error value */
2171		sbio->bi_status = 0;
2172
2173		bio_for_each_segment_all(bi, sbio, iter_all)
2174			page_len[j++] = bi->bv_len;
2175
2176		if (!status) {
2177			for (j = vcnt; j-- ; ) {
2178				if (memcmp(page_address(ppages[j]),
2179					   page_address(spages[j]),
2180					   page_len[j]))
 
 
 
2181					break;
2182			}
2183		} else
2184			j = 0;
2185		if (j >= 0)
2186			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2187		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2188			      && !status)) {
2189			/* No need to write to this device. */
2190			sbio->bi_end_io = NULL;
2191			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2192			continue;
2193		}
2194
2195		bio_copy_data(sbio, pbio);
2196	}
 
2197}
2198
2199static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2200{
2201	struct r1conf *conf = mddev->private;
2202	int i;
2203	int disks = conf->raid_disks * 2;
2204	struct bio *wbio;
 
 
2205
2206	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2207		/* ouch - failed to read all of that. */
2208		if (!fix_sync_read_error(r1_bio))
2209			return;
2210
2211	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2212		process_checks(r1_bio);
2213
2214	/*
2215	 * schedule writes
2216	 */
2217	atomic_set(&r1_bio->remaining, 1);
2218	for (i = 0; i < disks ; i++) {
2219		wbio = r1_bio->bios[i];
2220		if (wbio->bi_end_io == NULL ||
2221		    (wbio->bi_end_io == end_sync_read &&
2222		     (i == r1_bio->read_disk ||
2223		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2224			continue;
2225		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2226			abort_sync_write(mddev, r1_bio);
2227			continue;
2228		}
2229
2230		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2231		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2232			wbio->bi_opf |= MD_FAILFAST;
2233
 
2234		wbio->bi_end_io = end_sync_write;
2235		atomic_inc(&r1_bio->remaining);
2236		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2237
2238		submit_bio_noacct(wbio);
2239	}
2240
2241	put_sync_write_buf(r1_bio, 1);
 
 
 
 
2242}
2243
2244/*
2245 * This is a kernel thread which:
2246 *
2247 *	1.	Retries failed read operations on working mirrors.
 2248 *	2.	Updates the raid superblock when problems are encountered.
2249 *	3.	Performs writes following reads for array synchronising.
2250 */
2251
2252static void fix_read_error(struct r1conf *conf, int read_disk,
2253			   sector_t sect, int sectors)
2254{
2255	struct mddev *mddev = conf->mddev;
2256	while(sectors) {
2257		int s = sectors;
2258		int d = read_disk;
2259		int success = 0;
2260		int start;
2261		struct md_rdev *rdev;
2262
2263		if (s > (PAGE_SIZE>>9))
2264			s = PAGE_SIZE >> 9;
2265
2266		do {
 
 
 
 
 
2267			sector_t first_bad;
2268			int bad_sectors;
2269
2270			rcu_read_lock();
2271			rdev = rcu_dereference(conf->mirrors[d].rdev);
2272			if (rdev &&
2273			    (test_bit(In_sync, &rdev->flags) ||
2274			     (!test_bit(Faulty, &rdev->flags) &&
2275			      rdev->recovery_offset >= sect + s)) &&
2276			    is_badblock(rdev, sect, s,
2277					&first_bad, &bad_sectors) == 0) {
2278				atomic_inc(&rdev->nr_pending);
2279				rcu_read_unlock();
2280				if (sync_page_io(rdev, sect, s<<9,
2281					 conf->tmppage, REQ_OP_READ, 0, false))
2282					success = 1;
2283				rdev_dec_pending(rdev, mddev);
2284				if (success)
2285					break;
2286			} else
2287				rcu_read_unlock();
2288			d++;
2289			if (d == conf->raid_disks * 2)
2290				d = 0;
2291		} while (!success && d != read_disk);
2292
2293		if (!success) {
2294			/* Cannot read from anywhere - mark it bad */
2295			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2296			if (!rdev_set_badblocks(rdev, sect, s, 0))
2297				md_error(mddev, rdev);
2298			break;
2299		}
2300		/* write it back and re-read */
2301		start = d;
2302		while (d != read_disk) {
2303			if (d==0)
2304				d = conf->raid_disks * 2;
2305			d--;
2306			rcu_read_lock();
2307			rdev = rcu_dereference(conf->mirrors[d].rdev);
2308			if (rdev &&
2309			    !test_bit(Faulty, &rdev->flags)) {
2310				atomic_inc(&rdev->nr_pending);
2311				rcu_read_unlock();
2312				r1_sync_page_io(rdev, sect, s,
2313						conf->tmppage, WRITE);
2314				rdev_dec_pending(rdev, mddev);
2315			} else
2316				rcu_read_unlock();
2317		}
2318		d = start;
2319		while (d != read_disk) {
2320			char b[BDEVNAME_SIZE];
2321			if (d==0)
2322				d = conf->raid_disks * 2;
2323			d--;
2324			rcu_read_lock();
2325			rdev = rcu_dereference(conf->mirrors[d].rdev);
2326			if (rdev &&
2327			    !test_bit(Faulty, &rdev->flags)) {
2328				atomic_inc(&rdev->nr_pending);
2329				rcu_read_unlock();
2330				if (r1_sync_page_io(rdev, sect, s,
2331						    conf->tmppage, READ)) {
2332					atomic_add(s, &rdev->corrected_errors);
2333					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2334						mdname(mddev), s,
2335						(unsigned long long)(sect +
2336								     rdev->data_offset),
2337						bdevname(rdev->bdev, b));
 
 
2338				}
2339				rdev_dec_pending(rdev, mddev);
2340			} else
2341				rcu_read_unlock();
2342		}
2343		sectors -= s;
2344		sect += s;
2345	}
2346}
2347
2348static int narrow_write_error(struct r1bio *r1_bio, int i)
2349{
2350	struct mddev *mddev = r1_bio->mddev;
2351	struct r1conf *conf = mddev->private;
2352	struct md_rdev *rdev = conf->mirrors[i].rdev;
2353
2354	/* bio has the data to be written to device 'i' where
2355	 * we just recently had a write error.
2356	 * We repeatedly clone the bio and trim down to one block,
2357	 * then try the write.  Where the write fails we record
2358	 * a bad block.
2359	 * It is conceivable that the bio doesn't exactly align with
2360	 * blocks.  We must handle this somehow.
2361	 *
2362	 * We currently own a reference on the rdev.
2363	 */
2364
2365	int block_sectors;
2366	sector_t sector;
2367	int sectors;
2368	int sect_to_write = r1_bio->sectors;
2369	int ok = 1;
2370
2371	if (rdev->badblocks.shift < 0)
2372		return 0;
2373
2374	block_sectors = roundup(1 << rdev->badblocks.shift,
2375				bdev_logical_block_size(rdev->bdev) >> 9);
2376	sector = r1_bio->sector;
2377	sectors = ((sector + block_sectors)
2378		   & ~(sector_t)(block_sectors - 1))
2379		- sector;
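
	/*
	 * Alignment sketch (illustrative numbers, assuming 512-byte logical
	 * blocks): with badblocks.shift = 3 (8-sector granularity) and
	 * r1_bio->sector = 1001, block_sectors = 8 and the first chunk is
	 * ((1001 + 8) & ~7) - 1001 = 7 sectors, so the loop below first
	 * writes up to the next 8-sector boundary and then continues in
	 * whole 8-sector blocks.
	 */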
2380
2381	while (sect_to_write) {
2382		struct bio *wbio;
2383		if (sectors > sect_to_write)
2384			sectors = sect_to_write;
2385		/* Write at 'sector' for 'sectors'*/
2386
2387		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2388			wbio = bio_clone_fast(r1_bio->behind_master_bio,
2389					      GFP_NOIO,
2390					      &mddev->bio_set);
2391		} else {
2392			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2393					      &mddev->bio_set);
2394		}
2395
2396		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2397		wbio->bi_iter.bi_sector = r1_bio->sector;
2398		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2399
2400		bio_trim(wbio, sector - r1_bio->sector, sectors);
2401		wbio->bi_iter.bi_sector += rdev->data_offset;
2402		bio_set_dev(wbio, rdev->bdev);
2403
2404		if (submit_bio_wait(wbio) < 0)
2405			/* failure! */
2406			ok = rdev_set_badblocks(rdev, sector,
2407						sectors, 0)
2408				&& ok;
2409
2410		bio_put(wbio);
2411		sect_to_write -= sectors;
2412		sector += sectors;
2413		sectors = block_sectors;
2414	}
2415	return ok;
2416}
2417
2418static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2419{
2420	int m;
2421	int s = r1_bio->sectors;
2422	for (m = 0; m < conf->raid_disks * 2 ; m++) {
2423		struct md_rdev *rdev = conf->mirrors[m].rdev;
2424		struct bio *bio = r1_bio->bios[m];
2425		if (bio->bi_end_io == NULL)
2426			continue;
2427		if (!bio->bi_status &&
2428		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2429			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2430		}
2431		if (bio->bi_status &&
2432		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
2433			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2434				md_error(conf->mddev, rdev);
2435		}
2436	}
2437	put_buf(r1_bio);
2438	md_done_sync(conf->mddev, s, 1);
2439}
2440
2441static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2442{
2443	int m, idx;
2444	bool fail = false;
2445
2446	for (m = 0; m < conf->raid_disks * 2 ; m++)
2447		if (r1_bio->bios[m] == IO_MADE_GOOD) {
2448			struct md_rdev *rdev = conf->mirrors[m].rdev;
2449			rdev_clear_badblocks(rdev,
2450					     r1_bio->sector,
2451					     r1_bio->sectors, 0);
2452			rdev_dec_pending(rdev, conf->mddev);
2453		} else if (r1_bio->bios[m] != NULL) {
2454			/* This drive got a write error.  We need to
2455			 * narrow down and record precise write
2456			 * errors.
2457			 */
2458			fail = true;
2459			if (!narrow_write_error(r1_bio, m)) {
2460				md_error(conf->mddev,
2461					 conf->mirrors[m].rdev);
2462				/* an I/O failed, we can't clear the bitmap */
2463				set_bit(R1BIO_Degraded, &r1_bio->state);
2464			}
2465			rdev_dec_pending(conf->mirrors[m].rdev,
2466					 conf->mddev);
2467		}
2468	if (fail) {
2469		spin_lock_irq(&conf->device_lock);
2470		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2471		idx = sector_to_idx(r1_bio->sector);
2472		atomic_inc(&conf->nr_queued[idx]);
2473		spin_unlock_irq(&conf->device_lock);
2474		/*
2475		 * In case freeze_array() is waiting for condition
2476		 * get_unqueued_pending() == extra to be true.
2477		 */
2478		wake_up(&conf->wait_barrier);
2479		md_wakeup_thread(conf->mddev->thread);
2480	} else {
2481		if (test_bit(R1BIO_WriteError, &r1_bio->state))
2482			close_write(r1_bio);
2483		raid_end_bio_io(r1_bio);
2484	}
2485}
2486
2487static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2488{
2489	struct mddev *mddev = conf->mddev;
 
 
2490	struct bio *bio;
2491	struct md_rdev *rdev;
 
2492
2493	clear_bit(R1BIO_ReadError, &r1_bio->state);
 2494	/* we got a read error. Maybe the drive is bad.  Maybe just
 2495	 * the block, and we can fix it.
 2496	 * We freeze all other IO, and try reading the block from
 2497	 * other devices.  When we find one, we re-write
 2498	 * and check whether that fixes the read error.
 2499	 * This is all done synchronously while the array is
 2500	 * frozen.
 2501	 */
2502
2503	bio = r1_bio->bios[r1_bio->read_disk];
2504	bio_put(bio);
2505	r1_bio->bios[r1_bio->read_disk] = NULL;
2506
2507	rdev = conf->mirrors[r1_bio->read_disk].rdev;
2508	if (mddev->ro == 0
2509	    && !test_bit(FailFast, &rdev->flags)) {
2510		freeze_array(conf, 1);
2511		fix_read_error(conf, r1_bio->read_disk,
2512			       r1_bio->sector, r1_bio->sectors);
2513		unfreeze_array(conf);
2514	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2515		md_error(mddev, rdev);
2516	} else {
2517		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2518	}
2519
2520	rdev_dec_pending(rdev, conf->mddev);
2521	allow_barrier(conf, r1_bio->sector);
2522	bio = r1_bio->master_bio;
2523
2524	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2525	r1_bio->state = 0;
2526	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2527}
2528
2529static void raid1d(struct md_thread *thread)
2530{
2531	struct mddev *mddev = thread->mddev;
2532	struct r1bio *r1_bio;
2533	unsigned long flags;
2534	struct r1conf *conf = mddev->private;
2535	struct list_head *head = &conf->retry_list;
2536	struct blk_plug plug;
2537	int idx;
2538
2539	md_check_recovery(mddev);
2540
2541	if (!list_empty_careful(&conf->bio_end_io_list) &&
2542	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2543		LIST_HEAD(tmp);
2544		spin_lock_irqsave(&conf->device_lock, flags);
2545		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2546			list_splice_init(&conf->bio_end_io_list, &tmp);
2547		spin_unlock_irqrestore(&conf->device_lock, flags);
2548		while (!list_empty(&tmp)) {
2549			r1_bio = list_first_entry(&tmp, struct r1bio,
2550						  retry_list);
2551			list_del(&r1_bio->retry_list);
2552			idx = sector_to_idx(r1_bio->sector);
2553			atomic_dec(&conf->nr_queued[idx]);
2554			if (mddev->degraded)
2555				set_bit(R1BIO_Degraded, &r1_bio->state);
2556			if (test_bit(R1BIO_WriteError, &r1_bio->state))
2557				close_write(r1_bio);
2558			raid_end_bio_io(r1_bio);
2559		}
2560	}
2561
2562	blk_start_plug(&plug);
2563	for (;;) {
2564
2565		flush_pending_writes(conf);
2566
2567		spin_lock_irqsave(&conf->device_lock, flags);
2568		if (list_empty(head)) {
2569			spin_unlock_irqrestore(&conf->device_lock, flags);
2570			break;
2571		}
2572		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2573		list_del(head->prev);
2574		idx = sector_to_idx(r1_bio->sector);
2575		atomic_dec(&conf->nr_queued[idx]);
2576		spin_unlock_irqrestore(&conf->device_lock, flags);
2577
2578		mddev = r1_bio->mddev;
2579		conf = mddev->private;
2580		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2581			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2582			    test_bit(R1BIO_WriteError, &r1_bio->state))
2583				handle_sync_write_finished(conf, r1_bio);
2584			else
2585				sync_request_write(mddev, r1_bio);
2586		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2587			   test_bit(R1BIO_WriteError, &r1_bio->state))
2588			handle_write_finished(conf, r1_bio);
2589		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2590			handle_read_error(conf, r1_bio);
2591		else
2592			WARN_ON_ONCE(1);
2593
2594		cond_resched();
2595		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2596			md_check_recovery(mddev);
2597	}
2598	blk_finish_plug(&plug);
2599}
2600
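/*
 * Pre-allocate the resync buffer pool: enough r1buf entries to keep a
 * full RESYNC_WINDOW of resync IO in flight.
 */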
2601static int init_resync(struct r1conf *conf)
2602{
2603	int buffs;
2604
2605	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2606	BUG_ON(mempool_initialized(&conf->r1buf_pool));
2607
2608	return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2609			    r1buf_pool_free, conf->poolinfo);
2610}
2611
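/*
 * Take an r1bio from the resync buffer pool and reset its bios for
 * reuse, preserving the resync_pages pointer stashed in bi_private.
 */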
2612static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2613{
2614	struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2615	struct resync_pages *rps;
2616	struct bio *bio;
2617	int i;
2618
2619	for (i = conf->poolinfo->raid_disks; i--; ) {
2620		bio = r1bio->bios[i];
2621		rps = bio->bi_private;
2622		bio_reset(bio);
2623		bio->bi_private = rps;
2624	}
2625	r1bio->master_bio = NULL;
2626	return r1bio;
2627}
2628
2629/*
2630 * perform a "sync" on one "block"
2631 *
2632 * We need to make sure that normal I/O requests - particularly write
2633 * requests - do not conflict with active sync requests.
2634 *
2635 * This is achieved by tracking pending requests and a 'barrier' concept
2636 * that can be installed to exclude normal IO requests.
2637 */
2638
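/*
 * Returns the number of sectors dealt with in this call; if *skipped is
 * set, those sectors were skipped rather than actually synced.
 */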
2639static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2640				   int *skipped)
2641{
2642	struct r1conf *conf = mddev->private;
2643	struct r1bio *r1_bio;
2644	struct bio *bio;
2645	sector_t max_sector, nr_sectors;
2646	int disk = -1;
2647	int i;
2648	int wonly = -1;
2649	int write_targets = 0, read_targets = 0;
2650	sector_t sync_blocks;
2651	int still_degraded = 0;
2652	int good_sectors = RESYNC_SECTORS;
2653	int min_bad = 0; /* number of sectors that are bad in all devices */
2654	int idx = sector_to_idx(sector_nr);
2655	int page_idx = 0;
2656
2657	if (!mempool_initialized(&conf->r1buf_pool))
2658		if (init_resync(conf))
2659			return 0;
2660
2661	max_sector = mddev->dev_sectors;
2662	if (sector_nr >= max_sector) {
2663		/* If we aborted, we need to abort the
2664		 * sync on the 'current' bitmap chunk (there will
2665		 * only be one in raid1 resync).
2666		 * We can find the current address in mddev->curr_resync.
2667		 */
2668		if (mddev->curr_resync < max_sector) /* aborted */
2669			md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2670					   &sync_blocks, 1);
2671		else /* completed sync */
2672			conf->fullsync = 0;
2673
2674		md_bitmap_close_sync(mddev->bitmap);
2675		close_sync(conf);
2676
2677		if (mddev_is_clustered(mddev)) {
2678			conf->cluster_sync_low = 0;
2679			conf->cluster_sync_high = 0;
2680		}
2681		return 0;
2682	}
2683
2684	if (mddev->bitmap == NULL &&
2685	    mddev->recovery_cp == MaxSector &&
2686	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2687	    conf->fullsync == 0) {
2688		*skipped = 1;
2689		return max_sector - sector_nr;
2690	}
2691	/* Before building a request, check if we can skip these blocks.
2692	 * This call to md_bitmap_start_sync doesn't actually record anything.
2693	 */
2694	if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2695	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2696		/* We can skip this block, and probably several more */
2697		*skipped = 1;
2698		return sync_blocks;
2699	}
2700
2701	/*
2702	 * If there is non-resync activity waiting for a turn, then let it
2703	 * through before starting on this new sync request.
2704	 */
2705	if (atomic_read(&conf->nr_waiting[idx]))
2706		schedule_timeout_uninterruptible(1);
2707
2708	/* we are incrementing sector_nr below. To be safe, we check against
2709	 * sector_nr + two times RESYNC_SECTORS
2710	 */
2711
2712	md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2713		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2714
2715
2716	if (raise_barrier(conf, sector_nr))
2717		return 0;
2718
2719	r1_bio = raid1_alloc_init_r1buf(conf);
2720
2721	rcu_read_lock();
2722	/*
2723	 * If we get a correctable read error during resync or recovery,
2724	 * we might want to read from a different device.  So we
2725	 * flag all drives that could conceivably be read from for READ,
2726	 * and any others (which will be non-In_sync devices) for WRITE.
2727	 * If a read fails, we try reading from something else for which READ
2728	 * is OK.
2729	 */
2730
2731	r1_bio->mddev = mddev;
2732	r1_bio->sector = sector_nr;
2733	r1_bio->state = 0;
2734	set_bit(R1BIO_IsSync, &r1_bio->state);
2735	/* make sure good_sectors won't go across barrier unit boundary */
2736	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
2737
2738	for (i = 0; i < conf->raid_disks * 2; i++) {
2739		struct md_rdev *rdev;
2740		bio = r1_bio->bios[i];
2741
2742		rdev = rcu_dereference(conf->mirrors[i].rdev);
2743		if (rdev == NULL ||
2744		    test_bit(Faulty, &rdev->flags)) {
2745			if (i < conf->raid_disks)
2746				still_degraded = 1;
2747		} else if (!test_bit(In_sync, &rdev->flags)) {
2748			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2749			bio->bi_end_io = end_sync_write;
2750			write_targets++;
2751		} else {
2752			/* may need to read from here */
2753			sector_t first_bad = MaxSector;
2754			int bad_sectors;
2755
2756			if (is_badblock(rdev, sector_nr, good_sectors,
2757					&first_bad, &bad_sectors)) {
2758				if (first_bad > sector_nr)
2759					good_sectors = first_bad - sector_nr;
2760				else {
2761					bad_sectors -= (sector_nr - first_bad);
2762					if (min_bad == 0 ||
2763					    min_bad > bad_sectors)
2764						min_bad = bad_sectors;
2765				}
2766			}
2767			if (sector_nr < first_bad) {
2768				if (test_bit(WriteMostly, &rdev->flags)) {
2769					if (wonly < 0)
2770						wonly = i;
2771				} else {
2772					if (disk < 0)
2773						disk = i;
2774				}
2775				bio_set_op_attrs(bio, REQ_OP_READ, 0);
2776				bio->bi_end_io = end_sync_read;
2777				read_targets++;
2778			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2779				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2780				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2781				/*
2782				 * The device is suitable for reading (InSync),
2783				 * but has bad block(s) here. Let's try to correct them,
2784				 * if we are doing resync or repair. Otherwise, leave
2785				 * this device alone for this sync request.
2786				 */
2787				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2788				bio->bi_end_io = end_sync_write;
2789				write_targets++;
2790			}
2791		}
2792		if (rdev && bio->bi_end_io) {
2793			atomic_inc(&rdev->nr_pending);
2794			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2795			bio_set_dev(bio, rdev->bdev);
2796			if (test_bit(FailFast, &rdev->flags))
2797				bio->bi_opf |= MD_FAILFAST;
2798		}
2799	}
2800	rcu_read_unlock();
2801	if (disk < 0)
2802		disk = wonly;
2803	r1_bio->read_disk = disk;
2804
2805	if (read_targets == 0 && min_bad > 0) {
2806		/* These sectors are bad on all InSync devices, so we
2807		 * need to mark them bad on all write targets
2808		 */
2809		int ok = 1;
2810		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2811			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2812				struct md_rdev *rdev = conf->mirrors[i].rdev;
2813				ok = rdev_set_badblocks(rdev, sector_nr,
2814							min_bad, 0) && ok;
2816			}
2817		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2818		*skipped = 1;
2819		put_buf(r1_bio);
2820
2821		if (!ok) {
2822			/* Cannot record the badblocks, so need to
2823			 * abort the resync.
2824			 * If there are multiple read targets, could just
2825			 * fail the really bad ones ???
2826			 */
2827			conf->recovery_disabled = mddev->recovery_disabled;
2828			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2829			return 0;
2830		} else
2831			return min_bad;
2832
2833	}
2834	if (min_bad > 0 && min_bad < good_sectors) {
2835		/* only resync enough to reach the next bad->good
2836		 * transition */
2837		good_sectors = min_bad;
2838	}
2839
2840	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2841		/* extra read targets are also write targets */
2842		write_targets += read_targets-1;
2843
2844	if (write_targets == 0 || read_targets == 0) {
2845		/* There is nowhere to write, so all non-sync
2846		 * drives must have failed - so we are finished.
2847		 */
2848		sector_t rv;
2849		if (min_bad > 0)
2850			max_sector = sector_nr + min_bad;
2851		rv = max_sector - sector_nr;
2852		*skipped = 1;
2853		put_buf(r1_bio);
2854		return rv;
2855	}
2856
2857	if (max_sector > mddev->resync_max)
2858		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2859	if (max_sector > sector_nr + good_sectors)
2860		max_sector = sector_nr + good_sectors;
2861	nr_sectors = 0;
2862	sync_blocks = 0;
2863	do {
2864		struct page *page;
2865		int len = PAGE_SIZE;
2866		if (sector_nr + (len>>9) > max_sector)
2867			len = (max_sector - sector_nr) << 9;
2868		if (len == 0)
2869			break;
2870		if (sync_blocks == 0) {
2871			if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2872						  &sync_blocks, still_degraded) &&
2873			    !conf->fullsync &&
2874			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2875				break;
2876			if ((len >> 9) > sync_blocks)
2877				len = sync_blocks<<9;
2878		}
2879
2880		for (i = 0 ; i < conf->raid_disks * 2; i++) {
2881			struct resync_pages *rp;
2882
2883			bio = r1_bio->bios[i];
2884			rp = get_resync_pages(bio);
2885			if (bio->bi_end_io) {
2886				page = resync_fetch_page(rp, page_idx);
2887
2888				/*
2889				 * won't fail because the vec table is big
2890				 * enough to hold all these pages
2891				 */
2892				bio_add_page(bio, page, len, 0);
2893			}
2894		}
2895		nr_sectors += len>>9;
2896		sector_nr += len>>9;
2897		sync_blocks -= (len>>9);
2898	} while (++page_idx < RESYNC_PAGES);
2899
2900	r1_bio->sectors = nr_sectors;
2901
2902	if (mddev_is_clustered(mddev) &&
2903			conf->cluster_sync_high < sector_nr + nr_sectors) {
2904		conf->cluster_sync_low = mddev->curr_resync_completed;
2905		conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2906		/* Send resync message */
2907		md_cluster_ops->resync_info_update(mddev,
2908				conf->cluster_sync_low,
2909				conf->cluster_sync_high);
2910	}
2911
2912	/* For a user-requested sync, we read all readable devices and do a
2913	 * compare
2914	 */
2915	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2916		atomic_set(&r1_bio->remaining, read_targets);
2917		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2918			bio = r1_bio->bios[i];
2919			if (bio->bi_end_io == end_sync_read) {
2920				read_targets--;
2921				md_sync_acct_bio(bio, nr_sectors);
2922				if (read_targets == 1)
2923					bio->bi_opf &= ~MD_FAILFAST;
2924				submit_bio_noacct(bio);
2925			}
2926		}
2927	} else {
2928		atomic_set(&r1_bio->remaining, 1);
2929		bio = r1_bio->bios[r1_bio->read_disk];
2930		md_sync_acct_bio(bio, nr_sectors);
2931		if (read_targets == 1)
2932			bio->bi_opf &= ~MD_FAILFAST;
2933		submit_bio_noacct(bio);
2934	}
2935	return nr_sectors;
2936}
2937
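/*
 * RAID1 does not stripe: the array size is simply the (common) number
 * of data sectors on a single member device.
 */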
2938static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2939{
2940	if (sectors)
2941		return sectors;
2942
2943	return mddev->dev_sectors;
2944}
2945
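/*
 * Allocate and initialise the per-array r1conf: the barrier-bucket
 * counters, the mirrors array (with room for replacements), the r1bio
 * mempool and the raid1d thread.  Returns an ERR_PTR() on failure.
 */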
2946static struct r1conf *setup_conf(struct mddev *mddev)
2947{
2948	struct r1conf *conf;
2949	int i;
2950	struct raid1_info *disk;
2951	struct md_rdev *rdev;
2952	int err = -ENOMEM;
2953
2954	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2955	if (!conf)
2956		goto abort;
2957
2958	conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2959				   sizeof(atomic_t), GFP_KERNEL);
2960	if (!conf->nr_pending)
2961		goto abort;
2962
2963	conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2964				   sizeof(atomic_t), GFP_KERNEL);
2965	if (!conf->nr_waiting)
2966		goto abort;
2967
2968	conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2969				  sizeof(atomic_t), GFP_KERNEL);
2970	if (!conf->nr_queued)
2971		goto abort;
2972
2973	conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2974				sizeof(atomic_t), GFP_KERNEL);
2975	if (!conf->barrier)
2976		goto abort;
2977
2978	conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2979					    mddev->raid_disks, 2),
2980				GFP_KERNEL);
2981	if (!conf->mirrors)
2982		goto abort;
2983
2984	conf->tmppage = alloc_page(GFP_KERNEL);
2985	if (!conf->tmppage)
2986		goto abort;
2987
2988	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2989	if (!conf->poolinfo)
2990		goto abort;
2991	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2992	err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2993			   rbio_pool_free, conf->poolinfo);
2994	if (err)
2995		goto abort;
2996
2997	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2998	if (err)
2999		goto abort;
3000
3001	conf->poolinfo->mddev = mddev;
3002
3003	err = -EINVAL;
3004	spin_lock_init(&conf->device_lock);
3005	rdev_for_each(rdev, mddev) {
3006		int disk_idx = rdev->raid_disk;
3007		if (disk_idx >= mddev->raid_disks
3008		    || disk_idx < 0)
3009			continue;
3010		if (test_bit(Replacement, &rdev->flags))
3011			disk = conf->mirrors + mddev->raid_disks + disk_idx;
3012		else
3013			disk = conf->mirrors + disk_idx;
3014
3015		if (disk->rdev)
3016			goto abort;
3017		disk->rdev = rdev;
3018		disk->head_position = 0;
3019		disk->seq_start = MaxSector;
3020	}
3021	conf->raid_disks = mddev->raid_disks;
3022	conf->mddev = mddev;
3023	INIT_LIST_HEAD(&conf->retry_list);
3024	INIT_LIST_HEAD(&conf->bio_end_io_list);
3025
3026	spin_lock_init(&conf->resync_lock);
3027	init_waitqueue_head(&conf->wait_barrier);
3028
3029	bio_list_init(&conf->pending_bio_list);
3030	conf->pending_count = 0;
3031	conf->recovery_disabled = mddev->recovery_disabled - 1;
3032
3033	err = -EIO;
3034	for (i = 0; i < conf->raid_disks * 2; i++) {
3035
3036		disk = conf->mirrors + i;
3037
3038		if (i < conf->raid_disks &&
3039		    disk[conf->raid_disks].rdev) {
3040			/* This slot has a replacement. */
3041			if (!disk->rdev) {
3042				/* No original, just make the replacement
3043				 * a recovering spare
3044				 */
3045				disk->rdev =
3046					disk[conf->raid_disks].rdev;
3047				disk[conf->raid_disks].rdev = NULL;
3048			} else if (!test_bit(In_sync, &disk->rdev->flags))
3049				/* Original is not in_sync - bad */
3050				goto abort;
3051		}
3052
3053		if (!disk->rdev ||
3054		    !test_bit(In_sync, &disk->rdev->flags)) {
3055			disk->head_position = 0;
3056			if (disk->rdev &&
3057			    (disk->rdev->saved_raid_disk < 0))
3058				conf->fullsync = 1;
3059		}
3060	}
3061
3062	err = -ENOMEM;
3063	conf->thread = md_register_thread(raid1d, mddev, "raid1");
3064	if (!conf->thread)
3065		goto abort;
3066
3067	return conf;
3068
3069 abort:
3070	if (conf) {
3071		mempool_exit(&conf->r1bio_pool);
3072		kfree(conf->mirrors);
3073		safe_put_page(conf->tmppage);
3074		kfree(conf->poolinfo);
3075		kfree(conf->nr_pending);
3076		kfree(conf->nr_waiting);
3077		kfree(conf->nr_queued);
3078		kfree(conf->barrier);
3079		bioset_exit(&conf->bio_split);
3080		kfree(conf);
3081	}
3082	return ERR_PTR(err);
3083}
3084
3085static void raid1_free(struct mddev *mddev, void *priv);
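/*
 * Assemble and start a RAID1 array: build (or reuse) the r1conf, stack
 * the member devices' queue limits, count degraded mirrors, and hand
 * the raid1d thread over to the MD core.
 */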
3086static int raid1_run(struct mddev *mddev)
3087{
3088	struct r1conf *conf;
3089	int i;
3090	struct md_rdev *rdev;
3091	int ret;
3092	bool discard_supported = false;
3093
3094	if (mddev->level != 1) {
3095		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3096			mdname(mddev), mddev->level);
3097		return -EIO;
3098	}
3099	if (mddev->reshape_position != MaxSector) {
3100		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3101			mdname(mddev));
3102		return -EIO;
3103	}
3104	if (mddev_init_writes_pending(mddev) < 0)
3105		return -ENOMEM;
3106	/*
3107	 * copy the already verified devices into our private RAID1
3108	 * bookkeeping area. [whatever we allocate in run(),
3109	 * should be freed in raid1_free()]
3110	 */
3111	if (mddev->private == NULL)
3112		conf = setup_conf(mddev);
3113	else
3114		conf = mddev->private;
3115
3116	if (IS_ERR(conf))
3117		return PTR_ERR(conf);
3118
3119	if (mddev->queue) {
3120		blk_queue_max_write_same_sectors(mddev->queue, 0);
3121		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3122	}
3123
3124	rdev_for_each(rdev, mddev) {
3125		if (!mddev->gendisk)
3126			continue;
3127		disk_stack_limits(mddev->gendisk, rdev->bdev,
3128				  rdev->data_offset << 9);
3129		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3130			discard_supported = true;
3131	}
3132
3133	mddev->degraded = 0;
3134	for (i = 0; i < conf->raid_disks; i++)
3135		if (conf->mirrors[i].rdev == NULL ||
3136		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3137		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3138			mddev->degraded++;
3139	/*
3140	 * RAID1 needs at least one active disk.
3141	 */
3142	if (conf->raid_disks - mddev->degraded < 1) {
3143		ret = -EINVAL;
3144		goto abort;
3145	}
3146
3147	if (conf->raid_disks - mddev->degraded == 1)
3148		mddev->recovery_cp = MaxSector;
3149
3150	if (mddev->recovery_cp != MaxSector)
3151		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3152			mdname(mddev));
3153	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3154		mdname(mddev), mddev->raid_disks - mddev->degraded,
3155		mddev->raid_disks);
3156
3157	/*
3158	 * Ok, everything is just fine now
3159	 */
3160	mddev->thread = conf->thread;
3161	conf->thread = NULL;
3162	mddev->private = conf;
3163	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3164
3165	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3166
3167	if (mddev->queue) {
3168		if (discard_supported)
3169			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3170						mddev->queue);
3171		else
3172			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3173						  mddev->queue);
3174	}
3175
3176	ret = md_integrity_register(mddev);
3177	if (ret) {
3178		md_unregister_thread(&mddev->thread);
3179		goto abort;
3180	}
3181	return 0;
3182
3183abort:
3184	raid1_free(mddev, conf);
3185	return ret;
3186}
3187
3188static void raid1_free(struct mddev *mddev, void *priv)
3189{
3190	struct r1conf *conf = priv;
3191
3192	mempool_exit(&conf->r1bio_pool);
3193	kfree(conf->mirrors);
3194	safe_put_page(conf->tmppage);
3195	kfree(conf->poolinfo);
3196	kfree(conf->nr_pending);
3197	kfree(conf->nr_waiting);
3198	kfree(conf->nr_queued);
3199	kfree(conf->barrier);
3200	bioset_exit(&conf->bio_split);
3201	kfree(conf);
3202}
3203
3204static int raid1_resize(struct mddev *mddev, sector_t sectors)
3205{
3206	/* no resync is happening, and there is enough space
3207	 * on all devices, so we can resize.
3208	 * We need to make sure resync covers any new space.
3209	 * If the array is shrinking we should possibly wait until
3210	 * any io in the removed space completes, but it hardly seems
3211	 * worth it.
3212	 */
3213	sector_t newsize = raid1_size(mddev, sectors, 0);
3214	if (mddev->external_size &&
3215	    mddev->array_sectors > newsize)
3216		return -EINVAL;
3217	if (mddev->bitmap) {
3218		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3219		if (ret)
3220			return ret;
3221	}
3222	md_set_array_sectors(mddev, newsize);
3223	if (sectors > mddev->dev_sectors &&
3224	    mddev->recovery_cp > mddev->dev_sectors) {
3225		mddev->recovery_cp = mddev->dev_sectors;
3226		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3227	}
3228	mddev->dev_sectors = sectors;
3229	mddev->resync_max_sectors = sectors;
3230	return 0;
3231}
3232
3233static int raid1_reshape(struct mddev *mddev)
3234{
3235	/* We need to:
3236	 * 1/ resize the r1bio_pool
3237	 * 2/ resize conf->mirrors
3238	 *
3239	 * We allocate a new r1bio_pool if we can.
3240	 * Then raise a device barrier and wait until all IO stops.
3241	 * Then resize conf->mirrors and swap in the new r1bio pool.
3242	 *
3243	 * At the same time, we "pack" the devices so that all the missing
3244	 * devices have the higher raid_disk numbers.
3245	 */
3246	mempool_t newpool, oldpool;
3247	struct pool_info *newpoolinfo;
3248	struct raid1_info *newmirrors;
3249	struct r1conf *conf = mddev->private;
3250	int cnt, raid_disks;
3251	unsigned long flags;
3252	int d, d2;
3253	int ret;
3254
3255	memset(&newpool, 0, sizeof(newpool));
3256	memset(&oldpool, 0, sizeof(oldpool));
3257
3258	/* Cannot change chunk_size, layout, or level */
3259	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3260	    mddev->layout != mddev->new_layout ||
3261	    mddev->level != mddev->new_level) {
3262		mddev->new_chunk_sectors = mddev->chunk_sectors;
3263		mddev->new_layout = mddev->layout;
3264		mddev->new_level = mddev->level;
3265		return -EINVAL;
3266	}
3267
3268	if (!mddev_is_clustered(mddev))
3269		md_allow_write(mddev);
3270
3271	raid_disks = mddev->raid_disks + mddev->delta_disks;
3272
3273	if (raid_disks < conf->raid_disks) {
3274		cnt = 0;
3275		for (d = 0; d < conf->raid_disks; d++)
3276			if (conf->mirrors[d].rdev)
3277				cnt++;
3278		if (cnt > raid_disks)
3279			return -EBUSY;
3280	}
3281
3282	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3283	if (!newpoolinfo)
3284		return -ENOMEM;
3285	newpoolinfo->mddev = mddev;
3286	newpoolinfo->raid_disks = raid_disks * 2;
3287
3288	ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3289			   rbio_pool_free, newpoolinfo);
3290	if (ret) {
3291		kfree(newpoolinfo);
3292		return ret;
3293	}
3294	newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3295					 raid_disks, 2),
3296			     GFP_KERNEL);
3297	if (!newmirrors) {
3298		kfree(newpoolinfo);
3299		mempool_exit(&newpool);
3300		return -ENOMEM;
3301	}
3302
3303	freeze_array(conf, 0);
3304
3305	/* ok, everything is stopped */
3306	oldpool = conf->r1bio_pool;
3307	conf->r1bio_pool = newpool;
3308
3309	for (d = d2 = 0; d < conf->raid_disks; d++) {
3310		struct md_rdev *rdev = conf->mirrors[d].rdev;
3311		if (rdev && rdev->raid_disk != d2) {
3312			sysfs_unlink_rdev(mddev, rdev);
3313			rdev->raid_disk = d2;
3314			sysfs_unlink_rdev(mddev, rdev);
3315			if (sysfs_link_rdev(mddev, rdev))
3316				pr_warn("md/raid1:%s: cannot register rd%d\n",
3317					mdname(mddev), rdev->raid_disk);
3318		}
3319		if (rdev)
3320			newmirrors[d2++].rdev = rdev;
3321	}
3322	kfree(conf->mirrors);
3323	conf->mirrors = newmirrors;
3324	kfree(conf->poolinfo);
3325	conf->poolinfo = newpoolinfo;
3326
3327	spin_lock_irqsave(&conf->device_lock, flags);
3328	mddev->degraded += (raid_disks - conf->raid_disks);
3329	spin_unlock_irqrestore(&conf->device_lock, flags);
3330	conf->raid_disks = mddev->raid_disks = raid_disks;
3331	mddev->delta_disks = 0;
3332
3333	unfreeze_array(conf);
3334
3335	set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3336	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3337	md_wakeup_thread(mddev->thread);
3338
3339	mempool_exit(&oldpool);
3340	return 0;
3341}
3342
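/*
 * Quiesce maps directly onto the barrier machinery: freezing waits out
 * in-flight IO and blocks new regular IO until the array is unfrozen.
 */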
3343static void raid1_quiesce(struct mddev *mddev, int quiesce)
3344{
3345	struct r1conf *conf = mddev->private;
3346
3347	if (quiesce)
3348		freeze_array(conf, 0);
3349	else
3350		unfreeze_array(conf);
3351}
3352
3353static void *raid1_takeover(struct mddev *mddev)
3354{
3355	/* raid1 can take over:
3356	 *  raid5 with 2 devices, any layout or chunk size
3357	 */
3358	if (mddev->level == 5 && mddev->raid_disks == 2) {
3359		struct r1conf *conf;
3360		mddev->new_level = 1;
3361		mddev->new_layout = 0;
3362		mddev->new_chunk_sectors = 0;
3363		conf = setup_conf(mddev);
3364		if (!IS_ERR(conf)) {
3365			/* Array must appear to be quiesced */
3366			conf->array_frozen = 1;
3367			mddev_clear_unsupported_flags(mddev,
3368				UNSUPPORTED_MDDEV_FLAGS);
3369		}
3370		return conf;
3371	}
3372	return ERR_PTR(-EINVAL);
3373}
3374
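/* Personality operations table registered with the MD core. */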
3375static struct md_personality raid1_personality =
3376{
3377	.name		= "raid1",
3378	.level		= 1,
3379	.owner		= THIS_MODULE,
3380	.make_request	= raid1_make_request,
3381	.run		= raid1_run,
3382	.free		= raid1_free,
3383	.status		= raid1_status,
3384	.error_handler	= raid1_error,
3385	.hot_add_disk	= raid1_add_disk,
3386	.hot_remove_disk= raid1_remove_disk,
3387	.spare_active	= raid1_spare_active,
3388	.sync_request	= raid1_sync_request,
3389	.resize		= raid1_resize,
3390	.size		= raid1_size,
3391	.check_reshape	= raid1_reshape,
3392	.quiesce	= raid1_quiesce,
3393	.takeover	= raid1_takeover,
3394};
3395
3396static int __init raid_init(void)
3397{
3398	return register_md_personality(&raid1_personality);
3399}
3400
3401static void raid_exit(void)
3402{
3403	unregister_md_personality(&raid1_personality);
3404}
3405
3406module_init(raid_init);
3407module_exit(raid_exit);
3408MODULE_LICENSE("GPL");
3409MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3410MODULE_ALIAS("md-personality-3"); /* RAID1 */
3411MODULE_ALIAS("md-raid1");
3412MODULE_ALIAS("md-level-1");
3413
3414module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);