   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * raid10.c : Multiple Devices driver for Linux
   4 *
   5 * Copyright (C) 2000-2004 Neil Brown
   6 *
   7 * RAID-10 support for md.
   8 *
   9 * Based on code in raid1.c.  See raid1.c for further copyright information.
  10 */
  11
  12#include <linux/slab.h>
  13#include <linux/delay.h>
  14#include <linux/blkdev.h>
  15#include <linux/module.h>
  16#include <linux/seq_file.h>
  17#include <linux/ratelimit.h>
  18#include <linux/kthread.h>
  19#include <linux/raid/md_p.h>
  20#include <trace/events/block.h>
  21#include "md.h"
  22
  23#define RAID_1_10_NAME "raid10"
  24#include "raid10.h"
  25#include "raid0.h"
  26#include "md-bitmap.h"
  27
  28/*
  29 * RAID10 provides a combination of RAID0 and RAID1 functionality.
  30 * The layout of data is defined by
  31 *    chunk_size
  32 *    raid_disks
  33 *    near_copies (stored in low byte of layout)
  34 *    far_copies (stored in second byte of layout)
  35 *    far_offset (stored in bit 16 of layout )
  36 *    use_far_sets (stored in bit 17 of layout )
  37 *    use_far_sets_bugfixed (stored in bit 18 of layout )
  38 *
  39 * The data to be stored is divided into chunks using chunksize.  Each device
  40 * is divided into far_copies sections.   In each section, chunks are laid out
  41 * in a style similar to raid0, but near_copies copies of each chunk are stored
  42 * (each on a different drive).  The starting device for each section is offset
  43 * near_copies from the starting device of the previous section.  Thus there
  44 * are (near_copies * far_copies) of each chunk, and each is on a different
  45 * drive.  near_copies and far_copies must be at least one, and their product
  46 * is at most raid_disks.
  47 *
  48 * If far_offset is true, then the far_copies are handled a bit differently.
  49 * The copies are still in different stripes, but instead of being very far
  50 * apart on disk, they are in adjacent stripes.
  51 *
  52 * The far and offset algorithms are handled slightly differently if
  53 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
  54 * sets that are (near_copies * far_copies) in size.  The far copied stripes
  55 * are still shifted by 'near_copies' devices, but this shifting stays confined
  56 * to the set rather than the entire array.  This is done to improve the number
  57 * of device combinations that can fail without causing the array to fail.
  58 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
  59 * on a device):
  60 *    A B C D    A B C D E
  61 *      ...         ...
  62 *    D A B C    E A B C D
  63 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
  64 *    [A B] [C D]    [A B] [C D E]
  65 *    |...| |...|    |...| | ... |
  66 *    [B A] [D C]    [B A] [E C D]
  67 */
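/*
 * Illustrative sketch (not part of the driver): the placement rule described
 * above, reduced to the simple case of far_offset == 0 and no far sets, for
 * an assumed geometry of 4 disks with near_copies == 2 and far_copies == 2.
 * Kept under #if 0 since it exists only to make the comment concrete.
 */
#if 0
static void raid10_layout_example(void)
{
	const int raid_disks = 4, near_copies = 2, far_copies = 2;
	int chunk, n, f;

	for (chunk = 0; chunk < 4; chunk++) {
		/* raid0-style position of the first copy of this chunk */
		int dev = (chunk * near_copies) % raid_disks;
		int stripe = (chunk * near_copies) / raid_disks;

		for (f = 0; f < far_copies; f++)
			for (n = 0; n < near_copies; n++)
				pr_info("chunk %d copy %d.%d -> disk %d, section %d, stripe %d\n",
					chunk, n, f,
					(dev + n + f * near_copies) % raid_disks,
					f, stripe);
	}
}
#endif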
  68
  69static void allow_barrier(struct r10conf *conf);
  70static void lower_barrier(struct r10conf *conf);
  71static int _enough(struct r10conf *conf, int previous, int ignore);
  72static int enough(struct r10conf *conf, int ignore);
  73static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
  74				int *skipped);
  75static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
  76static void end_reshape_write(struct bio *bio);
  77static void end_reshape(struct r10conf *conf);
  78
  79#define raid10_log(md, fmt, args...)				\
  80	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
  81
  82#include "raid1-10.c"
  83
  84#define NULL_CMD
  85#define cmd_before(conf, cmd) \
  86	do { \
  87		write_sequnlock_irq(&(conf)->resync_lock); \
  88		cmd; \
  89	} while (0)
  90#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
  91
  92#define wait_event_barrier_cmd(conf, cond, cmd) \
  93	wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
  94		       cmd_after(conf))
  95
  96#define wait_event_barrier(conf, cond) \
  97	wait_event_barrier_cmd(conf, cond, NULL_CMD)
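/*
 * Rough expansion sketch (illustrative): wait_event_barrier(conf, cond)
 * behaves approximately like
 *
 *	while (!cond) {
 *		write_sequnlock_irq(&conf->resync_lock);
 *		<sleep on conf->wait_barrier>;
 *		write_seqlock_irq(&conf->resync_lock);
 *	}
 *
 * i.e. the caller holds resync_lock on entry and exit but drops it while
 * sleeping, and cmd (if any) runs each time just after the unlock.
 */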
  98
  99/*
 100 * for resync bio, r10bio pointer can be retrieved from the per-bio
 101 * 'struct resync_pages'.
 102 */
 103static inline struct r10bio *get_resync_r10bio(struct bio *bio)
 104{
 105	return get_resync_pages(bio)->raid_bio;
 106}
 107
 108static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 109{
 110	struct r10conf *conf = data;
 111	int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
 112
 113	/* allocate a r10bio with room for raid_disks entries in the
 114	 * bios array */
 115	return kzalloc(size, gfp_flags);
 116}
 117
 118#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 119/* amount of memory to reserve for resync requests */
 120#define RESYNC_WINDOW (1024*1024)
 121/* maximum number of concurrent requests, memory permitting */
 122#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
 123#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
 124#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
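/*
 * Worked numbers (assuming the 64KiB RESYNC_BLOCK_SIZE defined in
 * raid1-10.c): RESYNC_SECTORS is 128, RESYNC_DEPTH works out to
 * 32MiB / 64KiB = 512 concurrent resync requests, and
 * CLUSTER_RESYNC_WINDOW_SECTORS to 32MiB >> 9 = 65536 sectors.
 */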
 125
 126/*
 127 * When performing a resync, we need to read and compare, so
  128 * we need as many pages as there are copies.
 129 * When performing a recovery, we need 2 bios, one for read,
 130 * one for write (we recover only one drive per r10buf)
 131 *
 132 */
 133static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 134{
 135	struct r10conf *conf = data;
 136	struct r10bio *r10_bio;
 137	struct bio *bio;
 138	int j;
 139	int nalloc, nalloc_rp;
 140	struct resync_pages *rps;
 141
 142	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 143	if (!r10_bio)
 144		return NULL;
 145
 146	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
 147	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
 148		nalloc = conf->copies; /* resync */
 149	else
 150		nalloc = 2; /* recovery */
 151
 152	/* allocate once for all bios */
 153	if (!conf->have_replacement)
 154		nalloc_rp = nalloc;
 155	else
 156		nalloc_rp = nalloc * 2;
 157	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
 158	if (!rps)
 159		goto out_free_r10bio;
 160
 161	/*
 162	 * Allocate bios.
 163	 */
 164	for (j = nalloc ; j-- ; ) {
 165		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
 166		if (!bio)
 167			goto out_free_bio;
 168		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
 169		r10_bio->devs[j].bio = bio;
 170		if (!conf->have_replacement)
 171			continue;
 172		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
 173		if (!bio)
 174			goto out_free_bio;
 175		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
 176		r10_bio->devs[j].repl_bio = bio;
 177	}
 178	/*
 179	 * Allocate RESYNC_PAGES data pages and attach them
 180	 * where needed.
 181	 */
 182	for (j = 0; j < nalloc; j++) {
 183		struct bio *rbio = r10_bio->devs[j].repl_bio;
 184		struct resync_pages *rp, *rp_repl;
 185
 186		rp = &rps[j];
 187		if (rbio)
 188			rp_repl = &rps[nalloc + j];
 189
 190		bio = r10_bio->devs[j].bio;
 191
 192		if (!j || test_bit(MD_RECOVERY_SYNC,
 193				   &conf->mddev->recovery)) {
 194			if (resync_alloc_pages(rp, gfp_flags))
 195				goto out_free_pages;
 196		} else {
 197			memcpy(rp, &rps[0], sizeof(*rp));
 198			resync_get_all_pages(rp);
 199		}
 200
 201		rp->raid_bio = r10_bio;
 202		bio->bi_private = rp;
 203		if (rbio) {
 204			memcpy(rp_repl, rp, sizeof(*rp));
 205			rbio->bi_private = rp_repl;
 206		}
 207	}
 208
 209	return r10_bio;
 210
 211out_free_pages:
 212	while (--j >= 0)
 213		resync_free_pages(&rps[j]);
 214
 215	j = 0;
 216out_free_bio:
 217	for ( ; j < nalloc; j++) {
 218		if (r10_bio->devs[j].bio)
 219			bio_uninit(r10_bio->devs[j].bio);
 220		kfree(r10_bio->devs[j].bio);
 221		if (r10_bio->devs[j].repl_bio)
 222			bio_uninit(r10_bio->devs[j].repl_bio);
 223		kfree(r10_bio->devs[j].repl_bio);
 224	}
 225	kfree(rps);
 226out_free_r10bio:
 227	rbio_pool_free(r10_bio, conf);
 228	return NULL;
 229}
 230
 231static void r10buf_pool_free(void *__r10_bio, void *data)
 232{
 233	struct r10conf *conf = data;
 234	struct r10bio *r10bio = __r10_bio;
 235	int j;
 236	struct resync_pages *rp = NULL;
 237
 238	for (j = conf->copies; j--; ) {
 239		struct bio *bio = r10bio->devs[j].bio;
 240
 241		if (bio) {
 242			rp = get_resync_pages(bio);
 243			resync_free_pages(rp);
 244			bio_uninit(bio);
 245			kfree(bio);
 246		}
 247
 248		bio = r10bio->devs[j].repl_bio;
 249		if (bio) {
 250			bio_uninit(bio);
 251			kfree(bio);
 252		}
 253	}
 254
 255	/* resync pages array stored in the 1st bio's .bi_private */
 256	kfree(rp);
 257
 258	rbio_pool_free(r10bio, conf);
 259}
 260
 261static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 262{
 263	int i;
 264
 265	for (i = 0; i < conf->geo.raid_disks; i++) {
 266		struct bio **bio = & r10_bio->devs[i].bio;
 267		if (!BIO_SPECIAL(*bio))
 268			bio_put(*bio);
 269		*bio = NULL;
 270		bio = &r10_bio->devs[i].repl_bio;
 271		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
 272			bio_put(*bio);
 273		*bio = NULL;
 274	}
 275}
 276
 277static void free_r10bio(struct r10bio *r10_bio)
 278{
 279	struct r10conf *conf = r10_bio->mddev->private;
 280
 281	put_all_bios(conf, r10_bio);
 282	mempool_free(r10_bio, &conf->r10bio_pool);
 283}
 284
 285static void put_buf(struct r10bio *r10_bio)
 286{
 287	struct r10conf *conf = r10_bio->mddev->private;
 288
 289	mempool_free(r10_bio, &conf->r10buf_pool);
 290
 291	lower_barrier(conf);
 292}
 293
 294static void wake_up_barrier(struct r10conf *conf)
 295{
 296	if (wq_has_sleeper(&conf->wait_barrier))
 297		wake_up(&conf->wait_barrier);
 298}
 299
 300static void reschedule_retry(struct r10bio *r10_bio)
 301{
 302	unsigned long flags;
 303	struct mddev *mddev = r10_bio->mddev;
 304	struct r10conf *conf = mddev->private;
 305
 306	spin_lock_irqsave(&conf->device_lock, flags);
 307	list_add(&r10_bio->retry_list, &conf->retry_list);
 308	conf->nr_queued ++;
 309	spin_unlock_irqrestore(&conf->device_lock, flags);
 310
 311	/* wake up frozen array... */
 312	wake_up(&conf->wait_barrier);
 313
 314	md_wakeup_thread(mddev->thread);
 315}
 316
 317/*
 318 * raid_end_bio_io() is called when we have finished servicing a mirrored
 319 * operation and are ready to return a success/failure code to the buffer
 320 * cache layer.
 321 */
 322static void raid_end_bio_io(struct r10bio *r10_bio)
 323{
 324	struct bio *bio = r10_bio->master_bio;
 325	struct r10conf *conf = r10_bio->mddev->private;
 326
 327	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 328		bio->bi_status = BLK_STS_IOERR;
 329
 330	bio_endio(bio);
 331	/*
 332	 * Wake up any possible resync thread that waits for the device
 333	 * to go idle.
 334	 */
 335	allow_barrier(conf);
 336
 337	free_r10bio(r10_bio);
 338}
 339
 340/*
 341 * Update disk head position estimator based on IRQ completion info.
 342 */
 343static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 344{
 345	struct r10conf *conf = r10_bio->mddev->private;
 346
 347	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 348		r10_bio->devs[slot].addr + (r10_bio->sectors);
 349}
 350
 351/*
 352 * Find the disk number which triggered given bio
 353 */
 354static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 355			 struct bio *bio, int *slotp, int *replp)
 356{
 357	int slot;
 358	int repl = 0;
 359
 360	for (slot = 0; slot < conf->geo.raid_disks; slot++) {
 361		if (r10_bio->devs[slot].bio == bio)
 362			break;
 363		if (r10_bio->devs[slot].repl_bio == bio) {
 364			repl = 1;
 365			break;
 366		}
 367	}
 368
 369	update_head_pos(slot, r10_bio);
 370
 371	if (slotp)
 372		*slotp = slot;
 373	if (replp)
 374		*replp = repl;
 375	return r10_bio->devs[slot].devnum;
 376}
 377
 378static void raid10_end_read_request(struct bio *bio)
 379{
 380	int uptodate = !bio->bi_status;
 381	struct r10bio *r10_bio = bio->bi_private;
 382	int slot;
 383	struct md_rdev *rdev;
 384	struct r10conf *conf = r10_bio->mddev->private;
 385
 386	slot = r10_bio->read_slot;
 387	rdev = r10_bio->devs[slot].rdev;
 388	/*
 389	 * this branch is our 'one mirror IO has finished' event handler:
 390	 */
 391	update_head_pos(slot, r10_bio);
 392
 393	if (uptodate) {
 394		/*
 395		 * Set R10BIO_Uptodate in our master bio, so that
 396		 * we will return a good error code to the higher
 397		 * levels even if IO on some other mirrored buffer fails.
 398		 *
 399		 * The 'master' represents the composite IO operation to
 400		 * user-side. So if something waits for IO, then it will
 401		 * wait for the 'master' bio.
 402		 */
 403		set_bit(R10BIO_Uptodate, &r10_bio->state);
 404	} else {
 405		/* If all other devices that store this block have
 406		 * failed, we want to return the error upwards rather
 407		 * than fail the last device.  Here we redefine
 408		 * "uptodate" to mean "Don't want to retry"
 409		 */
 410		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
 411			     rdev->raid_disk))
 412			uptodate = 1;
 413	}
 414	if (uptodate) {
 415		raid_end_bio_io(r10_bio);
 416		rdev_dec_pending(rdev, conf->mddev);
 417	} else {
 418		/*
 419		 * oops, read error - keep the refcount on the rdev
 420		 */
 421		pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
 422				   mdname(conf->mddev),
 423				   rdev->bdev,
 424				   (unsigned long long)r10_bio->sector);
 425		set_bit(R10BIO_ReadError, &r10_bio->state);
 426		reschedule_retry(r10_bio);
 427	}
 428}
 429
 430static void close_write(struct r10bio *r10_bio)
 431{
 432	/* clear the bitmap if all writes complete successfully */
 433	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
 434			   r10_bio->sectors,
 435			   !test_bit(R10BIO_Degraded, &r10_bio->state),
 436			   0);
 437	md_write_end(r10_bio->mddev);
 438}
 439
 440static void one_write_done(struct r10bio *r10_bio)
 441{
 442	if (atomic_dec_and_test(&r10_bio->remaining)) {
 443		if (test_bit(R10BIO_WriteError, &r10_bio->state))
 444			reschedule_retry(r10_bio);
 445		else {
 446			close_write(r10_bio);
 447			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
 448				reschedule_retry(r10_bio);
 449			else
 450				raid_end_bio_io(r10_bio);
 451		}
 452	}
 453}
 454
 455static void raid10_end_write_request(struct bio *bio)
 456{
 457	struct r10bio *r10_bio = bio->bi_private;
 458	int dev;
 459	int dec_rdev = 1;
 460	struct r10conf *conf = r10_bio->mddev->private;
 461	int slot, repl;
 462	struct md_rdev *rdev = NULL;
 463	struct bio *to_put = NULL;
 464	bool discard_error;
 465
 466	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 467
 468	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 469
 470	if (repl)
 471		rdev = conf->mirrors[dev].replacement;
 472	if (!rdev) {
 473		smp_rmb();
 474		repl = 0;
 475		rdev = conf->mirrors[dev].rdev;
 476	}
 477	/*
 478	 * this branch is our 'one mirror IO has finished' event handler:
 479	 */
 480	if (bio->bi_status && !discard_error) {
 481		if (repl)
 482			/* Never record new bad blocks to replacement,
 483			 * just fail it.
 484			 */
 485			md_error(rdev->mddev, rdev);
 486		else {
 487			set_bit(WriteErrorSeen,	&rdev->flags);
 488			if (!test_and_set_bit(WantReplacement, &rdev->flags))
 489				set_bit(MD_RECOVERY_NEEDED,
 490					&rdev->mddev->recovery);
 491
 492			dec_rdev = 0;
 493			if (test_bit(FailFast, &rdev->flags) &&
 494			    (bio->bi_opf & MD_FAILFAST)) {
 495				md_error(rdev->mddev, rdev);
 496			}
 497
 498			/*
 499			 * When the device is faulty, it is not necessary to
 500			 * handle write error.
 501			 */
 502			if (!test_bit(Faulty, &rdev->flags))
 503				set_bit(R10BIO_WriteError, &r10_bio->state);
 504			else {
 505				/* Fail the request */
 506				set_bit(R10BIO_Degraded, &r10_bio->state);
 507				r10_bio->devs[slot].bio = NULL;
 508				to_put = bio;
 509				dec_rdev = 1;
 510			}
 511		}
 512	} else {
 513		/*
 514		 * Set R10BIO_Uptodate in our master bio, so that
  515		 * we will return a good error code to the higher
 516		 * levels even if IO on some other mirrored buffer fails.
 517		 *
 518		 * The 'master' represents the composite IO operation to
 519		 * user-side. So if something waits for IO, then it will
 520		 * wait for the 'master' bio.
 521		 */
 522		sector_t first_bad;
 523		int bad_sectors;
 524
 525		/*
 526		 * Do not set R10BIO_Uptodate if the current device is
 527		 * rebuilding or Faulty. This is because we cannot use
 528		 * such device for properly reading the data back (we could
  529		 * potentially use it, if the current write would have fallen
  530		 * before rdev->recovery_offset, but for simplicity we don't
  531		 * check this here.)
 532		 */
 533		if (test_bit(In_sync, &rdev->flags) &&
 534		    !test_bit(Faulty, &rdev->flags))
 535			set_bit(R10BIO_Uptodate, &r10_bio->state);
 536
 537		/* Maybe we can clear some bad blocks. */
 538		if (is_badblock(rdev,
 539				r10_bio->devs[slot].addr,
 540				r10_bio->sectors,
 541				&first_bad, &bad_sectors) && !discard_error) {
 542			bio_put(bio);
 543			if (repl)
 544				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
 545			else
 546				r10_bio->devs[slot].bio = IO_MADE_GOOD;
 547			dec_rdev = 0;
 548			set_bit(R10BIO_MadeGood, &r10_bio->state);
 549		}
 550	}
 551
 552	/*
 553	 *
 554	 * Let's see if all mirrored write operations have finished
 555	 * already.
 556	 */
 557	one_write_done(r10_bio);
 558	if (dec_rdev)
 559		rdev_dec_pending(rdev, conf->mddev);
 560	if (to_put)
 561		bio_put(to_put);
 562}
 563
 564/*
 565 * RAID10 layout manager
 566 * As well as the chunksize and raid_disks count, there are two
 567 * parameters: near_copies and far_copies.
 568 * near_copies * far_copies must be <= raid_disks.
 569 * Normally one of these will be 1.
 570 * If both are 1, we get raid0.
 571 * If near_copies == raid_disks, we get raid1.
 572 *
 573 * Chunks are laid out in raid0 style with near_copies copies of the
 574 * first chunk, followed by near_copies copies of the next chunk and
 575 * so on.
 576 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 577 * as described above, we start again with a device offset of near_copies.
 578 * So we effectively have another copy of the whole array further down all
 579 * the drives, but with blocks on different drives.
  580 * With this layout, a block is never stored twice on the same device.
 581 *
 582 * raid10_find_phys finds the sector offset of a given virtual sector
 583 * on each device that it is on.
 584 *
 585 * raid10_find_virt does the reverse mapping, from a device and a
 586 * sector offset to a virtual address
 587 */
 588
 589static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
 590{
 591	int n,f;
 592	sector_t sector;
 593	sector_t chunk;
 594	sector_t stripe;
 595	int dev;
 596	int slot = 0;
 597	int last_far_set_start, last_far_set_size;
 598
 599	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 600	last_far_set_start *= geo->far_set_size;
 601
 602	last_far_set_size = geo->far_set_size;
 603	last_far_set_size += (geo->raid_disks % geo->far_set_size);
 604
 605	/* now calculate first sector/dev */
 606	chunk = r10bio->sector >> geo->chunk_shift;
 607	sector = r10bio->sector & geo->chunk_mask;
 608
 609	chunk *= geo->near_copies;
 610	stripe = chunk;
 611	dev = sector_div(stripe, geo->raid_disks);
 612	if (geo->far_offset)
 613		stripe *= geo->far_copies;
 614
 615	sector += stripe << geo->chunk_shift;
 616
 617	/* and calculate all the others */
 618	for (n = 0; n < geo->near_copies; n++) {
 619		int d = dev;
 620		int set;
 621		sector_t s = sector;
 622		r10bio->devs[slot].devnum = d;
 623		r10bio->devs[slot].addr = s;
 624		slot++;
 625
 626		for (f = 1; f < geo->far_copies; f++) {
 627			set = d / geo->far_set_size;
 628			d += geo->near_copies;
 629
 630			if ((geo->raid_disks % geo->far_set_size) &&
 631			    (d > last_far_set_start)) {
 632				d -= last_far_set_start;
 633				d %= last_far_set_size;
 634				d += last_far_set_start;
 635			} else {
 636				d %= geo->far_set_size;
 637				d += geo->far_set_size * set;
 638			}
 639			s += geo->stride;
 640			r10bio->devs[slot].devnum = d;
 641			r10bio->devs[slot].addr = s;
 642			slot++;
 643		}
 644		dev++;
 645		if (dev >= geo->raid_disks) {
 646			dev = 0;
 647			sector += (geo->chunk_mask + 1);
 648		}
 649	}
 650}
 651
 652static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 653{
 654	struct geom *geo = &conf->geo;
 655
 656	if (conf->reshape_progress != MaxSector &&
 657	    ((r10bio->sector >= conf->reshape_progress) !=
 658	     conf->mddev->reshape_backwards)) {
 659		set_bit(R10BIO_Previous, &r10bio->state);
 660		geo = &conf->prev;
 661	} else
 662		clear_bit(R10BIO_Previous, &r10bio->state);
 663
 664	__raid10_find_phys(geo, r10bio);
 665}
 666
 667static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 668{
 669	sector_t offset, chunk, vchunk;
 670	/* Never use conf->prev as this is only called during resync
 671	 * or recovery, so reshape isn't happening
 672	 */
 673	struct geom *geo = &conf->geo;
 674	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
 675	int far_set_size = geo->far_set_size;
 676	int last_far_set_start;
 677
 678	if (geo->raid_disks % geo->far_set_size) {
 679		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 680		last_far_set_start *= geo->far_set_size;
 681
 682		if (dev >= last_far_set_start) {
 683			far_set_size = geo->far_set_size;
 684			far_set_size += (geo->raid_disks % geo->far_set_size);
 685			far_set_start = last_far_set_start;
 686		}
 687	}
 688
 689	offset = sector & geo->chunk_mask;
 690	if (geo->far_offset) {
 691		int fc;
 692		chunk = sector >> geo->chunk_shift;
 693		fc = sector_div(chunk, geo->far_copies);
 694		dev -= fc * geo->near_copies;
 695		if (dev < far_set_start)
 696			dev += far_set_size;
 697	} else {
 698		while (sector >= geo->stride) {
 699			sector -= geo->stride;
 700			if (dev < (geo->near_copies + far_set_start))
 701				dev += far_set_size - geo->near_copies;
 702			else
 703				dev -= geo->near_copies;
 704		}
 705		chunk = sector >> geo->chunk_shift;
 706	}
 707	vchunk = chunk * geo->raid_disks + dev;
 708	sector_div(vchunk, geo->near_copies);
 709	return (vchunk << geo->chunk_shift) + offset;
 710}
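/*
 * Worked example (assumed geometry: 4 disks, near_copies == 2,
 * far_copies == 1, 64-sector chunks): virtual sector 200 is chunk 3,
 * offset 8.  raid10_find_phys() places its two copies at device sector
 * 72 on disks 2 and 3, and raid10_find_virt(conf, 72, 2) maps back to
 * sector 200.
 */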
 711
 712/*
 713 * This routine returns the disk from which the requested read should
 714 * be done. There is a per-array 'next expected sequential IO' sector
 715 * number - if this matches on the next IO then we use the last disk.
  716 * There is also a per-disk 'last known head position' sector that is
 717 * maintained from IRQ contexts, both the normal and the resync IO
 718 * completion handlers update this position correctly. If there is no
 719 * perfect sequential match then we pick the disk whose head is closest.
 720 *
 721 * If there are 2 mirrors in the same 2 devices, performance degrades
  722 * because the recorded head position is per-mirror, not per-device.
 723 *
 724 * The rdev for the device selected will have nr_pending incremented.
 725 */
 726
 727/*
 728 * FIXME: possibly should rethink readbalancing and do it differently
 729 * depending on near_copies / far_copies geometry.
 730 */
 731static struct md_rdev *read_balance(struct r10conf *conf,
 732				    struct r10bio *r10_bio,
 733				    int *max_sectors)
 734{
 735	const sector_t this_sector = r10_bio->sector;
 736	int disk, slot;
 737	int sectors = r10_bio->sectors;
 738	int best_good_sectors;
 739	sector_t new_distance, best_dist;
 740	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
 741	int do_balance;
 742	int best_dist_slot, best_pending_slot;
 743	bool has_nonrot_disk = false;
 744	unsigned int min_pending;
 745	struct geom *geo = &conf->geo;
 746
 747	raid10_find_phys(conf, r10_bio);
 748	best_dist_slot = -1;
 749	min_pending = UINT_MAX;
 750	best_dist_rdev = NULL;
 751	best_pending_rdev = NULL;
 752	best_dist = MaxSector;
 753	best_good_sectors = 0;
 754	do_balance = 1;
 755	clear_bit(R10BIO_FailFast, &r10_bio->state);
 756	/*
 757	 * Check if we can balance. We can balance on the whole
 758	 * device if no resync is going on (recovery is ok), or below
 759	 * the resync window. We take the first readable disk when
 760	 * above the resync window.
 761	 */
 762	if ((conf->mddev->recovery_cp < MaxSector
 763	     && (this_sector + sectors >= conf->next_resync)) ||
 764	    (mddev_is_clustered(conf->mddev) &&
 765	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 766					    this_sector + sectors)))
 767		do_balance = 0;
 768
 769	for (slot = 0; slot < conf->copies ; slot++) {
 770		sector_t first_bad;
 771		int bad_sectors;
 772		sector_t dev_sector;
 773		unsigned int pending;
 774		bool nonrot;
 775
 776		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 777			continue;
 778		disk = r10_bio->devs[slot].devnum;
 779		rdev = conf->mirrors[disk].replacement;
 780		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
 781		    r10_bio->devs[slot].addr + sectors >
 782		    rdev->recovery_offset)
 783			rdev = conf->mirrors[disk].rdev;
 784		if (rdev == NULL ||
 785		    test_bit(Faulty, &rdev->flags))
 786			continue;
 787		if (!test_bit(In_sync, &rdev->flags) &&
 788		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 789			continue;
 790
 791		dev_sector = r10_bio->devs[slot].addr;
 792		if (is_badblock(rdev, dev_sector, sectors,
 793				&first_bad, &bad_sectors)) {
 794			if (best_dist < MaxSector)
 795				/* Already have a better slot */
 796				continue;
 797			if (first_bad <= dev_sector) {
 798				/* Cannot read here.  If this is the
 799				 * 'primary' device, then we must not read
 800				 * beyond 'bad_sectors' from another device.
 801				 */
 802				bad_sectors -= (dev_sector - first_bad);
 803				if (!do_balance && sectors > bad_sectors)
 804					sectors = bad_sectors;
 805				if (best_good_sectors > sectors)
 806					best_good_sectors = sectors;
 807			} else {
 808				sector_t good_sectors =
 809					first_bad - dev_sector;
 810				if (good_sectors > best_good_sectors) {
 811					best_good_sectors = good_sectors;
 812					best_dist_slot = slot;
 813					best_dist_rdev = rdev;
 814				}
 815				if (!do_balance)
 816					/* Must read from here */
 817					break;
 818			}
 819			continue;
 820		} else
 821			best_good_sectors = sectors;
 822
 823		if (!do_balance)
 824			break;
 825
 826		nonrot = bdev_nonrot(rdev->bdev);
 827		has_nonrot_disk |= nonrot;
 828		pending = atomic_read(&rdev->nr_pending);
 829		if (min_pending > pending && nonrot) {
 830			min_pending = pending;
 831			best_pending_slot = slot;
 832			best_pending_rdev = rdev;
 833		}
 834
 835		if (best_dist_slot >= 0)
 836			/* At least 2 disks to choose from so failfast is OK */
 837			set_bit(R10BIO_FailFast, &r10_bio->state);
 838		/* This optimisation is debatable, and completely destroys
 839		 * sequential read speed for 'far copies' arrays.  So only
 840		 * keep it for 'near' arrays, and review those later.
 841		 */
 842		if (geo->near_copies > 1 && !pending)
 843			new_distance = 0;
 844
 845		/* for far > 1 always use the lowest address */
 846		else if (geo->far_copies > 1)
 847			new_distance = r10_bio->devs[slot].addr;
 848		else
 849			new_distance = abs(r10_bio->devs[slot].addr -
 850					   conf->mirrors[disk].head_position);
 851
 852		if (new_distance < best_dist) {
 853			best_dist = new_distance;
 854			best_dist_slot = slot;
 855			best_dist_rdev = rdev;
 856		}
 857	}
 858	if (slot >= conf->copies) {
 859		if (has_nonrot_disk) {
 860			slot = best_pending_slot;
 861			rdev = best_pending_rdev;
 862		} else {
 863			slot = best_dist_slot;
 864			rdev = best_dist_rdev;
 865		}
 866	}
 867
 868	if (slot >= 0) {
 869		atomic_inc(&rdev->nr_pending);
 870		r10_bio->read_slot = slot;
 871	} else
 872		rdev = NULL;
 873	*max_sectors = best_good_sectors;
 874
 875	return rdev;
 876}
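/*
 * Informal summary of the selection above: the request is first clamped to
 * the largest range free of known bad blocks on the candidate device; if
 * balancing is allowed, an idle device is preferred for 'near' layouts and
 * the lowest device address for 'far' layouts, otherwise the smallest seek
 * distance wins; and when any non-rotational device was seen, the slot with
 * the fewest pending requests is chosen instead.
 */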
 877
 878static void flush_pending_writes(struct r10conf *conf)
 879{
 880	/* Any writes that have been queued but are awaiting
 881	 * bitmap updates get flushed here.
 882	 */
 883	spin_lock_irq(&conf->device_lock);
 884
 885	if (conf->pending_bio_list.head) {
 886		struct blk_plug plug;
 887		struct bio *bio;
 888
 889		bio = bio_list_get(&conf->pending_bio_list);
 890		spin_unlock_irq(&conf->device_lock);
 891
 892		/*
 893		 * As this is called in a wait_event() loop (see freeze_array),
 894		 * current->state might be TASK_UNINTERRUPTIBLE which will
 895		 * cause a warning when we prepare to wait again.  As it is
 896		 * rare that this path is taken, it is perfectly safe to force
 897		 * us to go around the wait_event() loop again, so the warning
 898		 * is a false-positive. Silence the warning by resetting
 899		 * thread state
 900		 */
 901		__set_current_state(TASK_RUNNING);
 902
 903		blk_start_plug(&plug);
 904		raid1_prepare_flush_writes(conf->mddev->bitmap);
 905		wake_up(&conf->wait_barrier);
 906
 907		while (bio) { /* submit pending writes */
 908			struct bio *next = bio->bi_next;
 909
 910			raid1_submit_write(bio);
 911			bio = next;
 912			cond_resched();
 913		}
 914		blk_finish_plug(&plug);
 915	} else
 916		spin_unlock_irq(&conf->device_lock);
 917}
 918
 919/* Barriers....
 920 * Sometimes we need to suspend IO while we do something else,
 921 * either some resync/recovery, or reconfigure the array.
 922 * To do this we raise a 'barrier'.
 923 * The 'barrier' is a counter that can be raised multiple times
 924 * to count how many activities are happening which preclude
 925 * normal IO.
 926 * We can only raise the barrier if there is no pending IO.
 927 * i.e. if nr_pending == 0.
 928 * We choose only to raise the barrier if no-one is waiting for the
 929 * barrier to go down.  This means that as soon as an IO request
 930 * is ready, no other operations which require a barrier will start
 931 * until the IO request has had a chance.
 932 *
 933 * So: regular IO calls 'wait_barrier'.  When that returns there
  934 *    is no background IO happening.  It must arrange to call
  935 *    allow_barrier when it has finished its IO.
  936 * background IO calls must call raise_barrier.  Once that returns
  937 *    there is no normal IO happening.  It must arrange to call
 938 *    lower_barrier when the particular background IO completes.
 939 */
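/*
 * Minimal usage sketch of that pairing (illustrative only):
 *
 *	raise_barrier(conf, 0);          wait_barrier(conf, false);
 *	... issue resync IO ...          ... issue regular IO ...
 *	lower_barrier(conf);             allow_barrier(conf);
 *
 * In practice lower_barrier() is called from put_buf() when the resync
 * r10bio is released, rather than inline as shown here.
 */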
 940
 941static void raise_barrier(struct r10conf *conf, int force)
 942{
 943	write_seqlock_irq(&conf->resync_lock);
 944
 945	if (WARN_ON_ONCE(force && !conf->barrier))
 946		force = false;
 947
 948	/* Wait until no block IO is waiting (unless 'force') */
 949	wait_event_barrier(conf, force || !conf->nr_waiting);
 950
 951	/* block any new IO from starting */
 952	WRITE_ONCE(conf->barrier, conf->barrier + 1);
 953
 954	/* Now wait for all pending IO to complete */
 955	wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
 956				 conf->barrier < RESYNC_DEPTH);
 957
 958	write_sequnlock_irq(&conf->resync_lock);
 959}
 960
 961static void lower_barrier(struct r10conf *conf)
 962{
 963	unsigned long flags;
 964
 965	write_seqlock_irqsave(&conf->resync_lock, flags);
 966	WRITE_ONCE(conf->barrier, conf->barrier - 1);
 967	write_sequnlock_irqrestore(&conf->resync_lock, flags);
 968	wake_up(&conf->wait_barrier);
 969}
 970
 971static bool stop_waiting_barrier(struct r10conf *conf)
 972{
 973	struct bio_list *bio_list = current->bio_list;
 974	struct md_thread *thread;
 975
 976	/* barrier is dropped */
 977	if (!conf->barrier)
 978		return true;
 979
 980	/*
 981	 * If there are already pending requests (preventing the barrier from
 982	 * rising completely), and the pre-process bio queue isn't empty, then
 983	 * don't wait, as we need to empty that queue to get the nr_pending
 984	 * count down.
 985	 */
 986	if (atomic_read(&conf->nr_pending) && bio_list &&
 987	    (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
 988		return true;
 989
 990	/* daemon thread must exist while handling io */
 991	thread = rcu_dereference_protected(conf->mddev->thread, true);
 992	/*
  993	 * move on if the io is issued from raid10d(): nr_pending is not released
  994	 * for the original io (see handle_read_error()), and all raise_barrier
  995	 * calls are blocked until this io is done.
 996	 */
 997	if (thread->tsk == current) {
 998		WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
 999		return true;
1000	}
1001
1002	return false;
1003}
1004
1005static bool wait_barrier_nolock(struct r10conf *conf)
1006{
1007	unsigned int seq = read_seqbegin(&conf->resync_lock);
1008
1009	if (READ_ONCE(conf->barrier))
1010		return false;
1011
1012	atomic_inc(&conf->nr_pending);
1013	if (!read_seqretry(&conf->resync_lock, seq))
1014		return true;
1015
1016	if (atomic_dec_and_test(&conf->nr_pending))
1017		wake_up_barrier(conf);
1018
1019	return false;
1020}
1021
1022static bool wait_barrier(struct r10conf *conf, bool nowait)
1023{
1024	bool ret = true;
1025
1026	if (wait_barrier_nolock(conf))
1027		return true;
1028
1029	write_seqlock_irq(&conf->resync_lock);
1030	if (conf->barrier) {
1031		/* Return false when nowait flag is set */
1032		if (nowait) {
1033			ret = false;
1034		} else {
1035			conf->nr_waiting++;
1036			raid10_log(conf->mddev, "wait barrier");
1037			wait_event_barrier(conf, stop_waiting_barrier(conf));
1038			conf->nr_waiting--;
1039		}
1040		if (!conf->nr_waiting)
1041			wake_up(&conf->wait_barrier);
1042	}
1043	/* Only increment nr_pending when we wait */
1044	if (ret)
1045		atomic_inc(&conf->nr_pending);
1046	write_sequnlock_irq(&conf->resync_lock);
1047	return ret;
1048}
1049
1050static void allow_barrier(struct r10conf *conf)
1051{
1052	if ((atomic_dec_and_test(&conf->nr_pending)) ||
1053			(conf->array_freeze_pending))
1054		wake_up_barrier(conf);
1055}
1056
1057static void freeze_array(struct r10conf *conf, int extra)
1058{
1059	/* stop syncio and normal IO and wait for everything to
1060	 * go quiet.
1061	 * We increment barrier and nr_waiting, and then
 1062	 * wait until nr_pending matches nr_queued+extra
1063	 * This is called in the context of one normal IO request
1064	 * that has failed. Thus any sync request that might be pending
1065	 * will be blocked by nr_pending, and we need to wait for
1066	 * pending IO requests to complete or be queued for re-try.
1067	 * Thus the number queued (nr_queued) plus this request (extra)
1068	 * must match the number of pending IOs (nr_pending) before
1069	 * we continue.
1070	 */
1071	write_seqlock_irq(&conf->resync_lock);
1072	conf->array_freeze_pending++;
1073	WRITE_ONCE(conf->barrier, conf->barrier + 1);
1074	conf->nr_waiting++;
1075	wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
1076			conf->nr_queued + extra, flush_pending_writes(conf));
1077	conf->array_freeze_pending--;
1078	write_sequnlock_irq(&conf->resync_lock);
1079}
1080
1081static void unfreeze_array(struct r10conf *conf)
1082{
1083	/* reverse the effect of the freeze */
1084	write_seqlock_irq(&conf->resync_lock);
1085	WRITE_ONCE(conf->barrier, conf->barrier - 1);
1086	conf->nr_waiting--;
1087	wake_up(&conf->wait_barrier);
1088	write_sequnlock_irq(&conf->resync_lock);
1089}
1090
1091static sector_t choose_data_offset(struct r10bio *r10_bio,
1092				   struct md_rdev *rdev)
1093{
1094	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1095	    test_bit(R10BIO_Previous, &r10_bio->state))
1096		return rdev->data_offset;
1097	else
1098		return rdev->new_data_offset;
1099}
1100
1101static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1102{
1103	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
1104	struct mddev *mddev = plug->cb.data;
1105	struct r10conf *conf = mddev->private;
1106	struct bio *bio;
1107
1108	if (from_schedule) {
1109		spin_lock_irq(&conf->device_lock);
1110		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1111		spin_unlock_irq(&conf->device_lock);
1112		wake_up_barrier(conf);
1113		md_wakeup_thread(mddev->thread);
1114		kfree(plug);
1115		return;
1116	}
1117
1118	/* we aren't scheduling, so we can do the write-out directly. */
1119	bio = bio_list_get(&plug->pending);
1120	raid1_prepare_flush_writes(mddev->bitmap);
1121	wake_up_barrier(conf);
1122
1123	while (bio) { /* submit pending writes */
1124		struct bio *next = bio->bi_next;
1125
1126		raid1_submit_write(bio);
1127		bio = next;
1128		cond_resched();
1129	}
1130	kfree(plug);
1131}
1132
1133/*
1134 * 1. Register the new request and wait if the reconstruction thread has put
 1135 * up a barrier for new requests. Continue immediately if no resync is active
 1136 * currently.
 1137 * 2. If the IO spans the reshape position, wait for the reshape to pass.
1138 */
1139static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1140				 struct bio *bio, sector_t sectors)
1141{
1142	/* Bail out if REQ_NOWAIT is set for the bio */
1143	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1144		bio_wouldblock_error(bio);
1145		return false;
1146	}
1147	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1148	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1149	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1150		allow_barrier(conf);
1151		if (bio->bi_opf & REQ_NOWAIT) {
1152			bio_wouldblock_error(bio);
1153			return false;
1154		}
1155		raid10_log(conf->mddev, "wait reshape");
1156		wait_event(conf->wait_barrier,
1157			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1158			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1159			   sectors);
1160		wait_barrier(conf, false);
1161	}
1162	return true;
1163}
1164
1165static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1166				struct r10bio *r10_bio, bool io_accounting)
1167{
1168	struct r10conf *conf = mddev->private;
1169	struct bio *read_bio;
1170	const enum req_op op = bio_op(bio);
1171	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1172	int max_sectors;
1173	struct md_rdev *rdev;
1174	char b[BDEVNAME_SIZE];
1175	int slot = r10_bio->read_slot;
1176	struct md_rdev *err_rdev = NULL;
1177	gfp_t gfp = GFP_NOIO;
1178
1179	if (slot >= 0 && r10_bio->devs[slot].rdev) {
1180		/*
1181		 * This is an error retry, but we cannot
1182		 * safely dereference the rdev in the r10_bio,
1183		 * we must use the one in conf.
1184		 * If it has already been disconnected (unlikely)
1185		 * we lose the device name in error messages.
1186		 */
1187		int disk;
1188		/*
1189		 * As we are blocking raid10, it is a little safer to
1190		 * use __GFP_HIGH.
1191		 */
1192		gfp = GFP_NOIO | __GFP_HIGH;
1193
1194		disk = r10_bio->devs[slot].devnum;
1195		err_rdev = conf->mirrors[disk].rdev;
1196		if (err_rdev)
1197			snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
1198		else {
1199			strcpy(b, "???");
1200			/* This never gets dereferenced */
1201			err_rdev = r10_bio->devs[slot].rdev;
1202		}
1203	}
1204
1205	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1206		return;
1207	rdev = read_balance(conf, r10_bio, &max_sectors);
1208	if (!rdev) {
1209		if (err_rdev) {
1210			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1211					    mdname(mddev), b,
1212					    (unsigned long long)r10_bio->sector);
1213		}
1214		raid_end_bio_io(r10_bio);
1215		return;
1216	}
1217	if (err_rdev)
1218		pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
1219				   mdname(mddev),
1220				   rdev->bdev,
1221				   (unsigned long long)r10_bio->sector);
1222	if (max_sectors < bio_sectors(bio)) {
1223		struct bio *split = bio_split(bio, max_sectors,
1224					      gfp, &conf->bio_split);
1225		bio_chain(split, bio);
1226		allow_barrier(conf);
1227		submit_bio_noacct(bio);
1228		wait_barrier(conf, false);
1229		bio = split;
1230		r10_bio->master_bio = bio;
1231		r10_bio->sectors = max_sectors;
1232	}
1233	slot = r10_bio->read_slot;
1234
1235	if (io_accounting) {
1236		md_account_bio(mddev, &bio);
1237		r10_bio->master_bio = bio;
1238	}
1239	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1240
1241	r10_bio->devs[slot].bio = read_bio;
1242	r10_bio->devs[slot].rdev = rdev;
1243
1244	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1245		choose_data_offset(r10_bio, rdev);
1246	read_bio->bi_end_io = raid10_end_read_request;
1247	read_bio->bi_opf = op | do_sync;
1248	if (test_bit(FailFast, &rdev->flags) &&
1249	    test_bit(R10BIO_FailFast, &r10_bio->state))
1250	        read_bio->bi_opf |= MD_FAILFAST;
1251	read_bio->bi_private = r10_bio;
1252
1253	if (mddev->gendisk)
1254	        trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
1255	                              r10_bio->sector);
1256	submit_bio_noacct(read_bio);
1257	return;
1258}
1259
1260static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1261				  struct bio *bio, bool replacement,
1262				  int n_copy)
1263{
1264	const enum req_op op = bio_op(bio);
1265	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1266	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
1267	unsigned long flags;
1268	struct r10conf *conf = mddev->private;
1269	struct md_rdev *rdev;
1270	int devnum = r10_bio->devs[n_copy].devnum;
1271	struct bio *mbio;
1272
1273	rdev = replacement ? conf->mirrors[devnum].replacement :
1274			     conf->mirrors[devnum].rdev;
1275
1276	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1277	if (replacement)
1278		r10_bio->devs[n_copy].repl_bio = mbio;
1279	else
1280		r10_bio->devs[n_copy].bio = mbio;
1281
1282	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
1283				   choose_data_offset(r10_bio, rdev));
1284	mbio->bi_end_io	= raid10_end_write_request;
1285	mbio->bi_opf = op | do_sync | do_fua;
1286	if (!replacement && test_bit(FailFast,
1287				     &conf->mirrors[devnum].rdev->flags)
1288			 && enough(conf, devnum))
1289		mbio->bi_opf |= MD_FAILFAST;
1290	mbio->bi_private = r10_bio;
1291
1292	if (conf->mddev->gendisk)
1293		trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
1294				      r10_bio->sector);
1295	/* flush_pending_writes() needs access to the rdev so...*/
1296	mbio->bi_bdev = (void *)rdev;
1297
1298	atomic_inc(&r10_bio->remaining);
1299
1300	if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
1301		spin_lock_irqsave(&conf->device_lock, flags);
1302		bio_list_add(&conf->pending_bio_list, mbio);
1303		spin_unlock_irqrestore(&conf->device_lock, flags);
1304		md_wakeup_thread(mddev->thread);
1305	}
1306}
1307
1308static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1309{
1310	int i;
1311	struct r10conf *conf = mddev->private;
1312	struct md_rdev *blocked_rdev;
1313
1314retry_wait:
1315	blocked_rdev = NULL;
1316	for (i = 0; i < conf->copies; i++) {
1317		struct md_rdev *rdev, *rrdev;
1318
1319		rdev = conf->mirrors[i].rdev;
1320		rrdev = conf->mirrors[i].replacement;
1321		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1322			atomic_inc(&rdev->nr_pending);
1323			blocked_rdev = rdev;
1324			break;
1325		}
1326		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1327			atomic_inc(&rrdev->nr_pending);
1328			blocked_rdev = rrdev;
1329			break;
1330		}
1331
1332		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1333			sector_t first_bad;
1334			sector_t dev_sector = r10_bio->devs[i].addr;
1335			int bad_sectors;
1336			int is_bad;
1337
1338			/*
 1339			 * A discard request doesn't care about the write result,
 1340			 * so it doesn't need to wait for a blocked disk here.
1341			 */
1342			if (!r10_bio->sectors)
1343				continue;
1344
1345			is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
1346					     &first_bad, &bad_sectors);
1347			if (is_bad < 0) {
1348				/*
1349				 * Mustn't write here until the bad block
1350				 * is acknowledged
1351				 */
1352				atomic_inc(&rdev->nr_pending);
1353				set_bit(BlockedBadBlocks, &rdev->flags);
1354				blocked_rdev = rdev;
1355				break;
1356			}
1357		}
1358	}
1359
1360	if (unlikely(blocked_rdev)) {
1361		/* Have to wait for this device to get unblocked, then retry */
1362		allow_barrier(conf);
1363		raid10_log(conf->mddev, "%s wait rdev %d blocked",
1364				__func__, blocked_rdev->raid_disk);
1365		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1366		wait_barrier(conf, false);
1367		goto retry_wait;
1368	}
1369}
1370
1371static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1372				 struct r10bio *r10_bio)
1373{
1374	struct r10conf *conf = mddev->private;
1375	int i;
1376	sector_t sectors;
1377	int max_sectors;
1378
1379	if ((mddev_is_clustered(mddev) &&
1380	     md_cluster_ops->area_resyncing(mddev, WRITE,
1381					    bio->bi_iter.bi_sector,
1382					    bio_end_sector(bio)))) {
1383		DEFINE_WAIT(w);
1384		/* Bail out if REQ_NOWAIT is set for the bio */
1385		if (bio->bi_opf & REQ_NOWAIT) {
1386			bio_wouldblock_error(bio);
1387			return;
1388		}
1389		for (;;) {
1390			prepare_to_wait(&conf->wait_barrier,
1391					&w, TASK_IDLE);
1392			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1393				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1394				break;
1395			schedule();
1396		}
1397		finish_wait(&conf->wait_barrier, &w);
1398	}
1399
1400	sectors = r10_bio->sectors;
1401	if (!regular_request_wait(mddev, conf, bio, sectors))
1402		return;
1403	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1404	    (mddev->reshape_backwards
1405	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1406		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1407	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1408		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1409		/* Need to update reshape_position in metadata */
1410		mddev->reshape_position = conf->reshape_progress;
1411		set_mask_bits(&mddev->sb_flags, 0,
1412			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1413		md_wakeup_thread(mddev->thread);
1414		if (bio->bi_opf & REQ_NOWAIT) {
1415			allow_barrier(conf);
1416			bio_wouldblock_error(bio);
1417			return;
1418		}
1419		raid10_log(conf->mddev, "wait reshape metadata");
1420		wait_event(mddev->sb_wait,
1421			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1422
1423		conf->reshape_safe = mddev->reshape_position;
1424	}
1425
1426	/* first select target devices under rcu_lock and
1427	 * inc refcount on their rdev.  Record them by setting
1428	 * bios[x] to bio
1429	 * If there are known/acknowledged bad blocks on any device
1430	 * on which we have seen a write error, we want to avoid
1431	 * writing to those blocks.  This potentially requires several
1432	 * writes to write around the bad blocks.  Each set of writes
1433	 * gets its own r10_bio with a set of bios attached.
1434	 */
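	/*
	 * Note (informal): when a known bad block limits max_sectors below
	 * the request size, the bio is split further down, the remainder is
	 * re-submitted with submit_bio_noacct() and picks up a fresh r10_bio
	 * on its next pass through md.
	 */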
1435
1436	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1437	raid10_find_phys(conf, r10_bio);
1438
1439	wait_blocked_dev(mddev, r10_bio);
1440
1441	max_sectors = r10_bio->sectors;
1442
1443	for (i = 0;  i < conf->copies; i++) {
1444		int d = r10_bio->devs[i].devnum;
1445		struct md_rdev *rdev, *rrdev;
1446
1447		rdev = conf->mirrors[d].rdev;
1448		rrdev = conf->mirrors[d].replacement;
1449		if (rdev && (test_bit(Faulty, &rdev->flags)))
1450			rdev = NULL;
1451		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1452			rrdev = NULL;
1453
1454		r10_bio->devs[i].bio = NULL;
1455		r10_bio->devs[i].repl_bio = NULL;
1456
1457		if (!rdev && !rrdev) {
1458			set_bit(R10BIO_Degraded, &r10_bio->state);
1459			continue;
1460		}
1461		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1462			sector_t first_bad;
1463			sector_t dev_sector = r10_bio->devs[i].addr;
1464			int bad_sectors;
1465			int is_bad;
1466
1467			is_bad = is_badblock(rdev, dev_sector, max_sectors,
1468					     &first_bad, &bad_sectors);
1469			if (is_bad && first_bad <= dev_sector) {
1470				/* Cannot write here at all */
1471				bad_sectors -= (dev_sector - first_bad);
1472				if (bad_sectors < max_sectors)
1473					/* Mustn't write more than bad_sectors
1474					 * to other devices yet
1475					 */
1476					max_sectors = bad_sectors;
1477				/* We don't set R10BIO_Degraded as that
1478				 * only applies if the disk is missing,
1479				 * so it might be re-added, and we want to
1480				 * know to recover this chunk.
1481				 * In this case the device is here, and the
1482				 * fact that this chunk is not in-sync is
1483				 * recorded in the bad block log.
1484				 */
1485				continue;
1486			}
1487			if (is_bad) {
1488				int good_sectors = first_bad - dev_sector;
1489				if (good_sectors < max_sectors)
1490					max_sectors = good_sectors;
1491			}
1492		}
1493		if (rdev) {
1494			r10_bio->devs[i].bio = bio;
1495			atomic_inc(&rdev->nr_pending);
1496		}
1497		if (rrdev) {
1498			r10_bio->devs[i].repl_bio = bio;
1499			atomic_inc(&rrdev->nr_pending);
1500		}
1501	}
1502
1503	if (max_sectors < r10_bio->sectors)
1504		r10_bio->sectors = max_sectors;
1505
1506	if (r10_bio->sectors < bio_sectors(bio)) {
1507		struct bio *split = bio_split(bio, r10_bio->sectors,
1508					      GFP_NOIO, &conf->bio_split);
1509		bio_chain(split, bio);
1510		allow_barrier(conf);
1511		submit_bio_noacct(bio);
1512		wait_barrier(conf, false);
1513		bio = split;
1514		r10_bio->master_bio = bio;
1515	}
1516
1517	md_account_bio(mddev, &bio);
1518	r10_bio->master_bio = bio;
1519	atomic_set(&r10_bio->remaining, 1);
1520	md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1521
1522	for (i = 0; i < conf->copies; i++) {
1523		if (r10_bio->devs[i].bio)
1524			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1525		if (r10_bio->devs[i].repl_bio)
1526			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1527	}
1528	one_write_done(r10_bio);
1529}
1530
1531static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1532{
1533	struct r10conf *conf = mddev->private;
1534	struct r10bio *r10_bio;
1535
1536	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1537
1538	r10_bio->master_bio = bio;
1539	r10_bio->sectors = sectors;
1540
1541	r10_bio->mddev = mddev;
1542	r10_bio->sector = bio->bi_iter.bi_sector;
1543	r10_bio->state = 0;
1544	r10_bio->read_slot = -1;
1545	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1546			conf->geo.raid_disks);
1547
1548	if (bio_data_dir(bio) == READ)
1549		raid10_read_request(mddev, bio, r10_bio, true);
1550	else
1551		raid10_write_request(mddev, bio, r10_bio);
1552}
1553
1554static void raid_end_discard_bio(struct r10bio *r10bio)
1555{
1556	struct r10conf *conf = r10bio->mddev->private;
1557	struct r10bio *first_r10bio;
1558
1559	while (atomic_dec_and_test(&r10bio->remaining)) {
1560
1561		allow_barrier(conf);
1562
1563		if (!test_bit(R10BIO_Discard, &r10bio->state)) {
1564			first_r10bio = (struct r10bio *)r10bio->master_bio;
1565			free_r10bio(r10bio);
1566			r10bio = first_r10bio;
1567		} else {
1568			md_write_end(r10bio->mddev);
1569			bio_endio(r10bio->master_bio);
1570			free_r10bio(r10bio);
1571			break;
1572		}
1573	}
1574}
1575
1576static void raid10_end_discard_request(struct bio *bio)
1577{
1578	struct r10bio *r10_bio = bio->bi_private;
1579	struct r10conf *conf = r10_bio->mddev->private;
1580	struct md_rdev *rdev = NULL;
1581	int dev;
1582	int slot, repl;
1583
1584	/*
 1585	 * We don't care about the return value of the discard bio
1586	 */
1587	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1588		set_bit(R10BIO_Uptodate, &r10_bio->state);
1589
1590	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1591	rdev = repl ? conf->mirrors[dev].replacement :
1592		      conf->mirrors[dev].rdev;
1593
1594	raid_end_discard_bio(r10_bio);
1595	rdev_dec_pending(rdev, conf->mddev);
1596}
1597
1598/*
 1599 * There are some limitations on handling discard bios:
 1600 * 1st, the discard size must be bigger than stripe_size*2.
 1601 * 2nd, if the discard bio spans the reshape progress, we use the old way to
 1602 * handle the discard bio.
1603 */
1604static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1605{
1606	struct r10conf *conf = mddev->private;
1607	struct geom *geo = &conf->geo;
1608	int far_copies = geo->far_copies;
1609	bool first_copy = true;
1610	struct r10bio *r10_bio, *first_r10bio;
1611	struct bio *split;
1612	int disk;
1613	sector_t chunk;
1614	unsigned int stripe_size;
1615	unsigned int stripe_data_disks;
1616	sector_t split_size;
1617	sector_t bio_start, bio_end;
1618	sector_t first_stripe_index, last_stripe_index;
1619	sector_t start_disk_offset;
1620	unsigned int start_disk_index;
1621	sector_t end_disk_offset;
1622	unsigned int end_disk_index;
1623	unsigned int remainder;
1624
1625	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1626		return -EAGAIN;
1627
1628	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
1629		bio_wouldblock_error(bio);
1630		return 0;
1631	}
1632	wait_barrier(conf, false);
1633
1634	/*
 1635	 * Check reshape again to avoid a reshape happening after checking
 1636	 * MD_RECOVERY_RESHAPE and before wait_barrier.
1637	 */
1638	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1639		goto out;
1640
1641	if (geo->near_copies)
1642		stripe_data_disks = geo->raid_disks / geo->near_copies +
1643					geo->raid_disks % geo->near_copies;
1644	else
1645		stripe_data_disks = geo->raid_disks;
1646
1647	stripe_size = stripe_data_disks << geo->chunk_shift;
1648
1649	bio_start = bio->bi_iter.bi_sector;
1650	bio_end = bio_end_sector(bio);
1651
1652	/*
 1653	 * A discard bio may be smaller than the stripe size, or may cross a
 1654	 * stripe boundary while its discard region is larger than one stripe.
 1655	 * For the far offset layout, if the discard region is not aligned with
 1656	 * the stripe size, there is a hole when we submit the discard bio to a
 1657	 * member disk.  For simplicity, we only handle discard bios whose
 1658	 * discard region is bigger than stripe_size * 2.
1659	 */
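	/*
	 * Worked example (assumed geometry: 4 disks, near_copies == 2,
	 * far_copies == 1, 1024-sector chunks): stripe_data_disks is
	 * 4 / 2 + 4 % 2 = 2, so stripe_size is 2 << 10 = 2048 sectors and
	 * only discards of at least 4096 sectors (2MiB) take this path.
	 */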
1660	if (bio_sectors(bio) < stripe_size*2)
1661		goto out;
1662
1663	/*
1664	 * Keep bio aligned with strip size.
1665	 */
1666	div_u64_rem(bio_start, stripe_size, &remainder);
1667	if (remainder) {
1668		split_size = stripe_size - remainder;
1669		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1670		bio_chain(split, bio);
1671		allow_barrier(conf);
 1672		/* Resend the first split part */
1673		submit_bio_noacct(split);
1674		wait_barrier(conf, false);
1675	}
1676	div_u64_rem(bio_end, stripe_size, &remainder);
1677	if (remainder) {
1678		split_size = bio_sectors(bio) - remainder;
1679		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1680		bio_chain(split, bio);
1681		allow_barrier(conf);
1682		/* Resend the second split part */
1683		submit_bio_noacct(bio);
1684		bio = split;
1685		wait_barrier(conf, false);
1686	}
1687
1688	bio_start = bio->bi_iter.bi_sector;
1689	bio_end = bio_end_sector(bio);
1690
1691	/*
 1692	 * Raid10 uses the chunk as the unit to store data, similar to raid0.
 1693	 * One stripe contains the chunks from all member disks (one chunk from
 1694	 * each disk at the same LBA). For layout details, see 'man 4 md'.
1695	 */
1696	chunk = bio_start >> geo->chunk_shift;
1697	chunk *= geo->near_copies;
1698	first_stripe_index = chunk;
1699	start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
1700	if (geo->far_offset)
1701		first_stripe_index *= geo->far_copies;
1702	start_disk_offset = (bio_start & geo->chunk_mask) +
1703				(first_stripe_index << geo->chunk_shift);
1704
1705	chunk = bio_end >> geo->chunk_shift;
1706	chunk *= geo->near_copies;
1707	last_stripe_index = chunk;
1708	end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
1709	if (geo->far_offset)
1710		last_stripe_index *= geo->far_copies;
1711	end_disk_offset = (bio_end & geo->chunk_mask) +
1712				(last_stripe_index << geo->chunk_shift);
1713
1714retry_discard:
1715	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1716	r10_bio->mddev = mddev;
1717	r10_bio->state = 0;
1718	r10_bio->sectors = 0;
1719	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1720	wait_blocked_dev(mddev, r10_bio);
1721
1722	/*
 1723	 * For the far layout, more than one r10bio is needed to cover all regions.
 1724	 * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
 1725	 * to record the discard bio. The other r10bios' master_bio records the first
 1726	 * r10bio. The first r10bio is only released after all other r10bios finish.
 1727	 * The discard bio returns only when the first r10bio finishes.
1728	 */
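	/*
	 * Illustrative picture of the chain built below (two far copies
	 * assumed): the second r10bio's master_bio points at the first
	 * r10bio, whose master_bio is the original discard bio:
	 *
	 *	discard bio <-- first_r10bio <-- r10_bio (second far copy)
	 */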
1729	if (first_copy) {
1730		r10_bio->master_bio = bio;
1731		set_bit(R10BIO_Discard, &r10_bio->state);
1732		first_copy = false;
1733		first_r10bio = r10_bio;
1734	} else
1735		r10_bio->master_bio = (struct bio *)first_r10bio;
1736
1737	/*
1738	 * first select target devices under rcu_lock and
1739	 * inc refcount on their rdev.  Record them by setting
1740	 * bios[x] to bio
1741	 */
1742	for (disk = 0; disk < geo->raid_disks; disk++) {
1743		struct md_rdev *rdev, *rrdev;
1744
1745		rdev = conf->mirrors[disk].rdev;
1746		rrdev = conf->mirrors[disk].replacement;
1747		r10_bio->devs[disk].bio = NULL;
1748		r10_bio->devs[disk].repl_bio = NULL;
1749
1750		if (rdev && (test_bit(Faulty, &rdev->flags)))
1751			rdev = NULL;
1752		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1753			rrdev = NULL;
1754		if (!rdev && !rrdev)
1755			continue;
1756
1757		if (rdev) {
1758			r10_bio->devs[disk].bio = bio;
1759			atomic_inc(&rdev->nr_pending);
1760		}
1761		if (rrdev) {
1762			r10_bio->devs[disk].repl_bio = bio;
1763			atomic_inc(&rrdev->nr_pending);
1764		}
1765	}
1766
1767	atomic_set(&r10_bio->remaining, 1);
1768	for (disk = 0; disk < geo->raid_disks; disk++) {
1769		sector_t dev_start, dev_end;
1770		struct bio *mbio, *rbio = NULL;
1771
1772		/*
1773		 * Now calculate the start and end address for each disk.
1774		 * The space between dev_start and dev_end is the discard region.
1775		 *
1776		 * For dev_start, three cases need to be considered:
1777		 * 1st, the disk is before start_disk; think of it as being in
1778		 * the next stripe, so dev_start is the start address of the
1779		 * next stripe.
1780		 * 2nd, the disk is after start_disk, which means the disk is in
1781		 * the same stripe as the first disk.
1782		 * 3rd, the first disk itself; start_disk_offset can be used directly.
1783		 */
1784		if (disk < start_disk_index)
1785			dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1786		else if (disk > start_disk_index)
1787			dev_start = first_stripe_index * mddev->chunk_sectors;
1788		else
1789			dev_start = start_disk_offset;
1790
1791		if (disk < end_disk_index)
1792			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1793		else if (disk > end_disk_index)
1794			dev_end = last_stripe_index * mddev->chunk_sectors;
1795		else
1796			dev_end = end_disk_offset;
1797
1798		/*
1799		 * Only discard bios whose size is >= the stripe size are handled,
1800		 * so dev_end > dev_start always holds.
1801		 * There is no need to take the rcu lock to get the rdev here; we
1802		 * already incremented rdev->nr_pending in the first loop.
1803		 */
1804		if (r10_bio->devs[disk].bio) {
1805			struct md_rdev *rdev = conf->mirrors[disk].rdev;
1806			mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1807					       &mddev->bio_set);
1808			mbio->bi_end_io = raid10_end_discard_request;
1809			mbio->bi_private = r10_bio;
1810			r10_bio->devs[disk].bio = mbio;
1811			r10_bio->devs[disk].devnum = disk;
1812			atomic_inc(&r10_bio->remaining);
1813			md_submit_discard_bio(mddev, rdev, mbio,
1814					dev_start + choose_data_offset(r10_bio, rdev),
1815					dev_end - dev_start);
1816			bio_endio(mbio);
1817		}
1818		if (r10_bio->devs[disk].repl_bio) {
1819			struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1820			rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1821					       &mddev->bio_set);
1822			rbio->bi_end_io = raid10_end_discard_request;
1823			rbio->bi_private = r10_bio;
1824			r10_bio->devs[disk].repl_bio = rbio;
1825			r10_bio->devs[disk].devnum = disk;
1826			atomic_inc(&r10_bio->remaining);
1827			md_submit_discard_bio(mddev, rrdev, rbio,
1828					dev_start + choose_data_offset(r10_bio, rrdev),
1829					dev_end - dev_start);
1830			bio_endio(rbio);
1831		}
1832	}
1833
1834	if (!geo->far_offset && --far_copies) {
1835		first_stripe_index += geo->stride >> geo->chunk_shift;
1836		start_disk_offset += geo->stride;
1837		last_stripe_index += geo->stride >> geo->chunk_shift;
1838		end_disk_offset += geo->stride;
1839		atomic_inc(&first_r10bio->remaining);
1840		raid_end_discard_bio(r10_bio);
1841		wait_barrier(conf, false);
1842		goto retry_discard;
1843	}
1844
1845	raid_end_discard_bio(r10_bio);
1846
1847	return 0;
1848out:
1849	allow_barrier(conf);
1850	return -EAGAIN;
1851}
1852
1853static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1854{
1855	struct r10conf *conf = mddev->private;
1856	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1857	int chunk_sects = chunk_mask + 1;
1858	int sectors = bio_sectors(bio);
1859
1860	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1861	    && md_flush_request(mddev, bio))
1862		return true;
1863
1864	if (!md_write_start(mddev, bio))
1865		return false;
1866
1867	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1868		if (!raid10_handle_discard(mddev, bio))
1869			return true;
1870
1871	/*
1872	 * If this request crosses a chunk boundary, we need to split
1873	 * it.
1874	 */
1875	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1876		     sectors > chunk_sects
1877		     && (conf->geo.near_copies < conf->geo.raid_disks
1878			 || conf->prev.near_copies <
1879			 conf->prev.raid_disks)))
1880		sectors = chunk_sects -
1881			(bio->bi_iter.bi_sector &
1882			 (chunk_sects - 1));
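	/*
	 * For example (illustrative values only): with chunk_sects = 1024 and
	 * bi_sector = 1500, the offset within the chunk is 1500 & 1023 = 476,
	 * so sectors is trimmed to 1024 - 476 = 548 and this request ends
	 * exactly on the chunk boundary at sector 2048; the rest of the bio is
	 * split off and resubmitted by the request handling below.
	 */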
1883	__make_request(mddev, bio, sectors);
1884
1885	/* In case raid10d snuck in to freeze_array */
1886	wake_up_barrier(conf);
1887	return true;
1888}
1889
1890static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1891{
1892	struct r10conf *conf = mddev->private;
1893	int i;
1894
1895	lockdep_assert_held(&mddev->lock);
1896
1897	if (conf->geo.near_copies < conf->geo.raid_disks)
1898		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1899	if (conf->geo.near_copies > 1)
1900		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1901	if (conf->geo.far_copies > 1) {
1902		if (conf->geo.far_offset)
1903			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1904		else
1905			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1906		if (conf->geo.far_set_size != conf->geo.raid_disks)
1907			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1908	}
1909	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1910					conf->geo.raid_disks - mddev->degraded);
1911	for (i = 0; i < conf->geo.raid_disks; i++) {
1912		struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
1913
1914		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1915	}
1916	seq_printf(seq, "]");
1917}
1918
1919/* check if there are enough drives for
1920 * every block to appear on at least one.
1921 * Don't consider the device numbered 'ignore'
1922 * as we might be about to remove it.
1923 */
1924static int _enough(struct r10conf *conf, int previous, int ignore)
1925{
1926	int first = 0;
1927	int has_enough = 0;
1928	int disks, ncopies;
1929	if (previous) {
1930		disks = conf->prev.raid_disks;
1931		ncopies = conf->prev.near_copies;
1932	} else {
1933		disks = conf->geo.raid_disks;
1934		ncopies = conf->geo.near_copies;
1935	}
1936
1937	do {
1938		int n = conf->copies;
1939		int cnt = 0;
1940		int this = first;
1941		while (n--) {
1942			struct md_rdev *rdev;
1943			if (this != ignore &&
1944			    (rdev = conf->mirrors[this].rdev) &&
1945			    test_bit(In_sync, &rdev->flags))
1946				cnt++;
1947			this = (this+1) % disks;
1948		}
1949		if (cnt == 0)
1950			goto out;
1951		first = (first + ncopies) % disks;
1952	} while (first != 0);
1953	has_enough = 1;
1954out:
1955	return has_enough;
1956}
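
/*
 * For instance, with a hypothetical 'near 2' layout over four disks
 * (near_copies == copies == 2), the loop above walks the groups {0,1}
 * and {2,3}; the array is still viable only if every such group retains
 * at least one In_sync member other than 'ignore'.
 */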
1957
1958static int enough(struct r10conf *conf, int ignore)
1959{
1960	/* when calling 'enough', both 'prev' and 'geo' must
1961	 * be stable.
1962	 * This is ensured if ->reconfig_mutex or ->device_lock
1963	 * is held.
1964	 */
1965	return _enough(conf, 0, ignore) &&
1966		_enough(conf, 1, ignore);
1967}
1968
1969/**
1970 * raid10_error() - RAID10 error handler.
1971 * @mddev: affected md device.
1972 * @rdev: member device to fail.
1973 *
1974 * The routine acknowledges &rdev failure and determines the new @mddev state.
1975 * If the array has failed, then:
1976 *	- &MD_BROKEN flag is set in &mddev->flags.
1977 * Otherwise, it must be degraded:
1978 *	- recovery is interrupted.
1979 *	- &mddev->degraded is bumped.
1980 *
1981 * @rdev is marked as &Faulty excluding case when array is failed and
1982 * &mddev->fail_last_dev is off.
1983 */
1984static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1985{
1986	struct r10conf *conf = mddev->private;
1987	unsigned long flags;
1988
1989	spin_lock_irqsave(&conf->device_lock, flags);
1990
1991	if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
1992		set_bit(MD_BROKEN, &mddev->flags);
1993
1994		if (!mddev->fail_last_dev) {
1995			spin_unlock_irqrestore(&conf->device_lock, flags);
1996			return;
1997		}
1998	}
1999	if (test_and_clear_bit(In_sync, &rdev->flags))
2000		mddev->degraded++;
2001
2002	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2003	set_bit(Blocked, &rdev->flags);
2004	set_bit(Faulty, &rdev->flags);
2005	set_mask_bits(&mddev->sb_flags, 0,
2006		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2007	spin_unlock_irqrestore(&conf->device_lock, flags);
2008	pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
2009		"md/raid10:%s: Operation continuing on %d devices.\n",
2010		mdname(mddev), rdev->bdev,
2011		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2012}
2013
2014static void print_conf(struct r10conf *conf)
2015{
2016	int i;
2017	struct md_rdev *rdev;
2018
2019	pr_debug("RAID10 conf printout:\n");
2020	if (!conf) {
2021		pr_debug("(!conf)\n");
2022		return;
2023	}
2024	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2025		 conf->geo.raid_disks);
2026
2027	lockdep_assert_held(&conf->mddev->reconfig_mutex);
2028	for (i = 0; i < conf->geo.raid_disks; i++) {
2029		rdev = conf->mirrors[i].rdev;
2030		if (rdev)
2031			pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
2032				 i, !test_bit(In_sync, &rdev->flags),
2033				 !test_bit(Faulty, &rdev->flags),
2034				 rdev->bdev);
2035	}
2036}
2037
2038static void close_sync(struct r10conf *conf)
2039{
2040	wait_barrier(conf, false);
2041	allow_barrier(conf);
2042
2043	mempool_exit(&conf->r10buf_pool);
2044}
2045
2046static int raid10_spare_active(struct mddev *mddev)
2047{
2048	int i;
2049	struct r10conf *conf = mddev->private;
2050	struct raid10_info *tmp;
2051	int count = 0;
2052	unsigned long flags;
2053
2054	/*
2055	 * Find all non-in_sync disks within the RAID10 configuration
2056	 * and mark them in_sync
2057	 */
2058	for (i = 0; i < conf->geo.raid_disks; i++) {
2059		tmp = conf->mirrors + i;
2060		if (tmp->replacement
2061		    && tmp->replacement->recovery_offset == MaxSector
2062		    && !test_bit(Faulty, &tmp->replacement->flags)
2063		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2064			/* Replacement has just become active */
2065			if (!tmp->rdev
2066			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2067				count++;
2068			if (tmp->rdev) {
2069				/* Replaced device not technically faulty,
2070				 * but we need to be sure it gets removed
2071				 * and never re-added.
2072				 */
2073				set_bit(Faulty, &tmp->rdev->flags);
2074				sysfs_notify_dirent_safe(
2075					tmp->rdev->sysfs_state);
2076			}
2077			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2078		} else if (tmp->rdev
2079			   && tmp->rdev->recovery_offset == MaxSector
2080			   && !test_bit(Faulty, &tmp->rdev->flags)
2081			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2082			count++;
2083			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2084		}
2085	}
2086	spin_lock_irqsave(&conf->device_lock, flags);
2087	mddev->degraded -= count;
2088	spin_unlock_irqrestore(&conf->device_lock, flags);
2089
2090	print_conf(conf);
2091	return count;
2092}
2093
2094static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2095{
2096	struct r10conf *conf = mddev->private;
2097	int err = -EEXIST;
2098	int mirror, repl_slot = -1;
2099	int first = 0;
2100	int last = conf->geo.raid_disks - 1;
2101	struct raid10_info *p;
2102
2103	if (mddev->recovery_cp < MaxSector)
2104		/* only hot-add to in-sync arrays, as recovery is
2105		 * very different from resync
2106		 */
2107		return -EBUSY;
2108	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2109		return -EINVAL;
2110
2111	if (md_integrity_add_rdev(rdev, mddev))
2112		return -ENXIO;
2113
2114	if (rdev->raid_disk >= 0)
2115		first = last = rdev->raid_disk;
2116
2117	if (rdev->saved_raid_disk >= first &&
2118	    rdev->saved_raid_disk < conf->geo.raid_disks &&
2119	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2120		mirror = rdev->saved_raid_disk;
2121	else
2122		mirror = first;
2123	for ( ; mirror <= last ; mirror++) {
2124		p = &conf->mirrors[mirror];
2125		if (p->recovery_disabled == mddev->recovery_disabled)
2126			continue;
2127		if (p->rdev) {
2128			if (test_bit(WantReplacement, &p->rdev->flags) &&
2129			    p->replacement == NULL && repl_slot < 0)
2130				repl_slot = mirror;
2131			continue;
2132		}
2133
2134		if (mddev->gendisk)
2135			disk_stack_limits(mddev->gendisk, rdev->bdev,
2136					  rdev->data_offset << 9);
2137
2138		p->head_position = 0;
2139		p->recovery_disabled = mddev->recovery_disabled - 1;
2140		rdev->raid_disk = mirror;
2141		err = 0;
2142		if (rdev->saved_raid_disk != mirror)
2143			conf->fullsync = 1;
2144		WRITE_ONCE(p->rdev, rdev);
2145		break;
2146	}
2147
2148	if (err && repl_slot >= 0) {
2149		p = &conf->mirrors[repl_slot];
2150		clear_bit(In_sync, &rdev->flags);
2151		set_bit(Replacement, &rdev->flags);
2152		rdev->raid_disk = repl_slot;
2153		err = 0;
2154		if (mddev->gendisk)
2155			disk_stack_limits(mddev->gendisk, rdev->bdev,
2156					  rdev->data_offset << 9);
2157		conf->fullsync = 1;
2158		WRITE_ONCE(p->replacement, rdev);
2159	}
2160
2161	print_conf(conf);
2162	return err;
2163}
2164
2165static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2166{
2167	struct r10conf *conf = mddev->private;
2168	int err = 0;
2169	int number = rdev->raid_disk;
2170	struct md_rdev **rdevp;
2171	struct raid10_info *p;
2172
2173	print_conf(conf);
2174	if (unlikely(number >= mddev->raid_disks))
2175		return 0;
2176	p = conf->mirrors + number;
2177	if (rdev == p->rdev)
2178		rdevp = &p->rdev;
2179	else if (rdev == p->replacement)
2180		rdevp = &p->replacement;
2181	else
2182		return 0;
2183
2184	if (test_bit(In_sync, &rdev->flags) ||
2185	    atomic_read(&rdev->nr_pending)) {
2186		err = -EBUSY;
2187		goto abort;
2188	}
2189	/* Only remove non-faulty devices if recovery
2190	 * is not possible.
2191	 */
2192	if (!test_bit(Faulty, &rdev->flags) &&
2193	    mddev->recovery_disabled != p->recovery_disabled &&
2194	    (!p->replacement || p->replacement == rdev) &&
2195	    number < conf->geo.raid_disks &&
2196	    enough(conf, -1)) {
2197		err = -EBUSY;
2198		goto abort;
2199	}
2200	WRITE_ONCE(*rdevp, NULL);
2201	if (p->replacement) {
2202		/* We must have just cleared 'rdev' */
2203		WRITE_ONCE(p->rdev, p->replacement);
2204		clear_bit(Replacement, &p->replacement->flags);
2205		WRITE_ONCE(p->replacement, NULL);
2206	}
2207
2208	clear_bit(WantReplacement, &rdev->flags);
2209	err = md_integrity_register(mddev);
2210
2211abort:
2212
2213	print_conf(conf);
2214	return err;
2215}
2216
2217static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2218{
2219	struct r10conf *conf = r10_bio->mddev->private;
2220
2221	if (!bio->bi_status)
2222		set_bit(R10BIO_Uptodate, &r10_bio->state);
2223	else
2224		/* The write handler will notice the lack of
2225		 * R10BIO_Uptodate and record any errors etc
2226		 */
2227		atomic_add(r10_bio->sectors,
2228			   &conf->mirrors[d].rdev->corrected_errors);
2229
2230	/* for reconstruct, we always reschedule after a read.
2231	 * for resync, only after all reads
2232	 */
2233	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2234	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2235	    atomic_dec_and_test(&r10_bio->remaining)) {
2236		/* we have read all the blocks,
2237		 * do the comparison in process context in raid10d
2238		 */
2239		reschedule_retry(r10_bio);
2240	}
2241}
2242
2243static void end_sync_read(struct bio *bio)
2244{
2245	struct r10bio *r10_bio = get_resync_r10bio(bio);
2246	struct r10conf *conf = r10_bio->mddev->private;
2247	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2248
2249	__end_sync_read(r10_bio, bio, d);
2250}
2251
2252static void end_reshape_read(struct bio *bio)
2253{
2254	/* reshape read bio isn't allocated from r10buf_pool */
2255	struct r10bio *r10_bio = bio->bi_private;
2256
2257	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
2258}
2259
2260static void end_sync_request(struct r10bio *r10_bio)
2261{
2262	struct mddev *mddev = r10_bio->mddev;
2263
2264	while (atomic_dec_and_test(&r10_bio->remaining)) {
2265		if (r10_bio->master_bio == NULL) {
2266			/* the primary of several recovery bios */
2267			sector_t s = r10_bio->sectors;
2268			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2269			    test_bit(R10BIO_WriteError, &r10_bio->state))
2270				reschedule_retry(r10_bio);
2271			else
2272				put_buf(r10_bio);
2273			md_done_sync(mddev, s, 1);
2274			break;
2275		} else {
2276			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2277			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2278			    test_bit(R10BIO_WriteError, &r10_bio->state))
2279				reschedule_retry(r10_bio);
2280			else
2281				put_buf(r10_bio);
2282			r10_bio = r10_bio2;
2283		}
2284	}
2285}
2286
2287static void end_sync_write(struct bio *bio)
2288{
2289	struct r10bio *r10_bio = get_resync_r10bio(bio);
2290	struct mddev *mddev = r10_bio->mddev;
2291	struct r10conf *conf = mddev->private;
2292	int d;
2293	sector_t first_bad;
2294	int bad_sectors;
2295	int slot;
2296	int repl;
2297	struct md_rdev *rdev = NULL;
2298
2299	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2300	if (repl)
2301		rdev = conf->mirrors[d].replacement;
2302	else
2303		rdev = conf->mirrors[d].rdev;
2304
2305	if (bio->bi_status) {
2306		if (repl)
2307			md_error(mddev, rdev);
2308		else {
2309			set_bit(WriteErrorSeen, &rdev->flags);
2310			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2311				set_bit(MD_RECOVERY_NEEDED,
2312					&rdev->mddev->recovery);
2313			set_bit(R10BIO_WriteError, &r10_bio->state);
2314		}
2315	} else if (is_badblock(rdev,
2316			     r10_bio->devs[slot].addr,
2317			     r10_bio->sectors,
2318			     &first_bad, &bad_sectors))
2319		set_bit(R10BIO_MadeGood, &r10_bio->state);
2320
2321	rdev_dec_pending(rdev, mddev);
2322
2323	end_sync_request(r10_bio);
2324}
2325
2326/*
2327 * Note: sync and recover are handled very differently for raid10.
2328 * This code is for resync.
2329 * For resync, we read through virtual addresses and read all blocks.
2330 * If there is any error, we schedule a write.  The lowest numbered
2331 * drive is authoritative.
2332 * However, requests come in for physical addresses, so we need to map.
2333 * For every physical address there are raid_disks/copies virtual addresses,
2334 * which is always at least one, but is not necessarily an integer.
2335 * This means that a physical address can span multiple chunks, so we may
2336 * have to submit multiple io requests for a single sync request.
2337 */
2338/*
2339 * We check if all blocks are in-sync and only write to blocks that
2340 * aren't in sync
2341 */
2342static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2343{
2344	struct r10conf *conf = mddev->private;
2345	int i, first;
2346	struct bio *tbio, *fbio;
2347	int vcnt;
2348	struct page **tpages, **fpages;
2349
2350	atomic_set(&r10_bio->remaining, 1);
2351
2352	/* find the first device with a block */
2353	for (i=0; i<conf->copies; i++)
2354		if (!r10_bio->devs[i].bio->bi_status)
2355			break;
2356
2357	if (i == conf->copies)
2358		goto done;
2359
2360	first = i;
2361	fbio = r10_bio->devs[i].bio;
2362	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2363	fbio->bi_iter.bi_idx = 0;
2364	fpages = get_resync_pages(fbio)->pages;
2365
2366	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
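	/*
	 * e.g. assuming 4 KiB pages, a 128-sector (64 KiB) resync range
	 * gives vcnt = (128 + 7) >> 3 = 16 pages to compare below.
	 */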
2367	/* now find blocks with errors */
2368	for (i=0 ; i < conf->copies ; i++) {
2369		int  j, d;
2370		struct md_rdev *rdev;
2371		struct resync_pages *rp;
2372
2373		tbio = r10_bio->devs[i].bio;
2374
2375		if (tbio->bi_end_io != end_sync_read)
2376			continue;
2377		if (i == first)
2378			continue;
2379
2380		tpages = get_resync_pages(tbio)->pages;
2381		d = r10_bio->devs[i].devnum;
2382		rdev = conf->mirrors[d].rdev;
2383		if (!r10_bio->devs[i].bio->bi_status) {
2384			/* We know that the bi_io_vec layout is the same for
2385			 * both 'first' and 'i', so we just compare them.
2386			 * All vec entries are PAGE_SIZE;
2387			 */
2388			int sectors = r10_bio->sectors;
2389			for (j = 0; j < vcnt; j++) {
2390				int len = PAGE_SIZE;
2391				if (sectors < (len / 512))
2392					len = sectors * 512;
2393				if (memcmp(page_address(fpages[j]),
2394					   page_address(tpages[j]),
2395					   len))
2396					break;
2397				sectors -= len/512;
2398			}
2399			if (j == vcnt)
2400				continue;
2401			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2402			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2403				/* Don't fix anything. */
2404				continue;
2405		} else if (test_bit(FailFast, &rdev->flags)) {
2406			/* Just give up on this device */
2407			md_error(rdev->mddev, rdev);
2408			continue;
2409		}
2410		/* Ok, we need to write this bio, either to correct an
2411		 * inconsistency or to correct an unreadable block.
2412		 * First we need to fixup bv_offset, bv_len and
2413		 * bi_vecs, as the read request might have corrupted these
2414		 */
2415		rp = get_resync_pages(tbio);
2416		bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2417
2418		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2419
2420		rp->raid_bio = r10_bio;
2421		tbio->bi_private = rp;
2422		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2423		tbio->bi_end_io = end_sync_write;
2424
2425		bio_copy_data(tbio, fbio);
2426
2427		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2428		atomic_inc(&r10_bio->remaining);
2429		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2430
2431		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2432			tbio->bi_opf |= MD_FAILFAST;
2433		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2434		submit_bio_noacct(tbio);
2435	}
2436
2437	/* Now write out to any replacement devices
2438	 * that are active
2439	 */
2440	for (i = 0; i < conf->copies; i++) {
2441		int d;
2442
2443		tbio = r10_bio->devs[i].repl_bio;
2444		if (!tbio || !tbio->bi_end_io)
2445			continue;
2446		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2447		    && r10_bio->devs[i].bio != fbio)
2448			bio_copy_data(tbio, fbio);
2449		d = r10_bio->devs[i].devnum;
2450		atomic_inc(&r10_bio->remaining);
2451		md_sync_acct(conf->mirrors[d].replacement->bdev,
2452			     bio_sectors(tbio));
2453		submit_bio_noacct(tbio);
2454	}
2455
2456done:
2457	if (atomic_dec_and_test(&r10_bio->remaining)) {
2458		md_done_sync(mddev, r10_bio->sectors, 1);
2459		put_buf(r10_bio);
2460	}
2461}
2462
2463/*
2464 * Now for the recovery code.
2465 * Recovery happens across physical sectors.
2466 * We recover all non-in_sync drives by finding the virtual address of
2467 * each, and then choose a working drive that also has that virt address.
2468 * There is a separate r10_bio for each non-in_sync drive.
2469 * Only the first two slots are in use: the first for reading,
2470 * the second for writing.
2471 *
2472 */
2473static void fix_recovery_read_error(struct r10bio *r10_bio)
2474{
2475	/* We got a read error during recovery.
2476	 * We repeat the read in smaller page-sized sections.
2477	 * If a read succeeds, write it to the new device or record
2478	 * a bad block if we cannot.
2479	 * If a read fails, record a bad block on both old and
2480	 * new devices.
2481	 */
2482	struct mddev *mddev = r10_bio->mddev;
2483	struct r10conf *conf = mddev->private;
2484	struct bio *bio = r10_bio->devs[0].bio;
2485	sector_t sect = 0;
2486	int sectors = r10_bio->sectors;
2487	int idx = 0;
2488	int dr = r10_bio->devs[0].devnum;
2489	int dw = r10_bio->devs[1].devnum;
2490	struct page **pages = get_resync_pages(bio)->pages;
2491
2492	while (sectors) {
2493		int s = sectors;
2494		struct md_rdev *rdev;
2495		sector_t addr;
2496		int ok;
2497
2498		if (s > (PAGE_SIZE>>9))
2499			s = PAGE_SIZE >> 9;
2500
2501		rdev = conf->mirrors[dr].rdev;
2502		addr = r10_bio->devs[0].addr + sect,
2503		ok = sync_page_io(rdev,
2504				  addr,
2505				  s << 9,
2506				  pages[idx],
2507				  REQ_OP_READ, false);
2508		if (ok) {
2509			rdev = conf->mirrors[dw].rdev;
2510			addr = r10_bio->devs[1].addr + sect;
2511			ok = sync_page_io(rdev,
2512					  addr,
2513					  s << 9,
2514					  pages[idx],
2515					  REQ_OP_WRITE, false);
2516			if (!ok) {
2517				set_bit(WriteErrorSeen, &rdev->flags);
2518				if (!test_and_set_bit(WantReplacement,
2519						      &rdev->flags))
2520					set_bit(MD_RECOVERY_NEEDED,
2521						&rdev->mddev->recovery);
2522			}
2523		}
2524		if (!ok) {
2525			/* We don't worry if we cannot set a bad block -
2526			 * it really is bad so there is no loss in not
2527			 * recording it yet
2528			 */
2529			rdev_set_badblocks(rdev, addr, s, 0);
2530
2531			if (rdev != conf->mirrors[dw].rdev) {
2532				/* need bad block on destination too */
2533				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2534				addr = r10_bio->devs[1].addr + sect;
2535				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2536				if (!ok) {
2537					/* just abort the recovery */
2538					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2539						  mdname(mddev));
2540
2541					conf->mirrors[dw].recovery_disabled
2542						= mddev->recovery_disabled;
2543					set_bit(MD_RECOVERY_INTR,
2544						&mddev->recovery);
2545					break;
2546				}
2547			}
2548		}
2549
2550		sectors -= s;
2551		sect += s;
2552		idx++;
2553	}
2554}
2555
2556static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2557{
2558	struct r10conf *conf = mddev->private;
2559	int d;
2560	struct bio *wbio = r10_bio->devs[1].bio;
2561	struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2562
2563	/* Need to test wbio2->bi_end_io before we call
2564	 * submit_bio_noacct as if the former is NULL,
2565	 * the latter is free to free wbio2.
2566	 */
2567	if (wbio2 && !wbio2->bi_end_io)
2568		wbio2 = NULL;
2569
2570	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2571		fix_recovery_read_error(r10_bio);
2572		if (wbio->bi_end_io)
2573			end_sync_request(r10_bio);
2574		if (wbio2)
2575			end_sync_request(r10_bio);
2576		return;
2577	}
2578
2579	/*
2580	 * share the pages with the first bio
2581	 * and submit the write request
2582	 */
2583	d = r10_bio->devs[1].devnum;
2584	if (wbio->bi_end_io) {
2585		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2586		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2587		submit_bio_noacct(wbio);
2588	}
2589	if (wbio2) {
2590		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2591		md_sync_acct(conf->mirrors[d].replacement->bdev,
2592			     bio_sectors(wbio2));
2593		submit_bio_noacct(wbio2);
2594	}
2595}
2596
2597static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2598			    int sectors, struct page *page, enum req_op op)
2599{
2600	sector_t first_bad;
2601	int bad_sectors;
2602
2603	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2604	    && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
2605		return -1;
2606	if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
2607		/* success */
2608		return 1;
2609	if (op == REQ_OP_WRITE) {
2610		set_bit(WriteErrorSeen, &rdev->flags);
2611		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2612			set_bit(MD_RECOVERY_NEEDED,
2613				&rdev->mddev->recovery);
2614	}
2615	/* need to record an error - either for the block or the device */
2616	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2617		md_error(rdev->mddev, rdev);
2618	return 0;
2619}
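
/*
 * Return values of r10_sync_page_io(), as consumed by fix_read_error()
 * below: 1 means the I/O succeeded, 0 means it failed and the failure was
 * recorded (a bad block was set or the device was failed), and -1 means
 * the range was already known bad so the I/O was skipped.
 */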
2620
2621/*
2622 * This is a kernel thread which:
2623 *
2624 *	1.	Retries failed read operations on working mirrors.
2625 *	2.	Updates the raid superblock when problems are encountered.
2626 *	3.	Performs writes following reads for array synchronising.
2627 */
2628
2629static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2630{
2631	int sect = 0; /* Offset from r10_bio->sector */
2632	int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
2633	struct md_rdev *rdev;
2634	int d = r10_bio->devs[slot].devnum;
2635
2636	/* still own a reference to this rdev, so it cannot
2637	 * have been cleared recently.
2638	 */
2639	rdev = conf->mirrors[d].rdev;
2640
2641	if (test_bit(Faulty, &rdev->flags))
2642		/* drive has already been failed, just ignore any
2643		   more fix_read_error() attempts */
2644		return;
2645
2646	if (exceed_read_errors(mddev, rdev)) {
2647		r10_bio->devs[slot].bio = IO_BLOCKED;
2648		return;
2649	}
2650
2651	while(sectors) {
2652		int s = sectors;
2653		int sl = slot;
2654		int success = 0;
2655		int start;
2656
2657		if (s > (PAGE_SIZE>>9))
2658			s = PAGE_SIZE >> 9;
2659
2660		do {
2661			sector_t first_bad;
2662			int bad_sectors;
2663
2664			d = r10_bio->devs[sl].devnum;
2665			rdev = conf->mirrors[d].rdev;
2666			if (rdev &&
2667			    test_bit(In_sync, &rdev->flags) &&
2668			    !test_bit(Faulty, &rdev->flags) &&
2669			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2670					&first_bad, &bad_sectors) == 0) {
2671				atomic_inc(&rdev->nr_pending);
2672				success = sync_page_io(rdev,
2673						       r10_bio->devs[sl].addr +
2674						       sect,
2675						       s<<9,
2676						       conf->tmppage,
2677						       REQ_OP_READ, false);
2678				rdev_dec_pending(rdev, mddev);
2679				if (success)
2680					break;
2681			}
2682			sl++;
2683			if (sl == conf->copies)
2684				sl = 0;
2685		} while (sl != slot);
2686
2687		if (!success) {
2688			/* Cannot read from anywhere, just mark the block
2689			 * as bad on the first device to discourage future
2690			 * reads.
2691			 */
2692			int dn = r10_bio->devs[slot].devnum;
2693			rdev = conf->mirrors[dn].rdev;
2694
2695			if (!rdev_set_badblocks(
2696				    rdev,
2697				    r10_bio->devs[slot].addr
2698				    + sect,
2699				    s, 0)) {
2700				md_error(mddev, rdev);
2701				r10_bio->devs[slot].bio
2702					= IO_BLOCKED;
2703			}
2704			break;
2705		}
2706
2707		start = sl;
2708		/* write it back and re-read */
2709		while (sl != slot) {
2710			if (sl==0)
2711				sl = conf->copies;
2712			sl--;
2713			d = r10_bio->devs[sl].devnum;
2714			rdev = conf->mirrors[d].rdev;
2715			if (!rdev ||
2716			    test_bit(Faulty, &rdev->flags) ||
2717			    !test_bit(In_sync, &rdev->flags))
2718				continue;
2719
2720			atomic_inc(&rdev->nr_pending);
2721			if (r10_sync_page_io(rdev,
2722					     r10_bio->devs[sl].addr +
2723					     sect,
2724					     s, conf->tmppage, REQ_OP_WRITE)
2725			    == 0) {
2726				/* Well, this device is dead */
2727				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
2728					  mdname(mddev), s,
2729					  (unsigned long long)(
2730						  sect +
2731						  choose_data_offset(r10_bio,
2732								     rdev)),
2733					  rdev->bdev);
2734				pr_notice("md/raid10:%s: %pg: failing drive\n",
2735					  mdname(mddev),
2736					  rdev->bdev);
2737			}
2738			rdev_dec_pending(rdev, mddev);
2739		}
2740		sl = start;
2741		while (sl != slot) {
2742			if (sl==0)
2743				sl = conf->copies;
2744			sl--;
2745			d = r10_bio->devs[sl].devnum;
2746			rdev = conf->mirrors[d].rdev;
2747			if (!rdev ||
2748			    test_bit(Faulty, &rdev->flags) ||
2749			    !test_bit(In_sync, &rdev->flags))
2750				continue;
2751
2752			atomic_inc(&rdev->nr_pending);
2753			switch (r10_sync_page_io(rdev,
2754					     r10_bio->devs[sl].addr +
2755					     sect,
2756					     s, conf->tmppage, REQ_OP_READ)) {
2757			case 0:
2758				/* Well, this device is dead */
2759				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
2760				       mdname(mddev), s,
2761				       (unsigned long long)(
2762					       sect +
2763					       choose_data_offset(r10_bio, rdev)),
2764				       rdev->bdev);
2765				pr_notice("md/raid10:%s: %pg: failing drive\n",
2766				       mdname(mddev),
2767				       rdev->bdev);
2768				break;
2769			case 1:
2770				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
2771				       mdname(mddev), s,
2772				       (unsigned long long)(
2773					       sect +
2774					       choose_data_offset(r10_bio, rdev)),
2775				       rdev->bdev);
2776				atomic_add(s, &rdev->corrected_errors);
2777			}
2778
2779			rdev_dec_pending(rdev, mddev);
2780		}
2781
2782		sectors -= s;
2783		sect += s;
2784	}
2785}
2786
2787static int narrow_write_error(struct r10bio *r10_bio, int i)
2788{
2789	struct bio *bio = r10_bio->master_bio;
2790	struct mddev *mddev = r10_bio->mddev;
2791	struct r10conf *conf = mddev->private;
2792	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2793	/* bio has the data to be written to slot 'i' where
2794	 * we just recently had a write error.
2795	 * We repeatedly clone the bio and trim down to one block,
2796	 * then try the write.  Where the write fails we record
2797	 * a bad block.
2798	 * It is conceivable that the bio doesn't exactly align with
2799	 * blocks.  We must handle this.
2800	 *
2801	 * We currently own a reference to the rdev.
2802	 */
2803
2804	int block_sectors;
2805	sector_t sector;
2806	int sectors;
2807	int sect_to_write = r10_bio->sectors;
2808	int ok = 1;
2809
2810	if (rdev->badblocks.shift < 0)
2811		return 0;
2812
2813	block_sectors = roundup(1 << rdev->badblocks.shift,
2814				bdev_logical_block_size(rdev->bdev) >> 9);
2815	sector = r10_bio->sector;
2816	sectors = ((r10_bio->sector + block_sectors)
2817		   & ~(sector_t)(block_sectors - 1))
2818		- sector;
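	/*
	 * Example with illustrative values: badblocks.shift = 0 and a 4 KiB
	 * logical block size give block_sectors = roundup(1, 8) = 8; if
	 * r10_bio->sector = 10, the first pass writes 16 - 10 = 6 sectors to
	 * reach the next 8-sector boundary, and later passes use full
	 * 8-sector blocks.
	 */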
2819
2820	while (sect_to_write) {
2821		struct bio *wbio;
2822		sector_t wsector;
2823		if (sectors > sect_to_write)
2824			sectors = sect_to_write;
2825		/* Write at 'sector' for 'sectors' */
2826		wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2827				       &mddev->bio_set);
2828		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2829		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2830		wbio->bi_iter.bi_sector = wsector +
2831				   choose_data_offset(r10_bio, rdev);
2832		wbio->bi_opf = REQ_OP_WRITE;
2833
2834		if (submit_bio_wait(wbio) < 0)
2835			/* Failure! */
2836			ok = rdev_set_badblocks(rdev, wsector,
2837						sectors, 0)
2838				&& ok;
2839
2840		bio_put(wbio);
2841		sect_to_write -= sectors;
2842		sector += sectors;
2843		sectors = block_sectors;
2844	}
2845	return ok;
2846}
2847
2848static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2849{
2850	int slot = r10_bio->read_slot;
2851	struct bio *bio;
2852	struct r10conf *conf = mddev->private;
2853	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2854
2855	/* we got a read error. Maybe the drive is bad.  Maybe just
2856	 * the block and we can fix it.
2857	 * We freeze all other IO, and try reading the block from
2858	 * other devices.  When we find one, we re-write it
2859	 * and check whether that fixes the read error.
2860	 * This is all done synchronously while the array is
2861	 * frozen.
2862	 */
2863	bio = r10_bio->devs[slot].bio;
2864	bio_put(bio);
2865	r10_bio->devs[slot].bio = NULL;
2866
2867	if (mddev->ro)
2868		r10_bio->devs[slot].bio = IO_BLOCKED;
2869	else if (!test_bit(FailFast, &rdev->flags)) {
2870		freeze_array(conf, 1);
2871		fix_read_error(conf, mddev, r10_bio);
2872		unfreeze_array(conf);
2873	} else
2874		md_error(mddev, rdev);
2875
2876	rdev_dec_pending(rdev, mddev);
2877	r10_bio->state = 0;
2878	raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
2879	/*
2880	 * allow_barrier after re-submit to ensure no sync io
2881	 * can be issued while regular io is pending.
2882	 */
2883	allow_barrier(conf);
2884}
2885
2886static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2887{
2888	/* Some sort of write request has finished and it
2889	 * succeeded in writing where we thought there was a
2890	 * bad block.  So forget the bad block.
2891	 * Or possibly it failed, and we need to record
2892	 * a bad block.
2893	 */
2894	int m;
2895	struct md_rdev *rdev;
2896
2897	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2898	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2899		for (m = 0; m < conf->copies; m++) {
2900			int dev = r10_bio->devs[m].devnum;
2901			rdev = conf->mirrors[dev].rdev;
2902			if (r10_bio->devs[m].bio == NULL ||
2903				r10_bio->devs[m].bio->bi_end_io == NULL)
2904				continue;
2905			if (!r10_bio->devs[m].bio->bi_status) {
2906				rdev_clear_badblocks(
2907					rdev,
2908					r10_bio->devs[m].addr,
2909					r10_bio->sectors, 0);
2910			} else {
2911				if (!rdev_set_badblocks(
2912					    rdev,
2913					    r10_bio->devs[m].addr,
2914					    r10_bio->sectors, 0))
2915					md_error(conf->mddev, rdev);
2916			}
2917			rdev = conf->mirrors[dev].replacement;
2918			if (r10_bio->devs[m].repl_bio == NULL ||
2919				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2920				continue;
2921
2922			if (!r10_bio->devs[m].repl_bio->bi_status) {
2923				rdev_clear_badblocks(
2924					rdev,
2925					r10_bio->devs[m].addr,
2926					r10_bio->sectors, 0);
2927			} else {
2928				if (!rdev_set_badblocks(
2929					    rdev,
2930					    r10_bio->devs[m].addr,
2931					    r10_bio->sectors, 0))
2932					md_error(conf->mddev, rdev);
2933			}
2934		}
2935		put_buf(r10_bio);
2936	} else {
2937		bool fail = false;
2938		for (m = 0; m < conf->copies; m++) {
2939			int dev = r10_bio->devs[m].devnum;
2940			struct bio *bio = r10_bio->devs[m].bio;
2941			rdev = conf->mirrors[dev].rdev;
2942			if (bio == IO_MADE_GOOD) {
2943				rdev_clear_badblocks(
2944					rdev,
2945					r10_bio->devs[m].addr,
2946					r10_bio->sectors, 0);
2947				rdev_dec_pending(rdev, conf->mddev);
2948			} else if (bio != NULL && bio->bi_status) {
2949				fail = true;
2950				if (!narrow_write_error(r10_bio, m)) {
2951					md_error(conf->mddev, rdev);
2952					set_bit(R10BIO_Degraded,
2953						&r10_bio->state);
2954				}
2955				rdev_dec_pending(rdev, conf->mddev);
2956			}
2957			bio = r10_bio->devs[m].repl_bio;
2958			rdev = conf->mirrors[dev].replacement;
2959			if (rdev && bio == IO_MADE_GOOD) {
2960				rdev_clear_badblocks(
2961					rdev,
2962					r10_bio->devs[m].addr,
2963					r10_bio->sectors, 0);
2964				rdev_dec_pending(rdev, conf->mddev);
2965			}
2966		}
2967		if (fail) {
2968			spin_lock_irq(&conf->device_lock);
2969			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2970			conf->nr_queued++;
2971			spin_unlock_irq(&conf->device_lock);
2972			/*
2973			 * In case freeze_array() is waiting for condition
2974			 * nr_pending == nr_queued + extra to be true.
2975			 */
2976			wake_up(&conf->wait_barrier);
2977			md_wakeup_thread(conf->mddev->thread);
2978		} else {
2979			if (test_bit(R10BIO_WriteError,
2980				     &r10_bio->state))
2981				close_write(r10_bio);
2982			raid_end_bio_io(r10_bio);
2983		}
2984	}
2985}
2986
2987static void raid10d(struct md_thread *thread)
2988{
2989	struct mddev *mddev = thread->mddev;
2990	struct r10bio *r10_bio;
2991	unsigned long flags;
2992	struct r10conf *conf = mddev->private;
2993	struct list_head *head = &conf->retry_list;
2994	struct blk_plug plug;
2995
2996	md_check_recovery(mddev);
2997
2998	if (!list_empty_careful(&conf->bio_end_io_list) &&
2999	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3000		LIST_HEAD(tmp);
3001		spin_lock_irqsave(&conf->device_lock, flags);
3002		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3003			while (!list_empty(&conf->bio_end_io_list)) {
3004				list_move(conf->bio_end_io_list.prev, &tmp);
3005				conf->nr_queued--;
3006			}
3007		}
3008		spin_unlock_irqrestore(&conf->device_lock, flags);
3009		while (!list_empty(&tmp)) {
3010			r10_bio = list_first_entry(&tmp, struct r10bio,
3011						   retry_list);
3012			list_del(&r10_bio->retry_list);
3013			if (mddev->degraded)
3014				set_bit(R10BIO_Degraded, &r10_bio->state);
3015
3016			if (test_bit(R10BIO_WriteError,
3017				     &r10_bio->state))
3018				close_write(r10_bio);
3019			raid_end_bio_io(r10_bio);
3020		}
3021	}
3022
3023	blk_start_plug(&plug);
3024	for (;;) {
3025
3026		flush_pending_writes(conf);
3027
3028		spin_lock_irqsave(&conf->device_lock, flags);
3029		if (list_empty(head)) {
3030			spin_unlock_irqrestore(&conf->device_lock, flags);
3031			break;
3032		}
3033		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3034		list_del(head->prev);
3035		conf->nr_queued--;
3036		spin_unlock_irqrestore(&conf->device_lock, flags);
3037
3038		mddev = r10_bio->mddev;
3039		conf = mddev->private;
3040		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3041		    test_bit(R10BIO_WriteError, &r10_bio->state))
3042			handle_write_completed(conf, r10_bio);
3043		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3044			reshape_request_write(mddev, r10_bio);
3045		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3046			sync_request_write(mddev, r10_bio);
3047		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3048			recovery_request_write(mddev, r10_bio);
3049		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3050			handle_read_error(mddev, r10_bio);
3051		else
3052			WARN_ON_ONCE(1);
3053
3054		cond_resched();
3055		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3056			md_check_recovery(mddev);
3057	}
3058	blk_finish_plug(&plug);
3059}
3060
3061static int init_resync(struct r10conf *conf)
3062{
3063	int ret, buffs, i;
3064
3065	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3066	BUG_ON(mempool_initialized(&conf->r10buf_pool));
3067	conf->have_replacement = 0;
3068	for (i = 0; i < conf->geo.raid_disks; i++)
3069		if (conf->mirrors[i].replacement)
3070			conf->have_replacement = 1;
3071	ret = mempool_init(&conf->r10buf_pool, buffs,
3072			   r10buf_pool_alloc, r10buf_pool_free, conf);
3073	if (ret)
3074		return ret;
3075	conf->next_resync = 0;
3076	return 0;
3077}
3078
3079static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3080{
3081	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3082	struct rsync_pages *rp;
3083	struct bio *bio;
3084	int nalloc;
3085	int i;
3086
3087	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3088	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3089		nalloc = conf->copies; /* resync */
3090	else
3091		nalloc = 2; /* recovery */
3092
3093	for (i = 0; i < nalloc; i++) {
3094		bio = r10bio->devs[i].bio;
3095		rp = bio->bi_private;
3096		bio_reset(bio, NULL, 0);
3097		bio->bi_private = rp;
3098		bio = r10bio->devs[i].repl_bio;
3099		if (bio) {
3100			rp = bio->bi_private;
3101			bio_reset(bio, NULL, 0);
3102			bio->bi_private = rp;
3103		}
3104	}
3105	return r10bio;
3106}
3107
3108/*
3109 * Set cluster_sync_high since we need other nodes to add the
3110 * range [cluster_sync_low, cluster_sync_high] to suspend list.
3111 */
3112static void raid10_set_cluster_sync_high(struct r10conf *conf)
3113{
3114	sector_t window_size;
3115	int extra_chunk, chunks;
3116
3117	/*
3118	 * First, define a "stripe" as a unit that spans all member
3119	 * devices once, so the number of chunks is
3120	 * raid_disks / near_copies. Otherwise, if near_copies is
3121	 * close to raid_disks, the resync window could increase
3122	 * linearly with the number of raid_disks, which means
3123	 * we would suspend a really large IO window when it is not
3124	 * necessary. If raid_disks is not divisible by near_copies,
3125	 * an extra chunk is needed to ensure the whole "stripe" is
3126	 * covered.
3127	 */
3128
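	/*
	 * For example (hypothetical geometry): 5 raid disks with
	 * near_copies = 2 give chunks = 2 plus one extra chunk for the
	 * remainder, so window_size = 3 * chunk_sectors before the 32M
	 * minimum below is applied.
	 */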
3129	chunks = conf->geo.raid_disks / conf->geo.near_copies;
3130	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3131		extra_chunk = 0;
3132	else
3133		extra_chunk = 1;
3134	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
3135
3136	/*
3137	 * At least use a 32M window to align with raid1's resync window
3138	 */
3139	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3140			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3141
3142	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3143}
3144
3145/*
3146 * perform a "sync" on one "block"
3147 *
3148 * We need to make sure that no normal I/O request - particularly write
3149 * requests - conflict with active sync requests.
3150 *
3151 * This is achieved by tracking pending requests and a 'barrier' concept
3152 * that can be installed to exclude normal IO requests.
3153 *
3154 * Resync and recovery are handled very differently.
3155 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3156 *
3157 * For resync, we iterate over virtual addresses, read all copies,
3158 * and update if there are differences.  If only one copy is live,
3159 * skip it.
3160 * For recovery, we iterate over physical addresses, read a good
3161 * value for each non-in_sync drive, and over-write.
3162 *
3163 * So, for recovery we may have several outstanding complex requests for a
3164 * given address, one for each out-of-sync device.  We model this by allocating
3165 * a number of r10_bio structures, one for each out-of-sync device.
3166 * As we setup these structures, we collect all bio's together into a list
3167 * which we then process collectively to add pages, and then process again
3168 * to pass to submit_bio_noacct.
3169 *
3170 * The r10_bio structures are linked using a borrowed master_bio pointer.
3171 * This link is counted in ->remaining.  When the r10_bio that points to NULL
3172 * has its remaining count decremented to 0, the whole complex operation
3173 * is complete.
3174 *
3175 */
3176
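/*
 * As an illustration of the master_bio chaining described above: if two
 * out-of-sync devices are recovered at one virtual address, two r10_bio
 * structures are allocated.  The second one's master_bio points back at
 * the first (whose master_bio stays NULL), and creating that link bumps
 * the first r10_bio's ->remaining, so the operation as a whole completes
 * only once the r10_bio with the NULL master_bio drops to zero.
 */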
3177static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3178			     int *skipped)
3179{
3180	struct r10conf *conf = mddev->private;
3181	struct r10bio *r10_bio;
3182	struct bio *biolist = NULL, *bio;
3183	sector_t max_sector, nr_sectors;
3184	int i;
3185	int max_sync;
3186	sector_t sync_blocks;
3187	sector_t sectors_skipped = 0;
3188	int chunks_skipped = 0;
3189	sector_t chunk_mask = conf->geo.chunk_mask;
3190	int page_idx = 0;
3191	int error_disk = -1;
3192
3193	/*
3194	 * Allow skipping a full rebuild for incremental assembly
3195	 * of a clean array, like RAID1 does.
3196	 */
3197	if (mddev->bitmap == NULL &&
3198	    mddev->recovery_cp == MaxSector &&
3199	    mddev->reshape_position == MaxSector &&
3200	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3201	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3202	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3203	    conf->fullsync == 0) {
3204		*skipped = 1;
3205		return mddev->dev_sectors - sector_nr;
3206	}
3207
3208	if (!mempool_initialized(&conf->r10buf_pool))
3209		if (init_resync(conf))
3210			return 0;
3211
3212 skipped:
3213	max_sector = mddev->dev_sectors;
3214	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
3215	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3216		max_sector = mddev->resync_max_sectors;
3217	if (sector_nr >= max_sector) {
3218		conf->cluster_sync_low = 0;
3219		conf->cluster_sync_high = 0;
3220
3221		/* If we aborted, we need to abort the
3222		 * sync on the 'current' bitmap chunks (there can
3223		 * be several when recovering multiple devices),
3224		 * as we may have started syncing them but not finished.
3225		 * We can find the current address in
3226		 * mddev->curr_resync, but for recovery,
3227		 * we need to convert that to several
3228		 * virtual addresses.
3229		 */
3230		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3231			end_reshape(conf);
3232			close_sync(conf);
3233			return 0;
3234		}
3235
3236		if (mddev->curr_resync < max_sector) { /* aborted */
3237			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3238				md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3239						   &sync_blocks, 1);
3240			else for (i = 0; i < conf->geo.raid_disks; i++) {
3241				sector_t sect =
3242					raid10_find_virt(conf, mddev->curr_resync, i);
3243				md_bitmap_end_sync(mddev->bitmap, sect,
3244						   &sync_blocks, 1);
3245			}
3246		} else {
3247			/* completed sync */
3248			if ((!mddev->bitmap || conf->fullsync)
3249			    && conf->have_replacement
3250			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3251				/* Completed a full sync so the replacements
3252				 * are now fully recovered.
3253				 */
3254				for (i = 0; i < conf->geo.raid_disks; i++) {
3255					struct md_rdev *rdev =
3256						conf->mirrors[i].replacement;
3257
3258					if (rdev)
3259						rdev->recovery_offset = MaxSector;
3260				}
3261			}
3262			conf->fullsync = 0;
3263		}
3264		md_bitmap_close_sync(mddev->bitmap);
3265		close_sync(conf);
3266		*skipped = 1;
3267		return sectors_skipped;
3268	}
3269
3270	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3271		return reshape_request(mddev, sector_nr, skipped);
3272
3273	if (chunks_skipped >= conf->geo.raid_disks) {
3274		pr_err("md/raid10:%s: %s fails\n", mdname(mddev),
3275			test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?  "resync" : "recovery");
3276		if (error_disk >= 0 &&
3277		    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3278			/*
3279			 * Recovery failed: set mirrors.recovery_disabled so the
3280			 * device won't be re-added there.
3281			 */
3282			conf->mirrors[error_disk].recovery_disabled =
3283						mddev->recovery_disabled;
3284			return 0;
3285		}
3286		/*
3287		 * if there has been nothing to do on any drive,
3288		 * then there is nothing to do at all.
3289		 */
3290		*skipped = 1;
3291		return (max_sector - sector_nr) + sectors_skipped;
3292	}
3293
3294	if (max_sector > mddev->resync_max)
3295		max_sector = mddev->resync_max; /* Don't do IO beyond here */
3296
3297	/* make sure whole request will fit in a chunk - if chunks
3298	 * are meaningful
3299	 */
3300	if (conf->geo.near_copies < conf->geo.raid_disks &&
3301	    max_sector > (sector_nr | chunk_mask))
3302		max_sector = (sector_nr | chunk_mask) + 1;
3303
3304	/*
3305	 * If there is non-resync activity waiting for a turn, then let it
3306	 * through before starting on this new sync request.
3307	 */
3308	if (conf->nr_waiting)
3309		schedule_timeout_uninterruptible(1);
3310
3311	/* Again, very different code for resync and recovery.
3312	 * Both must result in an r10bio with a list of bios that
3313	 * have bi_end_io, bi_sector, bi_bdev set,
3314	 * and bi_private set to the r10bio.
3315	 * For recovery, we may actually create several r10bios
3316	 * with 2 bios in each, that correspond to the bios in the main one.
3317	 * In this case, the subordinate r10bios link back through a
3318	 * borrowed master_bio pointer, and the counter in the master
3319	 * includes a ref from each subordinate.
3320	 */
3321	/* First, we decide what to do and set ->bi_end_io
3322	 * To end_sync_read if we want to read, and
3323	 * end_sync_write if we will want to write.
3324	 */
3325
3326	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3327	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3328		/* recovery... the complicated one */
3329		int j;
3330		r10_bio = NULL;
3331
3332		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3333			int still_degraded;
3334			struct r10bio *rb2;
3335			sector_t sect;
3336			int must_sync;
3337			int any_working;
3338			struct raid10_info *mirror = &conf->mirrors[i];
3339			struct md_rdev *mrdev, *mreplace;
3340
3341			mrdev = mirror->rdev;
3342			mreplace = mirror->replacement;
3343
3344			if (mrdev && (test_bit(Faulty, &mrdev->flags) ||
3345			    test_bit(In_sync, &mrdev->flags)))
3346				mrdev = NULL;
3347			if (mreplace && test_bit(Faulty, &mreplace->flags))
3348				mreplace = NULL;
3349
3350			if (!mrdev && !mreplace)
3351				continue;
3352
3353			still_degraded = 0;
3354			/* want to reconstruct this device */
3355			rb2 = r10_bio;
3356			sect = raid10_find_virt(conf, sector_nr, i);
3357			if (sect >= mddev->resync_max_sectors)
3358				/* last stripe is not complete - don't
3359				 * try to recover this sector.
3360				 */
3361				continue;
3362			/* Unless we are doing a full sync, or a replacement
3363			 * we only need to recover the block if it is set in
3364			 * the bitmap
3365			 */
3366			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3367							 &sync_blocks, 1);
3368			if (sync_blocks < max_sync)
3369				max_sync = sync_blocks;
3370			if (!must_sync &&
3371			    mreplace == NULL &&
3372			    !conf->fullsync) {
3373				/* yep, skip the sync_blocks here, but don't assume
3374				 * that there will never be anything to do here
3375				 */
3376				chunks_skipped = -1;
3377				continue;
3378			}
3379			if (mrdev)
3380				atomic_inc(&mrdev->nr_pending);
3381			if (mreplace)
3382				atomic_inc(&mreplace->nr_pending);
3383
3384			r10_bio = raid10_alloc_init_r10buf(conf);
3385			r10_bio->state = 0;
3386			raise_barrier(conf, rb2 != NULL);
3387			atomic_set(&r10_bio->remaining, 0);
3388
3389			r10_bio->master_bio = (struct bio*)rb2;
3390			if (rb2)
3391				atomic_inc(&rb2->remaining);
3392			r10_bio->mddev = mddev;
3393			set_bit(R10BIO_IsRecover, &r10_bio->state);
3394			r10_bio->sector = sect;
3395
3396			raid10_find_phys(conf, r10_bio);
3397
3398			/* Need to check if the array will still be
3399			 * degraded
3400			 */
3401			for (j = 0; j < conf->geo.raid_disks; j++) {
3402				struct md_rdev *rdev = conf->mirrors[j].rdev;
3403
3404				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3405					still_degraded = 1;
3406					break;
3407				}
3408			}
3409
3410			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3411							 &sync_blocks, still_degraded);
3412
3413			any_working = 0;
3414			for (j=0; j<conf->copies;j++) {
3415				int k;
3416				int d = r10_bio->devs[j].devnum;
3417				sector_t from_addr, to_addr;
3418				struct md_rdev *rdev = conf->mirrors[d].rdev;
3419				sector_t sector, first_bad;
3420				int bad_sectors;
3421				if (!rdev ||
3422				    !test_bit(In_sync, &rdev->flags))
3423					continue;
3424				/* This is where we read from */
3425				any_working = 1;
3426				sector = r10_bio->devs[j].addr;
3427
3428				if (is_badblock(rdev, sector, max_sync,
3429						&first_bad, &bad_sectors)) {
3430					if (first_bad > sector)
3431						max_sync = first_bad - sector;
3432					else {
3433						bad_sectors -= (sector
3434								- first_bad);
3435						if (max_sync > bad_sectors)
3436							max_sync = bad_sectors;
3437						continue;
3438					}
3439				}
3440				bio = r10_bio->devs[0].bio;
3441				bio->bi_next = biolist;
3442				biolist = bio;
3443				bio->bi_end_io = end_sync_read;
3444				bio->bi_opf = REQ_OP_READ;
3445				if (test_bit(FailFast, &rdev->flags))
3446					bio->bi_opf |= MD_FAILFAST;
3447				from_addr = r10_bio->devs[j].addr;
3448				bio->bi_iter.bi_sector = from_addr +
3449					rdev->data_offset;
3450				bio_set_dev(bio, rdev->bdev);
3451				atomic_inc(&rdev->nr_pending);
3452				/* and we write to 'i' (if not in_sync) */
3453
3454				for (k=0; k<conf->copies; k++)
3455					if (r10_bio->devs[k].devnum == i)
3456						break;
3457				BUG_ON(k == conf->copies);
3458				to_addr = r10_bio->devs[k].addr;
3459				r10_bio->devs[0].devnum = d;
3460				r10_bio->devs[0].addr = from_addr;
3461				r10_bio->devs[1].devnum = i;
3462				r10_bio->devs[1].addr = to_addr;
3463
3464				if (mrdev) {
3465					bio = r10_bio->devs[1].bio;
3466					bio->bi_next = biolist;
3467					biolist = bio;
3468					bio->bi_end_io = end_sync_write;
3469					bio->bi_opf = REQ_OP_WRITE;
3470					bio->bi_iter.bi_sector = to_addr
3471						+ mrdev->data_offset;
3472					bio_set_dev(bio, mrdev->bdev);
3473					atomic_inc(&r10_bio->remaining);
3474				} else
3475					r10_bio->devs[1].bio->bi_end_io = NULL;
3476
3477				/* and maybe write to replacement */
3478				bio = r10_bio->devs[1].repl_bio;
3479				if (bio)
3480					bio->bi_end_io = NULL;
3481				/* Note: if replace is not NULL, then bio
3482				 * cannot be NULL as r10buf_pool_alloc will
3483				 * have allocated it.
3484				 */
3485				if (!mreplace)
3486					break;
3487				bio->bi_next = biolist;
3488				biolist = bio;
3489				bio->bi_end_io = end_sync_write;
3490				bio->bi_opf = REQ_OP_WRITE;
3491				bio->bi_iter.bi_sector = to_addr +
3492					mreplace->data_offset;
3493				bio_set_dev(bio, mreplace->bdev);
3494				atomic_inc(&r10_bio->remaining);
3495				break;
3496			}
3497			if (j == conf->copies) {
3498				/* Cannot recover, so abort the recovery or
3499				 * record a bad block */
3500				if (any_working) {
3501					/* problem is that there are bad blocks
3502					 * on other device(s)
3503					 */
3504					int k;
3505					for (k = 0; k < conf->copies; k++)
3506						if (r10_bio->devs[k].devnum == i)
3507							break;
3508					if (mrdev && !test_bit(In_sync,
3509						      &mrdev->flags)
3510					    && !rdev_set_badblocks(
3511						    mrdev,
3512						    r10_bio->devs[k].addr,
3513						    max_sync, 0))
3514						any_working = 0;
3515					if (mreplace &&
3516					    !rdev_set_badblocks(
3517						    mreplace,
3518						    r10_bio->devs[k].addr,
3519						    max_sync, 0))
3520						any_working = 0;
3521				}
3522				if (!any_working)  {
3523					if (!test_and_set_bit(MD_RECOVERY_INTR,
3524							      &mddev->recovery))
3525						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3526						       mdname(mddev));
3527					mirror->recovery_disabled
3528						= mddev->recovery_disabled;
3529				} else {
3530					error_disk = i;
3531				}
3532				put_buf(r10_bio);
3533				if (rb2)
3534					atomic_dec(&rb2->remaining);
3535				r10_bio = rb2;
3536				if (mrdev)
3537					rdev_dec_pending(mrdev, mddev);
3538				if (mreplace)
3539					rdev_dec_pending(mreplace, mddev);
3540				break;
3541			}
3542			if (mrdev)
3543				rdev_dec_pending(mrdev, mddev);
3544			if (mreplace)
3545				rdev_dec_pending(mreplace, mddev);
3546			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3547				/* Only want this if there is elsewhere to
3548				 * read from. 'j' is currently the first
3549				 * readable copy.
3550				 */
3551				int targets = 1;
3552				for (; j < conf->copies; j++) {
3553					int d = r10_bio->devs[j].devnum;
3554					if (conf->mirrors[d].rdev &&
3555					    test_bit(In_sync,
3556						      &conf->mirrors[d].rdev->flags))
3557						targets++;
3558				}
3559				if (targets == 1)
3560					r10_bio->devs[0].bio->bi_opf
3561						&= ~MD_FAILFAST;
3562			}
3563		}
3564		if (biolist == NULL) {
3565			while (r10_bio) {
3566				struct r10bio *rb2 = r10_bio;
3567				r10_bio = (struct r10bio*) rb2->master_bio;
3568				rb2->master_bio = NULL;
3569				put_buf(rb2);
3570			}
3571			goto giveup;
3572		}
3573	} else {
3574		/* resync. Schedule a read for every block at this virt offset */
3575		int count = 0;
3576
3577		/*
3578		 * curr_resync_completed may not be updated in time, and we
3579		 * will set cluster_sync_low based on it.  So check against
3580		 * "sector_nr + 2 * RESYNC_SECTORS" as a safety margin, which
3581		 * ensures curr_resync_completed is updated in
3582		 * md_bitmap_cond_end_sync.
3583		 */
3584		md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3585					mddev_is_clustered(mddev) &&
3586					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3587
3588		if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3589					  &sync_blocks, mddev->degraded) &&
3590		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3591						 &mddev->recovery)) {
3592			/* We can skip this block */
3593			*skipped = 1;
3594			return sync_blocks + sectors_skipped;
3595		}
3596		if (sync_blocks < max_sync)
3597			max_sync = sync_blocks;
3598		r10_bio = raid10_alloc_init_r10buf(conf);
3599		r10_bio->state = 0;
3600
3601		r10_bio->mddev = mddev;
3602		atomic_set(&r10_bio->remaining, 0);
3603		raise_barrier(conf, 0);
3604		conf->next_resync = sector_nr;
3605
3606		r10_bio->master_bio = NULL;
3607		r10_bio->sector = sector_nr;
3608		set_bit(R10BIO_IsSync, &r10_bio->state);
3609		raid10_find_phys(conf, r10_bio);
3610		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3611
3612		for (i = 0; i < conf->copies; i++) {
3613			int d = r10_bio->devs[i].devnum;
3614			sector_t first_bad, sector;
3615			int bad_sectors;
3616			struct md_rdev *rdev;
3617
3618			if (r10_bio->devs[i].repl_bio)
3619				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3620
3621			bio = r10_bio->devs[i].bio;
3622			bio->bi_status = BLK_STS_IOERR;
3623			rdev = conf->mirrors[d].rdev;
3624			if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3625				continue;
3626
3627			sector = r10_bio->devs[i].addr;
3628			if (is_badblock(rdev, sector, max_sync,
3629					&first_bad, &bad_sectors)) {
3630				if (first_bad > sector)
3631					max_sync = first_bad - sector;
3632				else {
3633					bad_sectors -= (sector - first_bad);
3634					if (max_sync > bad_sectors)
3635						max_sync = bad_sectors;
3636					continue;
3637				}
3638			}
3639			atomic_inc(&rdev->nr_pending);
3640			atomic_inc(&r10_bio->remaining);
3641			bio->bi_next = biolist;
3642			biolist = bio;
3643			bio->bi_end_io = end_sync_read;
3644			bio->bi_opf = REQ_OP_READ;
3645			if (test_bit(FailFast, &rdev->flags))
3646				bio->bi_opf |= MD_FAILFAST;
3647			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3648			bio_set_dev(bio, rdev->bdev);
3649			count++;
3650
3651			rdev = conf->mirrors[d].replacement;
3652			if (rdev == NULL || test_bit(Faulty, &rdev->flags))
3653				continue;
3654
3655			atomic_inc(&rdev->nr_pending);
3656
3657			/* Need to set up for writing to the replacement */
3658			bio = r10_bio->devs[i].repl_bio;
3659			bio->bi_status = BLK_STS_IOERR;
3660
3661			sector = r10_bio->devs[i].addr;
3662			bio->bi_next = biolist;
3663			biolist = bio;
3664			bio->bi_end_io = end_sync_write;
3665			bio->bi_opf = REQ_OP_WRITE;
3666			if (test_bit(FailFast, &rdev->flags))
3667				bio->bi_opf |= MD_FAILFAST;
3668			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3669			bio_set_dev(bio, rdev->bdev);
3670			count++;
 
3671		}
3672
3673		if (count < 2) {
3674			for (i=0; i<conf->copies; i++) {
3675				int d = r10_bio->devs[i].devnum;
3676				if (r10_bio->devs[i].bio->bi_end_io)
3677					rdev_dec_pending(conf->mirrors[d].rdev,
3678							 mddev);
3679				if (r10_bio->devs[i].repl_bio &&
3680				    r10_bio->devs[i].repl_bio->bi_end_io)
3681					rdev_dec_pending(
3682						conf->mirrors[d].replacement,
3683						mddev);
3684			}
3685			put_buf(r10_bio);
3686			biolist = NULL;
3687			goto giveup;
3688		}
3689	}
3690
3691	nr_sectors = 0;
3692	if (sector_nr + max_sync < max_sector)
3693		max_sector = sector_nr + max_sync;
3694	do {
3695		struct page *page;
3696		int len = PAGE_SIZE;
3697		if (sector_nr + (len>>9) > max_sector)
3698			len = (max_sector - sector_nr) << 9;
3699		if (len == 0)
3700			break;
3701		for (bio= biolist ; bio ; bio=bio->bi_next) {
3702			struct resync_pages *rp = get_resync_pages(bio);
3703			page = resync_fetch_page(rp, page_idx);
3704			if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
3705				bio->bi_status = BLK_STS_RESOURCE;
3706				bio_endio(bio);
3707				goto giveup;
3708			}
3709		}
3710		nr_sectors += len>>9;
3711		sector_nr += len>>9;
3712	} while (++page_idx < RESYNC_PAGES);
3713	r10_bio->sectors = nr_sectors;
3714
3715	if (mddev_is_clustered(mddev) &&
3716	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3717		/* It is resync not recovery */
3718		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3719			conf->cluster_sync_low = mddev->curr_resync_completed;
3720			raid10_set_cluster_sync_high(conf);
3721			/* Send resync message */
3722			md_cluster_ops->resync_info_update(mddev,
3723						conf->cluster_sync_low,
3724						conf->cluster_sync_high);
3725		}
3726	} else if (mddev_is_clustered(mddev)) {
3727		/* This is recovery not resync */
3728		sector_t sect_va1, sect_va2;
3729		bool broadcast_msg = false;
3730
3731		for (i = 0; i < conf->geo.raid_disks; i++) {
3732			/*
3733			 * sector_nr is a device address for recovery, so we
3734			 * need to translate it to an array address before
3735			 * comparing it with cluster_sync_high.
3736			 */
3737			sect_va1 = raid10_find_virt(conf, sector_nr, i);
3738
3739			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3740				broadcast_msg = true;
3741				/*
3742				 * curr_resync_completed is a device address
3743				 * like sector_nr, so translate it as well.
3744				 */
3745				sect_va2 = raid10_find_virt(conf,
3746					mddev->curr_resync_completed, i);
3747
3748				if (conf->cluster_sync_low == 0 ||
3749				    conf->cluster_sync_low > sect_va2)
3750					conf->cluster_sync_low = sect_va2;
3751			}
3752		}
3753		if (broadcast_msg) {
3754			raid10_set_cluster_sync_high(conf);
3755			md_cluster_ops->resync_info_update(mddev,
3756						conf->cluster_sync_low,
3757						conf->cluster_sync_high);
3758		}
3759	}
3760
3761	while (biolist) {
3762		bio = biolist;
3763		biolist = biolist->bi_next;
3764
3765		bio->bi_next = NULL;
3766		r10_bio = get_resync_r10bio(bio);
3767		r10_bio->sectors = nr_sectors;
3768
3769		if (bio->bi_end_io == end_sync_read) {
3770			md_sync_acct_bio(bio, nr_sectors);
3771			bio->bi_status = 0;
3772			submit_bio_noacct(bio);
3773		}
3774	}
3775
3776	if (sectors_skipped)
3777		/* pretend they weren't skipped, it makes
3778		 * no important difference in this case
3779		 */
3780		md_done_sync(mddev, sectors_skipped, 1);
3781
3782	return sectors_skipped + nr_sectors;
3783 giveup:
3784	/* There is nowhere to write: either all non-sync
3785	 * drives are failed or in resync, or all drives
3786	 * have a bad block, so try the next chunk...
3787	 */
3788	if (sector_nr + max_sync < max_sector)
3789		max_sector = sector_nr + max_sync;
3790
3791	sectors_skipped += (max_sector - sector_nr);
3792	chunks_skipped++;
3793	sector_nr = max_sector;
3794	goto skipped;
3795}
3796
3797static sector_t
3798raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3799{
3800	sector_t size;
3801	struct r10conf *conf = mddev->private;
3802
3803	if (!raid_disks)
3804		raid_disks = min(conf->geo.raid_disks,
3805				 conf->prev.raid_disks);
3806	if (!sectors)
3807		sectors = conf->dev_sectors;
3808
3809	size = sectors >> conf->geo.chunk_shift;
3810	sector_div(size, conf->geo.far_copies);
3811	size = size * raid_disks;
3812	sector_div(size, conf->geo.near_copies);
3813
3814	return size << conf->geo.chunk_shift;
3815}
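/*
 * Illustrative example for raid10_size() (assumed geometry, not taken from
 * any particular array): raid_disks = 4, near_copies = 2, far_copies = 1,
 * chunk_shift = 10 (512 KiB chunks), dev_sectors = 1048576 per device:
 *
 *   1048576 >> 10        = 1024 chunks per device
 *   / far_copies         = 1024
 *   * raid_disks         = 4096
 *   / near_copies        = 2048 data chunks
 *   2048 << 10           = 2097152 sectors (1 GiB usable)
 *
 * i.e. four 512 MiB devices holding two copies of everything expose 1 GiB.
 */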
3816
3817static void calc_sectors(struct r10conf *conf, sector_t size)
3818{
3819	/* Calculate the number of sectors-per-device that will
3820	 * actually be used, and set conf->dev_sectors and
3821	 * conf->stride
3822	 */
3823
3824	size = size >> conf->geo.chunk_shift;
3825	sector_div(size, conf->geo.far_copies);
3826	size = size * conf->geo.raid_disks;
3827	sector_div(size, conf->geo.near_copies);
3828	/* 'size' is now the number of chunks in the array */
3829	/* calculate "used chunks per device" */
3830	size = size * conf->copies;
3831
3832	/* We need to round up when dividing by raid_disks to
3833	 * get the stride size.
3834	 */
3835	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3836
3837	conf->dev_sectors = size << conf->geo.chunk_shift;
3838
3839	if (conf->geo.far_offset)
3840		conf->geo.stride = 1 << conf->geo.chunk_shift;
3841	else {
3842		sector_div(size, conf->geo.far_copies);
3843		conf->geo.stride = size << conf->geo.chunk_shift;
3844	}
3845}
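/*
 * Worked example for calc_sectors() with the same assumed geometry as the
 * note after raid10_size() (4 devices, near_copies = 2, far_copies = 1,
 * copies = 2, chunk_shift = 10, size = 1048576 sectors per device):
 *
 *   1048576 >> 10                  = 1024 chunks per device
 *   / far_copies * raid_disks      = 4096
 *   / near_copies                  = 2048 data chunks in the array
 *   * copies                       = 4096 chunk copies
 *   DIV_ROUND_UP(4096, raid_disks) = 1024 used chunks per device
 *   dev_sectors = 1024 << 10       = 1048576 sectors
 *   stride (far_offset == 0)       = (1024 / far_copies) << 10 = 1048576
 */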
3846
3847enum geo_type {geo_new, geo_old, geo_start};
3848static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3849{
3850	int nc, fc, fo;
3851	int layout, chunk, disks;
3852	switch (new) {
3853	case geo_old:
3854		layout = mddev->layout;
3855		chunk = mddev->chunk_sectors;
3856		disks = mddev->raid_disks - mddev->delta_disks;
3857		break;
3858	case geo_new:
3859		layout = mddev->new_layout;
3860		chunk = mddev->new_chunk_sectors;
3861		disks = mddev->raid_disks;
3862		break;
3863	default: /* avoid 'may be unused' warnings */
3864	case geo_start: /* new when starting reshape - raid_disks not
3865			 * updated yet. */
3866		layout = mddev->new_layout;
3867		chunk = mddev->new_chunk_sectors;
3868		disks = mddev->raid_disks + mddev->delta_disks;
3869		break;
3870	}
3871	if (layout >> 19)
3872		return -1;
3873	if (chunk < (PAGE_SIZE >> 9) ||
3874	    !is_power_of_2(chunk))
3875		return -2;
3876	nc = layout & 255;
3877	fc = (layout >> 8) & 255;
3878	fo = layout & (1<<16);
3879	geo->raid_disks = disks;
3880	geo->near_copies = nc;
3881	geo->far_copies = fc;
3882	geo->far_offset = fo;
3883	switch (layout >> 17) {
3884	case 0:	/* original layout.  simple but not always optimal */
3885		geo->far_set_size = disks;
3886		break;
3887	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3888		 * actually using this, but leave code here just in case.*/
3889		geo->far_set_size = disks/fc;
3890		WARN(geo->far_set_size < fc,
3891		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3892		break;
3893	case 2: /* "improved" layout fixed to match documentation */
3894		geo->far_set_size = fc * nc;
3895		break;
3896	default: /* Not a valid layout */
3897		return -1;
3898	}
3899	geo->chunk_mask = chunk - 1;
3900	geo->chunk_shift = ffz(~chunk);
3901	return nc*fc;
3902}
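/*
 * Example of the layout decoding above, using the common "near 2" layout
 * value 0x102 (an assumed example, not tied to any particular array):
 *
 *   nc = 0x102 & 255        = 2   (two near copies)
 *   fc = (0x102 >> 8) & 255 = 1   (one far section)
 *   fo = 0x102 & (1 << 16)  = 0   (no far_offset)
 *   0x102 >> 17             = 0   (original far-set handling)
 *
 * so setup_geo() returns nc * fc = 2 copies of every chunk.
 */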
3903
3904static void raid10_free_conf(struct r10conf *conf)
3905{
3906	if (!conf)
3907		return;
3908
3909	mempool_exit(&conf->r10bio_pool);
3910	kfree(conf->mirrors);
3911	kfree(conf->mirrors_old);
3912	kfree(conf->mirrors_new);
3913	safe_put_page(conf->tmppage);
3914	bioset_exit(&conf->bio_split);
3915	kfree(conf);
3916}
3917
3918static struct r10conf *setup_conf(struct mddev *mddev)
3919{
3920	struct r10conf *conf = NULL;
3921	int err = -EINVAL;
3922	struct geom geo;
3923	int copies;
3924
3925	copies = setup_geo(&geo, mddev, geo_new);
3926
3927	if (copies == -2) {
3928		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3929			mdname(mddev), PAGE_SIZE);
3930		goto out;
3931	}
3932
3933	if (copies < 2 || copies > mddev->raid_disks) {
3934		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3935			mdname(mddev), mddev->new_layout);
3936		goto out;
3937	}
3938
3939	err = -ENOMEM;
3940	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3941	if (!conf)
3942		goto out;
3943
3944	/* FIXME calc properly */
3945	conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3946				sizeof(struct raid10_info),
3947				GFP_KERNEL);
3948	if (!conf->mirrors)
3949		goto out;
3950
3951	conf->tmppage = alloc_page(GFP_KERNEL);
3952	if (!conf->tmppage)
3953		goto out;
3954
3955	conf->geo = geo;
3956	conf->copies = copies;
3957	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
3958			   rbio_pool_free, conf);
3959	if (err)
3960		goto out;
3961
3962	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3963	if (err)
3964		goto out;
3965
3966	calc_sectors(conf, mddev->dev_sectors);
3967	if (mddev->reshape_position == MaxSector) {
3968		conf->prev = conf->geo;
3969		conf->reshape_progress = MaxSector;
3970	} else {
3971		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3972			err = -EINVAL;
3973			goto out;
3974		}
3975		conf->reshape_progress = mddev->reshape_position;
3976		if (conf->prev.far_offset)
3977			conf->prev.stride = 1 << conf->prev.chunk_shift;
3978		else
3979			/* far_copies must be 1 */
3980			conf->prev.stride = conf->dev_sectors;
3981	}
3982	conf->reshape_safe = conf->reshape_progress;
3983	spin_lock_init(&conf->device_lock);
3984	INIT_LIST_HEAD(&conf->retry_list);
3985	INIT_LIST_HEAD(&conf->bio_end_io_list);
3986
3987	seqlock_init(&conf->resync_lock);
3988	init_waitqueue_head(&conf->wait_barrier);
3989	atomic_set(&conf->nr_pending, 0);
3990
3991	err = -ENOMEM;
3992	rcu_assign_pointer(conf->thread,
3993			   md_register_thread(raid10d, mddev, "raid10"));
3994	if (!conf->thread)
3995		goto out;
3996
3997	conf->mddev = mddev;
3998	return conf;
3999
4000 out:
4001	raid10_free_conf(conf);
4002	return ERR_PTR(err);
4003}
4004
4005static void raid10_set_io_opt(struct r10conf *conf)
4006{
4007	int raid_disks = conf->geo.raid_disks;
4008
4009	if (!(conf->geo.raid_disks % conf->geo.near_copies))
4010		raid_disks /= conf->geo.near_copies;
4011	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
4012			 raid_disks);
4013}
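/*
 * For illustration (assumed values): a near-2 array of 4 devices with
 * chunk_sectors = 1024 (512 KiB) gives raid_disks / near_copies = 2, so
 * io_opt is advertised as 2 * 512 KiB = 1 MiB, one full data stripe.
 */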
4014
4015static int raid10_run(struct mddev *mddev)
4016{
4017	struct r10conf *conf;
4018	int i, disk_idx;
4019	struct raid10_info *disk;
4020	struct md_rdev *rdev;
4021	sector_t size;
4022	sector_t min_offset_diff = 0;
4023	int first = 1;
4024
4025	if (mddev->private == NULL) {
4026		conf = setup_conf(mddev);
4027		if (IS_ERR(conf))
4028			return PTR_ERR(conf);
4029		mddev->private = conf;
4030	}
4031	conf = mddev->private;
4032	if (!conf)
4033		goto out;
4034
4035	rcu_assign_pointer(mddev->thread, conf->thread);
4036	rcu_assign_pointer(conf->thread, NULL);
4037
4038	if (mddev_is_clustered(conf->mddev)) {
4039		int fc, fo;
4040
4041		fc = (mddev->layout >> 8) & 255;
4042		fo = mddev->layout & (1<<16);
4043		if (fc > 1 || fo > 0) {
4044			pr_err("only near layout is supported by clustered"
4045				" raid10\n");
4046			goto out_free_conf;
4047		}
4048	}
4049
4050	if (mddev->queue) {
4051		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
4052		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
4053		raid10_set_io_opt(conf);
4054	}
4055
4056	rdev_for_each(rdev, mddev) {
4057		long long diff;
4058
4059		disk_idx = rdev->raid_disk;
4060		if (disk_idx < 0)
4061			continue;
4062		if (disk_idx >= conf->geo.raid_disks &&
4063		    disk_idx >= conf->prev.raid_disks)
4064			continue;
4065		disk = conf->mirrors + disk_idx;
4066
4067		if (test_bit(Replacement, &rdev->flags)) {
4068			if (disk->replacement)
4069				goto out_free_conf;
4070			disk->replacement = rdev;
4071		} else {
4072			if (disk->rdev)
4073				goto out_free_conf;
4074			disk->rdev = rdev;
4075		}
4076		diff = (rdev->new_data_offset - rdev->data_offset);
4077		if (!mddev->reshape_backwards)
4078			diff = -diff;
4079		if (diff < 0)
4080			diff = 0;
4081		if (first || diff < min_offset_diff)
4082			min_offset_diff = diff;
4083
4084		if (mddev->gendisk)
4085			disk_stack_limits(mddev->gendisk, rdev->bdev,
4086					  rdev->data_offset << 9);
4087
4088		disk->head_position = 0;
4089		first = 0;
4090	}
4091
4092	/* need to check that every block has at least one working mirror */
4093	if (!enough(conf, -1)) {
4094		pr_err("md/raid10:%s: not enough operational mirrors.\n",
4095		       mdname(mddev));
4096		goto out_free_conf;
4097	}
4098
4099	if (conf->reshape_progress != MaxSector) {
4100		/* must ensure that shape change is supported */
4101		if (conf->geo.far_copies != 1 &&
4102		    conf->geo.far_offset == 0)
4103			goto out_free_conf;
4104		if (conf->prev.far_copies != 1 &&
4105		    conf->prev.far_offset == 0)
4106			goto out_free_conf;
4107	}
4108
4109	mddev->degraded = 0;
4110	for (i = 0;
4111	     i < conf->geo.raid_disks
4112		     || i < conf->prev.raid_disks;
4113	     i++) {
4114
4115		disk = conf->mirrors + i;
4116
4117		if (!disk->rdev && disk->replacement) {
4118			/* The replacement is all we have - use it */
4119			disk->rdev = disk->replacement;
4120			disk->replacement = NULL;
4121			clear_bit(Replacement, &disk->rdev->flags);
4122		}
4123
4124		if (!disk->rdev ||
4125		    !test_bit(In_sync, &disk->rdev->flags)) {
4126			disk->head_position = 0;
4127			mddev->degraded++;
4128			if (disk->rdev &&
4129			    disk->rdev->saved_raid_disk < 0)
4130				conf->fullsync = 1;
4131		}
4132
4133		if (disk->replacement &&
4134		    !test_bit(In_sync, &disk->replacement->flags) &&
4135		    disk->replacement->saved_raid_disk < 0) {
4136			conf->fullsync = 1;
4137		}
4138
4139		disk->recovery_disabled = mddev->recovery_disabled - 1;
4140	}
4141
4142	if (mddev->recovery_cp != MaxSector)
4143		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
4144			  mdname(mddev));
4145	pr_info("md/raid10:%s: active with %d out of %d devices\n",
4146		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4147		conf->geo.raid_disks);
4148	/*
4149	 * Ok, everything is just fine now
4150	 */
4151	mddev->dev_sectors = conf->dev_sectors;
4152	size = raid10_size(mddev, 0, 0);
4153	md_set_array_sectors(mddev, size);
4154	mddev->resync_max_sectors = size;
4155	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4156
4157	if (md_integrity_register(mddev))
4158		goto out_free_conf;
4159
4160	if (conf->reshape_progress != MaxSector) {
4161		unsigned long before_length, after_length;
4162
4163		before_length = ((1 << conf->prev.chunk_shift) *
4164				 conf->prev.far_copies);
4165		after_length = ((1 << conf->geo.chunk_shift) *
4166				conf->geo.far_copies);
4167
4168		if (max(before_length, after_length) > min_offset_diff) {
4169			/* This cannot work */
4170			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
4171			goto out_free_conf;
4172		}
4173		conf->offset_diff = min_offset_diff;
4174
4175		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4176		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4177		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4178		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4179	}
4180
4181	return 0;
4182
4183out_free_conf:
4184	md_unregister_thread(mddev, &mddev->thread);
4185	raid10_free_conf(conf);
4186	mddev->private = NULL;
4187out:
4188	return -EIO;
4189}
4190
4191static void raid10_free(struct mddev *mddev, void *priv)
4192{
4193	raid10_free_conf(priv);
4194}
4195
4196static void raid10_quiesce(struct mddev *mddev, int quiesce)
4197{
4198	struct r10conf *conf = mddev->private;
4199
4200	if (quiesce)
4201		raise_barrier(conf, 0);
4202	else
4203		lower_barrier(conf);
4204}
4205
4206static int raid10_resize(struct mddev *mddev, sector_t sectors)
4207{
4208	/* Resize of 'far' arrays is not supported.
4209	 * For 'near' and 'offset' arrays we can set the
4210	 * number of sectors used to be an appropriate multiple
4211	 * of the chunk size.
4212	 * For 'offset', this is far_copies*chunksize.
4213	 * For 'near' the multiplier is the LCM of
4214	 * near_copies and raid_disks.
4215	 * So if far_copies > 1 && !far_offset, fail.
4216	 * Else find LCM(raid_disks, near_copies)*far_copies and
4217	 * multiply by chunk_size.  Then round to this number.
4218	 * This is mostly done by raid10_size()
4219	 */
4220	struct r10conf *conf = mddev->private;
4221	sector_t oldsize, size;
4222
4223	if (mddev->reshape_position != MaxSector)
4224		return -EBUSY;
4225
4226	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4227		return -EINVAL;
4228
4229	oldsize = raid10_size(mddev, 0, 0);
4230	size = raid10_size(mddev, sectors, 0);
4231	if (mddev->external_size &&
4232	    mddev->array_sectors > size)
4233		return -EINVAL;
4234	if (mddev->bitmap) {
4235		int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
4236		if (ret)
4237			return ret;
4238	}
4239	md_set_array_sectors(mddev, size);
4240	if (sectors > mddev->dev_sectors &&
4241	    mddev->recovery_cp > oldsize) {
4242		mddev->recovery_cp = oldsize;
4243		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4244	}
4245	calc_sectors(conf, sectors);
4246	mddev->dev_sectors = conf->dev_sectors;
4247	mddev->resync_max_sectors = size;
4248	return 0;
4249}
4250
4251static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4252{
4253	struct md_rdev *rdev;
4254	struct r10conf *conf;
4255
4256	if (mddev->degraded > 0) {
4257		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4258			mdname(mddev));
4259		return ERR_PTR(-EINVAL);
4260	}
4261	sector_div(size, devs);
4262
4263	/* Set new parameters */
4264	mddev->new_level = 10;
4265	/* new layout: far_copies = 1, near_copies = 2 */
4266	mddev->new_layout = (1<<8) + 2;
4267	mddev->new_chunk_sectors = mddev->chunk_sectors;
4268	mddev->delta_disks = mddev->raid_disks;
4269	mddev->raid_disks *= 2;
4270	/* make sure it will be not marked as dirty */
4271	mddev->recovery_cp = MaxSector;
4272	mddev->dev_sectors = size;
4273
4274	conf = setup_conf(mddev);
4275	if (!IS_ERR(conf)) {
4276		rdev_for_each(rdev, mddev)
4277			if (rdev->raid_disk >= 0) {
4278				rdev->new_raid_disk = rdev->raid_disk * 2;
4279				rdev->sectors = size;
4280			}
 
4281	}
4282
4283	return conf;
4284}
4285
4286static void *raid10_takeover(struct mddev *mddev)
4287{
4288	struct r0conf *raid0_conf;
4289
4290	/* raid10 can take over:
4291	 *  raid0 - provided it has only one zone
4292	 */
4293	if (mddev->level == 0) {
4294		/* for raid0 takeover only one zone is supported */
4295		raid0_conf = mddev->private;
4296		if (raid0_conf->nr_strip_zones > 1) {
4297			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4298				mdname(mddev));
4299			return ERR_PTR(-EINVAL);
4300		}
4301		return raid10_takeover_raid0(mddev,
4302			raid0_conf->strip_zone->zone_end,
4303			raid0_conf->strip_zone->nb_dev);
4304	}
4305	return ERR_PTR(-EINVAL);
4306}
4307
4308static int raid10_check_reshape(struct mddev *mddev)
4309{
4310	/* Called when there is a request to change
4311	 * - layout (to ->new_layout)
4312	 * - chunk size (to ->new_chunk_sectors)
4313	 * - raid_disks (by delta_disks)
4314	 * or when trying to restart a reshape that was ongoing.
4315	 *
4316	 * We need to validate the request and possibly allocate
4317	 * space if that might be an issue later.
4318	 *
4319	 * Currently we reject any reshape of a 'far' mode array,
4320	 * allow chunk size to change if new is generally acceptable,
4321	 * allow raid_disks to increase, and allow
4322	 * a switch between 'near' mode and 'offset' mode.
4323	 */
4324	struct r10conf *conf = mddev->private;
4325	struct geom geo;
4326
4327	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4328		return -EINVAL;
4329
4330	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4331		/* mustn't change number of copies */
4332		return -EINVAL;
4333	if (geo.far_copies > 1 && !geo.far_offset)
4334		/* Cannot switch to 'far' mode */
4335		return -EINVAL;
4336
4337	if (mddev->array_sectors & geo.chunk_mask)
4338		/* not factor of array size */
4339		return -EINVAL;
4340
4341	if (!enough(conf, -1))
4342		return -EINVAL;
4343
4344	kfree(conf->mirrors_new);
4345	conf->mirrors_new = NULL;
4346	if (mddev->delta_disks > 0) {
4347		/* allocate new 'mirrors' list */
4348		conf->mirrors_new =
4349			kcalloc(mddev->raid_disks + mddev->delta_disks,
4350				sizeof(struct raid10_info),
4351				GFP_KERNEL);
4352		if (!conf->mirrors_new)
4353			return -ENOMEM;
4354	}
4355	return 0;
4356}
4357
4358/*
4359 * Need to check if array has failed when deciding whether to:
4360 *  - start an array
4361 *  - remove non-faulty devices
4362 *  - add a spare
4363 *  - allow a reshape
4364 * This determination is simple when no reshape is happening.
4365 * However if there is a reshape, we need to carefully check
4366 * both the before and after sections.
4367 * This is because some failed devices may only affect one
4368 * of the two sections, and some non-in_sync devices may
4369 * be insync in the section most affected by failed devices.
4370 */
4371static int calc_degraded(struct r10conf *conf)
4372{
4373	int degraded, degraded2;
4374	int i;
4375
 
4376	degraded = 0;
4377	/* 'prev' section first */
4378	for (i = 0; i < conf->prev.raid_disks; i++) {
4379		struct md_rdev *rdev = conf->mirrors[i].rdev;
4380
4381		if (!rdev || test_bit(Faulty, &rdev->flags))
4382			degraded++;
4383		else if (!test_bit(In_sync, &rdev->flags))
4384			/* Once md can reduce the number of devices in
4385			 * an array, this might not need to contribute to
4386			 * 'degraded'.  For now it always does.
4387			 */
4388			degraded++;
4389	}
 
4390	if (conf->geo.raid_disks == conf->prev.raid_disks)
4391		return degraded;
 
4392	degraded2 = 0;
4393	for (i = 0; i < conf->geo.raid_disks; i++) {
4394		struct md_rdev *rdev = conf->mirrors[i].rdev;
4395
4396		if (!rdev || test_bit(Faulty, &rdev->flags))
4397			degraded2++;
4398		else if (!test_bit(In_sync, &rdev->flags)) {
4399			/* If reshape is increasing the number of devices,
4400			 * this section has already been recovered, so
4401			 * it doesn't contribute to degraded.
4402			 * else it does.
4403			 */
4404			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4405				degraded2++;
4406		}
4407	}
 
4408	if (degraded2 > degraded)
4409		return degraded2;
4410	return degraded;
4411}
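/*
 * Example (assumed scenario): while growing from prev.raid_disks = 4 to
 * geo.raid_disks = 6, the two added devices are present but not yet
 * In_sync.  The 'prev' loop sees 4 healthy devices (degraded = 0); in the
 * 'geo' loop the new devices are !In_sync but, because geo.raid_disks >
 * prev.raid_disks, they are not counted, so the result is still 0.  When
 * shrinking, the same not-In_sync devices would count towards degraded2.
 */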
4412
4413static int raid10_start_reshape(struct mddev *mddev)
4414{
4415	/* A 'reshape' has been requested. This commits
4416	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4417	 * This also checks if there are enough spares and adds them
4418	 * to the array.
4419	 * We currently require enough spares to make the final
4420	 * array non-degraded.  We also require that the difference
4421	 * between old and new data_offset - on each device - is
4422	 * enough that we never risk over-writing.
4423	 */
4424
4425	unsigned long before_length, after_length;
4426	sector_t min_offset_diff = 0;
4427	int first = 1;
4428	struct geom new;
4429	struct r10conf *conf = mddev->private;
4430	struct md_rdev *rdev;
4431	int spares = 0;
4432	int ret;
4433
4434	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4435		return -EBUSY;
4436
4437	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4438		return -EINVAL;
4439
4440	before_length = ((1 << conf->prev.chunk_shift) *
4441			 conf->prev.far_copies);
4442	after_length = ((1 << conf->geo.chunk_shift) *
4443			conf->geo.far_copies);
4444
4445	rdev_for_each(rdev, mddev) {
4446		if (!test_bit(In_sync, &rdev->flags)
4447		    && !test_bit(Faulty, &rdev->flags))
4448			spares++;
4449		if (rdev->raid_disk >= 0) {
4450			long long diff = (rdev->new_data_offset
4451					  - rdev->data_offset);
4452			if (!mddev->reshape_backwards)
4453				diff = -diff;
4454			if (diff < 0)
4455				diff = 0;
4456			if (first || diff < min_offset_diff)
4457				min_offset_diff = diff;
4458			first = 0;
4459		}
4460	}
4461
4462	if (max(before_length, after_length) > min_offset_diff)
4463		return -EINVAL;
4464
4465	if (spares < mddev->delta_disks)
4466		return -EINVAL;
4467
4468	conf->offset_diff = min_offset_diff;
4469	spin_lock_irq(&conf->device_lock);
4470	if (conf->mirrors_new) {
4471		memcpy(conf->mirrors_new, conf->mirrors,
4472		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4473		smp_mb();
4474		kfree(conf->mirrors_old);
4475		conf->mirrors_old = conf->mirrors;
4476		conf->mirrors = conf->mirrors_new;
4477		conf->mirrors_new = NULL;
4478	}
4479	setup_geo(&conf->geo, mddev, geo_start);
4480	smp_mb();
4481	if (mddev->reshape_backwards) {
4482		sector_t size = raid10_size(mddev, 0, 0);
4483		if (size < mddev->array_sectors) {
4484			spin_unlock_irq(&conf->device_lock);
4485			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4486				mdname(mddev));
4487			return -EINVAL;
4488		}
4489		mddev->resync_max_sectors = size;
4490		conf->reshape_progress = size;
4491	} else
4492		conf->reshape_progress = 0;
4493	conf->reshape_safe = conf->reshape_progress;
4494	spin_unlock_irq(&conf->device_lock);
4495
4496	if (mddev->delta_disks && mddev->bitmap) {
4497		struct mdp_superblock_1 *sb = NULL;
4498		sector_t oldsize, newsize;
4499
4500		oldsize = raid10_size(mddev, 0, 0);
4501		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4502
4503		if (!mddev_is_clustered(mddev)) {
4504			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4505			if (ret)
4506				goto abort;
4507			else
4508				goto out;
4509		}
4510
4511		rdev_for_each(rdev, mddev) {
4512			if (rdev->raid_disk > -1 &&
4513			    !test_bit(Faulty, &rdev->flags))
4514				sb = page_address(rdev->sb_page);
4515		}
4516
4517		/*
4518		 * Some node is already performing the reshape, so there is no
4519		 * need to call md_bitmap_resize again; it will be called when
4520		 * the BITMAP_RESIZE msg is received.
4521		 */
4522		if ((sb && (le32_to_cpu(sb->feature_map) &
4523			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4524			goto out;
4525
4526		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4527		if (ret)
4528			goto abort;
4529
4530		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4531		if (ret) {
4532			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4533			goto abort;
4534		}
4535	}
4536out:
4537	if (mddev->delta_disks > 0) {
4538		rdev_for_each(rdev, mddev)
4539			if (rdev->raid_disk < 0 &&
4540			    !test_bit(Faulty, &rdev->flags)) {
4541				if (raid10_add_disk(mddev, rdev) == 0) {
4542					if (rdev->raid_disk >=
4543					    conf->prev.raid_disks)
4544						set_bit(In_sync, &rdev->flags);
4545					else
4546						rdev->recovery_offset = 0;
4547
4548					/* Failure here is OK */
4549					sysfs_link_rdev(mddev, rdev);
4550				}
4551			} else if (rdev->raid_disk >= conf->prev.raid_disks
4552				   && !test_bit(Faulty, &rdev->flags)) {
4553				/* This is a spare that was manually added */
4554				set_bit(In_sync, &rdev->flags);
4555			}
4556	}
4557	/* When a reshape changes the number of devices,
4558	 * ->degraded is measured against the larger of the
4559	 * pre and post numbers.
4560	 */
4561	spin_lock_irq(&conf->device_lock);
4562	mddev->degraded = calc_degraded(conf);
4563	spin_unlock_irq(&conf->device_lock);
4564	mddev->raid_disks = conf->geo.raid_disks;
4565	mddev->reshape_position = conf->reshape_progress;
4566	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4567
4568	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4569	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4570	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4571	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4572	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4573	conf->reshape_checkpoint = jiffies;
4574	md_new_event();
 
4575	return 0;
4576
4577abort:
4578	mddev->recovery = 0;
4579	spin_lock_irq(&conf->device_lock);
4580	conf->geo = conf->prev;
4581	mddev->raid_disks = conf->geo.raid_disks;
4582	rdev_for_each(rdev, mddev)
4583		rdev->new_data_offset = rdev->data_offset;
4584	smp_wmb();
4585	conf->reshape_progress = MaxSector;
4586	conf->reshape_safe = MaxSector;
4587	mddev->reshape_position = MaxSector;
4588	spin_unlock_irq(&conf->device_lock);
4589	return ret;
4590}
4591
4592/* Calculate the last device-address that could contain
4593 * any block from the chunk that includes the array-address 's'
4594 * and report the next address.
4595 * i.e. the address returned will be chunk-aligned and after
4596 * any data that is in the chunk containing 's'.
4597 */
4598static sector_t last_dev_address(sector_t s, struct geom *geo)
4599{
4600	s = (s | geo->chunk_mask) + 1;
4601	s >>= geo->chunk_shift;
4602	s *= geo->near_copies;
4603	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4604	s *= geo->far_copies;
4605	s <<= geo->chunk_shift;
4606	return s;
4607}
4608
4609/* Calculate the first device-address that could contain
4610 * any block from the chunk that includes the array-address 's'.
4611 * This too will be the start of a chunk
4612 */
4613static sector_t first_dev_address(sector_t s, struct geom *geo)
4614{
4615	s >>= geo->chunk_shift;
4616	s *= geo->near_copies;
4617	sector_div(s, geo->raid_disks);
4618	s *= geo->far_copies;
4619	s <<= geo->chunk_shift;
4620	return s;
4621}
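/*
 * Worked example for the two helpers above (assumed geometry: 4 devices,
 * near_copies = 2, far_copies = 1, 1024-sector chunks, chunk_shift = 10):
 * take array address s = 5000, which lies in array chunk 4.  That chunk's
 * two copies land in device chunk 2 (device sectors 2048..3071), so
 *
 *   first_dev_address(5000) = ((5000 >> 10) * 2 / 4) << 10   = 2048
 *   last_dev_address(5000)  = DIV_ROUND_UP(5 * 2, 4) << 10   = 3072
 *
 * i.e. the chunk-aligned device range [2048, 3072) covers every block of
 * the chunk containing s.
 */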
4622
4623static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4624				int *skipped)
4625{
4626	/* We simply copy at most one chunk (smallest of old and new)
4627	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4628	 * or we hit a bad block or something.
4629	 * This might mean we pause for normal IO in the middle of
4630	 * a chunk, but that is not a problem as mddev->reshape_position
4631	 * can record any location.
4632	 *
4633	 * If we will want to write to a location that isn't
4634	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4635	 * we need to flush all reshape requests and update the metadata.
4636	 *
4637	 * When reshaping forwards (e.g. to more devices), we interpret
4638	 * 'safe' as the earliest block which might not have been copied
4639	 * down yet.  We divide this by previous stripe size and multiply
4640	 * by previous stripe length to get lowest device offset that we
4641	 * cannot write to yet.
4642	 * We interpret 'sector_nr' as an address that we want to write to.
4643	 * From this we use last_dev_address() to find where we might
4644	 * write to, and first_dev_address() on the 'safe' position.
4645	 * If this 'next' write position is after the 'safe' position,
4646	 * we must update the metadata to increase the 'safe' position.
4647	 *
4648	 * When reshaping backwards, we round in the opposite direction
4649	 * and perform the reverse test:  next write position must not be
4650	 * less than current safe position.
4651	 *
4652	 * In all this the minimum difference in data offsets
4653	 * (conf->offset_diff - always positive) allows a bit of slack,
4654	 * so next can be after 'safe', but not by more than offset_diff
4655	 *
4656	 * We need to prepare all the bios here before we start any IO
4657	 * to ensure the size we choose is acceptable to all devices.
4658	 * That means one for each copy for write-out and an extra one for
4659	 * read-in.
4660	 * We store the read-in bio in ->master_bio and the others in
4661	 * ->devs[x].bio and ->devs[x].repl_bio.
4662	 */
4663	struct r10conf *conf = mddev->private;
4664	struct r10bio *r10_bio;
4665	sector_t next, safe, last;
4666	int max_sectors;
4667	int nr_sectors;
4668	int s;
4669	struct md_rdev *rdev;
4670	int need_flush = 0;
4671	struct bio *blist;
4672	struct bio *bio, *read_bio;
4673	int sectors_done = 0;
4674	struct page **pages;
4675
4676	if (sector_nr == 0) {
4677		/* If restarting in the middle, skip the initial sectors */
4678		if (mddev->reshape_backwards &&
4679		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4680			sector_nr = (raid10_size(mddev, 0, 0)
4681				     - conf->reshape_progress);
4682		} else if (!mddev->reshape_backwards &&
4683			   conf->reshape_progress > 0)
4684			sector_nr = conf->reshape_progress;
4685		if (sector_nr) {
4686			mddev->curr_resync_completed = sector_nr;
4687			sysfs_notify_dirent_safe(mddev->sysfs_completed);
4688			*skipped = 1;
4689			return sector_nr;
4690		}
4691	}
4692
4693	/* We don't use sector_nr to track where we are up to
4694	 * as that doesn't work well for ->reshape_backwards.
4695	 * So just use ->reshape_progress.
4696	 */
4697	if (mddev->reshape_backwards) {
4698		/* 'next' is the earliest device address that we might
4699		 * write to for this chunk in the new layout
4700		 */
4701		next = first_dev_address(conf->reshape_progress - 1,
4702					 &conf->geo);
4703
4704		/* 'safe' is the last device address that we might read from
4705		 * in the old layout after a restart
4706		 */
4707		safe = last_dev_address(conf->reshape_safe - 1,
4708					&conf->prev);
4709
4710		if (next + conf->offset_diff < safe)
4711			need_flush = 1;
4712
4713		last = conf->reshape_progress - 1;
4714		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4715					       & conf->prev.chunk_mask);
4716		if (sector_nr + RESYNC_SECTORS < last)
4717			sector_nr = last + 1 - RESYNC_SECTORS;
4718	} else {
4719		/* 'next' is after the last device address that we
4720		 * might write to for this chunk in the new layout
4721		 */
4722		next = last_dev_address(conf->reshape_progress, &conf->geo);
4723
4724		/* 'safe' is the earliest device address that we might
4725		 * read from in the old layout after a restart
4726		 */
4727		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4728
4729		/* Need to update metadata if 'next' might be beyond 'safe'
4730		 * as that would possibly corrupt data
4731		 */
4732		if (next > safe + conf->offset_diff)
4733			need_flush = 1;
4734
4735		sector_nr = conf->reshape_progress;
4736		last  = sector_nr | (conf->geo.chunk_mask
4737				     & conf->prev.chunk_mask);
4738
4739		if (sector_nr + RESYNC_SECTORS <= last)
4740			last = sector_nr + RESYNC_SECTORS - 1;
4741	}
4742
4743	if (need_flush ||
4744	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4745		/* Need to update reshape_position in metadata */
4746		wait_barrier(conf, false);
4747		mddev->reshape_position = conf->reshape_progress;
4748		if (mddev->reshape_backwards)
4749			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4750				- conf->reshape_progress;
4751		else
4752			mddev->curr_resync_completed = conf->reshape_progress;
4753		conf->reshape_checkpoint = jiffies;
4754		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4755		md_wakeup_thread(mddev->thread);
4756		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4757			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4758		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4759			allow_barrier(conf);
4760			return sectors_done;
4761		}
4762		conf->reshape_safe = mddev->reshape_position;
4763		allow_barrier(conf);
4764	}
4765
4766	raise_barrier(conf, 0);
4767read_more:
4768	/* Now schedule reads for blocks from sector_nr to last */
4769	r10_bio = raid10_alloc_init_r10buf(conf);
4770	r10_bio->state = 0;
4771	raise_barrier(conf, 1);
4772	atomic_set(&r10_bio->remaining, 0);
4773	r10_bio->mddev = mddev;
4774	r10_bio->sector = sector_nr;
4775	set_bit(R10BIO_IsReshape, &r10_bio->state);
4776	r10_bio->sectors = last - sector_nr + 1;
4777	rdev = read_balance(conf, r10_bio, &max_sectors);
4778	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4779
4780	if (!rdev) {
4781		/* Cannot read from here, so need to record bad blocks
4782		 * on all the target devices.
4783		 */
4784		// FIXME
4785		mempool_free(r10_bio, &conf->r10buf_pool);
4786		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4787		return sectors_done;
4788	}
4789
4790	read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4791				    GFP_KERNEL, &mddev->bio_set);
 
4792	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4793			       + rdev->data_offset);
4794	read_bio->bi_private = r10_bio;
4795	read_bio->bi_end_io = end_reshape_read;
4796	r10_bio->master_bio = read_bio;
4797	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4798
4799	/*
4800	 * Broadcast the RESYNC message to other nodes, so that they do not
4801	 * write to this region and cause conflicts.
4802	 */
4803	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4804		struct mdp_superblock_1 *sb = NULL;
4805		int sb_reshape_pos = 0;
4806
4807		conf->cluster_sync_low = sector_nr;
4808		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4809		sb = page_address(rdev->sb_page);
4810		if (sb) {
4811			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4812			/*
4813			 * Set cluster_sync_low again if the next address for the
4814			 * array reshape is less than cluster_sync_low, since we
4815			 * can't update cluster_sync_low until that reshape is done.
4816			 */
4817			if (sb_reshape_pos < conf->cluster_sync_low)
4818				conf->cluster_sync_low = sb_reshape_pos;
4819		}
4820
4821		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4822							  conf->cluster_sync_high);
4823	}
4824
4825	/* Now find the locations in the new layout */
4826	__raid10_find_phys(&conf->geo, r10_bio);
4827
4828	blist = read_bio;
4829	read_bio->bi_next = NULL;
4830
 
4831	for (s = 0; s < conf->copies*2; s++) {
4832		struct bio *b;
4833		int d = r10_bio->devs[s/2].devnum;
4834		struct md_rdev *rdev2;
4835		if (s&1) {
4836			rdev2 = conf->mirrors[d].replacement;
4837			b = r10_bio->devs[s/2].repl_bio;
4838		} else {
4839			rdev2 = conf->mirrors[d].rdev;
4840			b = r10_bio->devs[s/2].bio;
4841		}
4842		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4843			continue;
4844
4845		bio_set_dev(b, rdev2->bdev);
4846		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4847			rdev2->new_data_offset;
4848		b->bi_end_io = end_reshape_write;
4849		b->bi_opf = REQ_OP_WRITE;
4850		b->bi_next = blist;
4851		blist = b;
4852	}
4853
4854	/* Now add as many pages as possible to all of these bios. */
4855
4856	nr_sectors = 0;
4857	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4858	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4859		struct page *page = pages[s / (PAGE_SIZE >> 9)];
4860		int len = (max_sectors - s) << 9;
4861		if (len > PAGE_SIZE)
4862			len = PAGE_SIZE;
4863		for (bio = blist; bio ; bio = bio->bi_next) {
4864			if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
4865				bio->bi_status = BLK_STS_RESOURCE;
4866				bio_endio(bio);
4867				return sectors_done;
4868			}
4869		}
4870		sector_nr += len >> 9;
4871		nr_sectors += len >> 9;
4872	}
 
4873	r10_bio->sectors = nr_sectors;
4874
4875	/* Now submit the read */
4876	md_sync_acct_bio(read_bio, r10_bio->sectors);
4877	atomic_inc(&r10_bio->remaining);
4878	read_bio->bi_next = NULL;
4879	submit_bio_noacct(read_bio);
4880	sectors_done += nr_sectors;
4881	if (sector_nr <= last)
4882		goto read_more;
4883
4884	lower_barrier(conf);
4885
4886	/* Now that we have done the whole section we can
4887	 * update reshape_progress
4888	 */
4889	if (mddev->reshape_backwards)
4890		conf->reshape_progress -= sectors_done;
4891	else
4892		conf->reshape_progress += sectors_done;
4893
4894	return sectors_done;
4895}
4896
4897static void end_reshape_request(struct r10bio *r10_bio);
4898static int handle_reshape_read_error(struct mddev *mddev,
4899				     struct r10bio *r10_bio);
4900static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4901{
4902	/* Reshape read completed.  Hopefully we have a block
4903	 * to write out.
4904	 * If we got a read error then we do sync 1-page reads from
4905	 * elsewhere until we find the data - or give up.
4906	 */
4907	struct r10conf *conf = mddev->private;
4908	int s;
4909
4910	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4911		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4912			/* Reshape has been aborted */
4913			md_done_sync(mddev, r10_bio->sectors, 0);
4914			return;
4915		}
4916
4917	/* We definitely have the data in the pages, schedule the
4918	 * writes.
4919	 */
4920	atomic_set(&r10_bio->remaining, 1);
4921	for (s = 0; s < conf->copies*2; s++) {
4922		struct bio *b;
4923		int d = r10_bio->devs[s/2].devnum;
4924		struct md_rdev *rdev;
 
4925		if (s&1) {
4926			rdev = conf->mirrors[d].replacement;
4927			b = r10_bio->devs[s/2].repl_bio;
4928		} else {
4929			rdev = conf->mirrors[d].rdev;
4930			b = r10_bio->devs[s/2].bio;
4931		}
4932		if (!rdev || test_bit(Faulty, &rdev->flags))
4933			continue;
4934
4935		atomic_inc(&rdev->nr_pending);
 
4936		md_sync_acct_bio(b, r10_bio->sectors);
4937		atomic_inc(&r10_bio->remaining);
4938		b->bi_next = NULL;
4939		submit_bio_noacct(b);
4940	}
4941	end_reshape_request(r10_bio);
4942}
4943
4944static void end_reshape(struct r10conf *conf)
4945{
4946	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4947		return;
4948
4949	spin_lock_irq(&conf->device_lock);
4950	conf->prev = conf->geo;
4951	md_finish_reshape(conf->mddev);
4952	smp_wmb();
4953	conf->reshape_progress = MaxSector;
4954	conf->reshape_safe = MaxSector;
4955	spin_unlock_irq(&conf->device_lock);
4956
4957	if (conf->mddev->queue)
4958		raid10_set_io_opt(conf);
4959	conf->fullsync = 0;
4960}
4961
4962static void raid10_update_reshape_pos(struct mddev *mddev)
4963{
4964	struct r10conf *conf = mddev->private;
4965	sector_t lo, hi;
4966
4967	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
4968	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4969	    || mddev->reshape_position == MaxSector)
4970		conf->reshape_progress = mddev->reshape_position;
4971	else
4972		WARN_ON_ONCE(1);
4973}
4974
4975static int handle_reshape_read_error(struct mddev *mddev,
4976				     struct r10bio *r10_bio)
4977{
4978	/* Use sync reads to get the blocks from somewhere else */
4979	int sectors = r10_bio->sectors;
4980	struct r10conf *conf = mddev->private;
4981	struct r10bio *r10b;
4982	int slot = 0;
4983	int idx = 0;
4984	struct page **pages;
4985
4986	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
4987	if (!r10b) {
4988		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4989		return -ENOMEM;
4990	}
4991
4992	/* reshape IOs share pages from .devs[0].bio */
4993	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4994
4995	r10b->sector = r10_bio->sector;
4996	__raid10_find_phys(&conf->prev, r10b);
4997
4998	while (sectors) {
4999		int s = sectors;
5000		int success = 0;
5001		int first_slot = slot;
5002
5003		if (s > (PAGE_SIZE >> 9))
5004			s = PAGE_SIZE >> 9;
5005
 
5006		while (!success) {
5007			int d = r10b->devs[slot].devnum;
5008			struct md_rdev *rdev = conf->mirrors[d].rdev;
5009			sector_t addr;
5010			if (rdev == NULL ||
5011			    test_bit(Faulty, &rdev->flags) ||
5012			    !test_bit(In_sync, &rdev->flags))
5013				goto failed;
5014
5015			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
5016			atomic_inc(&rdev->nr_pending);
 
5017			success = sync_page_io(rdev,
5018					       addr,
5019					       s << 9,
5020					       pages[idx],
5021					       REQ_OP_READ, false);
5022			rdev_dec_pending(rdev, mddev);
 
5023			if (success)
5024				break;
5025		failed:
5026			slot++;
5027			if (slot >= conf->copies)
5028				slot = 0;
5029			if (slot == first_slot)
5030				break;
5031		}
 
5032		if (!success) {
5033			/* couldn't read this block, must give up */
5034			set_bit(MD_RECOVERY_INTR,
5035				&mddev->recovery);
5036			kfree(r10b);
5037			return -EIO;
5038		}
5039		sectors -= s;
5040		idx++;
5041	}
5042	kfree(r10b);
5043	return 0;
5044}
5045
5046static void end_reshape_write(struct bio *bio)
5047{
5048	struct r10bio *r10_bio = get_resync_r10bio(bio);
5049	struct mddev *mddev = r10_bio->mddev;
5050	struct r10conf *conf = mddev->private;
5051	int d;
5052	int slot;
5053	int repl;
5054	struct md_rdev *rdev = NULL;
5055
5056	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5057	rdev = repl ? conf->mirrors[d].replacement :
5058		      conf->mirrors[d].rdev;
5059
5060	if (bio->bi_status) {
5061		/* FIXME should record badblock */
5062		md_error(mddev, rdev);
5063	}
5064
5065	rdev_dec_pending(rdev, mddev);
5066	end_reshape_request(r10_bio);
5067}
5068
5069static void end_reshape_request(struct r10bio *r10_bio)
5070{
5071	if (!atomic_dec_and_test(&r10_bio->remaining))
5072		return;
5073	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5074	bio_put(r10_bio->master_bio);
5075	put_buf(r10_bio);
5076}
5077
5078static void raid10_finish_reshape(struct mddev *mddev)
5079{
5080	struct r10conf *conf = mddev->private;
5081
5082	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5083		return;
5084
5085	if (mddev->delta_disks > 0) {
5086		if (mddev->recovery_cp > mddev->resync_max_sectors) {
5087			mddev->recovery_cp = mddev->resync_max_sectors;
5088			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5089		}
5090		mddev->resync_max_sectors = mddev->array_sectors;
5091	} else {
5092		int d;
 
5093		for (d = conf->geo.raid_disks ;
5094		     d < conf->geo.raid_disks - mddev->delta_disks;
5095		     d++) {
5096			struct md_rdev *rdev = conf->mirrors[d].rdev;
5097			if (rdev)
5098				clear_bit(In_sync, &rdev->flags);
5099			rdev = conf->mirrors[d].replacement;
5100			if (rdev)
5101				clear_bit(In_sync, &rdev->flags);
5102		}
 
5103	}
5104	mddev->layout = mddev->new_layout;
5105	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
5106	mddev->reshape_position = MaxSector;
5107	mddev->delta_disks = 0;
5108	mddev->reshape_backwards = 0;
5109}
5110
5111static struct md_personality raid10_personality =
5112{
5113	.name		= "raid10",
5114	.level		= 10,
5115	.owner		= THIS_MODULE,
5116	.make_request	= raid10_make_request,
5117	.run		= raid10_run,
5118	.free		= raid10_free,
5119	.status		= raid10_status,
5120	.error_handler	= raid10_error,
5121	.hot_add_disk	= raid10_add_disk,
5122	.hot_remove_disk= raid10_remove_disk,
5123	.spare_active	= raid10_spare_active,
5124	.sync_request	= raid10_sync_request,
5125	.quiesce	= raid10_quiesce,
5126	.size		= raid10_size,
5127	.resize		= raid10_resize,
5128	.takeover	= raid10_takeover,
5129	.check_reshape	= raid10_check_reshape,
5130	.start_reshape	= raid10_start_reshape,
5131	.finish_reshape	= raid10_finish_reshape,
5132	.update_reshape_pos = raid10_update_reshape_pos,
 
5133};
5134
5135static int __init raid_init(void)
5136{
5137	return register_md_personality(&raid10_personality);
5138}
5139
5140static void raid_exit(void)
5141{
5142	unregister_md_personality(&raid10_personality);
5143}
5144
5145module_init(raid_init);
5146module_exit(raid_exit);
5147MODULE_LICENSE("GPL");
5148MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
5149MODULE_ALIAS("md-personality-9"); /* RAID10 */
5150MODULE_ALIAS("md-raid10");
5151MODULE_ALIAS("md-level-10");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * raid10.c : Multiple Devices driver for Linux
   4 *
   5 * Copyright (C) 2000-2004 Neil Brown
   6 *
   7 * RAID-10 support for md.
   8 *
   9 * Base on code in raid1.c.  See raid1.c for further copyright information.
  10 */
  11
  12#include <linux/slab.h>
  13#include <linux/delay.h>
  14#include <linux/blkdev.h>
  15#include <linux/module.h>
  16#include <linux/seq_file.h>
  17#include <linux/ratelimit.h>
  18#include <linux/kthread.h>
  19#include <linux/raid/md_p.h>
  20#include <trace/events/block.h>
  21#include "md.h"
 
 
  22#include "raid10.h"
  23#include "raid0.h"
  24#include "md-bitmap.h"
  25
  26/*
  27 * RAID10 provides a combination of RAID0 and RAID1 functionality.
  28 * The layout of data is defined by
  29 *    chunk_size
  30 *    raid_disks
  31 *    near_copies (stored in low byte of layout)
  32 *    far_copies (stored in second byte of layout)
  33 *    far_offset (stored in bit 16 of layout )
  34 *    use_far_sets (stored in bit 17 of layout )
  35 *    use_far_sets_bugfixed (stored in bit 18 of layout )
  36 *
  37 * The data to be stored is divided into chunks using chunksize.  Each device
  38 * is divided into far_copies sections.   In each section, chunks are laid out
  39 * in a style similar to raid0, but near_copies copies of each chunk is stored
  40 * (each on a different drive).  The starting device for each section is offset
  41 * near_copies from the starting device of the previous section.  Thus there
  42 * are (near_copies * far_copies) of each chunk, and each is on a different
  43 * drive.  near_copies and far_copies must be at least one, and their product
  44 * is at most raid_disks.
  45 *
  46 * If far_offset is true, then the far_copies are handled a bit differently.
  47 * The copies are still in different stripes, but instead of being very far
  48 * apart on disk, there are adjacent stripes.
  49 *
  50 * The far and offset algorithms are handled slightly differently if
  51 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
  52 * sets that are (near_copies * far_copies) in size.  The far copied stripes
  53 * are still shifted by 'near_copies' devices, but this shifting stays confined
  54 * to the set rather than the entire array.  This is done to improve the number
  55 * of device combinations that can fail without causing the array to fail.
  56 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
  57 * on a device):
  58 *    A B C D    A B C D E
  59 *      ...         ...
  60 *    D A B C    E A B C D
  61 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
  62 *    [A B] [C D]    [A B] [C D E]
  63 *    |...| |...|    |...| | ... |
  64 *    [B A] [D C]    [B A] [E C D]
  65 */
  66
  67static void allow_barrier(struct r10conf *conf);
  68static void lower_barrier(struct r10conf *conf);
  69static int _enough(struct r10conf *conf, int previous, int ignore);
  70static int enough(struct r10conf *conf, int ignore);
  71static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
  72				int *skipped);
  73static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
  74static void end_reshape_write(struct bio *bio);
  75static void end_reshape(struct r10conf *conf);
  76
  77#define raid10_log(md, fmt, args...)				\
  78	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
  79
  80#include "raid1-10.c"
  81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  82/*
  83 * for resync bio, r10bio pointer can be retrieved from the per-bio
  84 * 'struct resync_pages'.
  85 */
  86static inline struct r10bio *get_resync_r10bio(struct bio *bio)
  87{
  88	return get_resync_pages(bio)->raid_bio;
  89}
  90
  91static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
  92{
  93	struct r10conf *conf = data;
  94	int size = offsetof(struct r10bio, devs[conf->copies]);
  95
  96	/* allocate a r10bio with room for raid_disks entries in the
  97	 * bios array */
  98	return kzalloc(size, gfp_flags);
  99}
 100
 101#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 102/* amount of memory to reserve for resync requests */
 103#define RESYNC_WINDOW (1024*1024)
 104/* maximum number of concurrent requests, memory permitting */
 105#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
 106#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
 107#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 108
 109/*
 110 * When performing a resync, we need to read and compare, so
 111 * we need as many pages are there are copies.
 112 * When performing a recovery, we need 2 bios, one for read,
 113 * one for write (we recover only one drive per r10buf)
 114 *
 115 */
 116static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 117{
 118	struct r10conf *conf = data;
 119	struct r10bio *r10_bio;
 120	struct bio *bio;
 121	int j;
 122	int nalloc, nalloc_rp;
 123	struct resync_pages *rps;
 124
 125	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 126	if (!r10_bio)
 127		return NULL;
 128
 129	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
 130	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
 131		nalloc = conf->copies; /* resync */
 132	else
 133		nalloc = 2; /* recovery */
 134
 135	/* allocate once for all bios */
 136	if (!conf->have_replacement)
 137		nalloc_rp = nalloc;
 138	else
 139		nalloc_rp = nalloc * 2;
 140	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
 141	if (!rps)
 142		goto out_free_r10bio;
 143
 144	/*
 145	 * Allocate bios.
 146	 */
 147	for (j = nalloc ; j-- ; ) {
 148		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 149		if (!bio)
 150			goto out_free_bio;
 
 151		r10_bio->devs[j].bio = bio;
 152		if (!conf->have_replacement)
 153			continue;
 154		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 155		if (!bio)
 156			goto out_free_bio;
 
 157		r10_bio->devs[j].repl_bio = bio;
 158	}
 159	/*
 160	 * Allocate RESYNC_PAGES data pages and attach them
 161	 * where needed.
 162	 */
 163	for (j = 0; j < nalloc; j++) {
 164		struct bio *rbio = r10_bio->devs[j].repl_bio;
 165		struct resync_pages *rp, *rp_repl;
 166
 167		rp = &rps[j];
 168		if (rbio)
 169			rp_repl = &rps[nalloc + j];
 170
 171		bio = r10_bio->devs[j].bio;
 172
 173		if (!j || test_bit(MD_RECOVERY_SYNC,
 174				   &conf->mddev->recovery)) {
 175			if (resync_alloc_pages(rp, gfp_flags))
 176				goto out_free_pages;
 177		} else {
 178			memcpy(rp, &rps[0], sizeof(*rp));
 179			resync_get_all_pages(rp);
 180		}
 181
 182		rp->raid_bio = r10_bio;
 183		bio->bi_private = rp;
 184		if (rbio) {
 185			memcpy(rp_repl, rp, sizeof(*rp));
 186			rbio->bi_private = rp_repl;
 187		}
 188	}
 189
 190	return r10_bio;
 191
 192out_free_pages:
 193	while (--j >= 0)
 194		resync_free_pages(&rps[j * 2]);
 195
 196	j = 0;
 197out_free_bio:
 198	for ( ; j < nalloc; j++) {
 199		if (r10_bio->devs[j].bio)
 200			bio_put(r10_bio->devs[j].bio);
 
 201		if (r10_bio->devs[j].repl_bio)
 202			bio_put(r10_bio->devs[j].repl_bio);
 
 203	}
 204	kfree(rps);
 205out_free_r10bio:
 206	rbio_pool_free(r10_bio, conf);
 207	return NULL;
 208}
 209
 210static void r10buf_pool_free(void *__r10_bio, void *data)
 211{
 212	struct r10conf *conf = data;
 213	struct r10bio *r10bio = __r10_bio;
 214	int j;
 215	struct resync_pages *rp = NULL;
 216
 217	for (j = conf->copies; j--; ) {
 218		struct bio *bio = r10bio->devs[j].bio;
 219
 220		if (bio) {
 221			rp = get_resync_pages(bio);
 222			resync_free_pages(rp);
 223			bio_put(bio);
 
 224		}
 225
 226		bio = r10bio->devs[j].repl_bio;
 227		if (bio)
 228			bio_put(bio);
 229	}
 230
 231	/* resync pages array stored in the 1st bio's .bi_private */
 232	kfree(rp);
 233
 234	rbio_pool_free(r10bio, conf);
 235}
 236
 237static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 238{
 239	int i;
 240
 241	for (i = 0; i < conf->copies; i++) {
 242		struct bio **bio = & r10_bio->devs[i].bio;
 243		if (!BIO_SPECIAL(*bio))
 244			bio_put(*bio);
 245		*bio = NULL;
 246		bio = &r10_bio->devs[i].repl_bio;
 247		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
 248			bio_put(*bio);
 249		*bio = NULL;
 250	}
 251}
 252
 253static void free_r10bio(struct r10bio *r10_bio)
 254{
 255	struct r10conf *conf = r10_bio->mddev->private;
 256
 257	put_all_bios(conf, r10_bio);
 258	mempool_free(r10_bio, &conf->r10bio_pool);
 259}
 260
 261static void put_buf(struct r10bio *r10_bio)
 262{
 263	struct r10conf *conf = r10_bio->mddev->private;
 264
 265	mempool_free(r10_bio, &conf->r10buf_pool);
 266
 267	lower_barrier(conf);
 268}
 269
 270static void reschedule_retry(struct r10bio *r10_bio)
 271{
 272	unsigned long flags;
 273	struct mddev *mddev = r10_bio->mddev;
 274	struct r10conf *conf = mddev->private;
 275
 276	spin_lock_irqsave(&conf->device_lock, flags);
 277	list_add(&r10_bio->retry_list, &conf->retry_list);
 278	conf->nr_queued ++;
 279	spin_unlock_irqrestore(&conf->device_lock, flags);
 280
 281	/* wake up frozen array... */
 282	wake_up(&conf->wait_barrier);
 283
 284	md_wakeup_thread(mddev->thread);
 285}
 286
 287/*
 288 * raid_end_bio_io() is called when we have finished servicing a mirrored
 289 * operation and are ready to return a success/failure code to the buffer
 290 * cache layer.
 291 */
 292static void raid_end_bio_io(struct r10bio *r10_bio)
 293{
 294	struct bio *bio = r10_bio->master_bio;
 295	struct r10conf *conf = r10_bio->mddev->private;
 296
 297	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 298		bio->bi_status = BLK_STS_IOERR;
 299
 300	bio_endio(bio);
 301	/*
 302	 * Wake up any possible resync thread that waits for the device
 303	 * to go idle.
 304	 */
 305	allow_barrier(conf);
 306
 307	free_r10bio(r10_bio);
 308}
 309
 310/*
 311 * Update disk head position estimator based on IRQ completion info.
 312 */
 313static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 314{
 315	struct r10conf *conf = r10_bio->mddev->private;
 316
 317	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 318		r10_bio->devs[slot].addr + (r10_bio->sectors);
 319}
 320
 321/*
 322 * Find the disk number which triggered given bio
 323 */
 324static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 325			 struct bio *bio, int *slotp, int *replp)
 326{
 327	int slot;
 328	int repl = 0;
 329
 330	for (slot = 0; slot < conf->copies; slot++) {
 331		if (r10_bio->devs[slot].bio == bio)
 332			break;
 333		if (r10_bio->devs[slot].repl_bio == bio) {
 334			repl = 1;
 335			break;
 336		}
 337	}
 338
 339	BUG_ON(slot == conf->copies);
 340	update_head_pos(slot, r10_bio);
 341
 342	if (slotp)
 343		*slotp = slot;
 344	if (replp)
 345		*replp = repl;
 346	return r10_bio->devs[slot].devnum;
 347}
 348
 349static void raid10_end_read_request(struct bio *bio)
 350{
 351	int uptodate = !bio->bi_status;
 352	struct r10bio *r10_bio = bio->bi_private;
 353	int slot;
 354	struct md_rdev *rdev;
 355	struct r10conf *conf = r10_bio->mddev->private;
 356
 357	slot = r10_bio->read_slot;
 358	rdev = r10_bio->devs[slot].rdev;
 359	/*
 360	 * this branch is our 'one mirror IO has finished' event handler:
 361	 */
 362	update_head_pos(slot, r10_bio);
 363
 364	if (uptodate) {
 365		/*
 366		 * Set R10BIO_Uptodate in our master bio, so that
 367		 * we will return a good error code to the higher
 368		 * levels even if IO on some other mirrored buffer fails.
 369		 *
 370		 * The 'master' represents the composite IO operation to
 371		 * user-side. So if something waits for IO, then it will
 372		 * wait for the 'master' bio.
 373		 */
 374		set_bit(R10BIO_Uptodate, &r10_bio->state);
 375	} else {
 376		/* If all other devices that store this block have
 377		 * failed, we want to return the error upwards rather
 378		 * than fail the last device.  Here we redefine
 379		 * "uptodate" to mean "Don't want to retry"
 380		 */
 381		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
 382			     rdev->raid_disk))
 383			uptodate = 1;
 384	}
 385	if (uptodate) {
 386		raid_end_bio_io(r10_bio);
 387		rdev_dec_pending(rdev, conf->mddev);
 388	} else {
 389		/*
 390		 * oops, read error - keep the refcount on the rdev
 391		 */
 392		char b[BDEVNAME_SIZE];
 393		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
 394				   mdname(conf->mddev),
 395				   bdevname(rdev->bdev, b),
 396				   (unsigned long long)r10_bio->sector);
 397		set_bit(R10BIO_ReadError, &r10_bio->state);
 398		reschedule_retry(r10_bio);
 399	}
 400}
 401
 402static void close_write(struct r10bio *r10_bio)
 403{
 404	/* clear the bitmap if all writes complete successfully */
 405	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
 406			   r10_bio->sectors,
 407			   !test_bit(R10BIO_Degraded, &r10_bio->state),
 408			   0);
 409	md_write_end(r10_bio->mddev);
 410}
 411
 412static void one_write_done(struct r10bio *r10_bio)
 413{
 414	if (atomic_dec_and_test(&r10_bio->remaining)) {
 415		if (test_bit(R10BIO_WriteError, &r10_bio->state))
 416			reschedule_retry(r10_bio);
 417		else {
 418			close_write(r10_bio);
 419			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
 420				reschedule_retry(r10_bio);
 421			else
 422				raid_end_bio_io(r10_bio);
 423		}
 424	}
 425}
 426
 427static void raid10_end_write_request(struct bio *bio)
 428{
 429	struct r10bio *r10_bio = bio->bi_private;
 430	int dev;
 431	int dec_rdev = 1;
 432	struct r10conf *conf = r10_bio->mddev->private;
 433	int slot, repl;
 434	struct md_rdev *rdev = NULL;
 435	struct bio *to_put = NULL;
 436	bool discard_error;
 437
 438	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 439
 440	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 441
 442	if (repl)
 443		rdev = conf->mirrors[dev].replacement;
 444	if (!rdev) {
 445		smp_rmb();
 446		repl = 0;
 447		rdev = conf->mirrors[dev].rdev;
 448	}
 449	/*
 450	 * this branch is our 'one mirror IO has finished' event handler:
 451	 */
 452	if (bio->bi_status && !discard_error) {
 453		if (repl)
 454			/* Never record new bad blocks to replacement,
 455			 * just fail it.
 456			 */
 457			md_error(rdev->mddev, rdev);
 458		else {
 459			set_bit(WriteErrorSeen,	&rdev->flags);
 460			if (!test_and_set_bit(WantReplacement, &rdev->flags))
 461				set_bit(MD_RECOVERY_NEEDED,
 462					&rdev->mddev->recovery);
 463
 464			dec_rdev = 0;
 465			if (test_bit(FailFast, &rdev->flags) &&
 466			    (bio->bi_opf & MD_FAILFAST)) {
 467				md_error(rdev->mddev, rdev);
 468			}
 469
 470			/*
 471			 * When the device is faulty, it is not necessary to
 472			 * handle the write error.
 473			 * For failfast, this is the only remaining device,
 474			 * so we need to retry the write without FailFast.
 475			 */
 476			if (!test_bit(Faulty, &rdev->flags))
 477				set_bit(R10BIO_WriteError, &r10_bio->state);
 478			else {
 479				r10_bio->devs[slot].bio = NULL;
 480				to_put = bio;
 481				dec_rdev = 1;
 482			}
 483		}
 484	} else {
 485		/*
 486		 * Set R10BIO_Uptodate in our master bio, so that
 487		 * we will return a good error code to the higher
 488		 * levels even if IO on some other mirrored buffer fails.
 489		 *
 490		 * The 'master' represents the composite IO operation to
 491		 * user-side. So if something waits for IO, then it will
 492		 * wait for the 'master' bio.
 493		 */
 494		sector_t first_bad;
 495		int bad_sectors;
 496
 497		/*
 498		 * Do not set R10BIO_Uptodate if the current device is
 499		 * rebuilding or Faulty. This is because we cannot use
 500		 * such device for properly reading the data back (we could
 501		 * potentially use it, if the current write would have fallen
 502		 * before rdev->recovery_offset, but for simplicity we don't
 503		 * check this here).
 504		 */
 505		if (test_bit(In_sync, &rdev->flags) &&
 506		    !test_bit(Faulty, &rdev->flags))
 507			set_bit(R10BIO_Uptodate, &r10_bio->state);
 508
 509		/* Maybe we can clear some bad blocks. */
 510		if (is_badblock(rdev,
 511				r10_bio->devs[slot].addr,
 512				r10_bio->sectors,
 513				&first_bad, &bad_sectors) && !discard_error) {
 514			bio_put(bio);
 515			if (repl)
 516				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
 517			else
 518				r10_bio->devs[slot].bio = IO_MADE_GOOD;
 519			dec_rdev = 0;
 520			set_bit(R10BIO_MadeGood, &r10_bio->state);
 521		}
 522	}
 523
 524	/*
 525	 *
 526	 * Let's see if all mirrored write operations have finished
 527	 * already.
 528	 */
 529	one_write_done(r10_bio);
 530	if (dec_rdev)
 531		rdev_dec_pending(rdev, conf->mddev);
 532	if (to_put)
 533		bio_put(to_put);
 534}
 535
 536/*
 537 * RAID10 layout manager
 538 * As well as the chunksize and raid_disks count, there are two
 539 * parameters: near_copies and far_copies.
 540 * near_copies * far_copies must be <= raid_disks.
 541 * Normally one of these will be 1.
 542 * If both are 1, we get raid0.
 543 * If near_copies == raid_disks, we get raid1.
 544 *
 545 * Chunks are laid out in raid0 style with near_copies copies of the
 546 * first chunk, followed by near_copies copies of the next chunk and
 547 * so on.
 548 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 549 * as described above, we start again with a device offset of near_copies.
 550 * So we effectively have another copy of the whole array further down all
 551 * the drives, but with blocks on different drives.
 552 * With this layout, a block is never stored twice on the same device.
 553 *
 554 * raid10_find_phys finds the sector offset of a given virtual sector
 555 * on each device that it is on.
 556 *
 557 * raid10_find_virt does the reverse mapping, from a device and a
 558 * sector offset to a virtual address
 559 */
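/*
 * Worked example (illustrative geometry: raid_disks=4, near_copies=2,
 * far_copies=1, 64K chunks so chunk_shift=7 and chunk_mask=127):
 *   virtual sector 300 -> chunk 2, in-chunk offset 44
 *   chunk * near_copies = 4 -> stripe 1, starting device 0
 *   physical sector (1 << 7) + 44 = 172 on devices 0 and 1
 * and raid10_find_virt(conf, 172, 0) maps back:
 *   vchunk = (1 * 4 + 0) / 2 = 2 -> (2 << 7) + 44 = 300
 */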
 560
 561static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
 562{
 563	int n,f;
 564	sector_t sector;
 565	sector_t chunk;
 566	sector_t stripe;
 567	int dev;
 568	int slot = 0;
 569	int last_far_set_start, last_far_set_size;
 570
 571	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 572	last_far_set_start *= geo->far_set_size;
 573
 574	last_far_set_size = geo->far_set_size;
 575	last_far_set_size += (geo->raid_disks % geo->far_set_size);
 576
 577	/* now calculate first sector/dev */
 578	chunk = r10bio->sector >> geo->chunk_shift;
 579	sector = r10bio->sector & geo->chunk_mask;
 580
 581	chunk *= geo->near_copies;
 582	stripe = chunk;
 583	dev = sector_div(stripe, geo->raid_disks);
 584	if (geo->far_offset)
 585		stripe *= geo->far_copies;
 586
 587	sector += stripe << geo->chunk_shift;
 588
 589	/* and calculate all the others */
 590	for (n = 0; n < geo->near_copies; n++) {
 591		int d = dev;
 592		int set;
 593		sector_t s = sector;
 594		r10bio->devs[slot].devnum = d;
 595		r10bio->devs[slot].addr = s;
 596		slot++;
 597
 598		for (f = 1; f < geo->far_copies; f++) {
 599			set = d / geo->far_set_size;
 600			d += geo->near_copies;
 601
 602			if ((geo->raid_disks % geo->far_set_size) &&
 603			    (d > last_far_set_start)) {
 604				d -= last_far_set_start;
 605				d %= last_far_set_size;
 606				d += last_far_set_start;
 607			} else {
 608				d %= geo->far_set_size;
 609				d += geo->far_set_size * set;
 610			}
 611			s += geo->stride;
 612			r10bio->devs[slot].devnum = d;
 613			r10bio->devs[slot].addr = s;
 614			slot++;
 615		}
 616		dev++;
 617		if (dev >= geo->raid_disks) {
 618			dev = 0;
 619			sector += (geo->chunk_mask + 1);
 620		}
 621	}
 622}
 623
 624static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 625{
 626	struct geom *geo = &conf->geo;
 627
 628	if (conf->reshape_progress != MaxSector &&
 629	    ((r10bio->sector >= conf->reshape_progress) !=
 630	     conf->mddev->reshape_backwards)) {
 631		set_bit(R10BIO_Previous, &r10bio->state);
 632		geo = &conf->prev;
 633	} else
 634		clear_bit(R10BIO_Previous, &r10bio->state);
 635
 636	__raid10_find_phys(geo, r10bio);
 637}
 638
 639static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 640{
 641	sector_t offset, chunk, vchunk;
 642	/* Never use conf->prev as this is only called during resync
 643	 * or recovery, so reshape isn't happening
 644	 */
 645	struct geom *geo = &conf->geo;
 646	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
 647	int far_set_size = geo->far_set_size;
 648	int last_far_set_start;
 649
 650	if (geo->raid_disks % geo->far_set_size) {
 651		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 652		last_far_set_start *= geo->far_set_size;
 653
 654		if (dev >= last_far_set_start) {
 655			far_set_size = geo->far_set_size;
 656			far_set_size += (geo->raid_disks % geo->far_set_size);
 657			far_set_start = last_far_set_start;
 658		}
 659	}
 660
 661	offset = sector & geo->chunk_mask;
 662	if (geo->far_offset) {
 663		int fc;
 664		chunk = sector >> geo->chunk_shift;
 665		fc = sector_div(chunk, geo->far_copies);
 666		dev -= fc * geo->near_copies;
 667		if (dev < far_set_start)
 668			dev += far_set_size;
 669	} else {
 670		while (sector >= geo->stride) {
 671			sector -= geo->stride;
 672			if (dev < (geo->near_copies + far_set_start))
 673				dev += far_set_size - geo->near_copies;
 674			else
 675				dev -= geo->near_copies;
 676		}
 677		chunk = sector >> geo->chunk_shift;
 678	}
 679	vchunk = chunk * geo->raid_disks + dev;
 680	sector_div(vchunk, geo->near_copies);
 681	return (vchunk << geo->chunk_shift) + offset;
 682}
 683
 684/*
 685 * This routine returns the disk from which the requested read should
 686 * be done. There is a per-array 'next expected sequential IO' sector
 687 * number - if this matches on the next IO then we use the last disk.
 688 * There is also a per-disk 'last known head position' sector that is
 689 * maintained from IRQ contexts; both the normal and the resync IO
 690 * completion handlers update this position correctly. If there is no
 691 * perfect sequential match then we pick the disk whose head is closest.
 692 *
 693 * If there are 2 mirrors in the same 2 devices, performance degrades
 694 * because position is mirror, not device based.
 695 *
 696 * The rdev for the device selected will have nr_pending incremented.
 697 */
 698
 699/*
 700 * FIXME: possibly should rethink readbalancing and do it differently
 701 * depending on near_copies / far_copies geometry.
 702 */
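/*
 * Example of the policy below (illustrative): if one copy sits on an SSD
 * with three requests pending and another on an idle HDD whose head is
 * already adjacent, the SSD copy still wins, because once any usable
 * non-rotational device is seen the least-loaded such device is preferred
 * over the shortest-seek rotational one.
 */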
 703static struct md_rdev *read_balance(struct r10conf *conf,
 704				    struct r10bio *r10_bio,
 705				    int *max_sectors)
 706{
 707	const sector_t this_sector = r10_bio->sector;
 708	int disk, slot;
 709	int sectors = r10_bio->sectors;
 710	int best_good_sectors;
 711	sector_t new_distance, best_dist;
 712	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
 713	int do_balance;
 714	int best_dist_slot, best_pending_slot;
 715	bool has_nonrot_disk = false;
 716	unsigned int min_pending;
 717	struct geom *geo = &conf->geo;
 718
 719	raid10_find_phys(conf, r10_bio);
 720	rcu_read_lock();
 721	best_dist_slot = -1;
 722	min_pending = UINT_MAX;
 723	best_dist_rdev = NULL;
 724	best_pending_rdev = NULL;
 725	best_dist = MaxSector;
 726	best_good_sectors = 0;
 727	do_balance = 1;
 728	clear_bit(R10BIO_FailFast, &r10_bio->state);
 729	/*
 730	 * Check if we can balance. We can balance on the whole
 731	 * device if no resync is going on (recovery is ok), or below
 732	 * the resync window. We take the first readable disk when
 733	 * above the resync window.
 734	 */
 735	if ((conf->mddev->recovery_cp < MaxSector
 736	     && (this_sector + sectors >= conf->next_resync)) ||
 737	    (mddev_is_clustered(conf->mddev) &&
 738	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 739					    this_sector + sectors)))
 740		do_balance = 0;
 741
 742	for (slot = 0; slot < conf->copies ; slot++) {
 743		sector_t first_bad;
 744		int bad_sectors;
 745		sector_t dev_sector;
 746		unsigned int pending;
 747		bool nonrot;
 748
 749		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 750			continue;
 751		disk = r10_bio->devs[slot].devnum;
 752		rdev = rcu_dereference(conf->mirrors[disk].replacement);
 753		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
 754		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 755			rdev = rcu_dereference(conf->mirrors[disk].rdev);
 
 756		if (rdev == NULL ||
 757		    test_bit(Faulty, &rdev->flags))
 758			continue;
 759		if (!test_bit(In_sync, &rdev->flags) &&
 760		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 761			continue;
 762
 763		dev_sector = r10_bio->devs[slot].addr;
 764		if (is_badblock(rdev, dev_sector, sectors,
 765				&first_bad, &bad_sectors)) {
 766			if (best_dist < MaxSector)
 767				/* Already have a better slot */
 768				continue;
 769			if (first_bad <= dev_sector) {
 770				/* Cannot read here.  If this is the
 771				 * 'primary' device, then we must not read
 772				 * beyond 'bad_sectors' from another device.
 773				 */
 774				bad_sectors -= (dev_sector - first_bad);
 775				if (!do_balance && sectors > bad_sectors)
 776					sectors = bad_sectors;
 777				if (best_good_sectors > sectors)
 778					best_good_sectors = sectors;
 779			} else {
 780				sector_t good_sectors =
 781					first_bad - dev_sector;
 782				if (good_sectors > best_good_sectors) {
 783					best_good_sectors = good_sectors;
 784					best_dist_slot = slot;
 785					best_dist_rdev = rdev;
 786				}
 787				if (!do_balance)
 788					/* Must read from here */
 789					break;
 790			}
 791			continue;
 792		} else
 793			best_good_sectors = sectors;
 794
 795		if (!do_balance)
 796			break;
 797
 798		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 799		has_nonrot_disk |= nonrot;
 800		pending = atomic_read(&rdev->nr_pending);
 801		if (min_pending > pending && nonrot) {
 802			min_pending = pending;
 803			best_pending_slot = slot;
 804			best_pending_rdev = rdev;
 805		}
 806
 807		if (best_dist_slot >= 0)
 808			/* At least 2 disks to choose from so failfast is OK */
 809			set_bit(R10BIO_FailFast, &r10_bio->state);
 810		/* This optimisation is debatable, and completely destroys
 811		 * sequential read speed for 'far copies' arrays.  So only
 812		 * keep it for 'near' arrays, and review those later.
 813		 */
 814		if (geo->near_copies > 1 && !pending)
 815			new_distance = 0;
 816
 817		/* for far > 1 always use the lowest address */
 818		else if (geo->far_copies > 1)
 819			new_distance = r10_bio->devs[slot].addr;
 820		else
 821			new_distance = abs(r10_bio->devs[slot].addr -
 822					   conf->mirrors[disk].head_position);
 823
 824		if (new_distance < best_dist) {
 825			best_dist = new_distance;
 826			best_dist_slot = slot;
 827			best_dist_rdev = rdev;
 828		}
 829	}
 830	if (slot >= conf->copies) {
 831		if (has_nonrot_disk) {
 832			slot = best_pending_slot;
 833			rdev = best_pending_rdev;
 834		} else {
 835			slot = best_dist_slot;
 836			rdev = best_dist_rdev;
 837		}
 838	}
 839
 840	if (slot >= 0) {
 841		atomic_inc(&rdev->nr_pending);
 842		r10_bio->read_slot = slot;
 843	} else
 844		rdev = NULL;
 845	rcu_read_unlock();
 846	*max_sectors = best_good_sectors;
 847
 848	return rdev;
 849}
 850
 851static int raid10_congested(struct mddev *mddev, int bits)
 852{
 853	struct r10conf *conf = mddev->private;
 854	int i, ret = 0;
 855
 856	if ((bits & (1 << WB_async_congested)) &&
 857	    conf->pending_count >= max_queued_requests)
 858		return 1;
 859
 860	rcu_read_lock();
 861	for (i = 0;
 862	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
 863		     && ret == 0;
 864	     i++) {
 865		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 866		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 867			struct request_queue *q = bdev_get_queue(rdev->bdev);
 868
 869			ret |= bdi_congested(q->backing_dev_info, bits);
 870		}
 871	}
 872	rcu_read_unlock();
 873	return ret;
 874}
 875
 876static void flush_pending_writes(struct r10conf *conf)
 877{
 878	/* Any writes that have been queued but are awaiting
 879	 * bitmap updates get flushed here.
 880	 */
 881	spin_lock_irq(&conf->device_lock);
 882
 883	if (conf->pending_bio_list.head) {
 884		struct blk_plug plug;
 885		struct bio *bio;
 886
 887		bio = bio_list_get(&conf->pending_bio_list);
 888		conf->pending_count = 0;
 889		spin_unlock_irq(&conf->device_lock);
 890
 891		/*
 892		 * As this is called in a wait_event() loop (see freeze_array),
 893		 * current->state might be TASK_UNINTERRUPTIBLE which will
 894		 * cause a warning when we prepare to wait again.  As it is
 895		 * rare that this path is taken, it is perfectly safe to force
 896		 * us to go around the wait_event() loop again, so the warning
 897		 * is a false-positive. Silence the warning by resetting
 898		 * thread state
 899		 */
 900		__set_current_state(TASK_RUNNING);
 901
 902		blk_start_plug(&plug);
 903		/* flush any pending bitmap writes to disk
 904		 * before proceeding w/ I/O */
 905		md_bitmap_unplug(conf->mddev->bitmap);
 906		wake_up(&conf->wait_barrier);
 907
 908		while (bio) { /* submit pending writes */
 909			struct bio *next = bio->bi_next;
 910			struct md_rdev *rdev = (void*)bio->bi_disk;
 911			bio->bi_next = NULL;
 912			bio_set_dev(bio, rdev->bdev);
 913			if (test_bit(Faulty, &rdev->flags)) {
 914				bio_io_error(bio);
 915			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
 916					    !blk_queue_discard(bio->bi_disk->queue)))
 917				/* Just ignore it */
 918				bio_endio(bio);
 919			else
 920				generic_make_request(bio);
 921			bio = next;
 
 922		}
 923		blk_finish_plug(&plug);
 924	} else
 925		spin_unlock_irq(&conf->device_lock);
 926}
 927
 928/* Barriers....
 929 * Sometimes we need to suspend IO while we do something else,
 930 * either some resync/recovery, or reconfigure the array.
 931 * To do this we raise a 'barrier'.
 932 * The 'barrier' is a counter that can be raised multiple times
 933 * to count how many activities are happening which preclude
 934 * normal IO.
 935 * We can only raise the barrier if there is no pending IO.
 936 * i.e. if nr_pending == 0.
 937 * We choose only to raise the barrier if no-one is waiting for the
 938 * barrier to go down.  This means that as soon as an IO request
 939 * is ready, no other operations which require a barrier will start
 940 * until the IO request has had a chance.
 941 *
 942 * So: regular IO calls 'wait_barrier'.  When that returns there
 943 *    is no background IO happening.  It must arrange to call
 944 *    allow_barrier when it has finished its IO.
 945 * background IO calls must call raise_barrier.  Once that returns
 946 *    there is no normal IO happening.  It must arrange to call
 947 *    lower_barrier when the particular background IO completes.
 948 */
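/*
 * Minimal usage sketch of the pairing described above (illustrative):
 *
 *   regular IO path:               resync/recovery path:
 *     wait_barrier(conf);            raise_barrier(conf, 0);
 *     ...submit the request...       ...issue sync/recovery IO...
 *     allow_barrier(conf);           lower_barrier(conf);
 */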
 949
 950static void raise_barrier(struct r10conf *conf, int force)
 951{
 952	BUG_ON(force && !conf->barrier);
 953	spin_lock_irq(&conf->resync_lock);
 954
 955	/* Wait until no block IO is waiting (unless 'force') */
 956	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
 957			    conf->resync_lock);
 958
 959	/* block any new IO from starting */
 960	conf->barrier++;
 961
 962	/* Now wait for all pending IO to complete */
 963	wait_event_lock_irq(conf->wait_barrier,
 964			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
 965			    conf->resync_lock);
 966
 967	spin_unlock_irq(&conf->resync_lock);
 968}
 969
 970static void lower_barrier(struct r10conf *conf)
 971{
 972	unsigned long flags;
 973	spin_lock_irqsave(&conf->resync_lock, flags);
 974	conf->barrier--;
 975	spin_unlock_irqrestore(&conf->resync_lock, flags);
 
 976	wake_up(&conf->wait_barrier);
 977}
 978
 979static void wait_barrier(struct r10conf *conf)
 980{
 981	spin_lock_irq(&conf->resync_lock);
 982	if (conf->barrier) {
 983		conf->nr_waiting++;
 984		/* Wait for the barrier to drop.
 985		 * However if there are already pending
 986		 * requests (preventing the barrier from
 987		 * rising completely), and the
 988		 * pre-process bio queue isn't empty,
 989		 * then don't wait, as we need to empty
 990		 * that queue to get the nr_pending
 991		 * count down.
 992		 */
 993		raid10_log(conf->mddev, "wait barrier");
 994		wait_event_lock_irq(conf->wait_barrier,
 995				    !conf->barrier ||
 996				    (atomic_read(&conf->nr_pending) &&
 997				     current->bio_list &&
 998				     (!bio_list_empty(&current->bio_list[0]) ||
 999				      !bio_list_empty(&current->bio_list[1]))),
1000				    conf->resync_lock);
1001		conf->nr_waiting--;
1002		if (!conf->nr_waiting)
1003			wake_up(&conf->wait_barrier);
1004	}
1005	atomic_inc(&conf->nr_pending);
1006	spin_unlock_irq(&conf->resync_lock);
1007}
1008
1009static void allow_barrier(struct r10conf *conf)
1010{
1011	if ((atomic_dec_and_test(&conf->nr_pending)) ||
1012			(conf->array_freeze_pending))
1013		wake_up(&conf->wait_barrier);
1014}
1015
1016static void freeze_array(struct r10conf *conf, int extra)
1017{
1018	/* stop syncio and normal IO and wait for everything to
1019	 * go quiet.
1020	 * We increment barrier and nr_waiting, and then
1021	 * wait until nr_pending matches nr_queued+extra
1022	 * This is called in the context of one normal IO request
1023	 * that has failed. Thus any sync request that might be pending
1024	 * will be blocked by nr_pending, and we need to wait for
1025	 * pending IO requests to complete or be queued for re-try.
1026	 * Thus the number queued (nr_queued) plus this request (extra)
1027	 * must match the number of pending IOs (nr_pending) before
1028	 * we continue.
1029	 */
1030	spin_lock_irq(&conf->resync_lock);
1031	conf->array_freeze_pending++;
1032	conf->barrier++;
1033	conf->nr_waiting++;
1034	wait_event_lock_irq_cmd(conf->wait_barrier,
1035				atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
1036				conf->resync_lock,
1037				flush_pending_writes(conf));
1038
1039	conf->array_freeze_pending--;
1040	spin_unlock_irq(&conf->resync_lock);
1041}
1042
1043static void unfreeze_array(struct r10conf *conf)
1044{
1045	/* reverse the effect of the freeze */
1046	spin_lock_irq(&conf->resync_lock);
1047	conf->barrier--;
1048	conf->nr_waiting--;
1049	wake_up(&conf->wait_barrier);
1050	spin_unlock_irq(&conf->resync_lock);
1051}
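/*
 * Typical pairing (sketch): an error handler that still holds one failed
 * request calls freeze_array(conf, 1) (that request accounts for the
 * 'extra' count), repairs state, then calls unfreeze_array(conf).
 */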
1052
1053static sector_t choose_data_offset(struct r10bio *r10_bio,
1054				   struct md_rdev *rdev)
1055{
1056	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1057	    test_bit(R10BIO_Previous, &r10_bio->state))
1058		return rdev->data_offset;
1059	else
1060		return rdev->new_data_offset;
1061}
1062
1063struct raid10_plug_cb {
1064	struct blk_plug_cb	cb;
1065	struct bio_list		pending;
1066	int			pending_cnt;
1067};
1068
1069static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1070{
1071	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1072						   cb);
1073	struct mddev *mddev = plug->cb.data;
1074	struct r10conf *conf = mddev->private;
1075	struct bio *bio;
1076
1077	if (from_schedule || current->bio_list) {
1078		spin_lock_irq(&conf->device_lock);
1079		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1080		conf->pending_count += plug->pending_cnt;
1081		spin_unlock_irq(&conf->device_lock);
1082		wake_up(&conf->wait_barrier);
1083		md_wakeup_thread(mddev->thread);
1084		kfree(plug);
1085		return;
1086	}
1087
1088	/* we aren't scheduling, so we can do the write-out directly. */
1089	bio = bio_list_get(&plug->pending);
1090	md_bitmap_unplug(mddev->bitmap);
1091	wake_up(&conf->wait_barrier);
1092
1093	while (bio) { /* submit pending writes */
1094		struct bio *next = bio->bi_next;
1095		struct md_rdev *rdev = (void*)bio->bi_disk;
1096		bio->bi_next = NULL;
1097		bio_set_dev(bio, rdev->bdev);
1098		if (test_bit(Faulty, &rdev->flags)) {
1099			bio_io_error(bio);
1100		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
1101				    !blk_queue_discard(bio->bi_disk->queue)))
1102			/* Just ignore it */
1103			bio_endio(bio);
1104		else
1105			generic_make_request(bio);
1106		bio = next;
 
1107	}
1108	kfree(plug);
1109}
1110
1111/*
1112 * 1. Register the new request and wait if the reconstruction thread has put
1113 * up a bar for new requests. Continue immediately if no resync is active
1114 * currently.
1115 * 2. If IO spans the reshape position, we need to wait for reshape to pass.
1116 */
1117static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1118				 struct bio *bio, sector_t sectors)
1119{
1120	wait_barrier(conf);
1121	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1122	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1123	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1124		raid10_log(conf->mddev, "wait reshape");
1125		allow_barrier(conf);
1126		wait_event(conf->wait_barrier,
1127			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1128			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1129			   sectors);
1130		wait_barrier(conf);
1131	}
 
1132}
1133
1134static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1135				struct r10bio *r10_bio)
1136{
1137	struct r10conf *conf = mddev->private;
1138	struct bio *read_bio;
1139	const int op = bio_op(bio);
1140	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1141	int max_sectors;
1142	struct md_rdev *rdev;
1143	char b[BDEVNAME_SIZE];
1144	int slot = r10_bio->read_slot;
1145	struct md_rdev *err_rdev = NULL;
1146	gfp_t gfp = GFP_NOIO;
1147
1148	if (r10_bio->devs[slot].rdev) {
1149		/*
1150		 * This is an error retry, but we cannot
1151		 * safely dereference the rdev in the r10_bio,
1152		 * we must use the one in conf.
1153		 * If it has already been disconnected (unlikely)
1154		 * we lose the device name in error messages.
1155		 */
1156		int disk;
1157		/*
1158		 * As we are blocking raid10, it is a little safer to
1159		 * use __GFP_HIGH.
1160		 */
1161		gfp = GFP_NOIO | __GFP_HIGH;
1162
1163		rcu_read_lock();
1164		disk = r10_bio->devs[slot].devnum;
1165		err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1166		if (err_rdev)
1167			bdevname(err_rdev->bdev, b);
1168		else {
1169			strcpy(b, "???");
1170			/* This never gets dereferenced */
1171			err_rdev = r10_bio->devs[slot].rdev;
1172		}
1173		rcu_read_unlock();
1174	}
1175
1176	regular_request_wait(mddev, conf, bio, r10_bio->sectors);
 
1177	rdev = read_balance(conf, r10_bio, &max_sectors);
1178	if (!rdev) {
1179		if (err_rdev) {
1180			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1181					    mdname(mddev), b,
1182					    (unsigned long long)r10_bio->sector);
1183		}
1184		raid_end_bio_io(r10_bio);
1185		return;
1186	}
1187	if (err_rdev)
1188		pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
1189				   mdname(mddev),
1190				   bdevname(rdev->bdev, b),
1191				   (unsigned long long)r10_bio->sector);
1192	if (max_sectors < bio_sectors(bio)) {
1193		struct bio *split = bio_split(bio, max_sectors,
1194					      gfp, &conf->bio_split);
1195		bio_chain(split, bio);
1196		allow_barrier(conf);
1197		generic_make_request(bio);
1198		wait_barrier(conf);
1199		bio = split;
1200		r10_bio->master_bio = bio;
1201		r10_bio->sectors = max_sectors;
1202	}
1203	slot = r10_bio->read_slot;
1204
1205	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1206
1207	r10_bio->devs[slot].bio = read_bio;
1208	r10_bio->devs[slot].rdev = rdev;
1209
1210	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1211		choose_data_offset(r10_bio, rdev);
1212	bio_set_dev(read_bio, rdev->bdev);
1213	read_bio->bi_end_io = raid10_end_read_request;
1214	bio_set_op_attrs(read_bio, op, do_sync);
1215	if (test_bit(FailFast, &rdev->flags) &&
1216	    test_bit(R10BIO_FailFast, &r10_bio->state))
1217	        read_bio->bi_opf |= MD_FAILFAST;
1218	read_bio->bi_private = r10_bio;
1219
1220	if (mddev->gendisk)
1221	        trace_block_bio_remap(read_bio->bi_disk->queue,
1222	                              read_bio, disk_devt(mddev->gendisk),
1223	                              r10_bio->sector);
1224	generic_make_request(read_bio);
1225	return;
1226}
1227
1228static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1229				  struct bio *bio, bool replacement,
1230				  int n_copy)
1231{
1232	const int op = bio_op(bio);
1233	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1234	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
1235	unsigned long flags;
1236	struct blk_plug_cb *cb;
1237	struct raid10_plug_cb *plug = NULL;
1238	struct r10conf *conf = mddev->private;
1239	struct md_rdev *rdev;
1240	int devnum = r10_bio->devs[n_copy].devnum;
1241	struct bio *mbio;
1242
1243	if (replacement) {
1244		rdev = conf->mirrors[devnum].replacement;
1245		if (rdev == NULL) {
1246			/* Replacement just got moved to main 'rdev' */
1247			smp_mb();
1248			rdev = conf->mirrors[devnum].rdev;
1249		}
1250	} else
1251		rdev = conf->mirrors[devnum].rdev;
1252
1253	mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1254	if (replacement)
1255		r10_bio->devs[n_copy].repl_bio = mbio;
1256	else
1257		r10_bio->devs[n_copy].bio = mbio;
1258
1259	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
1260				   choose_data_offset(r10_bio, rdev));
1261	bio_set_dev(mbio, rdev->bdev);
1262	mbio->bi_end_io	= raid10_end_write_request;
1263	bio_set_op_attrs(mbio, op, do_sync | do_fua);
1264	if (!replacement && test_bit(FailFast,
1265				     &conf->mirrors[devnum].rdev->flags)
1266			 && enough(conf, devnum))
1267		mbio->bi_opf |= MD_FAILFAST;
1268	mbio->bi_private = r10_bio;
1269
1270	if (conf->mddev->gendisk)
1271		trace_block_bio_remap(mbio->bi_disk->queue,
1272				      mbio, disk_devt(conf->mddev->gendisk),
1273				      r10_bio->sector);
1274	/* flush_pending_writes() needs access to the rdev so...*/
1275	mbio->bi_disk = (void *)rdev;
1276
1277	atomic_inc(&r10_bio->remaining);
1278
1279	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
1280	if (cb)
1281		plug = container_of(cb, struct raid10_plug_cb, cb);
1282	else
1283		plug = NULL;
1284	if (plug) {
1285		bio_list_add(&plug->pending, mbio);
1286		plug->pending_cnt++;
1287	} else {
1288		spin_lock_irqsave(&conf->device_lock, flags);
1289		bio_list_add(&conf->pending_bio_list, mbio);
1290		conf->pending_count++;
1291		spin_unlock_irqrestore(&conf->device_lock, flags);
1292		md_wakeup_thread(mddev->thread);
1293	}
1294}
1295
1296static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1297				 struct r10bio *r10_bio)
1298{
1299	struct r10conf *conf = mddev->private;
1300	int i;
1301	struct md_rdev *blocked_rdev;
1302	sector_t sectors;
1303	int max_sectors;
1304
1305	if ((mddev_is_clustered(mddev) &&
1306	     md_cluster_ops->area_resyncing(mddev, WRITE,
1307					    bio->bi_iter.bi_sector,
1308					    bio_end_sector(bio)))) {
1309		DEFINE_WAIT(w);
1310		for (;;) {
1311			prepare_to_wait(&conf->wait_barrier,
1312					&w, TASK_IDLE);
1313			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1314				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1315				break;
1316			schedule();
1317		}
1318		finish_wait(&conf->wait_barrier, &w);
1319	}
1320
1321	sectors = r10_bio->sectors;
1322	regular_request_wait(mddev, conf, bio, sectors);
 
1323	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1324	    (mddev->reshape_backwards
1325	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1326		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1327	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1328		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1329		/* Need to update reshape_position in metadata */
1330		mddev->reshape_position = conf->reshape_progress;
1331		set_mask_bits(&mddev->sb_flags, 0,
1332			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1333		md_wakeup_thread(mddev->thread);
1334		raid10_log(conf->mddev, "wait reshape metadata");
1335		wait_event(mddev->sb_wait,
1336			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1337
1338		conf->reshape_safe = mddev->reshape_position;
1339	}
1340
1341	if (conf->pending_count >= max_queued_requests) {
1342		md_wakeup_thread(mddev->thread);
1343		raid10_log(mddev, "wait queued");
1344		wait_event(conf->wait_barrier,
1345			   conf->pending_count < max_queued_requests);
1346	}
1347	/* first select target devices under rcu_lock and
1348	 * inc refcount on their rdev.  Record them by setting
1349	 * bios[x] to bio
1350	 * If there are known/acknowledged bad blocks on any device
1351	 * on which we have seen a write error, we want to avoid
1352	 * writing to those blocks.  This potentially requires several
1353	 * writes to write around the bad blocks.  Each set of writes
1354	 * gets its own r10_bio with a set of bios attached.
1355	 */
1356
1357	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1358	raid10_find_phys(conf, r10_bio);
1359retry_write:
1360	blocked_rdev = NULL;
1361	rcu_read_lock();
1362	max_sectors = r10_bio->sectors;
1363
1364	for (i = 0;  i < conf->copies; i++) {
1365		int d = r10_bio->devs[i].devnum;
1366		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1367		struct md_rdev *rrdev = rcu_dereference(
1368			conf->mirrors[d].replacement);
1369		if (rdev == rrdev)
1370			rrdev = NULL;
1371		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1372			atomic_inc(&rdev->nr_pending);
1373			blocked_rdev = rdev;
1374			break;
1375		}
1376		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1377			atomic_inc(&rrdev->nr_pending);
1378			blocked_rdev = rrdev;
1379			break;
1380		}
1381		if (rdev && (test_bit(Faulty, &rdev->flags)))
1382			rdev = NULL;
1383		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1384			rrdev = NULL;
1385
1386		r10_bio->devs[i].bio = NULL;
1387		r10_bio->devs[i].repl_bio = NULL;
1388
1389		if (!rdev && !rrdev) {
1390			set_bit(R10BIO_Degraded, &r10_bio->state);
1391			continue;
1392		}
1393		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1394			sector_t first_bad;
1395			sector_t dev_sector = r10_bio->devs[i].addr;
1396			int bad_sectors;
1397			int is_bad;
1398
1399			is_bad = is_badblock(rdev, dev_sector, max_sectors,
1400					     &first_bad, &bad_sectors);
1401			if (is_bad < 0) {
1402				/* Mustn't write here until the bad block
1403				 * is acknowledged
1404				 */
1405				atomic_inc(&rdev->nr_pending);
1406				set_bit(BlockedBadBlocks, &rdev->flags);
1407				blocked_rdev = rdev;
1408				break;
1409			}
1410			if (is_bad && first_bad <= dev_sector) {
1411				/* Cannot write here at all */
1412				bad_sectors -= (dev_sector - first_bad);
1413				if (bad_sectors < max_sectors)
1414					/* Mustn't write more than bad_sectors
1415					 * to other devices yet
1416					 */
1417					max_sectors = bad_sectors;
1418				/* We don't set R10BIO_Degraded as that
1419				 * only applies if the disk is missing,
1420				 * so it might be re-added, and we want to
1421				 * know to recover this chunk.
1422				 * In this case the device is here, and the
1423				 * fact that this chunk is not in-sync is
1424				 * recorded in the bad block log.
1425				 */
1426				continue;
1427			}
1428			if (is_bad) {
1429				int good_sectors = first_bad - dev_sector;
1430				if (good_sectors < max_sectors)
1431					max_sectors = good_sectors;
1432			}
1433		}
1434		if (rdev) {
1435			r10_bio->devs[i].bio = bio;
1436			atomic_inc(&rdev->nr_pending);
1437		}
1438		if (rrdev) {
1439			r10_bio->devs[i].repl_bio = bio;
1440			atomic_inc(&rrdev->nr_pending);
1441		}
1442	}
1443	rcu_read_unlock();
1444
1445	if (unlikely(blocked_rdev)) {
1446		/* Have to wait for this device to get unblocked, then retry */
1447		int j;
1448		int d;
1449
1450		for (j = 0; j < i; j++) {
1451			if (r10_bio->devs[j].bio) {
1452				d = r10_bio->devs[j].devnum;
1453				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1454			}
1455			if (r10_bio->devs[j].repl_bio) {
1456				struct md_rdev *rdev;
1457				d = r10_bio->devs[j].devnum;
1458				rdev = conf->mirrors[d].replacement;
1459				if (!rdev) {
1460					/* Race with remove_disk */
1461					smp_mb();
1462					rdev = conf->mirrors[d].rdev;
1463				}
1464				rdev_dec_pending(rdev, mddev);
1465			}
1466		}
1467		allow_barrier(conf);
1468		raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1469		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1470		wait_barrier(conf);
1471		goto retry_write;
1472	}
1473
1474	if (max_sectors < r10_bio->sectors)
1475		r10_bio->sectors = max_sectors;
1476
1477	if (r10_bio->sectors < bio_sectors(bio)) {
1478		struct bio *split = bio_split(bio, r10_bio->sectors,
1479					      GFP_NOIO, &conf->bio_split);
1480		bio_chain(split, bio);
1481		allow_barrier(conf);
1482		generic_make_request(bio);
1483		wait_barrier(conf);
1484		bio = split;
1485		r10_bio->master_bio = bio;
1486	}
1487
1488	atomic_set(&r10_bio->remaining, 1);
1489	md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1490
1491	for (i = 0; i < conf->copies; i++) {
1492		if (r10_bio->devs[i].bio)
1493			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1494		if (r10_bio->devs[i].repl_bio)
1495			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1496	}
1497	one_write_done(r10_bio);
1498}
1499
1500static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1501{
1502	struct r10conf *conf = mddev->private;
1503	struct r10bio *r10_bio;
1504
1505	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1506
1507	r10_bio->master_bio = bio;
1508	r10_bio->sectors = sectors;
1509
1510	r10_bio->mddev = mddev;
1511	r10_bio->sector = bio->bi_iter.bi_sector;
1512	r10_bio->state = 0;
1513	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
1514
1515	if (bio_data_dir(bio) == READ)
1516		raid10_read_request(mddev, bio, r10_bio);
1517	else
1518		raid10_write_request(mddev, bio, r10_bio);
1519}
1520
1521static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1522{
1523	struct r10conf *conf = mddev->private;
1524	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1525	int chunk_sects = chunk_mask + 1;
1526	int sectors = bio_sectors(bio);
1527
1528	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1529		md_flush_request(mddev, bio);
1530		return true;
1531	}
1532
1533	if (!md_write_start(mddev, bio))
1534		return false;
1535
1536	/*
1537	 * If this request crosses a chunk boundary, we need to split
1538	 * it.
1539	 */
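	/*
	 * e.g. (illustrative): with 64K chunks, chunk_sects is 128; a 96
	 * sector bio starting at sector 100 crosses a chunk boundary, so
	 * only 128 - (100 & 127) = 28 sectors are handled now and the rest
	 * is split off and resubmitted in the read/write request paths.
	 */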
1540	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1541		     sectors > chunk_sects
1542		     && (conf->geo.near_copies < conf->geo.raid_disks
1543			 || conf->prev.near_copies <
1544			 conf->prev.raid_disks)))
1545		sectors = chunk_sects -
1546			(bio->bi_iter.bi_sector &
1547			 (chunk_sects - 1));
1548	__make_request(mddev, bio, sectors);
1549
1550	/* In case raid10d snuck in to freeze_array */
1551	wake_up(&conf->wait_barrier);
1552	return true;
1553}
1554
1555static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1556{
1557	struct r10conf *conf = mddev->private;
1558	int i;
1559
1560	if (conf->geo.near_copies < conf->geo.raid_disks)
1561		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1562	if (conf->geo.near_copies > 1)
1563		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1564	if (conf->geo.far_copies > 1) {
1565		if (conf->geo.far_offset)
1566			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1567		else
1568			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1569		if (conf->geo.far_set_size != conf->geo.raid_disks)
1570			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1571	}
1572	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1573					conf->geo.raid_disks - mddev->degraded);
1574	rcu_read_lock();
1575	for (i = 0; i < conf->geo.raid_disks; i++) {
1576		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 
1577		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1578	}
1579	rcu_read_unlock();
1580	seq_printf(seq, "]");
1581}
1582
1583/* check if there are enough drives for
1584 * every block to appear on at least one.
1585 * Don't consider the device numbered 'ignore'
1586 * as we might be about to remove it.
1587 */
1588static int _enough(struct r10conf *conf, int previous, int ignore)
1589{
1590	int first = 0;
1591	int has_enough = 0;
1592	int disks, ncopies;
1593	if (previous) {
1594		disks = conf->prev.raid_disks;
1595		ncopies = conf->prev.near_copies;
1596	} else {
1597		disks = conf->geo.raid_disks;
1598		ncopies = conf->geo.near_copies;
1599	}
1600
1601	rcu_read_lock();
1602	do {
1603		int n = conf->copies;
1604		int cnt = 0;
1605		int this = first;
1606		while (n--) {
1607			struct md_rdev *rdev;
1608			if (this != ignore &&
1609			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1610			    test_bit(In_sync, &rdev->flags))
1611				cnt++;
1612			this = (this+1) % disks;
1613		}
1614		if (cnt == 0)
1615			goto out;
1616		first = (first + ncopies) % disks;
1617	} while (first != 0);
1618	has_enough = 1;
1619out:
1620	rcu_read_unlock();
1621	return has_enough;
1622}
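/*
 * Example (illustrative): with near_copies=2, far_copies=1 over four
 * disks, copies live on device pairs {0,1} and {2,3}; the loop above
 * succeeds only while each pair still has an In_sync member (excluding
 * 'ignore').
 */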
1623
1624static int enough(struct r10conf *conf, int ignore)
1625{
1626	/* when calling 'enough', both 'prev' and 'geo' must
1627	 * be stable.
1628	 * This is ensured if ->reconfig_mutex or ->device_lock
1629	 * is held.
1630	 */
1631	return _enough(conf, 0, ignore) &&
1632		_enough(conf, 1, ignore);
1633}
1634
1635static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1636{
1637	char b[BDEVNAME_SIZE];
1638	struct r10conf *conf = mddev->private;
1639	unsigned long flags;
1640
1641	/*
1642	 * If it is not operational, then we have already marked it as dead
1643	 * else if it is the last working disk with "fail_last_dev == false",
1644	 * ignore the error; let the next level up know.
1645	 * else mark the drive as failed
1646	 */
1647	spin_lock_irqsave(&conf->device_lock, flags);
1648	if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1649	    && !enough(conf, rdev->raid_disk)) {
1650		/*
1651		 * Don't fail the drive, just return an IO error.
1652		 */
1653		spin_unlock_irqrestore(&conf->device_lock, flags);
1654		return;
 
1655	}
1656	if (test_and_clear_bit(In_sync, &rdev->flags))
1657		mddev->degraded++;
1658	/*
1659	 * If recovery is running, make sure it aborts.
1660	 */
1661	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1662	set_bit(Blocked, &rdev->flags);
1663	set_bit(Faulty, &rdev->flags);
1664	set_mask_bits(&mddev->sb_flags, 0,
1665		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1666	spin_unlock_irqrestore(&conf->device_lock, flags);
1667	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
1668		"md/raid10:%s: Operation continuing on %d devices.\n",
1669		mdname(mddev), bdevname(rdev->bdev, b),
1670		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1671}
1672
1673static void print_conf(struct r10conf *conf)
1674{
1675	int i;
1676	struct md_rdev *rdev;
1677
1678	pr_debug("RAID10 conf printout:\n");
1679	if (!conf) {
1680		pr_debug("(!conf)\n");
1681		return;
1682	}
1683	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1684		 conf->geo.raid_disks);
1685
1686	/* This is only called with ->reconfig_mutex held, so
1687	 * rcu protection of rdev is not needed */
1688	for (i = 0; i < conf->geo.raid_disks; i++) {
1689		char b[BDEVNAME_SIZE];
1690		rdev = conf->mirrors[i].rdev;
1691		if (rdev)
1692			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1693				 i, !test_bit(In_sync, &rdev->flags),
1694				 !test_bit(Faulty, &rdev->flags),
1695				 bdevname(rdev->bdev,b));
1696	}
1697}
1698
1699static void close_sync(struct r10conf *conf)
1700{
1701	wait_barrier(conf);
1702	allow_barrier(conf);
1703
1704	mempool_exit(&conf->r10buf_pool);
1705}
1706
1707static int raid10_spare_active(struct mddev *mddev)
1708{
1709	int i;
1710	struct r10conf *conf = mddev->private;
1711	struct raid10_info *tmp;
1712	int count = 0;
1713	unsigned long flags;
1714
1715	/*
1716	 * Find all non-in_sync disks within the RAID10 configuration
1717	 * and mark them in_sync
1718	 */
1719	for (i = 0; i < conf->geo.raid_disks; i++) {
1720		tmp = conf->mirrors + i;
1721		if (tmp->replacement
1722		    && tmp->replacement->recovery_offset == MaxSector
1723		    && !test_bit(Faulty, &tmp->replacement->flags)
1724		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1725			/* Replacement has just become active */
1726			if (!tmp->rdev
1727			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1728				count++;
1729			if (tmp->rdev) {
1730				/* Replaced device not technically faulty,
1731				 * but we need to be sure it gets removed
1732				 * and never re-added.
1733				 */
1734				set_bit(Faulty, &tmp->rdev->flags);
1735				sysfs_notify_dirent_safe(
1736					tmp->rdev->sysfs_state);
1737			}
1738			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1739		} else if (tmp->rdev
1740			   && tmp->rdev->recovery_offset == MaxSector
1741			   && !test_bit(Faulty, &tmp->rdev->flags)
1742			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1743			count++;
1744			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1745		}
1746	}
1747	spin_lock_irqsave(&conf->device_lock, flags);
1748	mddev->degraded -= count;
1749	spin_unlock_irqrestore(&conf->device_lock, flags);
1750
1751	print_conf(conf);
1752	return count;
1753}
1754
1755static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1756{
1757	struct r10conf *conf = mddev->private;
1758	int err = -EEXIST;
1759	int mirror;
1760	int first = 0;
1761	int last = conf->geo.raid_disks - 1;
 
1762
1763	if (mddev->recovery_cp < MaxSector)
1764		/* only hot-add to in-sync arrays, as recovery is
1765		 * very different from resync
1766		 */
1767		return -EBUSY;
1768	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1769		return -EINVAL;
1770
1771	if (md_integrity_add_rdev(rdev, mddev))
1772		return -ENXIO;
1773
1774	if (rdev->raid_disk >= 0)
1775		first = last = rdev->raid_disk;
1776
1777	if (rdev->saved_raid_disk >= first &&
1778	    rdev->saved_raid_disk < conf->geo.raid_disks &&
1779	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1780		mirror = rdev->saved_raid_disk;
1781	else
1782		mirror = first;
1783	for ( ; mirror <= last ; mirror++) {
1784		struct raid10_info *p = &conf->mirrors[mirror];
1785		if (p->recovery_disabled == mddev->recovery_disabled)
1786			continue;
1787		if (p->rdev) {
1788			if (!test_bit(WantReplacement, &p->rdev->flags) ||
1789			    p->replacement != NULL)
1790				continue;
1791			clear_bit(In_sync, &rdev->flags);
1792			set_bit(Replacement, &rdev->flags);
1793			rdev->raid_disk = mirror;
1794			err = 0;
1795			if (mddev->gendisk)
1796				disk_stack_limits(mddev->gendisk, rdev->bdev,
1797						  rdev->data_offset << 9);
1798			conf->fullsync = 1;
1799			rcu_assign_pointer(p->replacement, rdev);
1800			break;
1801		}
1802
1803		if (mddev->gendisk)
1804			disk_stack_limits(mddev->gendisk, rdev->bdev,
1805					  rdev->data_offset << 9);
1806
1807		p->head_position = 0;
1808		p->recovery_disabled = mddev->recovery_disabled - 1;
1809		rdev->raid_disk = mirror;
1810		err = 0;
1811		if (rdev->saved_raid_disk != mirror)
1812			conf->fullsync = 1;
1813		rcu_assign_pointer(p->rdev, rdev);
1814		break;
1815	}
1816	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1817		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1818
1819	print_conf(conf);
1820	return err;
1821}
1822
1823static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1824{
1825	struct r10conf *conf = mddev->private;
1826	int err = 0;
1827	int number = rdev->raid_disk;
1828	struct md_rdev **rdevp;
1829	struct raid10_info *p = conf->mirrors + number;
1830
1831	print_conf(conf);
1832	if (rdev == p->rdev)
1833		rdevp = &p->rdev;
1834	else if (rdev == p->replacement)
1835		rdevp = &p->replacement;
1836	else
1837		return 0;
1838
1839	if (test_bit(In_sync, &rdev->flags) ||
1840	    atomic_read(&rdev->nr_pending)) {
1841		err = -EBUSY;
1842		goto abort;
1843	}
1844	/* Only remove non-faulty devices if recovery
1845	 * is not possible.
1846	 */
1847	if (!test_bit(Faulty, &rdev->flags) &&
1848	    mddev->recovery_disabled != p->recovery_disabled &&
1849	    (!p->replacement || p->replacement == rdev) &&
1850	    number < conf->geo.raid_disks &&
1851	    enough(conf, -1)) {
1852		err = -EBUSY;
1853		goto abort;
1854	}
1855	*rdevp = NULL;
1856	if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1857		synchronize_rcu();
1858		if (atomic_read(&rdev->nr_pending)) {
1859			/* lost the race, try later */
1860			err = -EBUSY;
1861			*rdevp = rdev;
1862			goto abort;
1863		}
1864	}
1865	if (p->replacement) {
1866		/* We must have just cleared 'rdev' */
1867		p->rdev = p->replacement;
1868		clear_bit(Replacement, &p->replacement->flags);
1869		smp_mb(); /* Make sure other CPUs may see both as identical
1870			   * but will never see neither -- if they are careful.
1871			   */
1872		p->replacement = NULL;
1873	}
1874
1875	clear_bit(WantReplacement, &rdev->flags);
1876	err = md_integrity_register(mddev);
1877
1878abort:
1879
1880	print_conf(conf);
1881	return err;
1882}
1883
1884static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
1885{
1886	struct r10conf *conf = r10_bio->mddev->private;
1887
1888	if (!bio->bi_status)
1889		set_bit(R10BIO_Uptodate, &r10_bio->state);
1890	else
1891		/* The write handler will notice the lack of
1892		 * R10BIO_Uptodate and record any errors etc
1893		 */
1894		atomic_add(r10_bio->sectors,
1895			   &conf->mirrors[d].rdev->corrected_errors);
1896
1897	/* for reconstruct, we always reschedule after a read.
1898	 * for resync, only after all reads
1899	 */
1900	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1901	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1902	    atomic_dec_and_test(&r10_bio->remaining)) {
1903		/* we have read all the blocks,
1904		 * do the comparison in process context in raid10d
1905		 */
1906		reschedule_retry(r10_bio);
1907	}
1908}
1909
1910static void end_sync_read(struct bio *bio)
1911{
1912	struct r10bio *r10_bio = get_resync_r10bio(bio);
1913	struct r10conf *conf = r10_bio->mddev->private;
1914	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1915
1916	__end_sync_read(r10_bio, bio, d);
1917}
1918
1919static void end_reshape_read(struct bio *bio)
1920{
1921	/* reshape read bio isn't allocated from r10buf_pool */
1922	struct r10bio *r10_bio = bio->bi_private;
1923
1924	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
1925}
1926
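/*
 * Drop one reference on a sync/recovery r10_bio.  When the last
 * reference is gone, either hand the r10_bio to raid10d for follow-up
 * (if blocks were made good or a write error was seen) or free it; for
 * recovery, walk up the chain of r10_bios linked via master_bio.
 */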
1927static void end_sync_request(struct r10bio *r10_bio)
1928{
1929	struct mddev *mddev = r10_bio->mddev;
1930
1931	while (atomic_dec_and_test(&r10_bio->remaining)) {
1932		if (r10_bio->master_bio == NULL) {
1933			/* the primary of several recovery bios */
1934			sector_t s = r10_bio->sectors;
1935			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1936			    test_bit(R10BIO_WriteError, &r10_bio->state))
1937				reschedule_retry(r10_bio);
1938			else
1939				put_buf(r10_bio);
1940			md_done_sync(mddev, s, 1);
1941			break;
1942		} else {
1943			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1944			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1945			    test_bit(R10BIO_WriteError, &r10_bio->state))
1946				reschedule_retry(r10_bio);
1947			else
1948				put_buf(r10_bio);
1949			r10_bio = r10_bio2;
1950		}
1951	}
1952}
1953
1954static void end_sync_write(struct bio *bio)
1955{
1956	struct r10bio *r10_bio = get_resync_r10bio(bio);
1957	struct mddev *mddev = r10_bio->mddev;
1958	struct r10conf *conf = mddev->private;
1959	int d;
1960	sector_t first_bad;
1961	int bad_sectors;
1962	int slot;
1963	int repl;
1964	struct md_rdev *rdev = NULL;
1965
1966	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1967	if (repl)
1968		rdev = conf->mirrors[d].replacement;
1969	else
1970		rdev = conf->mirrors[d].rdev;
1971
1972	if (bio->bi_status) {
1973		if (repl)
1974			md_error(mddev, rdev);
1975		else {
1976			set_bit(WriteErrorSeen, &rdev->flags);
1977			if (!test_and_set_bit(WantReplacement, &rdev->flags))
1978				set_bit(MD_RECOVERY_NEEDED,
1979					&rdev->mddev->recovery);
1980			set_bit(R10BIO_WriteError, &r10_bio->state);
1981		}
1982	} else if (is_badblock(rdev,
1983			     r10_bio->devs[slot].addr,
1984			     r10_bio->sectors,
1985			     &first_bad, &bad_sectors))
1986		set_bit(R10BIO_MadeGood, &r10_bio->state);
1987
1988	rdev_dec_pending(rdev, mddev);
1989
1990	end_sync_request(r10_bio);
1991}
1992
1993/*
1994 * Note: sync and recovery are handled very differently for raid10.
1995 * This code is for resync.
1996 * For resync, we read through virtual addresses and read all blocks.
1997 * If there is any error, we schedule a write.  The lowest numbered
1998 * drive is authoritative.
1999 * However, requests come in for physical addresses, so we need to map.
2000 * For every physical address there are raid_disks/copies virtual addresses,
2001 * which is always at least one, but is not necessarily an integer.
2002 * This means that a physical address can span multiple chunks, so we may
2003 * have to submit multiple io requests for a single sync request.
2004 */
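/*
 * For example, with raid_disks = 5 and copies = 2, each physical
 * address corresponds to 5/2 = 2.5 virtual addresses, so a physically
 * contiguous sync range can cross a chunk boundary in virtual space
 * and may need more than one io request.
 */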
2005/*
2006 * We check if all blocks are in-sync and only write to blocks that
2007 * aren't in sync
2008 */
2009static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2010{
2011	struct r10conf *conf = mddev->private;
2012	int i, first;
2013	struct bio *tbio, *fbio;
2014	int vcnt;
2015	struct page **tpages, **fpages;
2016
2017	atomic_set(&r10_bio->remaining, 1);
2018
2019	/* find the first device with a block */
2020	for (i=0; i<conf->copies; i++)
2021		if (!r10_bio->devs[i].bio->bi_status)
2022			break;
2023
2024	if (i == conf->copies)
2025		goto done;
2026
2027	first = i;
2028	fbio = r10_bio->devs[i].bio;
2029	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2030	fbio->bi_iter.bi_idx = 0;
2031	fpages = get_resync_pages(fbio)->pages;
2032
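	/* vcnt is the number of PAGE_SIZE vec entries needed to cover
	 * r10_bio->sectors worth of 512-byte sectors, rounded up.
	 */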
2033	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2034	/* now find blocks with errors */
2035	for (i=0 ; i < conf->copies ; i++) {
2036		int  j, d;
2037		struct md_rdev *rdev;
2038		struct resync_pages *rp;
2039
2040		tbio = r10_bio->devs[i].bio;
2041
2042		if (tbio->bi_end_io != end_sync_read)
2043			continue;
2044		if (i == first)
2045			continue;
2046
2047		tpages = get_resync_pages(tbio)->pages;
2048		d = r10_bio->devs[i].devnum;
2049		rdev = conf->mirrors[d].rdev;
2050		if (!r10_bio->devs[i].bio->bi_status) {
2051			/* We know that the bi_io_vec layout is the same for
2052			 * both 'first' and 'i', so we just compare them.
2053			 * All vec entries are PAGE_SIZE;
2054			 */
2055			int sectors = r10_bio->sectors;
2056			for (j = 0; j < vcnt; j++) {
2057				int len = PAGE_SIZE;
2058				if (sectors < (len / 512))
2059					len = sectors * 512;
2060				if (memcmp(page_address(fpages[j]),
2061					   page_address(tpages[j]),
2062					   len))
2063					break;
2064				sectors -= len/512;
2065			}
2066			if (j == vcnt)
2067				continue;
2068			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2069			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2070				/* Don't fix anything. */
2071				continue;
2072		} else if (test_bit(FailFast, &rdev->flags)) {
2073			/* Just give up on this device */
2074			md_error(rdev->mddev, rdev);
2075			continue;
2076		}
2077		/* Ok, we need to write this bio, either to correct an
2078		 * inconsistency or to correct an unreadable block.
2079		 * First we need to fixup bv_offset, bv_len and
2080		 * bi_vecs, as the read request might have corrupted these
2081		 */
2082		rp = get_resync_pages(tbio);
2083		bio_reset(tbio);
2084
2085		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2086
2087		rp->raid_bio = r10_bio;
2088		tbio->bi_private = rp;
2089		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2090		tbio->bi_end_io = end_sync_write;
2091		bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
2092
2093		bio_copy_data(tbio, fbio);
2094
2095		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2096		atomic_inc(&r10_bio->remaining);
2097		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2098
2099		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2100			tbio->bi_opf |= MD_FAILFAST;
2101		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2102		bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
2103		generic_make_request(tbio);
2104	}
2105
2106	/* Now write out to any replacement devices
2107	 * that are active
2108	 */
2109	for (i = 0; i < conf->copies; i++) {
2110		int d;
2111
2112		tbio = r10_bio->devs[i].repl_bio;
2113		if (!tbio || !tbio->bi_end_io)
2114			continue;
2115		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2116		    && r10_bio->devs[i].bio != fbio)
2117			bio_copy_data(tbio, fbio);
2118		d = r10_bio->devs[i].devnum;
2119		atomic_inc(&r10_bio->remaining);
2120		md_sync_acct(conf->mirrors[d].replacement->bdev,
2121			     bio_sectors(tbio));
2122		generic_make_request(tbio);
2123	}
2124
2125done:
2126	if (atomic_dec_and_test(&r10_bio->remaining)) {
2127		md_done_sync(mddev, r10_bio->sectors, 1);
2128		put_buf(r10_bio);
2129	}
2130}
2131
2132/*
2133 * Now for the recovery code.
2134 * Recovery happens across physical sectors.
2135 * We recover all non-is_sync drives by finding the virtual address of
2136 * each, and then choose a working drive that also has that virt address.
2137 * There is a separate r10_bio for each non-in_sync drive.
2138 * Only the first two slots are in use: the first for reading,
2139 * the second for writing.
2140 *
2141 */
2142static void fix_recovery_read_error(struct r10bio *r10_bio)
2143{
2144	/* We got a read error during recovery.
2145	 * We repeat the read in smaller page-sized sections.
2146	 * If a read succeeds, write it to the new device or record
2147	 * a bad block if we cannot.
2148	 * If a read fails, record a bad block on both old and
2149	 * new devices.
2150	 */
2151	struct mddev *mddev = r10_bio->mddev;
2152	struct r10conf *conf = mddev->private;
2153	struct bio *bio = r10_bio->devs[0].bio;
2154	sector_t sect = 0;
2155	int sectors = r10_bio->sectors;
2156	int idx = 0;
2157	int dr = r10_bio->devs[0].devnum;
2158	int dw = r10_bio->devs[1].devnum;
2159	struct page **pages = get_resync_pages(bio)->pages;
2160
2161	while (sectors) {
2162		int s = sectors;
2163		struct md_rdev *rdev;
2164		sector_t addr;
2165		int ok;
2166
2167		if (s > (PAGE_SIZE>>9))
2168			s = PAGE_SIZE >> 9;
2169
2170		rdev = conf->mirrors[dr].rdev;
2171		addr = r10_bio->devs[0].addr + sect;
2172		ok = sync_page_io(rdev,
2173				  addr,
2174				  s << 9,
2175				  pages[idx],
2176				  REQ_OP_READ, 0, false);
2177		if (ok) {
2178			rdev = conf->mirrors[dw].rdev;
2179			addr = r10_bio->devs[1].addr + sect;
2180			ok = sync_page_io(rdev,
2181					  addr,
2182					  s << 9,
2183					  pages[idx],
2184					  REQ_OP_WRITE, 0, false);
2185			if (!ok) {
2186				set_bit(WriteErrorSeen, &rdev->flags);
2187				if (!test_and_set_bit(WantReplacement,
2188						      &rdev->flags))
2189					set_bit(MD_RECOVERY_NEEDED,
2190						&rdev->mddev->recovery);
2191			}
2192		}
2193		if (!ok) {
2194			/* We don't worry if we cannot set a bad block -
2195			 * it really is bad so there is no loss in not
2196			 * recording it yet
2197			 */
2198			rdev_set_badblocks(rdev, addr, s, 0);
2199
2200			if (rdev != conf->mirrors[dw].rdev) {
2201				/* need bad block on destination too */
2202				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2203				addr = r10_bio->devs[1].addr + sect;
2204				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2205				if (!ok) {
2206					/* just abort the recovery */
2207					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2208						  mdname(mddev));
2209
2210					conf->mirrors[dw].recovery_disabled
2211						= mddev->recovery_disabled;
2212					set_bit(MD_RECOVERY_INTR,
2213						&mddev->recovery);
2214					break;
2215				}
2216			}
2217		}
2218
2219		sectors -= s;
2220		sect += s;
2221		idx++;
2222	}
2223}
2224
2225static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2226{
2227	struct r10conf *conf = mddev->private;
2228	int d;
2229	struct bio *wbio, *wbio2;
2230
2231	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2232		fix_recovery_read_error(r10_bio);
2233		end_sync_request(r10_bio);
2234		return;
2235	}
2236
2237	/*
2238	 * share the pages with the first bio
2239	 * and submit the write request
2240	 */
2241	d = r10_bio->devs[1].devnum;
2242	wbio = r10_bio->devs[1].bio;
2243	wbio2 = r10_bio->devs[1].repl_bio;
2244	/* Need to test wbio2->bi_end_io before we call
2245	 * generic_make_request on wbio, since if the former is NULL,
2246	 * the completion of the latter is free to free wbio2.
2247	 */
2248	if (wbio2 && !wbio2->bi_end_io)
2249		wbio2 = NULL;
2250	if (wbio->bi_end_io) {
2251		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2252		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2253		generic_make_request(wbio);
2254	}
2255	if (wbio2) {
2256		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2257		md_sync_acct(conf->mirrors[d].replacement->bdev,
2258			     bio_sectors(wbio2));
2259		generic_make_request(wbio2);
2260	}
2261}
2262
2263/*
2264 * Used by fix_read_error() to decay the per rdev read_errors.
2265 * We halve the read error count for every hour that has elapsed
2266 * since the last recorded read error.
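 * For example, if three whole hours have elapsed, read_errors is
 * shifted right by 3, i.e. divided by 8.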
2267 *
2268 */
2269static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2270{
2271	long cur_time_mon;
2272	unsigned long hours_since_last;
2273	unsigned int read_errors = atomic_read(&rdev->read_errors);
2274
2275	cur_time_mon = ktime_get_seconds();
2276
2277	if (rdev->last_read_error == 0) {
2278		/* first time we've seen a read error */
2279		rdev->last_read_error = cur_time_mon;
2280		return;
2281	}
2282
2283	hours_since_last = (long)(cur_time_mon -
2284			    rdev->last_read_error) / 3600;
2285
2286	rdev->last_read_error = cur_time_mon;
2287
2288	/*
2289	 * if hours_since_last is > the number of bits in read_errors
2290	 * just set read errors to 0. We do this to avoid
2291	 * overflowing the shift of read_errors by hours_since_last.
2292	 */
2293	if (hours_since_last >= 8 * sizeof(read_errors))
2294		atomic_set(&rdev->read_errors, 0);
2295	else
2296		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2297}
2298
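/*
 * Synchronously read or write one range on @rdev.
 * Returns 1 on success, 0 on an IO failure (a bad block is recorded or
 * the device is failed), and -1 if an existing bad block forbids the
 * attempt in the first place.
 */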
2299static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2300			    int sectors, struct page *page, int rw)
2301{
2302	sector_t first_bad;
2303	int bad_sectors;
2304
2305	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2306	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2307		return -1;
2308	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
2309		/* success */
2310		return 1;
2311	if (rw == WRITE) {
2312		set_bit(WriteErrorSeen, &rdev->flags);
2313		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2314			set_bit(MD_RECOVERY_NEEDED,
2315				&rdev->mddev->recovery);
2316	}
2317	/* need to record an error - either for the block or the device */
2318	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2319		md_error(rdev->mddev, rdev);
2320	return 0;
2321}
2322
2323/*
2324 * This is a kernel thread which:
2325 *
2326 *	1.	Retries failed read operations on working mirrors.
2327 *	2.	Updates the raid superblock when problems are encountered.
2328 *	3.	Performs writes following reads for array synchronising.
2329 */
2330
2331static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2332{
2333	int sect = 0; /* Offset from r10_bio->sector */
2334	int sectors = r10_bio->sectors;
2335	struct md_rdev *rdev;
2336	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2337	int d = r10_bio->devs[r10_bio->read_slot].devnum;
2338
2339	/* still own a reference to this rdev, so it cannot
2340	 * have been cleared recently.
2341	 */
2342	rdev = conf->mirrors[d].rdev;
2343
2344	if (test_bit(Faulty, &rdev->flags))
2345		/* drive has already been failed, just ignore any
2346		   more fix_read_error() attempts */
2347		return;
2348
2349	check_decay_read_errors(mddev, rdev);
2350	atomic_inc(&rdev->read_errors);
2351	if (atomic_read(&rdev->read_errors) > max_read_errors) {
2352		char b[BDEVNAME_SIZE];
2353		bdevname(rdev->bdev, b);
2354
2355		pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2356			  mdname(mddev), b,
2357			  atomic_read(&rdev->read_errors), max_read_errors);
2358		pr_notice("md/raid10:%s: %s: Failing raid device\n",
2359			  mdname(mddev), b);
2360		md_error(mddev, rdev);
2361		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2362		return;
2363	}
2364
2365	while(sectors) {
2366		int s = sectors;
2367		int sl = r10_bio->read_slot;
2368		int success = 0;
2369		int start;
2370
2371		if (s > (PAGE_SIZE>>9))
2372			s = PAGE_SIZE >> 9;
2373
2374		rcu_read_lock();
2375		do {
2376			sector_t first_bad;
2377			int bad_sectors;
2378
2379			d = r10_bio->devs[sl].devnum;
2380			rdev = rcu_dereference(conf->mirrors[d].rdev);
2381			if (rdev &&
2382			    test_bit(In_sync, &rdev->flags) &&
2383			    !test_bit(Faulty, &rdev->flags) &&
2384			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2385					&first_bad, &bad_sectors) == 0) {
2386				atomic_inc(&rdev->nr_pending);
2387				rcu_read_unlock();
2388				success = sync_page_io(rdev,
2389						       r10_bio->devs[sl].addr +
2390						       sect,
2391						       s<<9,
2392						       conf->tmppage,
2393						       REQ_OP_READ, 0, false);
2394				rdev_dec_pending(rdev, mddev);
2395				rcu_read_lock();
2396				if (success)
2397					break;
2398			}
2399			sl++;
2400			if (sl == conf->copies)
2401				sl = 0;
2402		} while (!success && sl != r10_bio->read_slot);
2403		rcu_read_unlock();
2404
2405		if (!success) {
2406			/* Cannot read from anywhere, just mark the block
2407			 * as bad on the first device to discourage future
2408			 * reads.
2409			 */
2410			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2411			rdev = conf->mirrors[dn].rdev;
2412
2413			if (!rdev_set_badblocks(
2414				    rdev,
2415				    r10_bio->devs[r10_bio->read_slot].addr
2416				    + sect,
2417				    s, 0)) {
2418				md_error(mddev, rdev);
2419				r10_bio->devs[r10_bio->read_slot].bio
2420					= IO_BLOCKED;
2421			}
2422			break;
2423		}
2424
2425		start = sl;
2426		/* write it back and re-read */
2427		rcu_read_lock();
2428		while (sl != r10_bio->read_slot) {
2429			char b[BDEVNAME_SIZE];
2430
2431			if (sl==0)
2432				sl = conf->copies;
2433			sl--;
2434			d = r10_bio->devs[sl].devnum;
2435			rdev = rcu_dereference(conf->mirrors[d].rdev);
2436			if (!rdev ||
2437			    test_bit(Faulty, &rdev->flags) ||
2438			    !test_bit(In_sync, &rdev->flags))
2439				continue;
2440
2441			atomic_inc(&rdev->nr_pending);
2442			rcu_read_unlock();
2443			if (r10_sync_page_io(rdev,
2444					     r10_bio->devs[sl].addr +
2445					     sect,
2446					     s, conf->tmppage, WRITE)
2447			    == 0) {
2448				/* Well, this device is dead */
2449				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
2450					  mdname(mddev), s,
2451					  (unsigned long long)(
2452						  sect +
2453						  choose_data_offset(r10_bio,
2454								     rdev)),
2455					  bdevname(rdev->bdev, b));
2456				pr_notice("md/raid10:%s: %s: failing drive\n",
2457					  mdname(mddev),
2458					  bdevname(rdev->bdev, b));
2459			}
2460			rdev_dec_pending(rdev, mddev);
2461			rcu_read_lock();
2462		}
2463		sl = start;
2464		while (sl != r10_bio->read_slot) {
2465			char b[BDEVNAME_SIZE];
2466
2467			if (sl==0)
2468				sl = conf->copies;
2469			sl--;
2470			d = r10_bio->devs[sl].devnum;
2471			rdev = rcu_dereference(conf->mirrors[d].rdev);
2472			if (!rdev ||
2473			    test_bit(Faulty, &rdev->flags) ||
2474			    !test_bit(In_sync, &rdev->flags))
2475				continue;
2476
2477			atomic_inc(&rdev->nr_pending);
2478			rcu_read_unlock();
2479			switch (r10_sync_page_io(rdev,
2480					     r10_bio->devs[sl].addr +
2481					     sect,
2482					     s, conf->tmppage,
2483						 READ)) {
2484			case 0:
2485				/* Well, this device is dead */
2486				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
2487				       mdname(mddev), s,
2488				       (unsigned long long)(
2489					       sect +
2490					       choose_data_offset(r10_bio, rdev)),
2491				       bdevname(rdev->bdev, b));
2492				pr_notice("md/raid10:%s: %s: failing drive\n",
2493				       mdname(mddev),
2494				       bdevname(rdev->bdev, b));
2495				break;
2496			case 1:
2497				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
2498				       mdname(mddev), s,
2499				       (unsigned long long)(
2500					       sect +
2501					       choose_data_offset(r10_bio, rdev)),
2502				       bdevname(rdev->bdev, b));
2503				atomic_add(s, &rdev->corrected_errors);
2504			}
2505
2506			rdev_dec_pending(rdev, mddev);
2507			rcu_read_lock();
2508		}
2509		rcu_read_unlock();
2510
2511		sectors -= s;
2512		sect += s;
2513	}
2514}
2515
2516static int narrow_write_error(struct r10bio *r10_bio, int i)
2517{
2518	struct bio *bio = r10_bio->master_bio;
2519	struct mddev *mddev = r10_bio->mddev;
2520	struct r10conf *conf = mddev->private;
2521	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2522	/* bio has the data to be written to slot 'i' where
2523	 * we just recently had a write error.
2524	 * We repeatedly clone the bio and trim down to one block,
2525	 * then try the write.  Where the write fails we record
2526	 * a bad block.
2527	 * It is conceivable that the bio doesn't exactly align with
2528	 * blocks.  We must handle this.
2529	 *
2530	 * We currently own a reference to the rdev.
2531	 */
2532
2533	int block_sectors;
2534	sector_t sector;
2535	int sectors;
2536	int sect_to_write = r10_bio->sectors;
2537	int ok = 1;
2538
2539	if (rdev->badblocks.shift < 0)
2540		return 0;
2541
2542	block_sectors = roundup(1 << rdev->badblocks.shift,
2543				bdev_logical_block_size(rdev->bdev) >> 9);
2544	sector = r10_bio->sector;
2545	sectors = ((r10_bio->sector + block_sectors)
2546		   & ~(sector_t)(block_sectors - 1))
2547		   - sector;
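	/* 'sectors' is the distance from 'sector' to the next
	 * block_sectors-aligned boundary, so after the first pass each
	 * write below covers exactly one aligned, badblock-sized unit.
	 */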
2548
2549	while (sect_to_write) {
2550		struct bio *wbio;
2551		sector_t wsector;
2552		if (sectors > sect_to_write)
2553			sectors = sect_to_write;
2554		/* Write at 'sector' for 'sectors' */
2555		wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
2556		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2557		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2558		wbio->bi_iter.bi_sector = wsector +
2559				   choose_data_offset(r10_bio, rdev);
2560		bio_set_dev(wbio, rdev->bdev);
2561		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2562
2563		if (submit_bio_wait(wbio) < 0)
2564			/* Failure! */
2565			ok = rdev_set_badblocks(rdev, wsector,
2566						sectors, 0)
2567				&& ok;
2568
2569		bio_put(wbio);
2570		sect_to_write -= sectors;
2571		sector += sectors;
2572		sectors = block_sectors;
2573	}
2574	return ok;
2575}
2576
2577static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2578{
2579	int slot = r10_bio->read_slot;
2580	struct bio *bio;
2581	struct r10conf *conf = mddev->private;
2582	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2583
2584	/* we got a read error. Maybe the drive is bad.  Maybe just
2585	 * the block and we can fix it.
2586	 * We freeze all other IO, and try reading the block from
2587	 * other devices.  When we find one, we re-write it
2588	 * and check whether that fixes the read error.
2589	 * This is all done synchronously while the array is
2590	 * frozen.
2591	 */
2592	bio = r10_bio->devs[slot].bio;
2593	bio_put(bio);
2594	r10_bio->devs[slot].bio = NULL;
2595
2596	if (mddev->ro)
2597		r10_bio->devs[slot].bio = IO_BLOCKED;
2598	else if (!test_bit(FailFast, &rdev->flags)) {
2599		freeze_array(conf, 1);
2600		fix_read_error(conf, mddev, r10_bio);
2601		unfreeze_array(conf);
2602	} else
2603		md_error(mddev, rdev);
2604
2605	rdev_dec_pending(rdev, mddev);
2606	allow_barrier(conf);
2607	r10_bio->state = 0;
2608	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
2609}
2610
2611static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2612{
2613	/* Some sort of write request has finished and it
2614	 * succeeded in writing where we thought there was a
2615	 * bad block.  So forget the bad block.
2616	 * Or possibly it failed, and we need to record
2617	 * a bad block.
2618	 */
2619	int m;
2620	struct md_rdev *rdev;
2621
2622	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2623	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2624		for (m = 0; m < conf->copies; m++) {
2625			int dev = r10_bio->devs[m].devnum;
2626			rdev = conf->mirrors[dev].rdev;
2627			if (r10_bio->devs[m].bio == NULL ||
2628				r10_bio->devs[m].bio->bi_end_io == NULL)
2629				continue;
2630			if (!r10_bio->devs[m].bio->bi_status) {
2631				rdev_clear_badblocks(
2632					rdev,
2633					r10_bio->devs[m].addr,
2634					r10_bio->sectors, 0);
2635			} else {
2636				if (!rdev_set_badblocks(
2637					    rdev,
2638					    r10_bio->devs[m].addr,
2639					    r10_bio->sectors, 0))
2640					md_error(conf->mddev, rdev);
2641			}
2642			rdev = conf->mirrors[dev].replacement;
2643			if (r10_bio->devs[m].repl_bio == NULL ||
2644				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2645				continue;
2646
2647			if (!r10_bio->devs[m].repl_bio->bi_status) {
2648				rdev_clear_badblocks(
2649					rdev,
2650					r10_bio->devs[m].addr,
2651					r10_bio->sectors, 0);
2652			} else {
2653				if (!rdev_set_badblocks(
2654					    rdev,
2655					    r10_bio->devs[m].addr,
2656					    r10_bio->sectors, 0))
2657					md_error(conf->mddev, rdev);
2658			}
2659		}
2660		put_buf(r10_bio);
2661	} else {
2662		bool fail = false;
2663		for (m = 0; m < conf->copies; m++) {
2664			int dev = r10_bio->devs[m].devnum;
2665			struct bio *bio = r10_bio->devs[m].bio;
2666			rdev = conf->mirrors[dev].rdev;
2667			if (bio == IO_MADE_GOOD) {
2668				rdev_clear_badblocks(
2669					rdev,
2670					r10_bio->devs[m].addr,
2671					r10_bio->sectors, 0);
2672				rdev_dec_pending(rdev, conf->mddev);
2673			} else if (bio != NULL && bio->bi_status) {
2674				fail = true;
2675				if (!narrow_write_error(r10_bio, m)) {
2676					md_error(conf->mddev, rdev);
2677					set_bit(R10BIO_Degraded,
2678						&r10_bio->state);
2679				}
2680				rdev_dec_pending(rdev, conf->mddev);
2681			}
2682			bio = r10_bio->devs[m].repl_bio;
2683			rdev = conf->mirrors[dev].replacement;
2684			if (rdev && bio == IO_MADE_GOOD) {
2685				rdev_clear_badblocks(
2686					rdev,
2687					r10_bio->devs[m].addr,
2688					r10_bio->sectors, 0);
2689				rdev_dec_pending(rdev, conf->mddev);
2690			}
2691		}
2692		if (fail) {
2693			spin_lock_irq(&conf->device_lock);
2694			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2695			conf->nr_queued++;
2696			spin_unlock_irq(&conf->device_lock);
2697			/*
2698			 * In case freeze_array() is waiting for condition
2699			 * nr_pending == nr_queued + extra to be true.
2700			 */
2701			wake_up(&conf->wait_barrier);
2702			md_wakeup_thread(conf->mddev->thread);
2703		} else {
2704			if (test_bit(R10BIO_WriteError,
2705				     &r10_bio->state))
2706				close_write(r10_bio);
2707			raid_end_bio_io(r10_bio);
2708		}
2709	}
2710}
2711
2712static void raid10d(struct md_thread *thread)
2713{
2714	struct mddev *mddev = thread->mddev;
2715	struct r10bio *r10_bio;
2716	unsigned long flags;
2717	struct r10conf *conf = mddev->private;
2718	struct list_head *head = &conf->retry_list;
2719	struct blk_plug plug;
2720
2721	md_check_recovery(mddev);
2722
2723	if (!list_empty_careful(&conf->bio_end_io_list) &&
2724	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2725		LIST_HEAD(tmp);
2726		spin_lock_irqsave(&conf->device_lock, flags);
2727		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2728			while (!list_empty(&conf->bio_end_io_list)) {
2729				list_move(conf->bio_end_io_list.prev, &tmp);
2730				conf->nr_queued--;
2731			}
2732		}
2733		spin_unlock_irqrestore(&conf->device_lock, flags);
2734		while (!list_empty(&tmp)) {
2735			r10_bio = list_first_entry(&tmp, struct r10bio,
2736						   retry_list);
2737			list_del(&r10_bio->retry_list);
2738			if (mddev->degraded)
2739				set_bit(R10BIO_Degraded, &r10_bio->state);
2740
2741			if (test_bit(R10BIO_WriteError,
2742				     &r10_bio->state))
2743				close_write(r10_bio);
2744			raid_end_bio_io(r10_bio);
2745		}
2746	}
2747
2748	blk_start_plug(&plug);
2749	for (;;) {
2750
2751		flush_pending_writes(conf);
2752
2753		spin_lock_irqsave(&conf->device_lock, flags);
2754		if (list_empty(head)) {
2755			spin_unlock_irqrestore(&conf->device_lock, flags);
2756			break;
2757		}
2758		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2759		list_del(head->prev);
2760		conf->nr_queued--;
2761		spin_unlock_irqrestore(&conf->device_lock, flags);
2762
2763		mddev = r10_bio->mddev;
2764		conf = mddev->private;
2765		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2766		    test_bit(R10BIO_WriteError, &r10_bio->state))
2767			handle_write_completed(conf, r10_bio);
2768		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2769			reshape_request_write(mddev, r10_bio);
2770		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2771			sync_request_write(mddev, r10_bio);
2772		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2773			recovery_request_write(mddev, r10_bio);
2774		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2775			handle_read_error(mddev, r10_bio);
2776		else
2777			WARN_ON_ONCE(1);
2778
2779		cond_resched();
2780		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2781			md_check_recovery(mddev);
2782	}
2783	blk_finish_plug(&plug);
2784}
2785
2786static int init_resync(struct r10conf *conf)
2787{
2788	int ret, buffs, i;
2789
2790	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2791	BUG_ON(mempool_initialized(&conf->r10buf_pool));
2792	conf->have_replacement = 0;
2793	for (i = 0; i < conf->geo.raid_disks; i++)
2794		if (conf->mirrors[i].replacement)
2795			conf->have_replacement = 1;
2796	ret = mempool_init(&conf->r10buf_pool, buffs,
2797			   r10buf_pool_alloc, r10buf_pool_free, conf);
2798	if (ret)
2799		return ret;
2800	conf->next_resync = 0;
2801	return 0;
2802}
2803
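/*
 * Take an r10bio from the resync mempool and reset its embedded bios,
 * preserving each bio's resync_pages pointer stashed in bi_private
 * (bio_reset() would otherwise clear it).
 */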
2804static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
2805{
2806	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
2807	struct resync_pages *rp;
2808	struct bio *bio;
2809	int nalloc;
2810	int i;
2811
2812	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
2813	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
2814		nalloc = conf->copies; /* resync */
2815	else
2816		nalloc = 2; /* recovery */
2817
2818	for (i = 0; i < nalloc; i++) {
2819		bio = r10bio->devs[i].bio;
2820		rp = bio->bi_private;
2821		bio_reset(bio);
2822		bio->bi_private = rp;
2823		bio = r10bio->devs[i].repl_bio;
2824		if (bio) {
2825			rp = bio->bi_private;
2826			bio_reset(bio);
2827			bio->bi_private = rp;
2828		}
2829	}
2830	return r10bio;
2831}
2832
2833/*
2834 * Set cluster_sync_high since we need other nodes to add the
2835 * range [cluster_sync_low, cluster_sync_high] to the suspend list.
2836 */
2837static void raid10_set_cluster_sync_high(struct r10conf *conf)
2838{
2839	sector_t window_size;
2840	int extra_chunk, chunks;
2841
2842	/*
2843	 * First, here we define a "stripe" as a unit that spans
2844	 * all member devices one time, so we get the number of chunks
2845	 * with raid_disks / near_copies. Otherwise, if near_copies is
2846	 * close to raid_disks, the resync window could increase
2847	 * linearly with the number of raid_disks, which means
2848	 * we would suspend a really large IO window when it is not
2849	 * necessary. If raid_disks is not divisible by near_copies,
2850	 * an extra chunk is needed to ensure the whole "stripe" is
2851	 * covered.
2852	 */
2853
2854	chunks = conf->geo.raid_disks / conf->geo.near_copies;
2855	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
2856		extra_chunk = 0;
2857	else
2858		extra_chunk = 1;
2859	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
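	/* e.g. raid_disks = 5, near_copies = 2: chunks = 2 plus one extra
	 * chunk, so window_size = 3 * chunk_sectors before the 32M
	 * minimum below is applied.
	 */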
2860
2861	/*
2862	 * At least use a 32M window to align with raid1's resync window
2863	 */
2864	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
2865			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
2866
2867	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
2868}
2869
2870/*
2871 * perform a "sync" on one "block"
2872 *
2873 * We need to make sure that no normal I/O request - particularly write
2874 * requests - conflict with active sync requests.
2875 *
2876 * This is achieved by tracking pending requests and a 'barrier' concept
2877 * that can be installed to exclude normal IO requests.
2878 *
2879 * Resync and recovery are handled very differently.
2880 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2881 *
2882 * For resync, we iterate over virtual addresses, read all copies,
2883 * and update if there are differences.  If only one copy is live,
2884 * skip it.
2885 * For recovery, we iterate over physical addresses, read a good
2886 * value for each non-in_sync drive, and over-write.
2887 *
2888 * So, for recovery we may have several outstanding complex requests for a
2889 * given address, one for each out-of-sync device.  We model this by allocating
2890 * a number of r10_bio structures, one for each out-of-sync device.
2891 * As we set up these structures, we collect all bios together into a list
2892 * which we then process collectively to add pages, and then process again
2893 * to pass to generic_make_request.
2894 *
2895 * The r10_bio structures are linked using a borrowed master_bio pointer.
2896 * This link is counted in ->remaining.  When the r10_bio that points to NULL
2897 * has its remaining count decremented to 0, the whole complex operation
2898 * is complete.
2899 *
2900 */
2901
2902static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2903			     int *skipped)
2904{
2905	struct r10conf *conf = mddev->private;
2906	struct r10bio *r10_bio;
2907	struct bio *biolist = NULL, *bio;
2908	sector_t max_sector, nr_sectors;
2909	int i;
2910	int max_sync;
2911	sector_t sync_blocks;
2912	sector_t sectors_skipped = 0;
2913	int chunks_skipped = 0;
2914	sector_t chunk_mask = conf->geo.chunk_mask;
2915	int page_idx = 0;
2916
2917	if (!mempool_initialized(&conf->r10buf_pool))
2918		if (init_resync(conf))
2919			return 0;
2920
2921	/*
2922	 * Allow skipping a full rebuild for incremental assembly
2923	 * of a clean array, like RAID1 does.
2924	 */
2925	if (mddev->bitmap == NULL &&
2926	    mddev->recovery_cp == MaxSector &&
2927	    mddev->reshape_position == MaxSector &&
2928	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2929	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2930	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2931	    conf->fullsync == 0) {
2932		*skipped = 1;
2933		return mddev->dev_sectors - sector_nr;
2934	}
2935
2936 skipped:
2937	max_sector = mddev->dev_sectors;
2938	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2939	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2940		max_sector = mddev->resync_max_sectors;
2941	if (sector_nr >= max_sector) {
2942		conf->cluster_sync_low = 0;
2943		conf->cluster_sync_high = 0;
2944
2945		/* If we aborted, we need to abort the
2946		 * sync on the 'current' bitmap chunks (there can
2947		 * be several when recovering multiple devices),
2948		 * as we may have started syncing them but not finished.
2949		 * We can find the current address in
2950		 * mddev->curr_resync, but for recovery,
2951		 * we need to convert that to several
2952		 * virtual addresses.
2953		 */
2954		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2955			end_reshape(conf);
2956			close_sync(conf);
2957			return 0;
2958		}
2959
2960		if (mddev->curr_resync < max_sector) { /* aborted */
2961			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2962				md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2963						   &sync_blocks, 1);
2964			else for (i = 0; i < conf->geo.raid_disks; i++) {
2965				sector_t sect =
2966					raid10_find_virt(conf, mddev->curr_resync, i);
2967				md_bitmap_end_sync(mddev->bitmap, sect,
2968						   &sync_blocks, 1);
2969			}
2970		} else {
2971			/* completed sync */
2972			if ((!mddev->bitmap || conf->fullsync)
2973			    && conf->have_replacement
2974			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2975				/* Completed a full sync so the replacements
2976				 * are now fully recovered.
2977				 */
2978				rcu_read_lock();
2979				for (i = 0; i < conf->geo.raid_disks; i++) {
2980					struct md_rdev *rdev =
2981						rcu_dereference(conf->mirrors[i].replacement);
2982					if (rdev)
2983						rdev->recovery_offset = MaxSector;
2984				}
2985				rcu_read_unlock();
2986			}
2987			conf->fullsync = 0;
2988		}
2989		md_bitmap_close_sync(mddev->bitmap);
2990		close_sync(conf);
2991		*skipped = 1;
2992		return sectors_skipped;
2993	}
2994
2995	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2996		return reshape_request(mddev, sector_nr, skipped);
2997
2998	if (chunks_skipped >= conf->geo.raid_disks) {
2999		/* if there has been nothing to do on any drive,
3000		 * then there is nothing to do at all..
3001		 */
3002		*skipped = 1;
3003		return (max_sector - sector_nr) + sectors_skipped;
3004	}
3005
3006	if (max_sector > mddev->resync_max)
3007		max_sector = mddev->resync_max; /* Don't do IO beyond here */
3008
3009	/* make sure whole request will fit in a chunk - if chunks
3010	 * are meaningful
3011	 */
3012	if (conf->geo.near_copies < conf->geo.raid_disks &&
3013	    max_sector > (sector_nr | chunk_mask))
3014		max_sector = (sector_nr | chunk_mask) + 1;
3015
3016	/*
3017	 * If there is non-resync activity waiting for a turn, then let it
3018	 * through before starting on this new sync request.
3019	 */
3020	if (conf->nr_waiting)
3021		schedule_timeout_uninterruptible(1);
3022
3023	/* Again, very different code for resync and recovery.
3024	 * Both must result in an r10bio with a list of bios that
3025	 * have bi_end_io, bi_sector, bi_disk set,
3026	 * and bi_private set to the r10bio.
3027	 * For recovery, we may actually create several r10bios
3028	 * with 2 bios in each, that correspond to the bios in the main one.
3029	 * In this case, the subordinate r10bios link back through a
3030	 * borrowed master_bio pointer, and the counter in the master
3031	 * includes a ref from each subordinate.
3032	 */
3033	/* First, we decide what to do and set ->bi_end_io:
3034	 * to end_sync_read if we want to read, and
3035	 * to end_sync_write if we will want to write.
3036	 */
3037
3038	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
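	/* max_sync starts as the largest range one r10buf can carry
	 * (RESYNC_PAGES pages worth of sectors) and is trimmed below
	 * whenever bad blocks or the bitmap limit what can be synced.
	 */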
3039	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3040		/* recovery... the complicated one */
3041		int j;
3042		r10_bio = NULL;
3043
3044		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3045			int still_degraded;
3046			struct r10bio *rb2;
3047			sector_t sect;
3048			int must_sync;
3049			int any_working;
3050			int need_recover = 0;
3051			int need_replace = 0;
3052			struct raid10_info *mirror = &conf->mirrors[i];
3053			struct md_rdev *mrdev, *mreplace;
3054
3055			rcu_read_lock();
3056			mrdev = rcu_dereference(mirror->rdev);
3057			mreplace = rcu_dereference(mirror->replacement);
3058
3059			if (mrdev != NULL &&
3060			    !test_bit(Faulty, &mrdev->flags) &&
3061			    !test_bit(In_sync, &mrdev->flags))
3062				need_recover = 1;
3063			if (mreplace != NULL &&
3064			    !test_bit(Faulty, &mreplace->flags))
3065				need_replace = 1;
3066
3067			if (!need_recover && !need_replace) {
3068				rcu_read_unlock();
3069				continue;
3070			}
3071
3072			still_degraded = 0;
3073			/* want to reconstruct this device */
3074			rb2 = r10_bio;
3075			sect = raid10_find_virt(conf, sector_nr, i);
3076			if (sect >= mddev->resync_max_sectors) {
3077				/* last stripe is not complete - don't
3078				 * try to recover this sector.
3079				 */
3080				rcu_read_unlock();
3081				continue;
3082			}
3083			if (mreplace && test_bit(Faulty, &mreplace->flags))
3084				mreplace = NULL;
3085			/* Unless we are doing a full sync or a replacement,
3086			 * we only need to recover the block if it is set in
3087			 * the bitmap.
3088			 */
3089			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3090							 &sync_blocks, 1);
3091			if (sync_blocks < max_sync)
3092				max_sync = sync_blocks;
3093			if (!must_sync &&
3094			    mreplace == NULL &&
3095			    !conf->fullsync) {
3096				/* yep, skip the sync_blocks here, but don't assume
3097				 * that there will never be anything to do here
3098				 */
3099				chunks_skipped = -1;
3100				rcu_read_unlock();
3101				continue;
3102			}
3103			atomic_inc(&mrdev->nr_pending);
3104			if (mreplace)
3105				atomic_inc(&mreplace->nr_pending);
3106			rcu_read_unlock();
3107
3108			r10_bio = raid10_alloc_init_r10buf(conf);
3109			r10_bio->state = 0;
3110			raise_barrier(conf, rb2 != NULL);
3111			atomic_set(&r10_bio->remaining, 0);
3112
3113			r10_bio->master_bio = (struct bio*)rb2;
3114			if (rb2)
3115				atomic_inc(&rb2->remaining);
3116			r10_bio->mddev = mddev;
3117			set_bit(R10BIO_IsRecover, &r10_bio->state);
3118			r10_bio->sector = sect;
3119
3120			raid10_find_phys(conf, r10_bio);
3121
3122			/* Need to check if the array will still be
3123			 * degraded
3124			 */
3125			rcu_read_lock();
3126			for (j = 0; j < conf->geo.raid_disks; j++) {
3127				struct md_rdev *rdev = rcu_dereference(
3128					conf->mirrors[j].rdev);
3129				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3130					still_degraded = 1;
3131					break;
3132				}
3133			}
3134
3135			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3136							 &sync_blocks, still_degraded);
3137
3138			any_working = 0;
3139			for (j=0; j<conf->copies;j++) {
3140				int k;
3141				int d = r10_bio->devs[j].devnum;
3142				sector_t from_addr, to_addr;
3143				struct md_rdev *rdev =
3144					rcu_dereference(conf->mirrors[d].rdev);
3145				sector_t sector, first_bad;
3146				int bad_sectors;
3147				if (!rdev ||
3148				    !test_bit(In_sync, &rdev->flags))
3149					continue;
3150				/* This is where we read from */
3151				any_working = 1;
3152				sector = r10_bio->devs[j].addr;
3153
3154				if (is_badblock(rdev, sector, max_sync,
3155						&first_bad, &bad_sectors)) {
3156					if (first_bad > sector)
3157						max_sync = first_bad - sector;
3158					else {
3159						bad_sectors -= (sector
3160								- first_bad);
3161						if (max_sync > bad_sectors)
3162							max_sync = bad_sectors;
3163						continue;
3164					}
3165				}
3166				bio = r10_bio->devs[0].bio;
3167				bio->bi_next = biolist;
3168				biolist = bio;
3169				bio->bi_end_io = end_sync_read;
3170				bio_set_op_attrs(bio, REQ_OP_READ, 0);
3171				if (test_bit(FailFast, &rdev->flags))
3172					bio->bi_opf |= MD_FAILFAST;
3173				from_addr = r10_bio->devs[j].addr;
3174				bio->bi_iter.bi_sector = from_addr +
3175					rdev->data_offset;
3176				bio_set_dev(bio, rdev->bdev);
3177				atomic_inc(&rdev->nr_pending);
3178				/* and we write to 'i' (if not in_sync) */
3179
3180				for (k=0; k<conf->copies; k++)
3181					if (r10_bio->devs[k].devnum == i)
3182						break;
3183				BUG_ON(k == conf->copies);
3184				to_addr = r10_bio->devs[k].addr;
3185				r10_bio->devs[0].devnum = d;
3186				r10_bio->devs[0].addr = from_addr;
3187				r10_bio->devs[1].devnum = i;
3188				r10_bio->devs[1].addr = to_addr;
3189
3190				if (need_recover) {
3191					bio = r10_bio->devs[1].bio;
3192					bio->bi_next = biolist;
3193					biolist = bio;
3194					bio->bi_end_io = end_sync_write;
3195					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3196					bio->bi_iter.bi_sector = to_addr
3197						+ mrdev->data_offset;
3198					bio_set_dev(bio, mrdev->bdev);
3199					atomic_inc(&r10_bio->remaining);
3200				} else
3201					r10_bio->devs[1].bio->bi_end_io = NULL;
3202
3203				/* and maybe write to replacement */
3204				bio = r10_bio->devs[1].repl_bio;
3205				if (bio)
3206					bio->bi_end_io = NULL;
3207				/* Note: if need_replace, then bio
3208				 * cannot be NULL as r10buf_pool_alloc will
3209				 * have allocated it.
3210				 */
3211				if (!need_replace)
3212					break;
3213				bio->bi_next = biolist;
3214				biolist = bio;
3215				bio->bi_end_io = end_sync_write;
3216				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3217				bio->bi_iter.bi_sector = to_addr +
3218					mreplace->data_offset;
3219				bio_set_dev(bio, mreplace->bdev);
3220				atomic_inc(&r10_bio->remaining);
3221				break;
3222			}
3223			rcu_read_unlock();
3224			if (j == conf->copies) {
3225				/* Cannot recover, so abort the recovery or
3226				 * record a bad block */
3227				if (any_working) {
3228					/* problem is that there are bad blocks
3229					 * on other device(s)
3230					 */
3231					int k;
3232					for (k = 0; k < conf->copies; k++)
3233						if (r10_bio->devs[k].devnum == i)
3234							break;
3235					if (!test_bit(In_sync,
3236						      &mrdev->flags)
3237					    && !rdev_set_badblocks(
3238						    mrdev,
3239						    r10_bio->devs[k].addr,
3240						    max_sync, 0))
3241						any_working = 0;
3242					if (mreplace &&
3243					    !rdev_set_badblocks(
3244						    mreplace,
3245						    r10_bio->devs[k].addr,
3246						    max_sync, 0))
3247						any_working = 0;
3248				}
3249				if (!any_working)  {
3250					if (!test_and_set_bit(MD_RECOVERY_INTR,
3251							      &mddev->recovery))
3252						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3253						       mdname(mddev));
3254					mirror->recovery_disabled
3255						= mddev->recovery_disabled;
3256				}
3257				put_buf(r10_bio);
3258				if (rb2)
3259					atomic_dec(&rb2->remaining);
3260				r10_bio = rb2;
3261				rdev_dec_pending(mrdev, mddev);
3262				if (mreplace)
3263					rdev_dec_pending(mreplace, mddev);
3264				break;
3265			}
3266			rdev_dec_pending(mrdev, mddev);
3267			if (mreplace)
3268				rdev_dec_pending(mreplace, mddev);
3269			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3270				/* Only want this if there is elsewhere to
3271				 * read from. 'j' is currently the first
3272				 * readable copy.
3273				 */
3274				int targets = 1;
3275				for (; j < conf->copies; j++) {
3276					int d = r10_bio->devs[j].devnum;
3277					if (conf->mirrors[d].rdev &&
3278					    test_bit(In_sync,
3279						      &conf->mirrors[d].rdev->flags))
3280						targets++;
3281				}
3282				if (targets == 1)
3283					r10_bio->devs[0].bio->bi_opf
3284						&= ~MD_FAILFAST;
3285			}
3286		}
3287		if (biolist == NULL) {
3288			while (r10_bio) {
3289				struct r10bio *rb2 = r10_bio;
3290				r10_bio = (struct r10bio*) rb2->master_bio;
3291				rb2->master_bio = NULL;
3292				put_buf(rb2);
3293			}
3294			goto giveup;
3295		}
3296	} else {
3297		/* resync. Schedule a read for every block at this virt offset */
3298		int count = 0;
3299
3300		/*
3301		 * curr_resync_completed may not be updated in time, yet we
3302		 * set cluster_sync_low based on it.
3303		 * Check against "sector_nr + 2 * RESYNC_SECTORS" to be
3304		 * safe; this ensures curr_resync_completed is updated by
3305		 * md_bitmap_cond_end_sync.
3306		 */
3307		md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3308					mddev_is_clustered(mddev) &&
3309					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3310
3311		if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3312					  &sync_blocks, mddev->degraded) &&
3313		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3314						 &mddev->recovery)) {
3315			/* We can skip this block */
3316			*skipped = 1;
3317			return sync_blocks + sectors_skipped;
3318		}
3319		if (sync_blocks < max_sync)
3320			max_sync = sync_blocks;
3321		r10_bio = raid10_alloc_init_r10buf(conf);
3322		r10_bio->state = 0;
3323
3324		r10_bio->mddev = mddev;
3325		atomic_set(&r10_bio->remaining, 0);
3326		raise_barrier(conf, 0);
3327		conf->next_resync = sector_nr;
3328
3329		r10_bio->master_bio = NULL;
3330		r10_bio->sector = sector_nr;
3331		set_bit(R10BIO_IsSync, &r10_bio->state);
3332		raid10_find_phys(conf, r10_bio);
3333		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3334
3335		for (i = 0; i < conf->copies; i++) {
3336			int d = r10_bio->devs[i].devnum;
3337			sector_t first_bad, sector;
3338			int bad_sectors;
3339			struct md_rdev *rdev;
3340
3341			if (r10_bio->devs[i].repl_bio)
3342				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3343
3344			bio = r10_bio->devs[i].bio;
3345			bio->bi_status = BLK_STS_IOERR;
3346			rcu_read_lock();
3347			rdev = rcu_dereference(conf->mirrors[d].rdev);
3348			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3349				rcu_read_unlock();
3350				continue;
3351			}
3352			sector = r10_bio->devs[i].addr;
3353			if (is_badblock(rdev, sector, max_sync,
3354					&first_bad, &bad_sectors)) {
3355				if (first_bad > sector)
3356					max_sync = first_bad - sector;
3357				else {
3358					bad_sectors -= (sector - first_bad);
3359					if (max_sync > bad_sectors)
3360						max_sync = bad_sectors;
3361					rcu_read_unlock();
3362					continue;
3363				}
3364			}
3365			atomic_inc(&rdev->nr_pending);
3366			atomic_inc(&r10_bio->remaining);
3367			bio->bi_next = biolist;
3368			biolist = bio;
3369			bio->bi_end_io = end_sync_read;
3370			bio_set_op_attrs(bio, REQ_OP_READ, 0);
3371			if (test_bit(FailFast, &rdev->flags))
3372				bio->bi_opf |= MD_FAILFAST;
3373			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3374			bio_set_dev(bio, rdev->bdev);
3375			count++;
3376
3377			rdev = rcu_dereference(conf->mirrors[d].replacement);
3378			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3379				rcu_read_unlock();
3380				continue;
3381			}
3382			atomic_inc(&rdev->nr_pending);
3383
3384			/* Need to set up for writing to the replacement */
3385			bio = r10_bio->devs[i].repl_bio;
3386			bio->bi_status = BLK_STS_IOERR;
3387
3388			sector = r10_bio->devs[i].addr;
3389			bio->bi_next = biolist;
3390			biolist = bio;
3391			bio->bi_end_io = end_sync_write;
3392			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3393			if (test_bit(FailFast, &rdev->flags))
3394				bio->bi_opf |= MD_FAILFAST;
3395			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3396			bio_set_dev(bio, rdev->bdev);
3397			count++;
3398			rcu_read_unlock();
3399		}
3400
3401		if (count < 2) {
3402			for (i=0; i<conf->copies; i++) {
3403				int d = r10_bio->devs[i].devnum;
3404				if (r10_bio->devs[i].bio->bi_end_io)
3405					rdev_dec_pending(conf->mirrors[d].rdev,
3406							 mddev);
3407				if (r10_bio->devs[i].repl_bio &&
3408				    r10_bio->devs[i].repl_bio->bi_end_io)
3409					rdev_dec_pending(
3410						conf->mirrors[d].replacement,
3411						mddev);
3412			}
3413			put_buf(r10_bio);
3414			biolist = NULL;
3415			goto giveup;
3416		}
3417	}
3418
3419	nr_sectors = 0;
3420	if (sector_nr + max_sync < max_sector)
3421		max_sector = sector_nr + max_sync;
3422	do {
3423		struct page *page;
3424		int len = PAGE_SIZE;
3425		if (sector_nr + (len>>9) > max_sector)
3426			len = (max_sector - sector_nr) << 9;
3427		if (len == 0)
3428			break;
3429		for (bio= biolist ; bio ; bio=bio->bi_next) {
3430			struct resync_pages *rp = get_resync_pages(bio);
3431			page = resync_fetch_page(rp, page_idx);
3432			/*
3433			 * won't fail because the vec table is big enough
3434			 * to hold all these pages
3435			 */
3436			bio_add_page(bio, page, len, 0);
3437		}
3438		nr_sectors += len>>9;
3439		sector_nr += len>>9;
3440	} while (++page_idx < RESYNC_PAGES);
3441	r10_bio->sectors = nr_sectors;
3442
3443	if (mddev_is_clustered(mddev) &&
3444	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3445		/* It is resync not recovery */
3446		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3447			conf->cluster_sync_low = mddev->curr_resync_completed;
3448			raid10_set_cluster_sync_high(conf);
3449			/* Send resync message */
3450			md_cluster_ops->resync_info_update(mddev,
3451						conf->cluster_sync_low,
3452						conf->cluster_sync_high);
3453		}
3454	} else if (mddev_is_clustered(mddev)) {
3455		/* This is recovery not resync */
3456		sector_t sect_va1, sect_va2;
3457		bool broadcast_msg = false;
3458
3459		for (i = 0; i < conf->geo.raid_disks; i++) {
3460			/*
3461			 * sector_nr is a device address for recovery, so we
3462			 * need to translate it to an array address before
3463			 * comparing with cluster_sync_high.
3464			 */
3465			sect_va1 = raid10_find_virt(conf, sector_nr, i);
3466
3467			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3468				broadcast_msg = true;
3469				/*
3470				 * curr_resync_completed is similar to
3471				 * sector_nr, so do the same translation.
3472				 */
3473				sect_va2 = raid10_find_virt(conf,
3474					mddev->curr_resync_completed, i);
3475
3476				if (conf->cluster_sync_low == 0 ||
3477				    conf->cluster_sync_low > sect_va2)
3478					conf->cluster_sync_low = sect_va2;
3479			}
3480		}
3481		if (broadcast_msg) {
3482			raid10_set_cluster_sync_high(conf);
3483			md_cluster_ops->resync_info_update(mddev,
3484						conf->cluster_sync_low,
3485						conf->cluster_sync_high);
3486		}
3487	}
3488
3489	while (biolist) {
3490		bio = biolist;
3491		biolist = biolist->bi_next;
3492
3493		bio->bi_next = NULL;
3494		r10_bio = get_resync_r10bio(bio);
3495		r10_bio->sectors = nr_sectors;
3496
3497		if (bio->bi_end_io == end_sync_read) {
3498			md_sync_acct_bio(bio, nr_sectors);
3499			bio->bi_status = 0;
3500			generic_make_request(bio);
3501		}
3502	}
3503
3504	if (sectors_skipped)
3505		/* pretend they weren't skipped, it makes
3506		 * no important difference in this case
3507		 */
3508		md_done_sync(mddev, sectors_skipped, 1);
3509
3510	return sectors_skipped + nr_sectors;
3511 giveup:
3512	/* There is nowhere to write: all non-sync
3513	 * drives must be failed or in resync, or all drives
3514	 * have a bad block, so try the next chunk...
3515	 */
3516	if (sector_nr + max_sync < max_sector)
3517		max_sector = sector_nr + max_sync;
3518
3519	sectors_skipped += (max_sector - sector_nr);
3520	chunks_skipped ++;
3521	sector_nr = max_sector;
3522	goto skipped;
3523}
3524
3525static sector_t
3526raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3527{
3528	sector_t size;
3529	struct r10conf *conf = mddev->private;
3530
3531	if (!raid_disks)
3532		raid_disks = min(conf->geo.raid_disks,
3533				 conf->prev.raid_disks);
3534	if (!sectors)
3535		sectors = conf->dev_sectors;
3536
3537	size = sectors >> conf->geo.chunk_shift;
3538	sector_div(size, conf->geo.far_copies);
3539	size = size * raid_disks;
3540	sector_div(size, conf->geo.near_copies);
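	/* Net effect: capacity = sectors * raid_disks /
	 * (near_copies * far_copies), rounded down to whole chunks;
	 * e.g. 4 devices with near_copies = 2, far_copies = 1 give an
	 * array twice the size of a single device.
	 */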
3541
3542	return size << conf->geo.chunk_shift;
3543}
3544
3545static void calc_sectors(struct r10conf *conf, sector_t size)
3546{
3547	/* Calculate the number of sectors-per-device that will
3548	 * actually be used, and set conf->dev_sectors and
3549	 * conf->stride
3550	 */
3551
3552	size = size >> conf->geo.chunk_shift;
3553	sector_div(size, conf->geo.far_copies);
3554	size = size * conf->geo.raid_disks;
3555	sector_div(size, conf->geo.near_copies);
3556	/* 'size' is now the number of chunks in the array */
3557	/* calculate "used chunks per device" */
3558	size = size * conf->copies;
3559
3560	/* We need to round up when dividing by raid_disks to
3561	 * get the stride size.
3562	 */
3563	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3564
3565	conf->dev_sectors = size << conf->geo.chunk_shift;
3566
3567	if (conf->geo.far_offset)
3568		conf->geo.stride = 1 << conf->geo.chunk_shift;
3569	else {
3570		sector_div(size, conf->geo.far_copies);
3571		conf->geo.stride = size << conf->geo.chunk_shift;
3572	}
3573}
3574
3575enum geo_type {geo_new, geo_old, geo_start};
3576static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3577{
3578	int nc, fc, fo;
3579	int layout, chunk, disks;
3580	switch (new) {
3581	case geo_old:
3582		layout = mddev->layout;
3583		chunk = mddev->chunk_sectors;
3584		disks = mddev->raid_disks - mddev->delta_disks;
3585		break;
3586	case geo_new:
3587		layout = mddev->new_layout;
3588		chunk = mddev->new_chunk_sectors;
3589		disks = mddev->raid_disks;
3590		break;
3591	default: /* avoid 'may be unused' warnings */
3592	case geo_start: /* new when starting reshape - raid_disks not
3593			 * updated yet. */
3594		layout = mddev->new_layout;
3595		chunk = mddev->new_chunk_sectors;
3596		disks = mddev->raid_disks + mddev->delta_disks;
3597		break;
3598	}
3599	if (layout >> 19)
3600		return -1;
3601	if (chunk < (PAGE_SIZE >> 9) ||
3602	    !is_power_of_2(chunk))
3603		return -2;
3604	nc = layout & 255;
3605	fc = (layout >> 8) & 255;
3606	fo = layout & (1<<16);
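	/* e.g. the common "n2" layout value 0x102 decodes to
	 * near_copies = 2, far_copies = 1, far_offset = 0.
	 */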
3607	geo->raid_disks = disks;
3608	geo->near_copies = nc;
3609	geo->far_copies = fc;
3610	geo->far_offset = fo;
3611	switch (layout >> 17) {
3612	case 0:	/* original layout.  simple but not always optimal */
3613		geo->far_set_size = disks;
3614		break;
3615	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3616		 * actually using this, but leave code here just in case.*/
3617		geo->far_set_size = disks/fc;
3618		WARN(geo->far_set_size < fc,
3619		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3620		break;
3621	case 2: /* "improved" layout fixed to match documentation */
3622		geo->far_set_size = fc * nc;
3623		break;
3624	default: /* Not a valid layout */
3625		return -1;
3626	}
3627	geo->chunk_mask = chunk - 1;
3628	geo->chunk_shift = ffz(~chunk);
3629	return nc*fc;
3630}
3631
3632static struct r10conf *setup_conf(struct mddev *mddev)
3633{
3634	struct r10conf *conf = NULL;
3635	int err = -EINVAL;
3636	struct geom geo;
3637	int copies;
3638
3639	copies = setup_geo(&geo, mddev, geo_new);
3640
3641	if (copies == -2) {
3642		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3643			mdname(mddev), PAGE_SIZE);
3644		goto out;
3645	}
3646
3647	if (copies < 2 || copies > mddev->raid_disks) {
3648		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3649			mdname(mddev), mddev->new_layout);
3650		goto out;
3651	}
3652
3653	err = -ENOMEM;
3654	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3655	if (!conf)
3656		goto out;
3657
3658	/* FIXME calc properly */
3659	conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3660				sizeof(struct raid10_info),
3661				GFP_KERNEL);
3662	if (!conf->mirrors)
3663		goto out;
3664
3665	conf->tmppage = alloc_page(GFP_KERNEL);
3666	if (!conf->tmppage)
3667		goto out;
3668
3669	conf->geo = geo;
3670	conf->copies = copies;
3671	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
3672			   rbio_pool_free, conf);
3673	if (err)
3674		goto out;
3675
3676	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3677	if (err)
3678		goto out;
3679
3680	calc_sectors(conf, mddev->dev_sectors);
3681	if (mddev->reshape_position == MaxSector) {
3682		conf->prev = conf->geo;
3683		conf->reshape_progress = MaxSector;
3684	} else {
3685		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3686			err = -EINVAL;
3687			goto out;
3688		}
3689		conf->reshape_progress = mddev->reshape_position;
3690		if (conf->prev.far_offset)
3691			conf->prev.stride = 1 << conf->prev.chunk_shift;
3692		else
3693			/* far_copies must be 1 */
3694			conf->prev.stride = conf->dev_sectors;
3695	}
3696	conf->reshape_safe = conf->reshape_progress;
3697	spin_lock_init(&conf->device_lock);
3698	INIT_LIST_HEAD(&conf->retry_list);
3699	INIT_LIST_HEAD(&conf->bio_end_io_list);
3700
3701	spin_lock_init(&conf->resync_lock);
3702	init_waitqueue_head(&conf->wait_barrier);
3703	atomic_set(&conf->nr_pending, 0);
3704
3705	err = -ENOMEM;
3706	conf->thread = md_register_thread(raid10d, mddev, "raid10");
3707	if (!conf->thread)
3708		goto out;
3709
3710	conf->mddev = mddev;
3711	return conf;
3712
3713 out:
3714	if (conf) {
3715		mempool_exit(&conf->r10bio_pool);
3716		kfree(conf->mirrors);
3717		safe_put_page(conf->tmppage);
3718		bioset_exit(&conf->bio_split);
3719		kfree(conf);
3720	}
3721	return ERR_PTR(err);
3722}
3723
3724static int raid10_run(struct mddev *mddev)
3725{
3726	struct r10conf *conf;
3727	int i, disk_idx, chunk_size;
3728	struct raid10_info *disk;
3729	struct md_rdev *rdev;
3730	sector_t size;
3731	sector_t min_offset_diff = 0;
3732	int first = 1;
3733	bool discard_supported = false;
3734
3735	if (mddev_init_writes_pending(mddev) < 0)
3736		return -ENOMEM;
3737
3738	if (mddev->private == NULL) {
3739		conf = setup_conf(mddev);
3740		if (IS_ERR(conf))
3741			return PTR_ERR(conf);
3742		mddev->private = conf;
3743	}
3744	conf = mddev->private;
3745	if (!conf)
3746		goto out;
3747
3748	if (mddev_is_clustered(conf->mddev)) {
3749		int fc, fo;
3750
3751		fc = (mddev->layout >> 8) & 255;
3752		fo = mddev->layout & (1<<16);
3753		if (fc > 1 || fo > 0) {
3754			pr_err("only near layout is supported by clustered"
3755				" raid10\n");
3756			goto out_free_conf;
3757		}
3758	}
3759
3760	mddev->thread = conf->thread;
3761	conf->thread = NULL;
3762
3763	chunk_size = mddev->chunk_sectors << 9;
3764	if (mddev->queue) {
3765		blk_queue_max_discard_sectors(mddev->queue,
3766					      mddev->chunk_sectors);
3767		blk_queue_max_write_same_sectors(mddev->queue, 0);
3768		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3769		blk_queue_io_min(mddev->queue, chunk_size);
3770		if (conf->geo.raid_disks % conf->geo.near_copies)
3771			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3772		else
3773			blk_queue_io_opt(mddev->queue, chunk_size *
3774					 (conf->geo.raid_disks / conf->geo.near_copies));
3775	}
3776
3777	rdev_for_each(rdev, mddev) {
3778		long long diff;
3779
3780		disk_idx = rdev->raid_disk;
3781		if (disk_idx < 0)
3782			continue;
3783		if (disk_idx >= conf->geo.raid_disks &&
3784		    disk_idx >= conf->prev.raid_disks)
3785			continue;
3786		disk = conf->mirrors + disk_idx;
3787
3788		if (test_bit(Replacement, &rdev->flags)) {
3789			if (disk->replacement)
3790				goto out_free_conf;
3791			disk->replacement = rdev;
3792		} else {
3793			if (disk->rdev)
3794				goto out_free_conf;
3795			disk->rdev = rdev;
3796		}
3797		diff = (rdev->new_data_offset - rdev->data_offset);
3798		if (!mddev->reshape_backwards)
3799			diff = -diff;
3800		if (diff < 0)
3801			diff = 0;
3802		if (first || diff < min_offset_diff)
3803			min_offset_diff = diff;
3804
3805		if (mddev->gendisk)
3806			disk_stack_limits(mddev->gendisk, rdev->bdev,
3807					  rdev->data_offset << 9);
3808
3809		disk->head_position = 0;
3810
3811		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3812			discard_supported = true;
3813		first = 0;
3814	}
3815
3816	if (mddev->queue) {
3817		if (discard_supported)
3818			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3819						mddev->queue);
3820		else
3821			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3822						  mddev->queue);
3823	}
3824	/* need to check that every block has at least one working mirror */
3825	if (!enough(conf, -1)) {
3826		pr_err("md/raid10:%s: not enough operational mirrors.\n",
3827		       mdname(mddev));
3828		goto out_free_conf;
3829	}
3830
3831	if (conf->reshape_progress != MaxSector) {
3832		/* must ensure that shape change is supported */
3833		if (conf->geo.far_copies != 1 &&
3834		    conf->geo.far_offset == 0)
3835			goto out_free_conf;
3836		if (conf->prev.far_copies != 1 &&
3837		    conf->prev.far_offset == 0)
3838			goto out_free_conf;
3839	}
3840
3841	mddev->degraded = 0;
3842	for (i = 0;
3843	     i < conf->geo.raid_disks
3844		     || i < conf->prev.raid_disks;
3845	     i++) {
3846
3847		disk = conf->mirrors + i;
3848
3849		if (!disk->rdev && disk->replacement) {
3850			/* The replacement is all we have - use it */
3851			disk->rdev = disk->replacement;
3852			disk->replacement = NULL;
3853			clear_bit(Replacement, &disk->rdev->flags);
3854		}
3855
3856		if (!disk->rdev ||
3857		    !test_bit(In_sync, &disk->rdev->flags)) {
3858			disk->head_position = 0;
3859			mddev->degraded++;
3860			if (disk->rdev &&
3861			    disk->rdev->saved_raid_disk < 0)
3862				conf->fullsync = 1;
3863		}
3864
3865		if (disk->replacement &&
3866		    !test_bit(In_sync, &disk->replacement->flags) &&
3867		    disk->replacement->saved_raid_disk < 0) {
3868			conf->fullsync = 1;
3869		}
3870
3871		disk->recovery_disabled = mddev->recovery_disabled - 1;
3872	}
3873
3874	if (mddev->recovery_cp != MaxSector)
3875		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
3876			  mdname(mddev));
3877	pr_info("md/raid10:%s: active with %d out of %d devices\n",
3878		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3879		conf->geo.raid_disks);
3880	/*
3881	 * Ok, everything is just fine now
3882	 */
3883	mddev->dev_sectors = conf->dev_sectors;
3884	size = raid10_size(mddev, 0, 0);
3885	md_set_array_sectors(mddev, size);
3886	mddev->resync_max_sectors = size;
3887	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3888
3889	if (mddev->queue) {
3890		int stripe = conf->geo.raid_disks *
3891			((mddev->chunk_sectors << 9) / PAGE_SIZE);
3892
3893		/* Calculate max read-ahead size.
3894		 * We need to readahead at least twice a whole stripe....
3895		 * maybe...
3896		 */
3897		stripe /= conf->geo.near_copies;
3898		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
3899			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
3900	}
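	/*
	 * For example (hypothetical numbers): with 4 devices, 512KiB chunks
	 * (chunk_sectors == 1024), near_copies == 2 and PAGE_SIZE == 4096,
	 * a chunk is 128 pages, so stripe = 4 * 128 / 2 = 256 pages and the
	 * read-ahead window is raised to at least 512 pages (2MiB).
	 */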
3901
3902	if (md_integrity_register(mddev))
3903		goto out_free_conf;
3904
3905	if (conf->reshape_progress != MaxSector) {
3906		unsigned long before_length, after_length;
3907
3908		before_length = ((1 << conf->prev.chunk_shift) *
3909				 conf->prev.far_copies);
3910		after_length = ((1 << conf->geo.chunk_shift) *
3911				conf->geo.far_copies);
3912
3913		if (max(before_length, after_length) > min_offset_diff) {
3914			/* This cannot work */
3915			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
3916			goto out_free_conf;
3917		}
3918		conf->offset_diff = min_offset_diff;
3919
3920		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3921		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3922		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3923		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3924		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3925							"reshape");
3926		if (!mddev->sync_thread)
3927			goto out_free_conf;
3928	}
3929
3930	return 0;
3931
3932out_free_conf:
3933	md_unregister_thread(&mddev->thread);
3934	mempool_exit(&conf->r10bio_pool);
3935	safe_put_page(conf->tmppage);
3936	kfree(conf->mirrors);
3937	kfree(conf);
3938	mddev->private = NULL;
3939out:
3940	return -EIO;
3941}
3942
3943static void raid10_free(struct mddev *mddev, void *priv)
3944{
3945	struct r10conf *conf = priv;
3946
3947	mempool_exit(&conf->r10bio_pool);
3948	safe_put_page(conf->tmppage);
3949	kfree(conf->mirrors);
3950	kfree(conf->mirrors_old);
3951	kfree(conf->mirrors_new);
3952	bioset_exit(&conf->bio_split);
3953	kfree(conf);
3954}
3955
3956static void raid10_quiesce(struct mddev *mddev, int quiesce)
3957{
3958	struct r10conf *conf = mddev->private;
3959
3960	if (quiesce)
3961		raise_barrier(conf, 0);
3962	else
3963		lower_barrier(conf);
3964}
3965
3966static int raid10_resize(struct mddev *mddev, sector_t sectors)
3967{
3968	/* Resize of 'far' arrays is not supported.
3969	 * For 'near' and 'offset' arrays we can set the
3970	 * number of sectors used to be an appropriate multiple
3971	 * of the chunk size.
3972	 * For 'offset', this is far_copies*chunksize.
3973	 * For 'near' the multiplier is the LCM of
3974	 * near_copies and raid_disks.
3975	 * So if far_copies > 1 && !far_offset, fail.
3976	 * Else find LCM(raid_disks, near_copies)*far_copies and
3977	 * multiply by chunk_size.  Then round to this number.
3978	 * This is mostly done by raid10_size()
3979	 */
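	/*
	 * For instance (hypothetical geometry): a 'near 2' array over
	 * 3 devices with 64KiB chunks has far_copies == 1 and
	 * LCM(near_copies, raid_disks) = LCM(2, 3) = 6, so the usable array
	 * size is a multiple of 6 chunks (384KiB); raid10_size() performs
	 * that rounding.
	 */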
3980	struct r10conf *conf = mddev->private;
3981	sector_t oldsize, size;
3982
3983	if (mddev->reshape_position != MaxSector)
3984		return -EBUSY;
3985
3986	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3987		return -EINVAL;
3988
3989	oldsize = raid10_size(mddev, 0, 0);
3990	size = raid10_size(mddev, sectors, 0);
3991	if (mddev->external_size &&
3992	    mddev->array_sectors > size)
3993		return -EINVAL;
3994	if (mddev->bitmap) {
3995		int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
3996		if (ret)
3997			return ret;
3998	}
3999	md_set_array_sectors(mddev, size);
4000	if (sectors > mddev->dev_sectors &&
4001	    mddev->recovery_cp > oldsize) {
4002		mddev->recovery_cp = oldsize;
4003		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4004	}
4005	calc_sectors(conf, sectors);
4006	mddev->dev_sectors = conf->dev_sectors;
4007	mddev->resync_max_sectors = size;
4008	return 0;
4009}
4010
4011static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4012{
4013	struct md_rdev *rdev;
4014	struct r10conf *conf;
4015
4016	if (mddev->degraded > 0) {
4017		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4018			mdname(mddev));
4019		return ERR_PTR(-EINVAL);
4020	}
4021	sector_div(size, devs);
4022
4023	/* Set new parameters */
4024	mddev->new_level = 10;
4025	/* new layout: far_copies = 1, near_copies = 2 */
4026	mddev->new_layout = (1<<8) + 2;
4027	mddev->new_chunk_sectors = mddev->chunk_sectors;
4028	mddev->delta_disks = mddev->raid_disks;
4029	mddev->raid_disks *= 2;
4030	/* make sure it will not be marked as dirty */
4031	mddev->recovery_cp = MaxSector;
4032	mddev->dev_sectors = size;
4033
4034	conf = setup_conf(mddev);
4035	if (!IS_ERR(conf)) {
4036		rdev_for_each(rdev, mddev)
4037			if (rdev->raid_disk >= 0) {
4038				rdev->new_raid_disk = rdev->raid_disk * 2;
4039				rdev->sectors = size;
4040			}
4041		conf->barrier = 1;
4042	}
4043
4044	return conf;
4045}
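
/*
 * For example (hypothetical starting point): taking over a 2-drive raid0
 * with a single zone produces a 4-drive 'near 2' raid10.  The existing
 * drives keep their data and move to slots 0 and 2 (raid_disk * 2), while
 * slots 1 and 3 are left for mirror devices to be added and resynced later.
 */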
4046
4047static void *raid10_takeover(struct mddev *mddev)
4048{
4049	struct r0conf *raid0_conf;
4050
4051	/* raid10 can take over:
4052	 *  raid0 - provided it has only a single zone (i.e. all drives are the same size)
4053	 */
4054	if (mddev->level == 0) {
4055		/* for raid0 takeover only one zone is supported */
4056		raid0_conf = mddev->private;
4057		if (raid0_conf->nr_strip_zones > 1) {
4058			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4059				mdname(mddev));
4060			return ERR_PTR(-EINVAL);
4061		}
4062		return raid10_takeover_raid0(mddev,
4063			raid0_conf->strip_zone->zone_end,
4064			raid0_conf->strip_zone->nb_dev);
4065	}
4066	return ERR_PTR(-EINVAL);
4067}
4068
4069static int raid10_check_reshape(struct mddev *mddev)
4070{
4071	/* Called when there is a request to change
4072	 * - layout (to ->new_layout)
4073	 * - chunk size (to ->new_chunk_sectors)
4074	 * - raid_disks (by delta_disks)
4075	 * or when trying to restart a reshape that was ongoing.
4076	 *
4077	 * We need to validate the request and possibly allocate
4078	 * space if that might be an issue later.
4079	 *
4080	 * Currently we reject any reshape of a 'far' mode array,
4081	 * allow chunk size to change if new is generally acceptable,
4082	 * allow raid_disks to increase, and allow
4083	 * a switch between 'near' mode and 'offset' mode.
4084	 */
4085	struct r10conf *conf = mddev->private;
4086	struct geom geo;
4087
4088	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4089		return -EINVAL;
4090
4091	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4092		/* mustn't change number of copies */
4093		return -EINVAL;
4094	if (geo.far_copies > 1 && !geo.far_offset)
4095		/* Cannot switch to 'far' mode */
4096		return -EINVAL;
4097
4098	if (mddev->array_sectors & geo.chunk_mask)
4099		/* not factor of array size */
4100		return -EINVAL;
4101
4102	if (!enough(conf, -1))
4103		return -EINVAL;
4104
4105	kfree(conf->mirrors_new);
4106	conf->mirrors_new = NULL;
4107	if (mddev->delta_disks > 0) {
4108		/* allocate new 'mirrors' list */
4109		conf->mirrors_new =
4110			kcalloc(mddev->raid_disks + mddev->delta_disks,
4111				sizeof(struct raid10_info),
4112				GFP_KERNEL);
4113		if (!conf->mirrors_new)
4114			return -ENOMEM;
4115	}
4116	return 0;
4117}
4118
4119/*
4120 * Need to check if array has failed when deciding whether to:
4121 *  - start an array
4122 *  - remove non-faulty devices
4123 *  - add a spare
4124 *  - allow a reshape
4125 * This determination is simple when no reshape is happening.
4126 * However if there is a reshape, we need to carefully check
4127 * both the before and after sections.
4128 * This is because some failed devices may only affect one
4129 * of the two sections, and some non-in_sync devices may
4130	 * be in sync in the section most affected by failed devices.
4131 */
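/*
 * For example (hypothetical reshape): when growing from 4 to 6 devices,
 * the two new slots are only examined against the new ('geo') section;
 * a device there that is not yet In_sync does not add to 'degraded'
 * because that part of the array has not been written yet.  The value
 * reported is the larger of the two per-section counts.
 */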
4132static int calc_degraded(struct r10conf *conf)
4133{
4134	int degraded, degraded2;
4135	int i;
4136
4137	rcu_read_lock();
4138	degraded = 0;
4139	/* 'prev' section first */
4140	for (i = 0; i < conf->prev.raid_disks; i++) {
4141		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 
4142		if (!rdev || test_bit(Faulty, &rdev->flags))
4143			degraded++;
4144		else if (!test_bit(In_sync, &rdev->flags))
4145			/* When we can reduce the number of devices in
4146			 * an array, this might not contribute to
4147			 * 'degraded'.  It does now.
4148			 */
4149			degraded++;
4150	}
4151	rcu_read_unlock();
4152	if (conf->geo.raid_disks == conf->prev.raid_disks)
4153		return degraded;
4154	rcu_read_lock();
4155	degraded2 = 0;
4156	for (i = 0; i < conf->geo.raid_disks; i++) {
4157		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 
4158		if (!rdev || test_bit(Faulty, &rdev->flags))
4159			degraded2++;
4160		else if (!test_bit(In_sync, &rdev->flags)) {
4161			/* If reshape is increasing the number of devices,
4162			 * this section has already been recovered, so
4163			 * it doesn't contribute to degraded.
4164			 * else it does.
4165			 */
4166			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4167				degraded2++;
4168		}
4169	}
4170	rcu_read_unlock();
4171	if (degraded2 > degraded)
4172		return degraded2;
4173	return degraded;
4174}
4175
4176static int raid10_start_reshape(struct mddev *mddev)
4177{
4178	/* A 'reshape' has been requested. This commits
4179	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE
4180	 * This also checks if there are enough spares and adds them
4181	 * to the array.
4182	 * We currently require enough spares to make the final
4183	 * array non-degraded.  We also require that the difference
4184	 * between old and new data_offset - on each device - is
4185	 * enough that we never risk over-writing.
4186	 */
4187
4188	unsigned long before_length, after_length;
4189	sector_t min_offset_diff = 0;
4190	int first = 1;
4191	struct geom new;
4192	struct r10conf *conf = mddev->private;
4193	struct md_rdev *rdev;
4194	int spares = 0;
4195	int ret;
4196
4197	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4198		return -EBUSY;
4199
4200	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4201		return -EINVAL;
4202
4203	before_length = ((1 << conf->prev.chunk_shift) *
4204			 conf->prev.far_copies);
4205	after_length = ((1 << conf->geo.chunk_shift) *
4206			conf->geo.far_copies);
4207
4208	rdev_for_each(rdev, mddev) {
4209		if (!test_bit(In_sync, &rdev->flags)
4210		    && !test_bit(Faulty, &rdev->flags))
4211			spares++;
4212		if (rdev->raid_disk >= 0) {
4213			long long diff = (rdev->new_data_offset
4214					  - rdev->data_offset);
4215			if (!mddev->reshape_backwards)
4216				diff = -diff;
4217			if (diff < 0)
4218				diff = 0;
4219			if (first || diff < min_offset_diff)
4220				min_offset_diff = diff;
4221			first = 0;
4222		}
4223	}
4224
4225	if (max(before_length, after_length) > min_offset_diff)
4226		return -EINVAL;
4227
4228	if (spares < mddev->delta_disks)
4229		return -EINVAL;
4230
4231	conf->offset_diff = min_offset_diff;
4232	spin_lock_irq(&conf->device_lock);
4233	if (conf->mirrors_new) {
4234		memcpy(conf->mirrors_new, conf->mirrors,
4235		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4236		smp_mb();
4237		kfree(conf->mirrors_old);
4238		conf->mirrors_old = conf->mirrors;
4239		conf->mirrors = conf->mirrors_new;
4240		conf->mirrors_new = NULL;
4241	}
4242	setup_geo(&conf->geo, mddev, geo_start);
4243	smp_mb();
4244	if (mddev->reshape_backwards) {
4245		sector_t size = raid10_size(mddev, 0, 0);
4246		if (size < mddev->array_sectors) {
4247			spin_unlock_irq(&conf->device_lock);
4248			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4249				mdname(mddev));
4250			return -EINVAL;
4251		}
4252		mddev->resync_max_sectors = size;
4253		conf->reshape_progress = size;
4254	} else
4255		conf->reshape_progress = 0;
4256	conf->reshape_safe = conf->reshape_progress;
4257	spin_unlock_irq(&conf->device_lock);
4258
4259	if (mddev->delta_disks && mddev->bitmap) {
4260		struct mdp_superblock_1 *sb = NULL;
4261		sector_t oldsize, newsize;
4262
4263		oldsize = raid10_size(mddev, 0, 0);
4264		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4265
4266		if (!mddev_is_clustered(mddev)) {
4267			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4268			if (ret)
4269				goto abort;
4270			else
4271				goto out;
4272		}
4273
4274		rdev_for_each(rdev, mddev) {
4275			if (rdev->raid_disk > -1 &&
4276			    !test_bit(Faulty, &rdev->flags))
4277				sb = page_address(rdev->sb_page);
4278		}
4279
4280		/*
4281		 * Some node is already performing a reshape, so there is no need
4282		 * to call md_bitmap_resize again; it will be called when the
4283		 * BITMAP_RESIZE message is received.
4284		 */
4285		if ((sb && (le32_to_cpu(sb->feature_map) &
4286			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4287			goto out;
4288
4289		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4290		if (ret)
4291			goto abort;
4292
4293		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4294		if (ret) {
4295			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4296			goto abort;
4297		}
4298	}
4299out:
4300	if (mddev->delta_disks > 0) {
4301		rdev_for_each(rdev, mddev)
4302			if (rdev->raid_disk < 0 &&
4303			    !test_bit(Faulty, &rdev->flags)) {
4304				if (raid10_add_disk(mddev, rdev) == 0) {
4305					if (rdev->raid_disk >=
4306					    conf->prev.raid_disks)
4307						set_bit(In_sync, &rdev->flags);
4308					else
4309						rdev->recovery_offset = 0;
4310
4311					if (sysfs_link_rdev(mddev, rdev))
4312						/* Failure here  is OK */;
4313				}
4314			} else if (rdev->raid_disk >= conf->prev.raid_disks
4315				   && !test_bit(Faulty, &rdev->flags)) {
4316				/* This is a spare that was manually added */
4317				set_bit(In_sync, &rdev->flags);
4318			}
4319	}
4320	/* When a reshape changes the number of devices,
4321	 * ->degraded is measured against the larger of the
4322	 * pre and  post numbers.
4323	 */
4324	spin_lock_irq(&conf->device_lock);
4325	mddev->degraded = calc_degraded(conf);
4326	spin_unlock_irq(&conf->device_lock);
4327	mddev->raid_disks = conf->geo.raid_disks;
4328	mddev->reshape_position = conf->reshape_progress;
4329	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4330
4331	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4332	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4333	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4334	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4335	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4336
4337	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4338						"reshape");
4339	if (!mddev->sync_thread) {
4340		ret = -EAGAIN;
4341		goto abort;
4342	}
4343	conf->reshape_checkpoint = jiffies;
4344	md_wakeup_thread(mddev->sync_thread);
4345	md_new_event(mddev);
4346	return 0;
4347
4348abort:
4349	mddev->recovery = 0;
4350	spin_lock_irq(&conf->device_lock);
4351	conf->geo = conf->prev;
4352	mddev->raid_disks = conf->geo.raid_disks;
4353	rdev_for_each(rdev, mddev)
4354		rdev->new_data_offset = rdev->data_offset;
4355	smp_wmb();
4356	conf->reshape_progress = MaxSector;
4357	conf->reshape_safe = MaxSector;
4358	mddev->reshape_position = MaxSector;
4359	spin_unlock_irq(&conf->device_lock);
4360	return ret;
4361}
4362
4363/* Calculate the last device-address that could contain
4364 * any block from the chunk that includes the array-address 's'
4365 * and report the next address.
4366 * i.e. the address returned will be chunk-aligned and after
4367 * any data that is in the chunk containing 's'.
4368 */
4369static sector_t last_dev_address(sector_t s, struct geom *geo)
4370{
4371	s = (s | geo->chunk_mask) + 1;
4372	s >>= geo->chunk_shift;
4373	s *= geo->near_copies;
4374	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4375	s *= geo->far_copies;
4376	s <<= geo->chunk_shift;
4377	return s;
4378}
4379
4380/* Calculate the first device-address that could contain
4381 * any block from the chunk that includes the array-address 's'.
4382 * This too will be the start of a chunk
4383 */
4384static sector_t first_dev_address(sector_t s, struct geom *geo)
4385{
4386	s >>= geo->chunk_shift;
4387	s *= geo->near_copies;
4388	sector_div(s, geo->raid_disks);
4389	s *= geo->far_copies;
4390	s <<= geo->chunk_shift;
4391	return s;
4392}
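
/*
 * Worked example (hypothetical geometry): with raid_disks == 4,
 * near_copies == 2, far_copies == 1 and 128-sector chunks
 * (chunk_shift == 7, chunk_mask == 127), array address s = 1000 lies in
 * array chunk 7.  last_dev_address(1000, geo) gives
 * ((1000 | 127) + 1) >> 7 = 8 chunks, * 2 = 16, rounded up over 4 devices
 * = 4 device chunks, i.e. 512 sectors; first_dev_address(1000, geo) gives
 * 1000 >> 7 = 7, * 2 = 14, / 4 = 3 device chunks, i.e. 384 sectors.
 * So every copy of that chunk lives between device sectors 384 and 511.
 */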
4393
4394static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4395				int *skipped)
4396{
4397	/* We simply copy at most one chunk (smallest of old and new)
4398	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4399	 * or we hit a bad block or something.
4400	 * This might mean we pause for normal IO in the middle of
4401	 * a chunk, but that is not a problem as mddev->reshape_position
4402	 * can record any location.
4403	 *
4404	 * If we will want to write to a location that isn't
4405	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4406	 * we need to flush all reshape requests and update the metadata.
4407	 *
4408	 * When reshaping forwards (e.g. to more devices), we interpret
4409	 * 'safe' as the earliest block which might not have been copied
4410	 * down yet.  We divide this by previous stripe size and multiply
4411	 * by previous stripe length to get lowest device offset that we
4412	 * cannot write to yet.
4413	 * We interpret 'sector_nr' as an address that we want to write to.
4414	 * From this we use last_dev_address() to find where we might
4415	 * write to, and first_dev_address() on the 'safe' position.
4416	 * If this 'next' write position is after the 'safe' position,
4417	 * we must update the metadata to increase the 'safe' position.
4418	 *
4419	 * When reshaping backwards, we round in the opposite direction
4420	 * and perform the reverse test:  next write position must not be
4421	 * less than current safe position.
4422	 *
4423	 * In all this the minimum difference in data offsets
4424	 * (conf->offset_diff - always positive) allows a bit of slack,
4425	 * so next can be after 'safe', but not by more than offset_diff
4426	 *
4427	 * We need to prepare all the bios here before we start any IO
4428	 * to ensure the size we choose is acceptable to all devices.
4429	 * That means one for each copy for write-out and an extra one for
4430	 * read-in.
4431	 * We store the read-in bio in ->master_bio and the others in
4432	 * ->devs[x].bio and ->devs[x].repl_bio.
4433	 */
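	/*
	 * Hypothetical forward-reshape example: if last_dev_address() says the
	 * new layout may write up to device sector 5120 for this chunk (next),
	 * first_dev_address() says the old-layout data we might still need
	 * starts at device sector 4096 (safe), and offset_diff is 2048, then
	 * next <= safe + offset_diff and no metadata update is required yet.
	 */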
4434	struct r10conf *conf = mddev->private;
4435	struct r10bio *r10_bio;
4436	sector_t next, safe, last;
4437	int max_sectors;
4438	int nr_sectors;
4439	int s;
4440	struct md_rdev *rdev;
4441	int need_flush = 0;
4442	struct bio *blist;
4443	struct bio *bio, *read_bio;
4444	int sectors_done = 0;
4445	struct page **pages;
4446
4447	if (sector_nr == 0) {
4448		/* If restarting in the middle, skip the initial sectors */
4449		if (mddev->reshape_backwards &&
4450		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4451			sector_nr = (raid10_size(mddev, 0, 0)
4452				     - conf->reshape_progress);
4453		} else if (!mddev->reshape_backwards &&
4454			   conf->reshape_progress > 0)
4455			sector_nr = conf->reshape_progress;
4456		if (sector_nr) {
4457			mddev->curr_resync_completed = sector_nr;
4458			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4459			*skipped = 1;
4460			return sector_nr;
4461		}
4462	}
4463
4464	/* We don't use sector_nr to track where we are up to
4465	 * as that doesn't work well for ->reshape_backwards.
4466	 * So just use ->reshape_progress.
4467	 */
4468	if (mddev->reshape_backwards) {
4469		/* 'next' is the earliest device address that we might
4470		 * write to for this chunk in the new layout
4471		 */
4472		next = first_dev_address(conf->reshape_progress - 1,
4473					 &conf->geo);
4474
4475		/* 'safe' is the last device address that we might read from
4476		 * in the old layout after a restart
4477		 */
4478		safe = last_dev_address(conf->reshape_safe - 1,
4479					&conf->prev);
4480
4481		if (next + conf->offset_diff < safe)
4482			need_flush = 1;
4483
4484		last = conf->reshape_progress - 1;
4485		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4486					       & conf->prev.chunk_mask);
4487		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4488			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4489	} else {
4490		/* 'next' is after the last device address that we
4491		 * might write to for this chunk in the new layout
4492		 */
4493		next = last_dev_address(conf->reshape_progress, &conf->geo);
4494
4495		/* 'safe' is the earliest device address that we might
4496		 * read from in the old layout after a restart
4497		 */
4498		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4499
4500		/* Need to update metadata if 'next' might be beyond 'safe'
4501		 * as that would possibly corrupt data
4502		 */
4503		if (next > safe + conf->offset_diff)
4504			need_flush = 1;
4505
4506		sector_nr = conf->reshape_progress;
4507		last  = sector_nr | (conf->geo.chunk_mask
4508				     & conf->prev.chunk_mask);
4509
4510		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4511			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4512	}
4513
4514	if (need_flush ||
4515	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4516		/* Need to update reshape_position in metadata */
4517		wait_barrier(conf);
4518		mddev->reshape_position = conf->reshape_progress;
4519		if (mddev->reshape_backwards)
4520			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4521				- conf->reshape_progress;
4522		else
4523			mddev->curr_resync_completed = conf->reshape_progress;
4524		conf->reshape_checkpoint = jiffies;
4525		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4526		md_wakeup_thread(mddev->thread);
4527		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4528			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4529		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4530			allow_barrier(conf);
4531			return sectors_done;
4532		}
4533		conf->reshape_safe = mddev->reshape_position;
4534		allow_barrier(conf);
4535	}
4536
4537	raise_barrier(conf, 0);
4538read_more:
4539	/* Now schedule reads for blocks from sector_nr to last */
4540	r10_bio = raid10_alloc_init_r10buf(conf);
4541	r10_bio->state = 0;
4542	raise_barrier(conf, 1);
4543	atomic_set(&r10_bio->remaining, 0);
4544	r10_bio->mddev = mddev;
4545	r10_bio->sector = sector_nr;
4546	set_bit(R10BIO_IsReshape, &r10_bio->state);
4547	r10_bio->sectors = last - sector_nr + 1;
4548	rdev = read_balance(conf, r10_bio, &max_sectors);
4549	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4550
4551	if (!rdev) {
4552		/* Cannot read from here, so need to record bad blocks
4553		 * on all the target devices.
4554		 */
4555		// FIXME
4556		mempool_free(r10_bio, &conf->r10buf_pool);
4557		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4558		return sectors_done;
4559	}
4560
4561	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4562
4563	bio_set_dev(read_bio, rdev->bdev);
4564	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4565			       + rdev->data_offset);
4566	read_bio->bi_private = r10_bio;
4567	read_bio->bi_end_io = end_reshape_read;
4568	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
4569	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4570	read_bio->bi_status = 0;
4571	read_bio->bi_vcnt = 0;
4572	read_bio->bi_iter.bi_size = 0;
4573	r10_bio->master_bio = read_bio;
4574	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4575
4576	/*
4577	 * Broadcast a RESYNC message to the other nodes so that none of them
4578	 * writes to this region, avoiding conflicts.
4579	 */
4580	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4581		struct mdp_superblock_1 *sb = NULL;
4582		int sb_reshape_pos = 0;
4583
4584		conf->cluster_sync_low = sector_nr;
4585		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4586		sb = page_address(rdev->sb_page);
4587		if (sb) {
4588			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4589			/*
4590			 * Set cluster_sync_low again if the next address for the array
4591			 * reshape is less than cluster_sync_low, since we can't update
4592			 * cluster_sync_low until the reshape has finished.
4593			 */
4594			if (sb_reshape_pos < conf->cluster_sync_low)
4595				conf->cluster_sync_low = sb_reshape_pos;
4596		}
4597
4598		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4599							  conf->cluster_sync_high);
4600	}
4601
4602	/* Now find the locations in the new layout */
4603	__raid10_find_phys(&conf->geo, r10_bio);
4604
4605	blist = read_bio;
4606	read_bio->bi_next = NULL;
4607
4608	rcu_read_lock();
4609	for (s = 0; s < conf->copies*2; s++) {
4610		struct bio *b;
4611		int d = r10_bio->devs[s/2].devnum;
4612		struct md_rdev *rdev2;
4613		if (s&1) {
4614			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4615			b = r10_bio->devs[s/2].repl_bio;
4616		} else {
4617			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4618			b = r10_bio->devs[s/2].bio;
4619		}
4620		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4621			continue;
4622
4623		bio_set_dev(b, rdev2->bdev);
4624		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4625			rdev2->new_data_offset;
4626		b->bi_end_io = end_reshape_write;
4627		bio_set_op_attrs(b, REQ_OP_WRITE, 0);
4628		b->bi_next = blist;
4629		blist = b;
4630	}
4631
4632	/* Now add as many pages as possible to all of these bios. */
4633
4634	nr_sectors = 0;
4635	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4636	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4637		struct page *page = pages[s / (PAGE_SIZE >> 9)];
4638		int len = (max_sectors - s) << 9;
4639		if (len > PAGE_SIZE)
4640			len = PAGE_SIZE;
4641		for (bio = blist; bio ; bio = bio->bi_next) {
4642			/*
4643			 * won't fail because the vec table is big enough
4644			 * to hold all these pages
4645			 */
4646			bio_add_page(bio, page, len, 0);
4647		}
4648		sector_nr += len >> 9;
4649		nr_sectors += len >> 9;
4650	}
4651	rcu_read_unlock();
4652	r10_bio->sectors = nr_sectors;
4653
4654	/* Now submit the read */
4655	md_sync_acct_bio(read_bio, r10_bio->sectors);
4656	atomic_inc(&r10_bio->remaining);
4657	read_bio->bi_next = NULL;
4658	generic_make_request(read_bio);
4659	sectors_done += nr_sectors;
4660	if (sector_nr <= last)
4661		goto read_more;
4662
4663	lower_barrier(conf);
4664
4665	/* Now that we have done the whole section we can
4666	 * update reshape_progress
4667	 */
4668	if (mddev->reshape_backwards)
4669		conf->reshape_progress -= sectors_done;
4670	else
4671		conf->reshape_progress += sectors_done;
4672
4673	return sectors_done;
4674}
4675
4676static void end_reshape_request(struct r10bio *r10_bio);
4677static int handle_reshape_read_error(struct mddev *mddev,
4678				     struct r10bio *r10_bio);
4679static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4680{
4681	/* Reshape read completed.  Hopefully we have a block
4682	 * to write out.
4683	 * If we got a read error then we do sync 1-page reads from
4684	 * elsewhere until we find the data - or give up.
4685	 */
4686	struct r10conf *conf = mddev->private;
4687	int s;
4688
4689	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4690		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4691			/* Reshape has been aborted */
4692			md_done_sync(mddev, r10_bio->sectors, 0);
4693			return;
4694		}
4695
4696	/* We definitely have the data in the pages, schedule the
4697	 * writes.
4698	 */
4699	atomic_set(&r10_bio->remaining, 1);
4700	for (s = 0; s < conf->copies*2; s++) {
4701		struct bio *b;
4702		int d = r10_bio->devs[s/2].devnum;
4703		struct md_rdev *rdev;
4704		rcu_read_lock();
4705		if (s&1) {
4706			rdev = rcu_dereference(conf->mirrors[d].replacement);
4707			b = r10_bio->devs[s/2].repl_bio;
4708		} else {
4709			rdev = rcu_dereference(conf->mirrors[d].rdev);
4710			b = r10_bio->devs[s/2].bio;
4711		}
4712		if (!rdev || test_bit(Faulty, &rdev->flags)) {
4713			rcu_read_unlock();
4714			continue;
4715		}
4716		atomic_inc(&rdev->nr_pending);
4717		rcu_read_unlock();
4718		md_sync_acct_bio(b, r10_bio->sectors);
4719		atomic_inc(&r10_bio->remaining);
4720		b->bi_next = NULL;
4721		generic_make_request(b);
4722	}
4723	end_reshape_request(r10_bio);
4724}
4725
4726static void end_reshape(struct r10conf *conf)
4727{
4728	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4729		return;
4730
4731	spin_lock_irq(&conf->device_lock);
4732	conf->prev = conf->geo;
4733	md_finish_reshape(conf->mddev);
4734	smp_wmb();
4735	conf->reshape_progress = MaxSector;
4736	conf->reshape_safe = MaxSector;
4737	spin_unlock_irq(&conf->device_lock);
4738
4739	/* read-ahead size must cover two whole stripes, which is
4740	 * 2 * (data disks) * chunksize, where 'data disks' is raid_disks / near_copies
4741	 */
4742	if (conf->mddev->queue) {
4743		int stripe = conf->geo.raid_disks *
4744			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4745		stripe /= conf->geo.near_copies;
4746		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
4747			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
4748	}
4749	conf->fullsync = 0;
4750}
4751
4752static void raid10_update_reshape_pos(struct mddev *mddev)
4753{
4754	struct r10conf *conf = mddev->private;
4755	sector_t lo, hi;
4756
4757	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
4758	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4759	    || mddev->reshape_position == MaxSector)
4760		conf->reshape_progress = mddev->reshape_position;
4761	else
4762		WARN_ON_ONCE(1);
4763}
4764
4765static int handle_reshape_read_error(struct mddev *mddev,
4766				     struct r10bio *r10_bio)
4767{
4768	/* Use sync reads to get the blocks from somewhere else */
4769	int sectors = r10_bio->sectors;
4770	struct r10conf *conf = mddev->private;
4771	struct r10bio *r10b;
4772	int slot = 0;
4773	int idx = 0;
4774	struct page **pages;
4775
4776	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
4777	if (!r10b) {
4778		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4779		return -ENOMEM;
4780	}
4781
4782	/* reshape IOs share pages from .devs[0].bio */
4783	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4784
4785	r10b->sector = r10_bio->sector;
4786	__raid10_find_phys(&conf->prev, r10b);
4787
4788	while (sectors) {
4789		int s = sectors;
4790		int success = 0;
4791		int first_slot = slot;
4792
4793		if (s > (PAGE_SIZE >> 9))
4794			s = PAGE_SIZE >> 9;
4795
4796		rcu_read_lock();
4797		while (!success) {
4798			int d = r10b->devs[slot].devnum;
4799			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4800			sector_t addr;
4801			if (rdev == NULL ||
4802			    test_bit(Faulty, &rdev->flags) ||
4803			    !test_bit(In_sync, &rdev->flags))
4804				goto failed;
4805
4806			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4807			atomic_inc(&rdev->nr_pending);
4808			rcu_read_unlock();
4809			success = sync_page_io(rdev,
4810					       addr,
4811					       s << 9,
4812					       pages[idx],
4813					       REQ_OP_READ, 0, false);
4814			rdev_dec_pending(rdev, mddev);
4815			rcu_read_lock();
4816			if (success)
4817				break;
4818		failed:
4819			slot++;
4820			if (slot >= conf->copies)
4821				slot = 0;
4822			if (slot == first_slot)
4823				break;
4824		}
4825		rcu_read_unlock();
4826		if (!success) {
4827			/* couldn't read this block, must give up */
4828			set_bit(MD_RECOVERY_INTR,
4829				&mddev->recovery);
4830			kfree(r10b);
4831			return -EIO;
4832		}
4833		sectors -= s;
4834		idx++;
4835	}
4836	kfree(r10b);
4837	return 0;
4838}
4839
4840static void end_reshape_write(struct bio *bio)
4841{
4842	struct r10bio *r10_bio = get_resync_r10bio(bio);
4843	struct mddev *mddev = r10_bio->mddev;
4844	struct r10conf *conf = mddev->private;
4845	int d;
4846	int slot;
4847	int repl;
4848	struct md_rdev *rdev = NULL;
4849
4850	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4851	if (repl)
4852		rdev = conf->mirrors[d].replacement;
4853	if (!rdev) {
4854		smp_mb();
4855		rdev = conf->mirrors[d].rdev;
4856	}
4857
4858	if (bio->bi_status) {
4859		/* FIXME should record badblock */
4860		md_error(mddev, rdev);
4861	}
4862
4863	rdev_dec_pending(rdev, mddev);
4864	end_reshape_request(r10_bio);
4865}
4866
4867static void end_reshape_request(struct r10bio *r10_bio)
4868{
4869	if (!atomic_dec_and_test(&r10_bio->remaining))
4870		return;
4871	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4872	bio_put(r10_bio->master_bio);
4873	put_buf(r10_bio);
4874}
4875
4876static void raid10_finish_reshape(struct mddev *mddev)
4877{
4878	struct r10conf *conf = mddev->private;
4879
4880	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4881		return;
4882
4883	if (mddev->delta_disks > 0) {
4884		if (mddev->recovery_cp > mddev->resync_max_sectors) {
4885			mddev->recovery_cp = mddev->resync_max_sectors;
4886			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4887		}
4888		mddev->resync_max_sectors = mddev->array_sectors;
4889	} else {
4890		int d;
4891		rcu_read_lock();
4892		for (d = conf->geo.raid_disks ;
4893		     d < conf->geo.raid_disks - mddev->delta_disks;
4894		     d++) {
4895			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4896			if (rdev)
4897				clear_bit(In_sync, &rdev->flags);
4898			rdev = rcu_dereference(conf->mirrors[d].replacement);
4899			if (rdev)
4900				clear_bit(In_sync, &rdev->flags);
4901		}
4902		rcu_read_unlock();
4903	}
4904	mddev->layout = mddev->new_layout;
4905	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4906	mddev->reshape_position = MaxSector;
4907	mddev->delta_disks = 0;
4908	mddev->reshape_backwards = 0;
4909}
4910
4911static struct md_personality raid10_personality =
4912{
4913	.name		= "raid10",
4914	.level		= 10,
4915	.owner		= THIS_MODULE,
4916	.make_request	= raid10_make_request,
4917	.run		= raid10_run,
4918	.free		= raid10_free,
4919	.status		= raid10_status,
4920	.error_handler	= raid10_error,
4921	.hot_add_disk	= raid10_add_disk,
4922	.hot_remove_disk= raid10_remove_disk,
4923	.spare_active	= raid10_spare_active,
4924	.sync_request	= raid10_sync_request,
4925	.quiesce	= raid10_quiesce,
4926	.size		= raid10_size,
4927	.resize		= raid10_resize,
4928	.takeover	= raid10_takeover,
4929	.check_reshape	= raid10_check_reshape,
4930	.start_reshape	= raid10_start_reshape,
4931	.finish_reshape	= raid10_finish_reshape,
4932	.update_reshape_pos = raid10_update_reshape_pos,
4933	.congested	= raid10_congested,
4934};
4935
4936static int __init raid_init(void)
4937{
4938	return register_md_personality(&raid10_personality);
4939}
4940
4941static void raid_exit(void)
4942{
4943	unregister_md_personality(&raid10_personality);
4944}
4945
4946module_init(raid_init);
4947module_exit(raid_exit);
4948MODULE_LICENSE("GPL");
4949MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4950MODULE_ALIAS("md-personality-9"); /* RAID10 */
4951MODULE_ALIAS("md-raid10");
4952MODULE_ALIAS("md-level-10");
4953
4954module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);